| content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
|---|---|---|---|---|---|---|---|---|
| stringlengths 7-928k | float64 3.5-33.8k | int64 6-139k | float64 0.08-0.96 | sequence | stringlengths 7-104 | stringlengths 4-230 | int64 7-928k | stringclasses 1 value |
from typing import Tuple

import yaml


class World:
    world = None
    """
    The first index is the Y coordinate, and the second index is the X coordinate.
    :type world: List[List[int]]
    """

    width = None
    """
    :type width: int
    """

    height = None
    """
    :type height: int
    """

    def __init__(self, path):
        self.world = []
        self.load(path)

    def get_value(self, coordinates: Tuple[int, int]) -> int:
        """Return the value at the given (y, x) coordinates."""
        return self.world[coordinates[0]][coordinates[1]]

    def load(self, path):
        self.world = []

        with open(path, 'r') as f:
            # safe_load avoids constructing arbitrary Python objects from the YAML file
            yml = yaml.safe_load(f)

        self.height = yml['info']['height']
        self.width = yml['info']['width']

        # Create an empty world
        for y in range(0, self.height):
            self.world.append([])
            for x in range(0, self.width):
                self.world[y].append(0)

        row_index = -1
        for row in yml['data']:
            row_index += 1
            col_index = -2

            # If our row is an even row, it is a horizontal row.
            # All the streets have an offset of +1 on a horizontal row.
            if row_index % 2 == 0:
                col_index += 1

            for col in row:
                col_index += 2
                self.world[row_index][col_index] = col
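
# Usage sketch (illustrative): the file name and the YAML layout -- an 'info'
# mapping with 'width'/'height' and a 'data' list of rows -- are assumptions
# inferred from load() above.
#
#     world = World('world.yaml')
#     value = world.get_value((0, 0))  # coordinates are (y, x), see get_value()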
| 21.95082 | 81 | 0.511576 | ["MIT"] | Sidesplitter/Informatica-Olympiade-2016-2017 | src/B2/world.py | 1,339 | Python |
import os
import sys
sys.path.append('../')
import speedify
from speedify import State, Priority, SpeedifyError, SpeedifyAPIError
import speedifysettings
import speedifyutil
import logging
import unittest
import time
logging.basicConfig(handlers=[logging.FileHandler('test.log'),logging.StreamHandler(sys.stdout)],format='%(asctime)s\t%(levelname)s\t%(module)s\t%(message)s', level=logging.INFO)
# Test the speedify library
# assumes you're logged in
class TestSpeedify(unittest.TestCase):
# Note: doesn't test login/logout, because then we would have to deal with credentials being stored.
def setUp(self):
speedify.encryption(True)
speedify.transport("auto")
speedify.jumbo(True)
speedify.crashreports(True)
speedify.packetaggregation(True)
speedify.routedefault(True)
speedify.connectmethod("closest")
speedify.disconnect()
def test_connect(self):
serverinfo = speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
def test_connect_country(self):
serverinfo = speedify.connect_country("sg")
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
self.assertEqual(serverinfo["country"], "sg")
new_serverinfo = speedify.show_currentserver()
self.assertEqual(new_serverinfo["country"], "sg")
def test_transport(self):
mysettings = speedify.transport("https")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "https")
# to make sure runtime changed, could check stats and look for connectionstats : connections[] : protocol
mysettings = speedify.transport("tcp")
self.assertEqual(mysettings["transportMode"], "tcp")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "tcp")
def test_bad_country(self):
#logging.disable(logging.ERROR);
logging.info("Testing error handling, ignore next few errors")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.debug("connecting to bad country")
with self.assertRaises(SpeedifyAPIError):
speedify.connect_country("pp")
logging.debug("after connecting to bad country")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.info("Done testing error handling")
#logging.disable(logging.NOTSET)
def test_disconnect(self):
speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
speedify.disconnect()
state = speedify.show_state()
self.assertEqual(state,speedify.State.LOGGED_IN)
def test_connectmethod(self):
speedify.connect_closest()
speedify.connectmethod("private", "jp")
#pull settings from speedify to be sure they really set
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"private")
# country is ignored for the private connect method
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
speedify.connectmethod("p2p")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"p2p")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
retval = speedify.connectmethod("country", country="sg")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"country")
self.assertEqual(cm_settings["country"], "sg")
# the settings were returned by the actual connectmethod call,
# and should be exactly the same
self.assertEqual(cm_settings["connectMethod"],retval["connectMethod"])
self.assertEqual(cm_settings["country"], retval["country"])
self.assertEqual(cm_settings["num"],retval["num"])
self.assertEqual(cm_settings["city"], retval["city"])
speedify.connectmethod("closest")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"closest")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
def test_version(self):
version = speedify.show_version()
self.assertIn("maj",version)
# expect at least Speedify 8.0
self.assertGreater(version["maj"], 7)
self.assertIn("min",version)
self.assertIn("bug",version)
self.assertIn("build",version)
def test_settings(self):
# test some basic settings
speedify.packetaggregation(False)
speedify.jumbo(False)
my_settings = speedify.show_settings()
self.assertFalse(my_settings["packetAggregation"])
self.assertFalse(my_settings["jumboPackets"])
speedify.packetaggregation(True)
speedify.jumbo(True)
my_settings = speedify.show_settings()
self.assertTrue(my_settings["packetAggregation"])
self.assertTrue(my_settings["jumboPackets"])
def test_badarguments(self):
# reaching into private methods to force some errors to be sure they're handled
try:
goterror = False
#invalid command
speedify._run_speedify_cmd(["invalidcommand"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Unknown Parameter" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
#valid command, missing required argument
goterror = False
speedify._run_speedify_cmd(["overflow"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Missing parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
goterror = False
#valid command, invalid argument
speedify._run_speedify_cmd(["overflow", "bob"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Invalid parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
def test_privacy(self):
speedify.crashreports(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["crashReports"])
speedify.crashreports(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["crashReports"])
if os.name == 'nt':
#the windows only calls
speedify.killswitch(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["killswitch"])
speedify.killswitch(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["killswitch"])
else:
# shouldn't be there if we're not windows
with self.assertRaises(SpeedifyError):
logging.disable(logging.ERROR);
speedify.killswitch(True)
logging.disable(logging.NOTSET)
def test_routedefault(self):
speedify.connect()
if not speedifyutil.using_speedify():
time.sleep(3)
self.assertTrue(speedifyutil.using_speedify())
speedify.routedefault(False)
self.assertFalse(speedify.show_settings()[ "enableDefaultRoute"])
time.sleep(1)
if speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(1)
self.assertFalse(speedifyutil.using_speedify())
speedify.routedefault(True)
# for whatever reason getting the route back takes longer than giving it up
self.assertTrue(speedify.show_settings()[ "enableDefaultRoute"])
time.sleep(2)
if not speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(2)
self.assertTrue(speedifyutil.using_speedify())
def test_serverlist(self):
# also tests connecting to one server
server_list = speedify.show_servers()
self.assertIn("public", server_list)
public_list = server_list["public"]
server_info = public_list[0]
self.assertIn("tag", server_info)
self.assertIn("country", server_info)
self.assertIn("city", server_info)
self.assertIn("num", server_info)
self.assertFalse(server_info["isPrivate"])
connectstring = server_info["tag"]
new_server = speedify.connect(connectstring)
self.assertEqual(new_server["tag"], connectstring)
self.assertEqual(server_info["country"], new_server["country"])
self.assertEqual(server_info["city"], new_server["city"])
self.assertEqual(server_info["num"], new_server["num"])
def test_stats(self):
speedify.connect_closest()
report_list = speedify.stats(2)
self.assertTrue(report_list) #Check for non empty list
reports = [item[0] for item in report_list]
self.assertIn("adapters", reports) #Check for at least one adapters report
def test_adapters(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
adapterIDs = [adapter['adapterID'] for adapter in adapters]
self._set_and_test_adapter_list(adapterIDs, Priority.BACKUP, 10000000)
self._set_and_test_adapter_list(adapterIDs, Priority.ALWAYS, 0)
def test_encryption(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
# just grab first adapter for testing
adapterID = [adapter['adapterID'] for adapter in adapters][0]
speedify.adapter_encryption(adapterID, False)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], False)
# main thing should still be encrypted just not our one adapter
self.assertTrue(encrypted)
speedify.encryption(False)
#this should both turn off encryption and wipe the custom settings
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertFalse(encrypted)
# now let's test with only the adapter being encrypted
speedify.adapter_encryption(adapterID, True)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], True)
speedify.encryption(True)
#this should both turn on encryption and wipe the custom settings
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertTrue(encrypted)
def _set_and_test_adapter_list(self, adapterIDs, priority, limit):
for adapterID in adapterIDs:
speedify.adapter_priority(adapterID, priority)
speedify.adapter_ratelimit(adapterID, limit)
speedify.adapter_datalimit_daily(adapterID, limit)
speedify.adapter_datalimit_monthly(adapterID, limit,0)
updated_adapters = speedify.show_adapters()
priorities = [adapter['priority'] for adapter in updated_adapters]
rate_limits = [adapter['rateLimit'] for adapter in updated_adapters]
daily_limits = [adapter['dataUsage']['usageDailyLimit'] for adapter in updated_adapters]
monthly_limits = [adapter['dataUsage']['usageMonthlyLimit'] for adapter in updated_adapters]
for set_priority, rate_limit, daily_limit, monthly_limit in zip(priorities, rate_limits, daily_limits, monthly_limits):
# Disconnected adapters Speedify is aware of will have an unchangeable priority of NEVER
if (set_priority != Priority.NEVER.value):
self.assertEqual(set_priority, priority.value)
self.assertEqual(rate_limit, limit)
self.assertEqual(daily_limit, limit)
self.assertEqual(monthly_limit, limit)
if __name__ == '__main__':
speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
unittest.main()
speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
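
# Illustrative way to run this suite directly, assuming the Speedify CLI is
# installed and an account is already logged in (as noted above):
#
#     python test_speedify.py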
| 44.280645 | 179 | 0.678298 | ["Apache-2.0"] | Sarvesh-Kesharwani/speedify-py | tests/test_speedify.py | 13,727 | Python |
"""
Adds the source files to the path for files in any subdirectory
TODO: check that we have not already added to our path.
"""
import os
import sys
fileLocation = os.path.dirname(os.path.abspath(__file__))
sourceLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/source/'))
nkLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/nkData/'))
netlistLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/netlist/'))
testLocation = os.path.abspath(os.path.join(fileLocation, 'RCWA/test/'))
sys.path.insert(0, sourceLocation)
sys.path.insert(0, nkLocation)
sys.path.insert(0, netlistLocation)
sys.path.insert(0, testLocation)
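
# One way to address the TODO above, as an illustrative sketch: only prepend
# locations that are not already on sys.path.
#
#     for location in (sourceLocation, nkLocation, netlistLocation, testLocation):
#         if location not in sys.path:
#             sys.path.insert(0, location)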
| 35.833333 | 78 | 0.765891 | ["MIT"] | FelixSCT/rcwa | context.py | 645 | Python |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv4",
"unnumbered",
"interface-ref",
"config",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
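
# Illustrative usage sketch, assuming pyangbind and its dependencies are
# installed; the interface name "eth0" is a made-up example value.
#
#     cfg = config()
#     cfg.interface = "eth0"
#     cfg.subinterface = "0"
#     print(cfg._path())   # ['interfaces', 'interface', ..., 'interface-ref', 'config']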
| 38.756 | 338 | 0.618846 | ["Apache-2.0"] | ABitMoreDepth/napalm-yang | napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py | 9,689 | Python |
#
# Copyright 2020 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class BaseStatus(object):
@classmethod
def status_list(cls):
return [cls.__dict__[k] for k in cls.__dict__.keys() if not callable(getattr(cls, k)) and not k.startswith("__")]
@classmethod
def contains(cls, status):
return status in cls.status_list()
class BaseStateTransitionRule(object):
RULES = {}
@classmethod
def if_pass(cls, src_status, dest_status):
if src_status not in cls.RULES:
return False
if dest_status not in cls.RULES[src_status]:
return False
else:
return True
class StatusSet(BaseStatus):
WAITING = 'waiting'
READY = 'ready'
RUNNING = "running"
CANCELED = "canceled"
TIMEOUT = "timeout"
FAILED = "failed"
SUCCESS = "success"
SKIPPED = "skipped"
@classmethod
def get_level(cls, status):
return dict(zip(cls.status_list(), range(len(cls.status_list())))).get(status, None)
class JobStatus(BaseStatus):
WAITING = StatusSet.WAITING
READY = StatusSet.READY
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
class StateTransitionRule(BaseStateTransitionRule):
RULES = {
StatusSet.WAITING: [StatusSet.READY, StatusSet.RUNNING, StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.READY: [StatusSet.WAITING, StatusSet.RUNNING, StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED],
StatusSet.RUNNING: [StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.CANCELED: [StatusSet.WAITING],
StatusSet.TIMEOUT: [StatusSet.FAILED, StatusSet.SUCCESS, StatusSet.WAITING],
StatusSet.FAILED: [StatusSet.WAITING],
StatusSet.SUCCESS: [StatusSet.WAITING],
}
class PlayStatus(BaseStatus):
WAITING = StatusSet.WAITING
READY = StatusSet.READY
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
class StateTransitionRule(BaseStateTransitionRule):
RULES = {
StatusSet.WAITING: [StatusSet.READY, StatusSet.RUNNING, StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.READY: [StatusSet.RUNNING, StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED],
StatusSet.RUNNING: [StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.CANCELED: [StatusSet.WAITING],
StatusSet.TIMEOUT: [StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.FAILED: [StatusSet.WAITING],
StatusSet.SUCCESS: [],
}
class TaskStatus(BaseStatus):
WAITING = StatusSet.WAITING
RUNNING = StatusSet.RUNNING
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
SKIPPED = StatusSet.SKIPPED
class StateTransitionRule(BaseStateTransitionRule):
RULES = {
StatusSet.WAITING: [StatusSet.RUNNING, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS, StatusSet.SKIPPED],
StatusSet.RUNNING: [StatusSet.CANCELED, StatusSet.TIMEOUT, StatusSet.FAILED, StatusSet.SUCCESS, StatusSet.SKIPPED],
StatusSet.CANCELED: [StatusSet.WAITING],
StatusSet.TIMEOUT: [StatusSet.FAILED, StatusSet.SUCCESS],
StatusSet.FAILED: [],
StatusSet.SUCCESS: [],
StatusSet.SKIPPED: []
}
class EndStatus(BaseStatus):
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
COMPLETE = StatusSet.SUCCESS
SKIPPED = StatusSet.SKIPPED
class StandbyStatus(BaseStatus):
WAITING = StatusSet.WAITING
READY = StatusSet.READY
class OngoingStatus(BaseStatus):
WAITING = StatusSet.WAITING
READY = StatusSet.READY
RUNNING = StatusSet.RUNNING
class InterruptStatus(BaseStatus):
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
class NoneKillStatus(BaseStatus):
WAITING = StatusSet.WAITING
READY = StatusSet.READY
CANCELED = StatusSet.CANCELED
TIMEOUT = StatusSet.TIMEOUT
FAILED = StatusSet.FAILED
SUCCESS = StatusSet.SUCCESS
SKIPPED = StatusSet.SKIPPED
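
# Illustrative usage sketch of the classes above:
#
#     JobStatus.contains("running")                                                  # True
#     JobStatus.StateTransitionRule.if_pass(JobStatus.WAITING, JobStatus.RUNNING)    # True
#     JobStatus.StateTransitionRule.if_pass(JobStatus.SUCCESS, JobStatus.RUNNING)    # False
#     StatusSet.get_level(StatusSet.RUNNING)   # 2 (definition order of the status values)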
"Apache-2.0"
] | cold-code/FATE-Cloud | fate-manager/hyperion/entity/types.py | 5,047 | Python |
# testing/assertions.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import contextlib
import re
import sys
import warnings
from . import assertsql
from . import config
from . import engines
from . import mock
from .exclusions import db_spec
from .util import fail
from .. import exc as sa_exc
from .. import schema
from .. import sql
from .. import types as sqltypes
from .. import util
from ..engine import default
from ..engine import url
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..util import compat
from ..util import decorator
def expect_warnings(*messages, **kw):
"""Context manager which expects one or more warnings.
With no arguments, squelches all SAWarning and RemovedIn20Warning emitted via
sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
pass string expressions that will match selected warnings via regex;
all non-matching warnings are sent through.
The expect version **asserts** that the warnings were in fact seen.
Note that the test suite sets SAWarning warnings to raise exceptions.
""" # noqa
return _expect_warnings(
(sa_exc.RemovedIn20Warning, sa_exc.SAWarning), messages, **kw
)
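
# Illustrative usage only; the warning pattern below is a hypothetical regex:
#
#     with expect_warnings("Dialect .* will not make use of .*"):
#         do_something_that_emits_sawarning()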
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
"""Context manager which expects one or more warnings on specific
dialects.
The expect version **asserts** that the warnings were in fact seen.
"""
spec = db_spec(db)
if isinstance(db, util.string_types) and not spec(config._current):
yield
else:
with expect_warnings(*messages, **kw):
yield
def emits_warning(*messages):
"""Decorator form of expect_warnings().
Note that emits_warning does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_warnings(assert_=False, *messages):
return fn(*args, **kw)
return decorate
def expect_deprecated(*messages, **kw):
return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
def expect_deprecated_20(*messages, **kw):
return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw)
def emits_warning_on(db, *messages):
"""Mark a test as emitting a warning on a specific dialect.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
Note that emits_warning_on does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_warnings_on(db, assert_=False, *messages):
return fn(*args, **kw)
return decorate
def uses_deprecated(*messages):
"""Mark a test as immune from fatal deprecation warnings.
With no arguments, squelches all SADeprecationWarning failures.
Or pass one or more strings; these will be matched to the root
of the warning description by warnings.filterwarnings().
As a special case, you may pass a function name prefixed with //
and it will be re-written as needed to match the standard warning
verbiage emitted by the sqlalchemy.util.deprecated decorator.
Note that uses_deprecated does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_deprecated(*messages, assert_=False):
return fn(*args, **kw)
return decorate
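
# Illustrative usage only; the message regex and test name are hypothetical:
#
#     @uses_deprecated(r".*is deprecated and will be removed")
#     def test_legacy_behavior(self):
#         ...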
@contextlib.contextmanager
def _expect_warnings(
exc_cls, messages, regex=True, assert_=True, py2konly=False
):
if regex:
filters = [re.compile(msg, re.I | re.S) for msg in messages]
else:
filters = messages
seen = set(filters)
real_warn = warnings.warn
def our_warn(msg, *arg, **kw):
if isinstance(msg, exc_cls):
exception = type(msg)
msg = str(msg)
elif arg:
exception = arg[0]
else:
exception = None
if not exception or not issubclass(exception, exc_cls):
return real_warn(msg, *arg, **kw)
if not filters:
return
for filter_ in filters:
if (regex and filter_.match(msg)) or (
not regex and filter_ == msg
):
seen.discard(filter_)
break
else:
real_warn(msg, *arg, **kw)
with mock.patch("warnings.warn", our_warn), mock.patch(
"sqlalchemy.util.SQLALCHEMY_WARN_20", True
), mock.patch(
"sqlalchemy.util.deprecations.SQLALCHEMY_WARN_20", True
), mock.patch(
"sqlalchemy.engine.row.LegacyRow._default_key_style", 2
):
yield
if assert_ and (not py2konly or not compat.py3k):
assert not seen, "Warnings were not seen: %s" % ", ".join(
"%r" % (s.pattern if regex else s) for s in seen
)
def global_cleanup_assertions():
"""Check things that have to be finalized at the end of a test suite.
Hardcoded at the moment, a modular system can be built here
to support things like PG prepared transactions, tables all
dropped, etc.
"""
_assert_no_stray_pool_connections()
def _assert_no_stray_pool_connections():
engines.testing_reaper.assert_all_closed()
def eq_regex(a, b, msg=None):
assert re.match(b, a), msg or "%r !~ %r" % (a, b)
def eq_(a, b, msg=None):
"""Assert a == b, with repr messaging on failure."""
assert a == b, msg or "%r != %r" % (a, b)
def ne_(a, b, msg=None):
"""Assert a != b, with repr messaging on failure."""
assert a != b, msg or "%r == %r" % (a, b)
def le_(a, b, msg=None):
"""Assert a <= b, with repr messaging on failure."""
assert a <= b, msg or "%r != %r" % (a, b)
def is_instance_of(a, b, msg=None):
assert isinstance(a, b), msg or "%r is not an instance of %r" % (a, b)
def is_none(a, msg=None):
is_(a, None, msg=msg)
def is_not_none(a, msg=None):
is_not(a, None, msg=msg)
def is_true(a, msg=None):
is_(bool(a), True, msg=msg)
def is_false(a, msg=None):
is_(bool(a), False, msg=msg)
def is_(a, b, msg=None):
"""Assert a is b, with repr messaging on failure."""
assert a is b, msg or "%r is not %r" % (a, b)
def is_not(a, b, msg=None):
"""Assert a is not b, with repr messaging on failure."""
assert a is not b, msg or "%r is %r" % (a, b)
# deprecated. See #5429
is_not_ = is_not
def in_(a, b, msg=None):
"""Assert a in b, with repr messaging on failure."""
assert a in b, msg or "%r not in %r" % (a, b)
def not_in(a, b, msg=None):
"""Assert a in not b, with repr messaging on failure."""
assert a not in b, msg or "%r is in %r" % (a, b)
# deprecated. See #5429
not_in_ = not_in
def startswith_(a, fragment, msg=None):
"""Assert a.startswith(fragment), with repr messaging on failure."""
assert a.startswith(fragment), msg or "%r does not start with %r" % (
a,
fragment,
)
def eq_ignore_whitespace(a, b, msg=None):
a = re.sub(r"^\s+?|\n", "", a)
a = re.sub(r" {2,}", " ", a)
b = re.sub(r"^\s+?|\n", "", b)
b = re.sub(r" {2,}", " ", b)
assert a == b, msg or "%r != %r" % (a, b)
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do; we want
these exceptions in a cause chain.
"""
if not util.py3k:
return
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
def assert_raises(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_context_ok(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw)
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
return _assert_raises(
except_cls, callable_, args, kwargs, msg=msg, check_context=True
)
def assert_raises_message_context_ok(
except_cls, msg, callable_, *args, **kwargs
):
return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def _assert_raises(
except_cls, callable_, args, kwargs, msg=None, check_context=False
):
with _expect_raises(except_cls, msg, check_context) as ec:
callable_(*args, **kwargs)
return ec.error
class _ErrorContainer(object):
error = None
@contextlib.contextmanager
def _expect_raises(except_cls, msg=None, check_context=False):
ec = _ErrorContainer()
if check_context:
are_we_already_in_a_traceback = sys.exc_info()[0]
try:
yield ec
success = False
except except_cls as err:
ec.error = err
success = True
if msg is not None:
assert re.search(
msg, util.text_type(err), re.UNICODE
), "%r !~ %s" % (msg, err)
if check_context and not are_we_already_in_a_traceback:
_assert_proper_exception_context(err)
print(util.text_type(err).encode("utf-8"))
# assert outside the block so it works for AssertionError too !
assert success, "Callable did not raise an exception"
def expect_raises(except_cls, check_context=True):
return _expect_raises(except_cls, check_context=check_context)
def expect_raises_message(except_cls, msg, check_context=True):
return _expect_raises(except_cls, msg=msg, check_context=check_context)
class AssertsCompiledSQL(object):
def assert_compile(
self,
clause,
result,
params=None,
checkparams=None,
for_executemany=False,
check_literal_execute=None,
check_post_param=None,
dialect=None,
checkpositional=None,
check_prefetch=None,
use_default_dialect=False,
allow_dialect_select=False,
supports_default_values=True,
literal_binds=False,
render_postcompile=False,
schema_translate_map=None,
render_schema_translate=False,
default_schema_name=None,
):
if use_default_dialect:
dialect = default.DefaultDialect()
dialect.supports_default_values = supports_default_values
elif allow_dialect_select:
dialect = None
else:
if dialect is None:
dialect = getattr(self, "__dialect__", None)
if dialect is None:
dialect = config.db.dialect
elif dialect == "default":
dialect = default.DefaultDialect()
dialect.supports_default_values = supports_default_values
elif dialect == "default_enhanced":
dialect = default.StrCompileDialect()
elif isinstance(dialect, util.string_types):
dialect = url.URL.create(dialect).get_dialect()()
if default_schema_name:
dialect.default_schema_name = default_schema_name
kw = {}
compile_kwargs = {}
if schema_translate_map:
kw["schema_translate_map"] = schema_translate_map
if params is not None:
kw["column_keys"] = list(params)
if literal_binds:
compile_kwargs["literal_binds"] = True
if render_postcompile:
compile_kwargs["render_postcompile"] = True
if for_executemany:
kw["for_executemany"] = True
if render_schema_translate:
kw["render_schema_translate"] = True
from sqlalchemy import orm
if isinstance(clause, orm.dynamic.AppenderQuery):
clause = clause._statement
if isinstance(clause, orm.Query):
compile_state = clause._compile_state()
compile_state.statement._label_style = (
LABEL_STYLE_TABLENAME_PLUS_COL
)
clause = compile_state.statement
if compile_kwargs:
kw["compile_kwargs"] = compile_kwargs
class DontAccess(object):
def __getattribute__(self, key):
raise NotImplementedError(
"compiler accessed .statement; use "
"compiler.current_executable"
)
class CheckCompilerAccess(object):
def __init__(self, test_statement):
self.test_statement = test_statement
self._annotations = {}
self.supports_execution = getattr(
test_statement, "supports_execution", False
)
if self.supports_execution:
self._execution_options = test_statement._execution_options
if isinstance(
test_statement, (sql.Insert, sql.Update, sql.Delete)
):
self._returning = test_statement._returning
if isinstance(test_statement, (sql.Insert, sql.Update)):
self._inline = test_statement._inline
self._return_defaults = test_statement._return_defaults
def _default_dialect(self):
return self.test_statement._default_dialect()
def compile(self, dialect, **kw):
return self.test_statement.compile.__func__(
self, dialect=dialect, **kw
)
def _compiler(self, dialect, **kw):
return self.test_statement._compiler.__func__(
self, dialect, **kw
)
def _compiler_dispatch(self, compiler, **kwargs):
if hasattr(compiler, "statement"):
with mock.patch.object(
compiler, "statement", DontAccess()
):
return self.test_statement._compiler_dispatch(
compiler, **kwargs
)
else:
return self.test_statement._compiler_dispatch(
compiler, **kwargs
)
# no construct can assume it's the "top level" construct in all cases
# as anything can be nested. ensure constructs don't assume they
# are the "self.statement" element
c = CheckCompilerAccess(clause).compile(dialect=dialect, **kw)
param_str = repr(getattr(c, "params", {}))
if util.py3k:
param_str = param_str.encode("utf-8").decode("ascii", "ignore")
print(
("\nSQL String:\n" + util.text_type(c) + param_str).encode(
"utf-8"
)
)
else:
print(
"\nSQL String:\n"
+ util.text_type(c).encode("utf-8")
+ param_str
)
cc = re.sub(r"[\n\t]", "", util.text_type(c))
eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
if checkparams is not None:
eq_(c.construct_params(params), checkparams)
if checkpositional is not None:
p = c.construct_params(params)
eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
if check_prefetch is not None:
eq_(c.prefetch, check_prefetch)
if check_literal_execute is not None:
eq_(
{
c.bind_names[b]: b.effective_value
for b in c.literal_execute_params
},
check_literal_execute,
)
if check_post_param is not None:
eq_(
{
c.bind_names[b]: b.effective_value
for b in c.post_compile_params
},
check_post_param,
)
class ComparesTables(object):
def assert_tables_equal(self, table, reflected_table, strict_types=False):
assert len(table.c) == len(reflected_table.c)
for c, reflected_c in zip(table.c, reflected_table.c):
eq_(c.name, reflected_c.name)
assert reflected_c is reflected_table.c[c.name]
eq_(c.primary_key, reflected_c.primary_key)
eq_(c.nullable, reflected_c.nullable)
if strict_types:
msg = "Type '%s' doesn't correspond to type '%s'"
assert isinstance(reflected_c.type, type(c.type)), msg % (
reflected_c.type,
c.type,
)
else:
self.assert_types_base(reflected_c, c)
if isinstance(c.type, sqltypes.String):
eq_(c.type.length, reflected_c.type.length)
eq_(
{f.column.name for f in c.foreign_keys},
{f.column.name for f in reflected_c.foreign_keys},
)
if c.server_default:
assert isinstance(
reflected_c.server_default, schema.FetchedValue
)
assert len(table.primary_key) == len(reflected_table.primary_key)
for c in table.primary_key:
assert reflected_table.primary_key.columns[c.name] is not None
def assert_types_base(self, c1, c2):
assert c1.type._compare_type_affinity(
c2.type
), "On column %r, type '%s' doesn't correspond to type '%s'" % (
c1.name,
c1.type,
c2.type,
)
class AssertsExecutionResults(object):
def assert_result(self, result, class_, *objects):
result = list(result)
print(repr(result))
self.assert_list(result, class_, objects)
def assert_list(self, result, class_, list_):
self.assert_(
len(result) == len(list_),
"result list is not the same size as test list, "
+ "for class "
+ class_.__name__,
)
for i in range(0, len(list_)):
self.assert_row(class_, result[i], list_[i])
def assert_row(self, class_, rowobj, desc):
self.assert_(
rowobj.__class__ is class_, "item class is not " + repr(class_)
)
for key, value in desc.items():
if isinstance(value, tuple):
if isinstance(value[1], list):
self.assert_list(getattr(rowobj, key), value[0], value[1])
else:
self.assert_row(value[0], getattr(rowobj, key), value[1])
else:
self.assert_(
getattr(rowobj, key) == value,
"attribute %s value %s does not match %s"
% (key, getattr(rowobj, key), value),
)
def assert_unordered_result(self, result, cls, *expected):
"""As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
"""
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = {immutabledict(e) for e in expected}
for wrong in util.itertools_filterfalse(
lambda o: isinstance(o, cls), found
):
fail(
'Unexpected type "%s", expected "%s"'
% (type(wrong).__name__, cls.__name__)
)
if len(found) != len(expected):
fail(
'Unexpected object count "%s", expected "%s"'
% (len(found), len(expected))
)
NOVALUE = object()
def _compare_item(obj, spec):
for key, value in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(
getattr(obj, key), value[0], *value[1]
)
except AssertionError:
return False
else:
if getattr(obj, key, NOVALUE) != value:
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(
"Expected %s instance with attributes %s not found."
% (cls.__name__, repr(expected_item))
)
return True
def sql_execution_asserter(self, db=None):
if db is None:
from . import db as db
return assertsql.assert_engine(db)
def assert_sql_execution(self, db, callable_, *rules):
with self.sql_execution_asserter(db) as asserter:
result = callable_()
asserter.assert_(*rules)
return result
def assert_sql(self, db, callable_, rules):
newrules = []
for rule in rules:
if isinstance(rule, dict):
newrule = assertsql.AllOf(
*[assertsql.CompiledSQL(k, v) for k, v in rule.items()]
)
else:
newrule = assertsql.CompiledSQL(*rule)
newrules.append(newrule)
return self.assert_sql_execution(db, callable_, *newrules)
def assert_sql_count(self, db, callable_, count):
self.assert_sql_execution(
db, callable_, assertsql.CountStatements(count)
)
def assert_multiple_sql_count(self, dbs, callable_, counts):
recs = [
(self.sql_execution_asserter(db), db, count)
for (db, count) in zip(dbs, counts)
]
asserters = []
for ctx, db, count in recs:
asserters.append(ctx.__enter__())
try:
return callable_()
finally:
for asserter, (ctx, db, count) in zip(asserters, recs):
ctx.__exit__(None, None, None)
asserter.assert_(assertsql.CountStatements(count))
@contextlib.contextmanager
def assert_execution(self, db, *rules):
with self.sql_execution_asserter(db) as asserter:
yield
asserter.assert_(*rules)
def assert_statement_count(self, db, count):
return self.assert_execution(db, assertsql.CountStatements(count))
| 30.965007 | 81 | 0.590299 | ["MIT"] | ai-mocap/sqlalchemy | lib/sqlalchemy/testing/assertions.py | 23,007 | Python |
import collections
import datetime
from django.utils.translation import gettext_lazy as _
from .base import * # noqa
# Override static and media URL for prefix in WSGI server.
# https://code.djangoproject.com/ticket/25598
STATIC_URL = '/2016/static/'
MEDIA_URL = '/2016/media/'
CONFERENCE_DEFAULT_SLUG = 'pycontw-2016'
TALK_PROPOSAL_DURATION_CHOICES = (
('NOPREF', _('No preference')),
('PREF25', _('Prefer 25min')),
('PREF45', _('Prefer 45min')),
)
EVENTS_DAY_NAMES = collections.OrderedDict([
(datetime.date(2016, 6, 3), _('Day 1')),
(datetime.date(2016, 6, 4), _('Day 2')),
(datetime.date(2016, 6, 5), _('Day 3')),
])
| 25.192308 | 58 | 0.674809 | ["MIT"] | DoubleTakoMeat/pycon.tw | src/pycontw2016/settings/production/pycontw2016.py | 655 | Python |
"""llvm
Tool-specific initialization for LLVM
"""
#
# Copyright (c) 2009 VMware, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import re
import sys
import distutils.version
import SCons.Errors
import SCons.Util
required_llvm_version = '3.3'
def generate(env):
env['llvm'] = False
try:
llvm_dir = os.environ['LLVM']
except KeyError:
# Do nothing -- use the system headers/libs
llvm_dir = None
else:
if not os.path.isdir(llvm_dir):
raise SCons.Errors.InternalError("Specified LLVM directory not found")
if env['debug']:
llvm_subdir = 'Debug'
else:
llvm_subdir = 'Release'
llvm_bin_dir = os.path.join(llvm_dir, llvm_subdir, 'bin')
if not os.path.isdir(llvm_bin_dir):
llvm_bin_dir = os.path.join(llvm_dir, 'bin')
if not os.path.isdir(llvm_bin_dir):
raise SCons.Errors.InternalError("LLVM binary directory not found")
env.PrependENVPath('PATH', llvm_bin_dir)
if env['platform'] == 'windows':
# XXX: There is no llvm-config on Windows, so assume a standard layout
if llvm_dir is None:
print('scons: LLVM environment variable must be specified when building for windows')
return
# Try to determine the LLVM version from llvm/Config/config.h
llvm_config = os.path.join(llvm_dir, 'include/llvm/Config/llvm-config.h')
if not os.path.exists(llvm_config):
print('scons: could not find %s' % llvm_config)
return
llvm_version_major_re = re.compile(r'^#define LLVM_VERSION_MAJOR ([0-9]+)')
llvm_version_minor_re = re.compile(r'^#define LLVM_VERSION_MINOR ([0-9]+)')
llvm_version = None
llvm_version_major = None
llvm_version_minor = None
for line in open(llvm_config, 'rt'):
mo = llvm_version_major_re.match(line)
if mo:
llvm_version_major = mo.group(1)
mo = llvm_version_minor_re.match(line)
if mo:
llvm_version_minor = mo.group(1)
if llvm_version_major is not None and llvm_version_minor is not None:
llvm_version = distutils.version.LooseVersion('%s.%s' % (llvm_version_major, llvm_version_minor))
if llvm_version is None:
print('scons: could not determine the LLVM version from %s' % llvm_config)
return
if llvm_version < distutils.version.LooseVersion(required_llvm_version):
print('scons: LLVM version %s found, but %s is required' % (llvm_version, required_llvm_version))
return
env.Prepend(CPPPATH = [os.path.join(llvm_dir, 'include')])
env.AppendUnique(CPPDEFINES = [
'HAVE_STDINT_H',
])
env.Prepend(LIBPATH = [os.path.join(llvm_dir, 'lib')])
# LIBS should match the output of `llvm-config --libs engine mcjit bitwriter x86asmprinter irreader`
if llvm_version >= distutils.version.LooseVersion('5.0'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMAsmParser',
'LLVMDemangle', 'LLVMGlobalISel', 'LLVMDebugInfoMSF',
'LLVMBinaryFormat',
])
elif llvm_version >= distutils.version.LooseVersion('4.0'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMAsmParser',
'LLVMDemangle', 'LLVMGlobalISel', 'LLVMDebugInfoMSF',
])
elif llvm_version >= distutils.version.LooseVersion('3.9'):
env.Prepend(LIBS = [
'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMDebugInfoCodeView', 'LLVMCodeGen',
'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMInstrumentation', 'LLVMTransformUtils',
'LLVMBitWriter', 'LLVMX86Desc',
'LLVMMCDisassembler', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMExecutionEngine', 'LLVMTarget',
'LLVMAnalysis', 'LLVMProfileData',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore',
'LLVMSupport',
'LLVMIRReader', 'LLVMASMParser'
])
elif llvm_version >= distutils.version.LooseVersion('3.7'):
env.Prepend(LIBS = [
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMProfileData',
'LLVMInstCombine', 'LLVMInstrumentation', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMX86Desc', 'LLVMMCDisassembler',
'LLVMX86Info', 'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMTarget', 'LLVMExecutionEngine',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore', 'LLVMSupport'
])
elif llvm_version >= distutils.version.LooseVersion('3.6'):
env.Prepend(LIBS = [
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMSelectionDAG', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMProfileData',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMX86Desc', 'LLVMMCDisassembler',
'LLVMX86Info', 'LLVMX86AsmPrinter', 'LLVMX86Utils',
'LLVMMCJIT', 'LLVMTarget', 'LLVMExecutionEngine',
'LLVMRuntimeDyld', 'LLVMObject', 'LLVMMCParser',
'LLVMBitReader', 'LLVMMC', 'LLVMCore', 'LLVMSupport'
])
elif llvm_version >= distutils.version.LooseVersion('3.5'):
env.Prepend(LIBS = [
'LLVMMCDisassembler',
'LLVMBitWriter', 'LLVMMCJIT', 'LLVMRuntimeDyld',
'LLVMX86Disassembler', 'LLVMX86AsmParser', 'LLVMX86CodeGen',
'LLVMSelectionDAG', 'LLVMAsmPrinter', 'LLVMX86Desc',
'LLVMObject', 'LLVMMCParser', 'LLVMBitReader', 'LLVMX86Info',
'LLVMX86AsmPrinter', 'LLVMX86Utils', 'LLVMJIT',
'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
'LLVMSupport'
])
else:
env.Prepend(LIBS = [
'LLVMMCDisassembler',
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMX86CodeGen', 'LLVMX86Desc', 'LLVMSelectionDAG',
'LLVMAsmPrinter', 'LLVMMCParser', 'LLVMX86AsmPrinter',
'LLVMX86Utils', 'LLVMX86Info', 'LLVMMCJIT', 'LLVMJIT',
'LLVMExecutionEngine', 'LLVMCodeGen', 'LLVMScalarOpts',
'LLVMInstCombine', 'LLVMTransformUtils', 'LLVMipa',
'LLVMAnalysis', 'LLVMTarget', 'LLVMMC', 'LLVMCore',
'LLVMSupport', 'LLVMRuntimeDyld', 'LLVMObject'
])
env.Append(LIBS = [
'imagehlp',
'psapi',
'shell32',
'advapi32'
])
if env['msvc']:
# Some of the LLVM C headers use the inline keyword without
# defining it.
env.Append(CPPDEFINES = [('inline', '__inline')])
# Match some of the warning options from llvm/cmake/modules/HandleLLVMOptions.cmake
env.AppendUnique(CXXFLAGS = [
'/wd4355', # 'this' : used in base member initializer list
'/wd4624', # 'derived class' : destructor could not be generated because a base class destructor is inaccessible
])
if env['build'] in ('debug', 'checked'):
# LLVM libraries are static, build with /MT, and they
# automatically link against LIBCMT. When we're doing a
# debug build we'll be linking against LIBCMTD, so disable
# that.
env.Append(LINKFLAGS = ['/nodefaultlib:LIBCMT'])
else:
llvm_config = os.environ.get('LLVM_CONFIG', 'llvm-config')
if not env.Detect(llvm_config):
print('scons: %s script not found' % llvm_config)
return
llvm_version = env.backtick('%s --version' % llvm_config).rstrip()
llvm_version = distutils.version.LooseVersion(llvm_version)
if llvm_version < distutils.version.LooseVersion(required_llvm_version):
print('scons: LLVM version %s found, but %s is required' % (llvm_version, required_llvm_version))
return
try:
# Treat --cppflags specially to prevent NDEBUG from disabling
# assertion failures in debug builds.
cppflags = env.ParseFlags('!%s --cppflags' % llvm_config)
try:
cppflags['CPPDEFINES'].remove('NDEBUG')
except ValueError:
pass
env.MergeFlags(cppflags)
# Match llvm --fno-rtti flag
cxxflags = env.backtick('%s --cxxflags' % llvm_config).split()
if '-fno-rtti' in cxxflags:
env.Append(CXXFLAGS = ['-fno-rtti'])
components = ['engine', 'mcjit', 'bitwriter', 'x86asmprinter', 'mcdisassembler', 'irreader']
env.ParseConfig('%s --libs ' % llvm_config + ' '.join(components))
env.ParseConfig('%s --ldflags' % llvm_config)
if llvm_version >= distutils.version.LooseVersion('3.5'):
env.ParseConfig('%s --system-libs' % llvm_config)
env.Append(CXXFLAGS = ['-std=c++11'])
except OSError:
print('scons: llvm-config version %s failed' % llvm_version)
return
assert llvm_version is not None
env['llvm'] = True
print('scons: Found LLVM version %s' % llvm_version)
env['LLVM_VERSION'] = llvm_version
# Define HAVE_LLVM macro with the major/minor version number (e.g., 0x0206 for 2.6)
llvm_version_major = int(llvm_version.version[0])
llvm_version_minor = int(llvm_version.version[1])
llvm_version_hex = '0x%02x%02x' % (llvm_version_major, llvm_version_minor)
env.Prepend(CPPDEFINES = [('HAVE_LLVM', llvm_version_hex)])
def exists(env):
return True
# vim:set ts=4 sw=4 et:
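
# Illustrative SConstruct usage sketch; it assumes the Mesa-style SCons
# environment (which defines options such as 'platform', 'debug' and 'build'
# that generate() reads) and that this tool lives on the given toolpath.
#
#     env = Environment(tools=['default', 'llvm'], toolpath=['scons'])
#     if env['llvm']:
#         print('Building with LLVM %s' % env['LLVM_VERSION'])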
| 45.384615 | 128 | 0.598844 | ["MIT"] | VincentWei/mg-mesa3d | scons/llvm.py | 12,980 | Python |
#!/usr/bin/env python
# In this example we show the use of the
# vtkBandedPolyDataContourFilter. This filter creates separate,
# constant colored bands for a range of scalar values. Each band is
# bounded by two scalar values, and the cell data lying within the
# value has the same cell scalar value.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# The lookup table is similar to that used by maps. Two hues are used:
# a brown for land, and a blue for water. The value of the hue is
# changed to give the effect of elevation.
Scale = 5
lutWater = vtk.vtkLookupTable()
lutWater.SetNumberOfColors(10)
lutWater.SetHueRange(0.58, 0.58)
lutWater.SetSaturationRange(0.5, 0.1)
lutWater.SetValueRange(0.5, 1.0)
lutWater.Build()
lutLand = vtk.vtkLookupTable()
lutLand.SetNumberOfColors(10)
lutLand.SetHueRange(0.1, 0.1)
lutLand.SetSaturationRange(0.4, 0.1)
lutLand.SetValueRange(0.55, 0.9)
lutLand.Build()
# The DEM reader reads data and creates an output image.
demModel = vtk.vtkDEMReader()
demModel.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demModel.Update()
# We shrink the terrain data down a bit to yield better performance for
# this example.
shrinkFactor = 4
shrink = vtk.vtkImageShrink3D()
shrink.SetShrinkFactors(shrinkFactor, shrinkFactor, 1)
shrink.SetInputConnection(demModel.GetOutputPort())
shrink.AveragingOn()
# Convert the image into polygons.
geom = vtk.vtkImageDataGeometryFilter()
geom.SetInputConnection(shrink.GetOutputPort())
# Warp the polygons based on elevation.
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(geom.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.UseNormalOn()
warp.SetScaleFactor(Scale)
# Create the contour bands.
bcf = vtk.vtkBandedPolyDataContourFilter()
bcf.SetInput(warp.GetPolyDataOutput())
bcf.GenerateValues(15, demModel.GetOutput().GetScalarRange())
bcf.SetScalarModeToIndex()
bcf.GenerateContourEdgesOn()
# Compute normals to give a better look.
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(bcf.GetOutputPort())
normals.SetFeatureAngle(60)
normals.ConsistencyOff()
normals.SplittingOff()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(0, 10)
demMapper.SetLookupTable(lutLand)
demMapper.SetScalarModeToUseCellData()
demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)
## Create contour edges
edgeMapper = vtk.vtkPolyDataMapper()
edgeMapper.SetInput(bcf.GetContourEdgesOutput())
edgeMapper.SetResolveCoincidentTopologyToPolygonOffset()
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(edgeMapper)
edgeActor.GetProperty().SetColor(0, 0, 0)
## Test clipping
# Create the contour bands.
bcf2 = vtk.vtkBandedPolyDataContourFilter()
bcf2.SetInput(warp.GetPolyDataOutput())
bcf2.ClippingOn()
bcf2.GenerateValues(10, 1000, 2000)
bcf2.SetScalarModeToValue()
# Compute normals to give a better look.
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(bcf2.GetOutputPort())
normals2.SetFeatureAngle(60)
normals2.ConsistencyOff()
normals2.SplittingOff()
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(10)
demMapper2 = vtk.vtkPolyDataMapper()
demMapper2.SetInputConnection(normals2.GetOutputPort())
demMapper2.SetScalarRange(demModel.GetOutput().GetScalarRange())
demMapper2.SetLookupTable(lut)
demMapper2.SetScalarModeToUseCellData()
demActor2 = vtk.vtkLODActor()
demActor2.SetMapper(demMapper2)
demActor2.AddPosition(0, 15000, 0)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(demActor)
ren.AddActor(demActor2)
ren.AddActor(edgeActor)
ren.SetBackground(.4, .4, .4)
renWin.SetSize(375, 200)
cam = vtk.vtkCamera()
cam.SetPosition(-17438.8, 2410.62, 25470.8)
cam.SetFocalPoint(3985.35, 11930.6, 5922.14)
cam.SetViewUp(0, 0, 1)
ren.SetActiveCamera(cam)
ren.ResetCamera()
cam.Zoom(2)
iren.Initialize()
iren.SetDesiredUpdateRate(1)
def CheckAbort(obj, event):
foo = renWin.GetEventPending()
if foo != 0:
renWin.SetAbortRender(1)
renWin.AddObserver("AbortCheckEvent", CheckAbort)
renWin.Render()
renWin.Render()
iren.Start()
| 28.546667 | 72 | 0.79262 | [
"BSD-3-Clause"
] | Armand0s/VTK | Examples/VisualizationAlgorithms/Python/BandContourTerrain.py | 4,282 | Python |
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .variant_caller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
import mock
import numpy as np
import numpy.testing as npt
from deeptrio import testdata
from deeptrio import variant_caller
from deepvariant.protos import deepvariant_pb2
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
def setUpModule():
testdata.init()
def _reference_model_options(p_error, max_gq, gq_resolution=1):
return deepvariant_pb2.VariantCallerOptions(
sample_name='UNKNOWN',
p_error=p_error,
max_gq=max_gq,
gq_resolution=gq_resolution,
ploidy=2)
class PlaceholderVariantCaller(variant_caller.VariantCaller):
"""A placeholder VariantCaller.
This class provides a get_candidates implementation and so allows
the base class to be instantiated and its methods tested.
"""
def __init__(self,
p_error,
max_gq,
gq_resolution=1,
use_cache_table=False,
max_cache_coverage=100):
super(PlaceholderVariantCaller, self).__init__(
options=_reference_model_options(p_error, max_gq, gq_resolution),
use_cache_table=use_cache_table,
max_cache_coverage=max_cache_coverage)
def get_candidates(self, allele_counter, target_sample):
return None
class VariantCallerTests(parameterized.TestCase):
def fake_allele_counter(self, start_pos, counts):
allele_counter = mock.Mock()
# pylint: disable=g-complex-comprehension
allele_counter.summary_counts.return_value = [
deepvariant_pb2.AlleleCountSummary(
ref_supporting_read_count=n_ref,
total_read_count=n_ref + n_alt,
ref_base=ref,
reference_name='chr1',
position=start_pos + i)
for i, (n_alt, n_ref, ref) in enumerate(counts)
]
allele_counter.counts.return_value = counts
# pylint: enable=g-complex-comprehension
return allele_counter
# R code to produce the testdata expectation table.
# expected <- function(n_ref, n_alt, perr, max_gq = 100) {
# p_ref <- dbinom(n_alt, n_ref, perr)
# p_het <- dbinom(n_alt, n_ref, 0.5)
# p_alt <- dbinom(n_ref - n_alt, n_ref, perr)
# raw <- c(p_ref, p_het, p_alt)
# norm <- raw / sum(raw)
# gq = min(floor(-10 * log10(1 - norm[1])), max_gq)
# likelihoods = paste(sprintf("%.6f", log10(norm)), collapse=", ")
# likelihoods = paste("[", likelihoods, "]", sep="")
# result = paste(n_ref, n_alt, perr, 100, 1, likelihoods, gq, sep=", ")
# cat(paste("[", result, "],\n", sep=""))
# }
#
# for (n in c(10, 20)) {
# for (k in seq(0, n)) {
# expected(n, k, 0.01)
# }
# }
#
# for (perr in c(0.1, 0.01, 0.001, 0.0001)) {
# expected(10, 0, perr)
# expected(10, 1, perr)
# }
#
# for (n_ref in c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000)) {
# expected(n_ref, 0, 0.01)
# }
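  # A rough Python sketch of the same computation (hypothetical, not used by the
  # tests, and assuming scipy is available) to clarify how the expected
  # likelihoods and GQ values above were derived:
  #
  #   from scipy.stats import binom
  #
  #   def expected(n_ref, n_alt, perr, max_gq=100):
  #     p_ref = binom.pmf(n_alt, n_ref, perr)
  #     p_het = binom.pmf(n_alt, n_ref, 0.5)
  #     p_alt = binom.pmf(n_ref - n_alt, n_ref, perr)
  #     norm = np.array([p_ref, p_het, p_alt])
  #     norm = norm / norm.sum()
  #     gq = min(int(np.floor(-10 * np.log10(1 - norm[0]))), max_gq)
  #     return np.log10(norm), gq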
@parameterized.parameters(
# No coverage case.
[0, 0, 0.01, 100, [-0.477121, -0.477121, -0.477121], 1],
# Test systematically values of n and k.
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 2, 0.01, 100, [-1.063830, -0.039211, -13.037641], 0],
[10, 3, 0.01, 100, [-3.020668, -0.000414, -11.003209], 0],
[10, 4, 0.01, 100, [-5.015893, -0.000004, -9.007163], 0],
[10, 5, 0.01, 100, [-7.011524, -0.000000, -7.011524], 0],
[10, 6, 0.01, 100, [-9.007163, -0.000004, -5.015893], 0],
[10, 7, 0.01, 100, [-11.003209, -0.000414, -3.020668], 0],
[10, 8, 0.01, 100, [-13.037641, -0.039211, -1.063830], 0],
[10, 9, 0.01, 100, [-16.009190, -1.015126, -0.044109], 0],
[10, 10, 0.01, 100, [-19.956821, -2.967121, -0.000469], 0],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[20, 1, 0.01, 100, [-0.000050, -3.937719, -35.921484], 39],
[20, 2, 0.01, 100, [-0.004935, -1.946968, -31.935098], 19],
[20, 3, 0.01, 100, [-0.328657, -0.275056, -28.267550], 2],
[20, 4, 0.01, 100, [-2.053097, -0.003860, -26.000720], 0],
[20, 5, 0.01, 100, [-4.044911, -0.000039, -24.001263], 0],
[20, 6, 0.01, 100, [-6.040508, -0.000000, -22.005589], 0],
[20, 7, 0.01, 100, [-8.036143, -0.000000, -20.009954], 0],
[20, 8, 0.01, 100, [-10.031778, -0.000000, -18.014319], 0],
[20, 9, 0.01, 100, [-12.027413, -0.000000, -16.018683], 0],
[20, 10, 0.01, 100, [-14.023048, -0.000000, -14.023048], 0],
[20, 11, 0.01, 100, [-16.018683, -0.000000, -12.027413], 0],
[20, 12, 0.01, 100, [-18.014319, -0.000000, -10.031778], 0],
[20, 13, 0.01, 100, [-20.009954, -0.000000, -8.036143], 0],
[20, 14, 0.01, 100, [-22.005589, -0.000000, -6.040508], 0],
[20, 15, 0.01, 100, [-24.001263, -0.000039, -4.044911], 0],
[20, 16, 0.01, 100, [-26.000720, -0.003860, -2.053097], 0],
[20, 17, 0.01, 100, [-28.267550, -0.275056, -0.328657], 0],
[20, 18, 0.01, 100, [-31.935098, -1.946968, -0.004935], 0],
[20, 19, 0.01, 100, [-35.921484, -3.937719, -0.000050], 0],
[20, 20, 0.01, 100, [-39.912704, -5.933304, -0.000001], 0],
# Testing different values of p_error.
[10, 0, 0.1, 100, [-0.001215, -2.553940, -9.543640], 25],
[10, 1, 0.1, 100, [-0.010811, -1.609294, -7.644752], 16],
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 0, 0.001, 100, [-0.000428, -3.006383, -29.996083], 30],
[10, 1, 0.001, 100, [-0.297847, -0.304236, -24.294371], 3],
[10, 0, 1e-04, 100, [-0.000424, -3.010290, -39.999990], 30],
[10, 1, 1e-04, 100, [-1.032394, -0.042303, -33.032046], 0],
# Test scaling of calculation with more coverage, hitting max_gq.
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[30, 0, 0.01, 100, [-0.000000, -8.899956, -59.869056], 88],
[40, 0, 0.01, 100, [-0.000000, -11.866608, -79.825408], 100],
[50, 0, 0.01, 100, [-0.000000, -14.833260, -99.781760], 100],
[60, 0, 0.01, 100, [0.000000, -17.799911, -119.738112], 100],
[70, 0, 0.01, 100, [0.000000, -20.766563, -139.694464], 100],
[80, 0, 0.01, 100, [0.000000, -23.733215, -159.650816], 100],
[90, 0, 0.01, 100, [0.000000, -26.699867, -179.607168], 100],
[100, 0, 0.01, 100, [0.000000, -29.666519, -199.563519], 100],
)
def test_ref_calc(self, total_n, alt_n, p_error, max_gq, expected_likelihoods,
expected_gq):
caller = PlaceholderVariantCaller(p_error, max_gq)
gq, likelihoods = caller.reference_confidence(total_n - alt_n, total_n)
npt.assert_allclose(expected_likelihoods, likelihoods, atol=1e-6)
self.assertEqual(expected_gq, gq)
@parameterized.parameters(
# Values below max_allowed_reads are returned without modification.
[0, 10, 100, (0, 10)],
[5, 10, 100, (5, 10)],
[10, 10, 100, (10, 10)],
[10, 100, 100, (10, 100)],
[100, 100, 100, (100, 100)],
# Checks that the rescaling works when n_total_reads > max_allowed.
[0, 200, 100, (0, 100)],
[0, 200, 100, (0, 100)],
[0, 1000, 100, (0, 100)],
[0, 10000, 100, (0, 100)],
[1, 200, 100, (1, 100)],
[1, 1000, 100, (1, 100)],
[1, 10000, 100, (1, 100)],
[1, 100000, 100, (1, 100)],
[2, 200, 100, (1, 100)],
[3, 200, 100, (2, 100)],
[4, 200, 100, (2, 100)],
[10, 200, 100, (5, 100)],
[50, 200, 100, (25, 100)],
[100, 200, 100, (50, 100)],
[200, 200, 100, (100, 100)],
# I saw a bug at runtime, and the testcase makes sure we scale values of
# n_ref_reads close to n_total_reads appropriately.
[99, 100, 100, (99, 100)],
)
def test_rescale_read_counts(self, n_ref, n_total, max_allowed_reads,
expected):
actual = variant_caller._rescale_read_counts_if_necessary(
n_ref, n_total, max_allowed_reads)
self.assertEqual(actual, expected)
# pylint: disable=g-complex-comprehension
@parameterized.parameters((n_ref, n_alt_fraction)
for n_ref in [1000, 10000, 100000, 1000000]
for n_alt_fraction in [0.0, 0.01, 0.02])
# pylint: enable=g-complex-comprehension
def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):
"""Tests that we don't blow up when the coverage gets really high."""
caller = PlaceholderVariantCaller(0.01, 100)
n_alt = int(n_alt_fraction * n_ref)
gq, likelihoods = caller._calc_reference_confidence(n_ref, n_ref + n_alt)
self.assertTrue(
np.isfinite(likelihoods).all(),
'Non-finite likelihoods {}'.format(likelihoods))
self.assertEqual(100, gq)
@parameterized.parameters(*variant_caller.CANONICAL_DNA_BASES)
def test_gvcf_basic(self, ref):
options = _reference_model_options(0.01, 100)
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
gvcfs = list(caller.make_gvcfs(allele_counter.summary_counts()))
self.assertLen(gvcfs, 1)
self.assertGVCF(
gvcfs[0],
ref=ref,
gq=1.0,
start=100,
end=101,
min_dp=0,
chrom='chr1',
gls=[-0.47712125472] * 3,
sample_name=options.sample_name)
@parameterized.parameters('N', 'R', 'W', 'B')
def test_gvcf_basic_skips_iupac_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
self.assertEmpty(list(caller.make_gvcfs(allele_counter.summary_counts())))
@parameterized.parameters('X', '>', '!')
def test_gvcf_basic_raises_with_bad_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
with self.assertRaisesRegexp(ValueError,
'Invalid reference base={}'.format(ref)):
list(caller.make_gvcfs(allele_counter.summary_counts()))
def assertGVCF(self,
gvcf,
ref,
gq,
start,
end,
min_dp,
chrom='chr1',
gls=None,
sample_name=None,
gts=None):
if chrom:
self.assertEqual(gvcf.reference_name, chrom)
call = variant_utils.only_call(gvcf)
self.assertNotEmpty(gvcf.reference_name)
self.assertEqual(gvcf.reference_bases, ref)
self.assertEqual(gvcf.alternate_bases, ['<*>'])
self.assertEqual(gvcf.start, start)
self.assertEqual(gvcf.end, end if end else start + 1)
self.assertEqual(variantcall_utils.get_gq(call), gq)
self.assertNotEmpty(call.genotype_likelihood)
self.assertIn('MIN_DP', call.info)
self.assertLen(call.info['MIN_DP'].values, 1)
self.assertEqual(variantcall_utils.get_min_dp(call), min_dp)
if gls is not None:
npt.assert_allclose(list(gvcf.calls[0].genotype_likelihood), gls)
if sample_name:
self.assertEqual(gvcf.calls[0].call_set_name, sample_name)
if gts is not None:
self.assertEqual(list(gvcf.calls[0].genotype), gts)
@parameterized.parameters(
# Check some basics.
([(0, 0, 'A')], [dict(start=1, end=2, ref='A', gq=1, min_dp=0)]),
# Two equal records are merged, and the reference base is the first one.
([(0, 0, 'A'),
(0, 0, 'C')], [dict(start=1, end=3, ref='A', gq=1, min_dp=0)]),
([(0, 0, 'C'),
(0, 0, 'A')], [dict(start=1, end=3, ref='C', gq=1, min_dp=0)]),
# Three equal records are merged into a single block.
([(0, 0, 'A'), (0, 0, 'C'),
(0, 0, 'T')], [dict(start=1, end=4, ref='A', gq=1, min_dp=0)]),
# We don't merge together different GQ value blocks:
([(0, 0, 'A'), (0, 100, 'C')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=100, min_dp=100),
]),
([(0, 100, 'A'), (0, 0, 'C')], [
dict(start=1, end=2, ref='A', gq=100, min_dp=100),
dict(start=2, end=3, ref='C', gq=1, min_dp=0),
]),
([(0, 0, 'A'), (0, 20, 'C'), (0, 100, 'T')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=59, min_dp=20),
dict(start=3, end=4, ref='T', gq=100, min_dp=100),
]),
)
def test_make_gvcfs(self, counts, expecteds):
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
@parameterized.parameters(
dict(
gq_resolution=1,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 3 does not cause any records to be merged.
dict(
gq_resolution=3,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 4 causes the first merge, of the first two records.
dict(
gq_resolution=4,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
dict(
gq_resolution=10,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=10, ref='T', gq=56, min_dp=19),
]),
dict(
gq_resolution=45,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=10, ref='A', gq=56, min_dp=19),
]),
)
def test_quantize_gvcfs(self, gq_resolution, expecteds):
# Each count tuple is n_alt, n_ref, ref_base.
# The third, fourth, and the fifth ones should never be merged, since
# either het or hom_alt has bigger GL than hom_ref.
counts = [(0, 18, 'A'), (0, 19, 'C'), (35, 0, 'A'), (10, 10, 'T'),
(4, 12, 'A'), (1, 30, 'A'), (1, 34, 'C'), (0, 20, 'T'),
(0, 19, 'G')]
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100, gq_resolution)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
@parameterized.parameters(True, False)
def test_gvcfs_counts(self, include_gvcfs):
# Only tests the 'gvcfs' creation part of calls_and_gvcfs. The `calls`
# portion of this method needs to be tested in subclasses, which have
# implemented the get_candidates method.
counts = [(0, 0, 'A'), (10, 10, 'G'), (0, 0, 'G'), (0, 0, 'G'),
(10, 10, 'T')]
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(10, counts)
allele_counters = {}
allele_counters['sample_id'] = allele_counter
_, gvcfs = caller.calls_and_gvcfs(allele_counters, include_gvcfs,
'sample_id')
# We expect our gvcfs to occur at the 10 position and that 12 and 13 have
# been merged into a 2 bp block, if enabled. Otherwise should be empty.
if include_gvcfs:
self.assertLen(gvcfs, 4)
# Expected diploid genotype likelihoods when there's no coverage. The
# chance of having each genotype is 1/3, in log10 space.
flat_gls = np.log10([1.0 / 3] * 3)
self.assertGVCF(
gvcfs[0], ref='A', start=10, end=11, gq=1, min_dp=0, gls=flat_gls)
self.assertGVCF(
gvcfs[1],
ref='G',
start=11,
end=12,
gq=0,
min_dp=20,
gls=np.array([-14.0230482368, -7.993606e-15, -14.0230482368]),
# The genotype should NOT be called here ("./.") as the likelihood
# for het is greater than hom_ref.
gts=[-1, -1])
self.assertGVCF(
gvcfs[2], ref='G', start=12, end=14, gq=1, min_dp=0, gls=flat_gls)
else:
self.assertEmpty(gvcfs)
_CACHE_COVERAGE = 20 # Outside class so we can refer to it in @Parameters.
class VariantCallerCacheTests(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(VariantCallerCacheTests, cls).setUpClass()
cls.raw_caller = PlaceholderVariantCaller(0.1, 50, use_cache_table=False)
cls.cache_caller = PlaceholderVariantCaller(
0.1, 50, use_cache_table=True, max_cache_coverage=_CACHE_COVERAGE)
# pylint: disable=g-complex-comprehension
@parameterized.parameters((n_alt, n_total)
for n_total in range(_CACHE_COVERAGE + 1)
for n_alt in range(n_total + 1))
# pylint: enable=g-complex-comprehension
def test_caching(self, n_alt, n_total):
# Note that we only expect the gq and gls to be close if we are not
# rescaling the counts, so we are only looping over values that should be
# cached. In practice the cache is set to values sufficiently large that
# these differences don't matter, but for this test we are limiting the
# cache size to a small value in _CACHE_COVERAGE so we can test that the
# cache lookups are correct.
raw_gq, raw_gls = self.raw_caller.reference_confidence(n_alt, n_total)
cache_gq, cache_gls = self.cache_caller.reference_confidence(n_alt, n_total)
self.assertEqual(raw_gq, cache_gq)
npt.assert_allclose(raw_gls, cache_gls)
if __name__ == '__main__':
absltest.main()
| 43.432049 | 80 | 0.600738 | [
"BSD-3-Clause"
] | FrogEnthusiast7/deepvariant | deeptrio/variant_caller_test.py | 21,412 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
from typing import Dict
@enum.unique
class EmbOptimType(enum.Enum):
SGD = "sgd" # uses non-deterministic updates (atomicAdd(..)) with duplicate ids
EXACT_SGD = (
"exact_sgd" # uses deterministic updates (via sorting + segment reduction)
)
LAMB = "lamb"
ADAM = "adam"
    # exact/dedup: gradients to the same row are coalesced and then applied
    # together, instead of being applied in sequence (approximate).
EXACT_ADAGRAD = "exact_adagrad"
EXACT_ROWWISE_ADAGRAD = "exact_row_wise_adagrad"
LARS_SGD = "lars_sgd"
PARTIAL_ROWWISE_ADAM = "partial_row_wise_adam"
PARTIAL_ROWWISE_LAMB = "partial_row_wise_lamb"
ROWWISE_ADAGRAD = "row_wise_adagrad"
MADGRAD = "madgrad"
def __str__(self) -> str:
return self.value
@enum.unique
class SparseType(enum.Enum):
FP32 = "fp32"
FP16 = "fp16"
INT8 = "int8"
INT4 = "int4"
INT2 = "int2"
def __str__(self) -> str:
return self.value
@staticmethod
def from_int(ty: int) -> "SparseType":
if ty == 0:
return SparseType("fp32")
elif ty == 1:
return SparseType("fp16")
elif ty == 2:
return SparseType("int8")
elif ty == 3:
return SparseType("int4")
elif ty == 4:
return SparseType("int2")
else:
raise ValueError(f"Unsupported sparse type: {ty}")
def as_int(self) -> int:
return {
SparseType.FP32.value: 0,
SparseType.FP16.value: 1,
SparseType.INT8.value: 2,
SparseType.INT4.value: 3,
SparseType.INT2.value: 4,
}[self.value]
def bit_rate(self) -> int:
return {
SparseType.FP32.value: 32,
SparseType.FP16.value: 16,
SparseType.INT8.value: 8,
SparseType.INT4.value: 4,
SparseType.INT2.value: 2,
}[self.value]
def align_size(self) -> int:
return {
SparseType.FP32.value: 1,
SparseType.FP16.value: 2,
SparseType.INT8.value: 4,
SparseType.INT4.value: 8,
SparseType.INT2.value: 16,
}[self.value]
def is_float(self) -> bool:
if self.value == SparseType.FP32.value or self.value == SparseType.FP16.value:
return True
else:
return False
ELEMENT_SIZE: Dict[SparseType, int] = {
SparseType.FP32: 4,
SparseType.FP16: 2,
SparseType.INT8: 1,
# SparseType.INT4: 0.5,
}
| 27.59 | 86 | 0.594418 | [
"BSD-3-Clause"
] | 842974287/FBGEMM | fbgemm_gpu/fbgemm_gpu/split_embedding_configs.py | 2,759 | Python |
"""
Builder for WebAssembly
"""
import subprocess
import sys
from SCons.Script import AlwaysBuild, Default, DefaultEnvironment
try:
subprocess.check_output(["em++", "--version"])
except FileNotFoundError:
print(
"Could not find emscripten. Maybe install it? (e.g. `brew install emscripten` on macOS. See also: https://emscripten.org/docs/getting_started/downloads.html)",
file=sys.stderr,
)
exit(1)
env = DefaultEnvironment()
env.Append(
LINKFLAGS=["--bind"],
)
env.Replace(
CXX="em++",
CC="emcc",
AR="emar",
RANLIB="emranlib",
PROGSUFFIX=".html"
)
#
# Target: Build wasm
#
target_bin = env.BuildProgram()
#
# Default targets
#
Default([target_bin])
| 17.512195 | 167 | 0.66156 | [
"MIT"
] | johnboiles/platformio-platform-wasm | builder/main.py | 718 | Python |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script to your own text classification task. Pointers for this are left as comments.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.6.0.dev0")
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
    Arguments pertaining to what data we are going to input to our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."}
)
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
def __post_init__(self):
if self.task_name is not None:
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys.keys():
raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task or a training/validation file.")
else:
train_extension = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
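    # As an illustration only (hypothetical paths and model names, not part of the
    # original comments), an invocation could look like one of:
    #
    #   python run_glue.py --model_name_or_path bert-base-cased --task_name mrpc \
    #     --do_train --do_eval --max_seq_length 128 --output_dir /tmp/mrpc_out
    #
    #   python run_glue.py --model_name_or_path bert-base-cased \
    #     --train_file train.csv --validation_file dev.csv \
    #     --do_train --do_eval --output_dir /tmp/custom_out
    #
    # Flag names come from the dataclasses above and from `TrainingArguments`.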
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in datasets and "validation_matched" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
if "test" not in datasets and "test_matched" not in datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_dataset, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
predict_datasets = [predict_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
predict_datasets.append(datasets["test_mismatched"])
for predict_dataset, task in zip(predict_datasets, tasks):
            # Removing the `label` column because it contains -1 and Trainer won't like that.
predict_dataset.remove_columns_("label")
predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_predict_file, "w") as writer:
logger.info(f"***** Predict results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "text-classification"}
if data_args.task_name is not None:
kwargs["language"] = "en"
kwargs["dataset_tags"] = "glue"
kwargs["dataset_args"] = data_args.task_name
kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 44.514925 | 119 | 0.666639 | [
"Apache-2.0"
] | MarcelWilnicki/transformers | examples/pytorch/text-classification/run_glue.py | 23,860 | Python |
import tensorflow as tf
import math as m
from rec_errors import euclidean_norm_squared
def silverman_rule_of_thumb(N: int):
return tf.pow(4/(3*N), 0.4)
def cw_1d(X, y=None):
def N0(mean, variance):
return 1.0/(tf.sqrt(2.0 * m.pi * variance)) * tf.exp((-(mean**2))/(2*variance))
N = tf.cast(tf.shape(X)[0], tf.float32)
if y is None:
y = silverman_rule_of_thumb(N)
A = tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1))
return (1.0/(N*N)) * tf.reduce_sum(N0(A, 2*y)) + N0(0.0, 2.0 + 2*y) - (2/N) * tf.reduce_sum(N0(X, 1.0 + 2*y))
def cw_2d(X, y=None):
def __phi(x):
def __phi_f(s):
t = s/7.5
return tf.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6 + 0.2659732*t**8
+ 0.0360768*t**10 + 0.0045813*t**12)
def __phi_g(s):
t = s/7.5
return tf.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2) - 0.00157565*t**(-3)
+ 0.0091628*t**(-4) - 0.02057706*t**(-5) + 0.02635537*t**(-6) - 0.01647633*t**(-7)
+ 0.00392377*t**(-8))
a = 7.5
return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a))
N = tf.cast(tf.shape(X)[0], tf.float32)
if y is None:
y = silverman_rule_of_thumb(N)
A = 1/(N*N*tf.sqrt(y))
B = 2.0/(N*tf.sqrt(y+0.5))
A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)/(4*y)
B1 = euclidean_norm_squared(X, axis=1)/(2+4*y)
return 1/tf.sqrt(1+y) + A*tf.reduce_sum(__phi(A1)) - B*tf.reduce_sum(__phi(B1))
def cw(X, y=None):
D = tf.cast(tf.shape(X)[1], tf.float32)
N = tf.cast(tf.shape(X)[0], tf.float32)
if y is None:
y = silverman_rule_of_thumb(N)
K = 1/(2*D-3)
A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = (1/(N**2)) * tf.reduce_sum((1/tf.sqrt(y + K*A1)))
B1 = euclidean_norm_squared(X, axis=1)
B = (2/N)*tf.reduce_sum((1/tf.sqrt(y + 0.5 + K*B1)))
return (1/tf.sqrt(1+y)) + A - B
# Selects the Cramer-Wold distance variant appropriate for the given latent
# dimension; closed-form specialisations exist for 1-D and 2-D latents, the
# general formula is used for >= 20-D, and dimensions in between raise an error.
def cw_choose(z_dim: int):
if z_dim == 1:
return cw_1d
elif z_dim == 2:
return cw_2d
elif z_dim >= 20:
return cw
else:
raise ValueError('Not defined for this latent dimension')
def cw_sampling(X, y=None):
def phi_sampling(s, D):
return tf.pow(1.0 + 4.0*s/(2.0*D-3), -0.5)
D = tf.cast(tf.shape(X)[1], tf.float32)
N = tf.cast(tf.shape(X)[0], tf.float32)
D_int = tf.cast(D, tf.int32)
N_int = tf.cast(N, tf.int32)
if y is None:
y = silverman_rule_of_thumb(N)
YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32),
scale_diag=tf.ones(D_int, tf.float32))
Y = YDistr.sample(N_int)
T = 1.0/(2.0*N*tf.sqrt(m.pi*y))
A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = tf.reduce_sum(phi_sampling(A0/(4*y), D))
B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2)
B = tf.reduce_sum(phi_sampling(B0/(4*y), D))
C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2)
C = tf.reduce_sum(phi_sampling(C0/(4*y), D))
return T*(A + B - 2*C)
| 32.685714 | 117 | 0.558275 | [
"MIT"
] | gmum/cwae | src/cw.py | 3,432 | Python |
from tkinter import *
#Palomo, Nemuel Rico O.
class ButtonLab:
def __init__(self, window):
self.color = Button(window, text='Color', fg='red', bg='blue')
self.button = Button(window, text='<---Click to change the color of the button :)', fg='black', command=self.changeColor)
self.color.place(x=120, y=150)
self.button.place(x=200, y=150)
def changeColor(self):
self.color.config(bg='yellow')
window = Tk()
mywin = ButtonLab(window)
window.title('Button (The Coders)')
window.geometry("500x220+10+10")
window.mainloop()
| 19.774194 | 130 | 0.613377 | [
"Apache-2.0"
] | nemuelpalomo/OOP--58002 | Lab (The Coders) #5.py | 613 | Python |
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by ./gengl.py.
# Wrapper for /usr/include/GL/glx.h
from OpenGL import platform, constant
from ctypes import *
c_void = None
# H (/usr/include/GL/glx.h:26)
GLX_VERSION_1_1 = constant.Constant( 'GLX_VERSION_1_1', 1 )
GLX_VERSION_1_2 = constant.Constant( 'GLX_VERSION_1_2', 1 )
GLX_VERSION_1_3 = constant.Constant( 'GLX_VERSION_1_3', 1 )
GLX_VERSION_1_4 = constant.Constant( 'GLX_VERSION_1_4', 1 )
GLX_USE_GL = constant.Constant( 'GLX_USE_GL', 1 )
GLX_BUFFER_SIZE = constant.Constant( 'GLX_BUFFER_SIZE', 2 )
GLX_LEVEL = constant.Constant( 'GLX_LEVEL', 3 )
GLX_RGBA = constant.Constant( 'GLX_RGBA', 4 )
GLX_DOUBLEBUFFER = constant.Constant( 'GLX_DOUBLEBUFFER', 5 )
GLX_STEREO = constant.Constant( 'GLX_STEREO', 6 )
GLX_AUX_BUFFERS = constant.Constant( 'GLX_AUX_BUFFERS', 7 )
GLX_RED_SIZE = constant.Constant( 'GLX_RED_SIZE', 8 )
GLX_GREEN_SIZE = constant.Constant( 'GLX_GREEN_SIZE', 9 )
GLX_BLUE_SIZE = constant.Constant( 'GLX_BLUE_SIZE', 10 )
GLX_ALPHA_SIZE = constant.Constant( 'GLX_ALPHA_SIZE', 11 )
GLX_DEPTH_SIZE = constant.Constant( 'GLX_DEPTH_SIZE', 12 )
GLX_STENCIL_SIZE = constant.Constant( 'GLX_STENCIL_SIZE', 13 )
GLX_ACCUM_RED_SIZE = constant.Constant( 'GLX_ACCUM_RED_SIZE', 14 )
GLX_ACCUM_GREEN_SIZE = constant.Constant( 'GLX_ACCUM_GREEN_SIZE', 15 )
GLX_ACCUM_BLUE_SIZE = constant.Constant( 'GLX_ACCUM_BLUE_SIZE', 16 )
GLX_ACCUM_ALPHA_SIZE = constant.Constant( 'GLX_ACCUM_ALPHA_SIZE', 17 )
GLX_BAD_SCREEN = constant.Constant( 'GLX_BAD_SCREEN', 1 )
GLX_BAD_ATTRIBUTE = constant.Constant( 'GLX_BAD_ATTRIBUTE', 2 )
GLX_NO_EXTENSION = constant.Constant( 'GLX_NO_EXTENSION', 3 )
GLX_BAD_VISUAL = constant.Constant( 'GLX_BAD_VISUAL', 4 )
GLX_BAD_CONTEXT = constant.Constant( 'GLX_BAD_CONTEXT', 5 )
GLX_BAD_VALUE = constant.Constant( 'GLX_BAD_VALUE', 6 )
GLX_BAD_ENUM = constant.Constant( 'GLX_BAD_ENUM', 7 )
GLX_VENDOR = constant.Constant( 'GLX_VENDOR', 1 )
GLX_VERSION = constant.Constant( 'GLX_VERSION', 2 )
GLX_EXTENSIONS = constant.Constant( 'GLX_EXTENSIONS', 3 )
GLX_CONFIG_CAVEAT = constant.Constant( 'GLX_CONFIG_CAVEAT', 32 )
GLX_DONT_CARE = constant.Constant( 'GLX_DONT_CARE', 4294967295 )
GLX_X_VISUAL_TYPE = constant.Constant( 'GLX_X_VISUAL_TYPE', 34 )
GLX_TRANSPARENT_TYPE = constant.Constant( 'GLX_TRANSPARENT_TYPE', 35 )
GLX_TRANSPARENT_INDEX_VALUE = constant.Constant( 'GLX_TRANSPARENT_INDEX_VALUE', 36 )
GLX_TRANSPARENT_RED_VALUE = constant.Constant( 'GLX_TRANSPARENT_RED_VALUE', 37 )
GLX_TRANSPARENT_GREEN_VALUE = constant.Constant( 'GLX_TRANSPARENT_GREEN_VALUE', 38 )
GLX_TRANSPARENT_BLUE_VALUE = constant.Constant( 'GLX_TRANSPARENT_BLUE_VALUE', 39 )
GLX_TRANSPARENT_ALPHA_VALUE = constant.Constant( 'GLX_TRANSPARENT_ALPHA_VALUE', 40 )
GLX_WINDOW_BIT = constant.Constant( 'GLX_WINDOW_BIT', 1 )
GLX_PIXMAP_BIT = constant.Constant( 'GLX_PIXMAP_BIT', 2 )
GLX_PBUFFER_BIT = constant.Constant( 'GLX_PBUFFER_BIT', 4 )
GLX_AUX_BUFFERS_BIT = constant.Constant( 'GLX_AUX_BUFFERS_BIT', 16 )
GLX_FRONT_LEFT_BUFFER_BIT = constant.Constant( 'GLX_FRONT_LEFT_BUFFER_BIT', 1 )
GLX_FRONT_RIGHT_BUFFER_BIT = constant.Constant( 'GLX_FRONT_RIGHT_BUFFER_BIT', 2 )
GLX_BACK_LEFT_BUFFER_BIT = constant.Constant( 'GLX_BACK_LEFT_BUFFER_BIT', 4 )
GLX_BACK_RIGHT_BUFFER_BIT = constant.Constant( 'GLX_BACK_RIGHT_BUFFER_BIT', 8 )
GLX_DEPTH_BUFFER_BIT = constant.Constant( 'GLX_DEPTH_BUFFER_BIT', 32 )
GLX_STENCIL_BUFFER_BIT = constant.Constant( 'GLX_STENCIL_BUFFER_BIT', 64 )
GLX_ACCUM_BUFFER_BIT = constant.Constant( 'GLX_ACCUM_BUFFER_BIT', 128 )
GLX_NONE = constant.Constant( 'GLX_NONE', 32768 )
GLX_SLOW_CONFIG = constant.Constant( 'GLX_SLOW_CONFIG', 32769 )
GLX_TRUE_COLOR = constant.Constant( 'GLX_TRUE_COLOR', 32770 )
GLX_DIRECT_COLOR = constant.Constant( 'GLX_DIRECT_COLOR', 32771 )
GLX_PSEUDO_COLOR = constant.Constant( 'GLX_PSEUDO_COLOR', 32772 )
GLX_STATIC_COLOR = constant.Constant( 'GLX_STATIC_COLOR', 32773 )
GLX_GRAY_SCALE = constant.Constant( 'GLX_GRAY_SCALE', 32774 )
GLX_STATIC_GRAY = constant.Constant( 'GLX_STATIC_GRAY', 32775 )
GLX_TRANSPARENT_RGB = constant.Constant( 'GLX_TRANSPARENT_RGB', 32776 )
GLX_TRANSPARENT_INDEX = constant.Constant( 'GLX_TRANSPARENT_INDEX', 32777 )
GLX_VISUAL_ID = constant.Constant( 'GLX_VISUAL_ID', 32779 )
GLX_SCREEN = constant.Constant( 'GLX_SCREEN', 32780 )
GLX_NON_CONFORMANT_CONFIG = constant.Constant( 'GLX_NON_CONFORMANT_CONFIG', 32781 )
GLX_DRAWABLE_TYPE = constant.Constant( 'GLX_DRAWABLE_TYPE', 32784 )
GLX_RENDER_TYPE = constant.Constant( 'GLX_RENDER_TYPE', 32785 )
GLX_X_RENDERABLE = constant.Constant( 'GLX_X_RENDERABLE', 32786 )
GLX_FBCONFIG_ID = constant.Constant( 'GLX_FBCONFIG_ID', 32787 )
GLX_RGBA_TYPE = constant.Constant( 'GLX_RGBA_TYPE', 32788 )
GLX_COLOR_INDEX_TYPE = constant.Constant( 'GLX_COLOR_INDEX_TYPE', 32789 )
GLX_MAX_PBUFFER_WIDTH = constant.Constant( 'GLX_MAX_PBUFFER_WIDTH', 32790 )
GLX_MAX_PBUFFER_HEIGHT = constant.Constant( 'GLX_MAX_PBUFFER_HEIGHT', 32791 )
GLX_MAX_PBUFFER_PIXELS = constant.Constant( 'GLX_MAX_PBUFFER_PIXELS', 32792 )
GLX_PRESERVED_CONTENTS = constant.Constant( 'GLX_PRESERVED_CONTENTS', 32795 )
GLX_LARGEST_PBUFFER = constant.Constant( 'GLX_LARGEST_PBUFFER', 32796 )
GLX_WIDTH = constant.Constant( 'GLX_WIDTH', 32797 )
GLX_HEIGHT = constant.Constant( 'GLX_HEIGHT', 32798 )
GLX_EVENT_MASK = constant.Constant( 'GLX_EVENT_MASK', 32799 )
GLX_DAMAGED = constant.Constant( 'GLX_DAMAGED', 32800 )
GLX_SAVED = constant.Constant( 'GLX_SAVED', 32801 )
GLX_WINDOW = constant.Constant( 'GLX_WINDOW', 32802 )
GLX_PBUFFER = constant.Constant( 'GLX_PBUFFER', 32803 )
GLX_PBUFFER_HEIGHT = constant.Constant( 'GLX_PBUFFER_HEIGHT', 32832 )
GLX_PBUFFER_WIDTH = constant.Constant( 'GLX_PBUFFER_WIDTH', 32833 )
GLX_RGBA_BIT = constant.Constant( 'GLX_RGBA_BIT', 1 )
GLX_COLOR_INDEX_BIT = constant.Constant( 'GLX_COLOR_INDEX_BIT', 2 )
GLX_PBUFFER_CLOBBER_MASK = constant.Constant( 'GLX_PBUFFER_CLOBBER_MASK', 134217728 )
GLX_SAMPLE_BUFFERS = constant.Constant( 'GLX_SAMPLE_BUFFERS', 100000 )
GLX_SAMPLES = constant.Constant( 'GLX_SAMPLES', 100001 )
class struct___GLXcontextRec(Structure):
__slots__ = [
]
struct___GLXcontextRec._fields_ = [
('_opaque_struct', c_int)
]
class struct___GLXcontextRec(Structure):
__slots__ = [
]
struct___GLXcontextRec._fields_ = [
('_opaque_struct', c_int)
]
GLXContext = POINTER(struct___GLXcontextRec) # /usr/include/GL/glx.h:178
XID = c_ulong # /usr/include/X11/X.h:66
GLXPixmap = XID # /usr/include/GL/glx.h:179
GLXDrawable = XID # /usr/include/GL/glx.h:180
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfig = POINTER(struct___GLXFBConfigRec) # /usr/include/GL/glx.h:182
GLXFBConfigID = XID # /usr/include/GL/glx.h:183
GLXContextID = XID # /usr/include/GL/glx.h:184
GLXWindow = XID # /usr/include/GL/glx.h:185
GLXPbuffer = XID # /usr/include/GL/glx.h:186
GLX_PbufferClobber = constant.Constant( 'GLX_PbufferClobber', 0 )
GLX_BufferSwapComplete = constant.Constant( 'GLX_BufferSwapComplete', 1 )
class struct_anon_103(Structure):
__slots__ = [
'visual',
'visualid',
'screen',
'depth',
'class',
'red_mask',
'green_mask',
'blue_mask',
'colormap_size',
'bits_per_rgb',
]
class struct_anon_18(Structure):
__slots__ = [
'ext_data',
'visualid',
'class',
'red_mask',
'green_mask',
'blue_mask',
'bits_per_rgb',
'map_entries',
]
class struct__XExtData(Structure):
__slots__ = [
'number',
'next',
'free_private',
'private_data',
]
XPointer = c_char_p # /usr/include/X11/Xlib.h:84
struct__XExtData._fields_ = [
('number', c_int),
('next', POINTER(struct__XExtData)),
('free_private', POINTER(CFUNCTYPE(c_int, POINTER(struct__XExtData)))),
('private_data', XPointer),
]
XExtData = struct__XExtData # /usr/include/X11/Xlib.h:163
VisualID = c_ulong # /usr/include/X11/X.h:76
struct_anon_18._fields_ = [
('ext_data', POINTER(XExtData)),
('visualid', VisualID),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('bits_per_rgb', c_int),
('map_entries', c_int),
]
Visual = struct_anon_18 # /usr/include/X11/Xlib.h:246
struct_anon_103._fields_ = [
('visual', POINTER(Visual)),
('visualid', VisualID),
('screen', c_int),
('depth', c_int),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('colormap_size', c_int),
('bits_per_rgb', c_int),
]
XVisualInfo = struct_anon_103 # /usr/include/X11/Xutil.h:294
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
Display = struct__XDisplay # /usr/include/X11/Xlib.h:495
glXChooseVisual = platform.createBaseFunction(
'glXChooseVisual', dll=platform.GL, resultType=POINTER(XVisualInfo),
argTypes=[POINTER(Display), c_int, POINTER(c_int)],
doc='glXChooseVisual( POINTER(Display)(dpy), c_int(screen), POINTER(c_int)(attribList) ) -> POINTER(XVisualInfo)',
argNames=['dpy', 'screen', 'attribList'],
)
glXCreateContext = platform.createBaseFunction(
'glXCreateContext', dll=platform.GL, resultType=GLXContext,
argTypes=[POINTER(Display), POINTER(XVisualInfo), GLXContext, c_int],
doc='glXCreateContext( POINTER(Display)(dpy), POINTER(XVisualInfo)(vis), GLXContext(shareList), c_int(direct) ) -> GLXContext',
argNames=['dpy', 'vis', 'shareList', 'direct'],
)
glXDestroyContext = platform.createBaseFunction(
'glXDestroyContext', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXContext],
doc='glXDestroyContext( POINTER(Display)(dpy), GLXContext(ctx) ) -> None',
argNames=['dpy', 'ctx'],
)
glXMakeCurrent = platform.createBaseFunction(
'glXMakeCurrent', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable, GLXContext],
doc='glXMakeCurrent( POINTER(Display)(dpy), GLXDrawable(drawable), GLXContext(ctx) ) -> c_int',
argNames=['dpy', 'drawable', 'ctx'],
)
glXCopyContext = platform.createBaseFunction(
'glXCopyContext', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXContext, GLXContext, c_ulong],
doc='glXCopyContext( POINTER(Display)(dpy), GLXContext(src), GLXContext(dst), c_ulong(mask) ) -> None',
argNames=['dpy', 'src', 'dst', 'mask'],
)
glXSwapBuffers = platform.createBaseFunction(
'glXSwapBuffers', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXDrawable],
doc='glXSwapBuffers( POINTER(Display)(dpy), GLXDrawable(drawable) ) -> None',
argNames=['dpy', 'drawable'],
)
Pixmap = XID # /usr/include/X11/X.h:102
glXCreateGLXPixmap = platform.createBaseFunction(
'glXCreateGLXPixmap', dll=platform.GL, resultType=GLXPixmap,
argTypes=[POINTER(Display), POINTER(XVisualInfo), Pixmap],
doc='glXCreateGLXPixmap( POINTER(Display)(dpy), POINTER(XVisualInfo)(visual), Pixmap(pixmap) ) -> GLXPixmap',
argNames=['dpy', 'visual', 'pixmap'],
)
glXDestroyGLXPixmap = platform.createBaseFunction(
'glXDestroyGLXPixmap', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXPixmap],
doc='glXDestroyGLXPixmap( POINTER(Display)(dpy), GLXPixmap(pixmap) ) -> None',
argNames=['dpy', 'pixmap'],
)
glXQueryExtension = platform.createBaseFunction(
'glXQueryExtension', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), POINTER(c_int), POINTER(c_int)],
doc='glXQueryExtension( POINTER(Display)(dpy), POINTER(c_int)(errorb), POINTER(c_int)(event) ) -> c_int',
argNames=['dpy', 'errorb', 'event'],
)
glXQueryVersion = platform.createBaseFunction(
'glXQueryVersion', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), POINTER(c_int), POINTER(c_int)],
doc='glXQueryVersion( POINTER(Display)(dpy), POINTER(c_int)(maj), POINTER(c_int)(min) ) -> c_int',
argNames=['dpy', 'maj', 'min'],
)
glXIsDirect = platform.createBaseFunction(
'glXIsDirect', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXContext],
doc='glXIsDirect( POINTER(Display)(dpy), GLXContext(ctx) ) -> c_int',
argNames=['dpy', 'ctx'],
)
glXGetConfig = platform.createBaseFunction(
'glXGetConfig', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), POINTER(XVisualInfo), c_int, POINTER(c_int)],
doc='glXGetConfig( POINTER(Display)(dpy), POINTER(XVisualInfo)(visual), c_int(attrib), POINTER(c_int)(value) ) -> c_int',
argNames=['dpy', 'visual', 'attrib', 'value'],
)
glXGetCurrentContext = platform.createBaseFunction(
'glXGetCurrentContext', dll=platform.GL, resultType=GLXContext,
argTypes=[],
doc='glXGetCurrentContext( ) -> GLXContext',
argNames=[],
)
glXGetCurrentDrawable = platform.createBaseFunction(
'glXGetCurrentDrawable', dll=platform.GL, resultType=GLXDrawable,
argTypes=[],
doc='glXGetCurrentDrawable( ) -> GLXDrawable',
argNames=[],
)
glXWaitGL = platform.createBaseFunction(
'glXWaitGL', dll=platform.GL, resultType=None,
argTypes=[],
doc='glXWaitGL( ) -> None',
argNames=[],
)
glXWaitX = platform.createBaseFunction(
'glXWaitX', dll=platform.GL, resultType=None,
argTypes=[],
doc='glXWaitX( ) -> None',
argNames=[],
)
Font = XID # /usr/include/X11/X.h:100
glXUseXFont = platform.createBaseFunction(
'glXUseXFont', dll=platform.GL, resultType=None,
argTypes=[Font, c_int, c_int, c_int],
doc='glXUseXFont( Font(font), c_int(first), c_int(count), c_int(list) ) -> None',
argNames=['font', 'first', 'count', 'list'],
)
glXQueryExtensionsString = platform.createBaseFunction(
'glXQueryExtensionsString', dll=platform.GL, resultType=c_char_p,
argTypes=[POINTER(Display), c_int],
doc='glXQueryExtensionsString( POINTER(Display)(dpy), c_int(screen) ) -> c_char_p',
argNames=['dpy', 'screen'],
)
glXQueryServerString = platform.createBaseFunction(
'glXQueryServerString', dll=platform.GL, resultType=c_char_p,
argTypes=[POINTER(Display), c_int, c_int],
doc='glXQueryServerString( POINTER(Display)(dpy), c_int(screen), c_int(name) ) -> c_char_p',
argNames=['dpy', 'screen', 'name'],
)
glXGetClientString = platform.createBaseFunction(
'glXGetClientString', dll=platform.GL, resultType=c_char_p,
argTypes=[POINTER(Display), c_int],
doc='glXGetClientString( POINTER(Display)(dpy), c_int(name) ) -> c_char_p',
argNames=['dpy', 'name'],
)
glXGetCurrentDisplay = platform.createBaseFunction(
'glXGetCurrentDisplay', dll=platform.GL, resultType=POINTER(Display),
argTypes=[],
doc='glXGetCurrentDisplay( ) -> POINTER(Display)',
argNames=[],
)
glXChooseFBConfig = platform.createBaseFunction(
'glXChooseFBConfig', dll=platform.GL, resultType=POINTER(GLXFBConfig),
argTypes=[POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)],
doc='glXChooseFBConfig( POINTER(Display)(dpy), c_int(screen), POINTER(c_int)(attribList), POINTER(c_int)(nitems) ) -> POINTER(GLXFBConfig)',
argNames=['dpy', 'screen', 'attribList', 'nitems'],
)
glXGetFBConfigAttrib = platform.createBaseFunction(
'glXGetFBConfigAttrib', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXFBConfig, c_int, POINTER(c_int)],
doc='glXGetFBConfigAttrib( POINTER(Display)(dpy), GLXFBConfig(config), c_int(attribute), POINTER(c_int)(value) ) -> c_int',
argNames=['dpy', 'config', 'attribute', 'value'],
)
glXGetFBConfigs = platform.createBaseFunction(
'glXGetFBConfigs', dll=platform.GL, resultType=POINTER(GLXFBConfig),
argTypes=[POINTER(Display), c_int, POINTER(c_int)],
doc='glXGetFBConfigs( POINTER(Display)(dpy), c_int(screen), POINTER(c_int)(nelements) ) -> POINTER(GLXFBConfig)',
argNames=['dpy', 'screen', 'nelements'],
)
glXGetVisualFromFBConfig = platform.createBaseFunction(
'glXGetVisualFromFBConfig', dll=platform.GL, resultType=POINTER(XVisualInfo),
argTypes=[POINTER(Display), GLXFBConfig],
doc='glXGetVisualFromFBConfig( POINTER(Display)(dpy), GLXFBConfig(config) ) -> POINTER(XVisualInfo)',
argNames=['dpy', 'config'],
)
Window = XID # /usr/include/X11/X.h:96
glXCreateWindow = platform.createBaseFunction(
'glXCreateWindow', dll=platform.GL, resultType=GLXWindow,
argTypes=[POINTER(Display), GLXFBConfig, Window, POINTER(c_int)],
doc='glXCreateWindow( POINTER(Display)(dpy), GLXFBConfig(config), Window(win), POINTER(c_int)(attribList) ) -> GLXWindow',
argNames=['dpy', 'config', 'win', 'attribList'],
)
glXDestroyWindow = platform.createBaseFunction(
'glXDestroyWindow', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXWindow],
doc='glXDestroyWindow( POINTER(Display)(dpy), GLXWindow(window) ) -> None',
argNames=['dpy', 'window'],
)
glXCreatePixmap = platform.createBaseFunction(
'glXCreatePixmap', dll=platform.GL, resultType=GLXPixmap,
argTypes=[POINTER(Display), GLXFBConfig, Pixmap, POINTER(c_int)],
doc='glXCreatePixmap( POINTER(Display)(dpy), GLXFBConfig(config), Pixmap(pixmap), POINTER(c_int)(attribList) ) -> GLXPixmap',
argNames=['dpy', 'config', 'pixmap', 'attribList'],
)
glXDestroyPixmap = platform.createBaseFunction(
'glXDestroyPixmap', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXPixmap],
doc='glXDestroyPixmap( POINTER(Display)(dpy), GLXPixmap(pixmap) ) -> None',
argNames=['dpy', 'pixmap'],
)
glXCreatePbuffer = platform.createBaseFunction(
'glXCreatePbuffer', dll=platform.GL, resultType=GLXPbuffer,
argTypes=[POINTER(Display), GLXFBConfig, POINTER(c_int)],
doc='glXCreatePbuffer( POINTER(Display)(dpy), GLXFBConfig(config), POINTER(c_int)(attribList) ) -> GLXPbuffer',
argNames=['dpy', 'config', 'attribList'],
)
glXDestroyPbuffer = platform.createBaseFunction(
'glXDestroyPbuffer', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXPbuffer],
doc='glXDestroyPbuffer( POINTER(Display)(dpy), GLXPbuffer(pbuf) ) -> None',
argNames=['dpy', 'pbuf'],
)
glXQueryDrawable = platform.createBaseFunction(
'glXQueryDrawable', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXDrawable, c_int, POINTER(c_uint)],
doc='glXQueryDrawable( POINTER(Display)(dpy), GLXDrawable(draw), c_int(attribute), POINTER(c_uint)(value) ) -> None',
argNames=['dpy', 'draw', 'attribute', 'value'],
)
glXCreateNewContext = platform.createBaseFunction(
'glXCreateNewContext', dll=platform.GL, resultType=GLXContext,
argTypes=[POINTER(Display), GLXFBConfig, c_int, GLXContext, c_int],
doc='glXCreateNewContext( POINTER(Display)(dpy), GLXFBConfig(config), c_int(renderType), GLXContext(shareList), c_int(direct) ) -> GLXContext',
argNames=['dpy', 'config', 'renderType', 'shareList', 'direct'],
)
glXMakeContextCurrent = platform.createBaseFunction(
'glXMakeContextCurrent', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable, GLXDrawable, GLXContext],
doc='glXMakeContextCurrent( POINTER(Display)(dpy), GLXDrawable(draw), GLXDrawable(read), GLXContext(ctx) ) -> c_int',
argNames=['dpy', 'draw', 'read', 'ctx'],
)
glXGetCurrentReadDrawable = platform.createBaseFunction(
'glXGetCurrentReadDrawable', dll=platform.GL, resultType=GLXDrawable,
argTypes=[],
doc='glXGetCurrentReadDrawable( ) -> GLXDrawable',
argNames=[],
)
glXQueryContext = platform.createBaseFunction(
'glXQueryContext', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXContext, c_int, POINTER(c_int)],
doc='glXQueryContext( POINTER(Display)(dpy), GLXContext(ctx), c_int(attribute), POINTER(c_int)(value) ) -> c_int',
argNames=['dpy', 'ctx', 'attribute', 'value'],
)
glXSelectEvent = platform.createBaseFunction(
'glXSelectEvent', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXDrawable, c_ulong],
doc='glXSelectEvent( POINTER(Display)(dpy), GLXDrawable(drawable), c_ulong(mask) ) -> None',
argNames=['dpy', 'drawable', 'mask'],
)
glXGetSelectedEvent = platform.createBaseFunction(
'glXGetSelectedEvent', dll=platform.GL, resultType=None,
argTypes=[POINTER(Display), GLXDrawable, POINTER(c_ulong)],
doc='glXGetSelectedEvent( POINTER(Display)(dpy), GLXDrawable(drawable), POINTER(c_ulong)(mask) ) -> None',
argNames=['dpy', 'drawable', 'mask'],
)
# ARB_get_proc_address (/usr/include/GL/glx.h:327)
GLX_ARB_get_proc_address = constant.Constant( 'GLX_ARB_get_proc_address', 1 )
__GLXextFuncPtr = CFUNCTYPE(None) # /usr/include/GL/glx.h:330
GLubyte = c_ubyte # /usr/include/GL/gl.h:162
glXGetProcAddressARB = platform.createBaseFunction(
'glXGetProcAddressARB', dll=platform.GL, resultType=__GLXextFuncPtr,
argTypes=[POINTER(GLubyte)],
doc='glXGetProcAddressARB( POINTER(GLubyte)() ) -> __GLXextFuncPtr',
argNames=[''],
)
glXGetProcAddress = platform.createBaseFunction(
'glXGetProcAddress', dll=platform.GL, resultType=POINTER(CFUNCTYPE(None)),
argTypes=[POINTER(GLubyte)],
doc='glXGetProcAddress( POINTER(GLubyte)(procname) ) -> POINTER(CFUNCTYPE(None))',
argNames=['procname'],
)
# GLXEXT_LEGACY (/usr/include/GL/glx.h:344)
# VERSION_1_3 (/usr/include/GL/glxext.h:55)
# VERSION_1_4 (/usr/include/GL/glxext.h:114)
# ARB_get_proc_address (/usr/include/GL/glxext.h:119)
# ARB_multisample (/usr/include/GL/glxext.h:122)
# ARB_vertex_buffer_object (/usr/include/GL/glxext.h:127)
# ARB_fbconfig_float (/usr/include/GL/glxext.h:131)
# ARB_framebuffer_sRGB (/usr/include/GL/glxext.h:136)
# ARB_create_context (/usr/include/GL/glxext.h:140)
# ARB_create_context_profile (/usr/include/GL/glxext.h:148)
# ARB_create_context_robustness (/usr/include/GL/glxext.h:154)
# SGIS_multisample (/usr/include/GL/glxext.h:161)
# EXT_visual_info (/usr/include/GL/glxext.h:166)
# SGI_swap_control (/usr/include/GL/glxext.h:185)
# SGI_video_sync (/usr/include/GL/glxext.h:188)
# SGI_make_current_read (/usr/include/GL/glxext.h:191)
# SGIX_video_source (/usr/include/GL/glxext.h:194)
# EXT_visual_rating (/usr/include/GL/glxext.h:197)
# EXT_import_context (/usr/include/GL/glxext.h:204)
# SGIX_fbconfig (/usr/include/GL/glxext.h:210)
# SGIX_pbuffer (/usr/include/GL/glxext.h:224)
# SGI_cushion (/usr/include/GL/glxext.h:252)
# SGIX_video_resize (/usr/include/GL/glxext.h:255)
# SGIX_dmbuffer (/usr/include/GL/glxext.h:260)
# SGIX_swap_group (/usr/include/GL/glxext.h:264)
# SGIX_swap_barrier (/usr/include/GL/glxext.h:267)
# SGIS_blended_overlay (/usr/include/GL/glxext.h:270)
# SGIS_shared_multisample (/usr/include/GL/glxext.h:274)
# SUN_get_transparent_index (/usr/include/GL/glxext.h:279)
# 3DFX_multisample (/usr/include/GL/glxext.h:282)
# MESA_copy_sub_buffer (/usr/include/GL/glxext.h:287)
# MESA_pixmap_colormap (/usr/include/GL/glxext.h:290)
# MESA_release_buffers (/usr/include/GL/glxext.h:293)
# MESA_set_3dfx_mode (/usr/include/GL/glxext.h:296)
# SGIX_visual_select_group (/usr/include/GL/glxext.h:301)
# OML_swap_method (/usr/include/GL/glxext.h:305)
# OML_sync_control (/usr/include/GL/glxext.h:312)
# NV_float_buffer (/usr/include/GL/glxext.h:315)
# SGIX_hyperpipe (/usr/include/GL/glxext.h:319)
# MESA_agp_offset (/usr/include/GL/glxext.h:332)
# EXT_fbconfig_packed_float (/usr/include/GL/glxext.h:335)
# EXT_framebuffer_sRGB (/usr/include/GL/glxext.h:340)
# EXT_texture_from_pixmap (/usr/include/GL/glxext.h:344)
# NV_present_video (/usr/include/GL/glxext.h:380)
# NV_video_out (/usr/include/GL/glxext.h:384)
# NV_swap_group (/usr/include/GL/glxext.h:397)
# NV_video_capture (/usr/include/GL/glxext.h:400)
# EXT_swap_control (/usr/include/GL/glxext.h:406)
# NV_copy_image (/usr/include/GL/glxext.h:411)
# INTEL_swap_event (/usr/include/GL/glxext.h:414)
# NV_multisample_coverage (/usr/include/GL/glxext.h:421)
# AMD_gpu_association (/usr/include/GL/glxext.h:426)
# EXT_create_context_es2_profile (/usr/include/GL/glxext.h:439)
# ARB_get_proc_address (/usr/include/GL/glxext.h:446)
# SGIX_video_source (/usr/include/GL/glxext.h:450)
# SGIX_fbconfig (/usr/include/GL/glxext.h:454)
# SGIX_pbuffer (/usr/include/GL/glxext.h:459)
# NV_video_output (/usr/include/GL/glxext.h:476)
# NV_video_capture (/usr/include/GL/glxext.h:480)
# VERSION_1_3 (/usr/include/GL/glxext.h:521)
# VERSION_1_4 (/usr/include/GL/glxext.h:563)
# ARB_get_proc_address (/usr/include/GL/glxext.h:571)
# ARB_multisample (/usr/include/GL/glxext.h:579)
# ARB_fbconfig_float (/usr/include/GL/glxext.h:583)
# ARB_framebuffer_sRGB (/usr/include/GL/glxext.h:587)
# ARB_create_context (/usr/include/GL/glxext.h:591)
# ARB_create_context_profile (/usr/include/GL/glxext.h:599)
# ARB_create_context_robustness (/usr/include/GL/glxext.h:603)
# SGIS_multisample (/usr/include/GL/glxext.h:607)
# EXT_visual_info (/usr/include/GL/glxext.h:611)
# SGI_swap_control (/usr/include/GL/glxext.h:615)
# SGI_video_sync (/usr/include/GL/glxext.h:623)
# SGI_make_current_read (/usr/include/GL/glxext.h:633)
# SGIX_video_source (/usr/include/GL/glxext.h:643)
# EXT_visual_rating (/usr/include/GL/glxext.h:655)
# EXT_import_context (/usr/include/GL/glxext.h:659)
# SGIX_fbconfig (/usr/include/GL/glxext.h:675)
# SGIX_pbuffer (/usr/include/GL/glxext.h:693)
# SGI_cushion (/usr/include/GL/glxext.h:709)
# SGIX_video_resize (/usr/include/GL/glxext.h:717)
# SGIX_dmbuffer (/usr/include/GL/glxext.h:733)
# SGIX_swap_group (/usr/include/GL/glxext.h:743)
# SGIX_swap_barrier (/usr/include/GL/glxext.h:751)
# SUN_get_transparent_index (/usr/include/GL/glxext.h:761)
# MESA_copy_sub_buffer (/usr/include/GL/glxext.h:769)
# MESA_pixmap_colormap (/usr/include/GL/glxext.h:777)
# MESA_release_buffers (/usr/include/GL/glxext.h:785)
# MESA_set_3dfx_mode (/usr/include/GL/glxext.h:793)
# SGIX_visual_select_group (/usr/include/GL/glxext.h:801)
# OML_swap_method (/usr/include/GL/glxext.h:805)
# OML_sync_control (/usr/include/GL/glxext.h:809)
# NV_float_buffer (/usr/include/GL/glxext.h:825)
# SGIX_hyperpipe (/usr/include/GL/glxext.h:829)
# MESA_agp_offset (/usr/include/GL/glxext.h:876)
# EXT_fbconfig_packed_float (/usr/include/GL/glxext.h:884)
# EXT_framebuffer_sRGB (/usr/include/GL/glxext.h:888)
# EXT_texture_from_pixmap (/usr/include/GL/glxext.h:892)
# NV_present_video (/usr/include/GL/glxext.h:902)
# NV_video_output (/usr/include/GL/glxext.h:912)
# NV_swap_group (/usr/include/GL/glxext.h:930)
# NV_video_capture (/usr/include/GL/glxext.h:948)
# EXT_swap_control (/usr/include/GL/glxext.h:964)
# NV_copy_image (/usr/include/GL/glxext.h:972)
# INTEL_swap_event (/usr/include/GL/glxext.h:980)
# NV_multisample_coverage (/usr/include/GL/glxext.h:984)
# NV_vertex_array_range (/usr/include/GL/glx.h:359)
GLsizei = c_int # /usr/include/GL/gl.h:165
GLfloat = c_float # /usr/include/GL/gl.h:166
glXAllocateMemoryNV = platform.createBaseFunction(
'glXAllocateMemoryNV', dll=platform.GL, resultType=POINTER(c_void),
argTypes=[GLsizei, GLfloat, GLfloat, GLfloat],
doc='glXAllocateMemoryNV( GLsizei(size), GLfloat(readfreq), GLfloat(writefreq), GLfloat(priority) ) -> POINTER(c_void)',
argNames=['size', 'readfreq', 'writefreq', 'priority'],
)
GLvoid = None # /usr/include/GL/gl.h:158
glXFreeMemoryNV = platform.createBaseFunction(
'glXFreeMemoryNV', dll=platform.GL, resultType=None,
argTypes=[POINTER(GLvoid)],
doc='glXFreeMemoryNV( POINTER(GLvoid)(pointer) ) -> None',
argNames=['pointer'],
)
# ARB_render_texture (/usr/include/GL/glx.h:374)
GLX_ARB_render_texture = constant.Constant( 'GLX_ARB_render_texture', 1 )
glXBindTexImageARB = platform.createBaseFunction(
'glXBindTexImageARB', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXPbuffer, c_int],
doc='glXBindTexImageARB( POINTER(Display)(dpy), GLXPbuffer(pbuffer), c_int(buffer) ) -> c_int',
argNames=['dpy', 'pbuffer', 'buffer'],
)
glXReleaseTexImageARB = platform.createBaseFunction(
'glXReleaseTexImageARB', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXPbuffer, c_int],
doc='glXReleaseTexImageARB( POINTER(Display)(dpy), GLXPbuffer(pbuffer), c_int(buffer) ) -> c_int',
argNames=['dpy', 'pbuffer', 'buffer'],
)
glXDrawableAttribARB = platform.createBaseFunction(
'glXDrawableAttribARB', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable, POINTER(c_int)],
doc='glXDrawableAttribARB( POINTER(Display)(dpy), GLXDrawable(draw), POINTER(c_int)(attribList) ) -> c_int',
argNames=['dpy', 'draw', 'attribList'],
)
# NV_float_buffer (/usr/include/GL/glx.h:387)
# MESA_swap_frame_usage (/usr/include/GL/glx.h:399)
GLX_MESA_swap_frame_usage = constant.Constant( 'GLX_MESA_swap_frame_usage', 1 )
glXGetFrameUsageMESA = platform.createBaseFunction(
'glXGetFrameUsageMESA', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable, POINTER(c_float)],
doc='glXGetFrameUsageMESA( POINTER(Display)(dpy), GLXDrawable(drawable), POINTER(c_float)(usage) ) -> c_int',
argNames=['dpy', 'drawable', 'usage'],
)
glXBeginFrameTrackingMESA = platform.createBaseFunction(
'glXBeginFrameTrackingMESA', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable],
doc='glXBeginFrameTrackingMESA( POINTER(Display)(dpy), GLXDrawable(drawable) ) -> c_int',
argNames=['dpy', 'drawable'],
)
glXEndFrameTrackingMESA = platform.createBaseFunction(
'glXEndFrameTrackingMESA', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable],
doc='glXEndFrameTrackingMESA( POINTER(Display)(dpy), GLXDrawable(drawable) ) -> c_int',
argNames=['dpy', 'drawable'],
)
glXQueryFrameTrackingMESA = platform.createBaseFunction(
'glXQueryFrameTrackingMESA', dll=platform.GL, resultType=c_int,
argTypes=[POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_float)],
doc='glXQueryFrameTrackingMESA( POINTER(Display)(dpy), GLXDrawable(drawable), POINTER(c_int64)(swapCount), POINTER(c_int64)(missedFrames), POINTER(c_float)(lastMissedUsage) ) -> c_int',
argNames=['dpy', 'drawable', 'swapCount', 'missedFrames', 'lastMissedUsage'],
)
# MESA_swap_control (/usr/include/GL/glx.h:419)
GLX_MESA_swap_control = constant.Constant( 'GLX_MESA_swap_control', 1 )
glXSwapIntervalMESA = platform.createBaseFunction(
'glXSwapIntervalMESA', dll=platform.GL, resultType=c_int,
argTypes=[c_uint],
doc='glXSwapIntervalMESA( c_uint(interval) ) -> c_int',
argNames=['interval'],
)
glXGetSwapIntervalMESA = platform.createBaseFunction(
'glXGetSwapIntervalMESA', dll=platform.GL, resultType=c_int,
argTypes=[],
doc='glXGetSwapIntervalMESA( ) -> c_int',
argNames=[],
)
# EXT_texture_from_pixmap (/usr/include/GL/glx.h:436)
class struct_anon_111(Structure):
__slots__ = [
'event_type',
'draw_type',
'serial',
'send_event',
'display',
'drawable',
'buffer_mask',
'aux_buffer',
'x',
'y',
'width',
'height',
'count',
]
struct_anon_111._fields_ = [
('event_type', c_int),
('draw_type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('buffer_mask', c_uint),
('aux_buffer', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXPbufferClobberEvent = struct_anon_111 # /usr/include/GL/glx.h:502
class struct_anon_112(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'ust',
'msc',
'sbc',
]
struct_anon_112._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('ust', c_int64),
('msc', c_int64),
('sbc', c_int64),
]
GLXBufferSwapComplete = struct_anon_112 # /usr/include/GL/glx.h:514
class struct___GLXEvent(Union):
__slots__ = [
'glxpbufferclobber',
'glxbufferswapcomplete',
'pad',
]
struct___GLXEvent._fields_ = [
('glxpbufferclobber', GLXPbufferClobberEvent),
('glxbufferswapcomplete', GLXBufferSwapComplete),
('pad', c_long * 24),
]
GLXEvent = struct___GLXEvent # /usr/include/GL/glx.h:520
__all__ = ['GLX_VERSION_1_1', 'GLX_VERSION_1_2', 'GLX_VERSION_1_3',
'GLX_VERSION_1_4', 'GLX_USE_GL', 'GLX_BUFFER_SIZE', 'GLX_LEVEL', 'GLX_RGBA',
'GLX_DOUBLEBUFFER', 'GLX_STEREO', 'GLX_AUX_BUFFERS', 'GLX_RED_SIZE',
'GLX_GREEN_SIZE', 'GLX_BLUE_SIZE', 'GLX_ALPHA_SIZE', 'GLX_DEPTH_SIZE',
'GLX_STENCIL_SIZE', 'GLX_ACCUM_RED_SIZE', 'GLX_ACCUM_GREEN_SIZE',
'GLX_ACCUM_BLUE_SIZE', 'GLX_ACCUM_ALPHA_SIZE', 'GLX_BAD_SCREEN',
'GLX_BAD_ATTRIBUTE', 'GLX_NO_EXTENSION', 'GLX_BAD_VISUAL', 'GLX_BAD_CONTEXT',
'GLX_BAD_VALUE', 'GLX_BAD_ENUM', 'GLX_VENDOR', 'GLX_VERSION',
'GLX_EXTENSIONS', 'GLX_CONFIG_CAVEAT', 'GLX_DONT_CARE', 'GLX_X_VISUAL_TYPE',
'GLX_TRANSPARENT_TYPE', 'GLX_TRANSPARENT_INDEX_VALUE',
'GLX_TRANSPARENT_RED_VALUE', 'GLX_TRANSPARENT_GREEN_VALUE',
'GLX_TRANSPARENT_BLUE_VALUE', 'GLX_TRANSPARENT_ALPHA_VALUE', 'GLX_WINDOW_BIT',
'GLX_PIXMAP_BIT', 'GLX_PBUFFER_BIT', 'GLX_AUX_BUFFERS_BIT',
'GLX_FRONT_LEFT_BUFFER_BIT', 'GLX_FRONT_RIGHT_BUFFER_BIT',
'GLX_BACK_LEFT_BUFFER_BIT', 'GLX_BACK_RIGHT_BUFFER_BIT',
'GLX_DEPTH_BUFFER_BIT', 'GLX_STENCIL_BUFFER_BIT', 'GLX_ACCUM_BUFFER_BIT',
'GLX_NONE', 'GLX_SLOW_CONFIG', 'GLX_TRUE_COLOR', 'GLX_DIRECT_COLOR',
'GLX_PSEUDO_COLOR', 'GLX_STATIC_COLOR', 'GLX_GRAY_SCALE', 'GLX_STATIC_GRAY',
'GLX_TRANSPARENT_RGB', 'GLX_TRANSPARENT_INDEX', 'GLX_VISUAL_ID', 'GLX_SCREEN',
'GLX_NON_CONFORMANT_CONFIG', 'GLX_DRAWABLE_TYPE', 'GLX_RENDER_TYPE',
'GLX_X_RENDERABLE', 'GLX_FBCONFIG_ID', 'GLX_RGBA_TYPE',
'GLX_COLOR_INDEX_TYPE', 'GLX_MAX_PBUFFER_WIDTH', 'GLX_MAX_PBUFFER_HEIGHT',
'GLX_MAX_PBUFFER_PIXELS', 'GLX_PRESERVED_CONTENTS', 'GLX_LARGEST_PBUFFER',
'GLX_WIDTH', 'GLX_HEIGHT', 'GLX_EVENT_MASK', 'GLX_DAMAGED', 'GLX_SAVED',
'GLX_WINDOW', 'GLX_PBUFFER', 'GLX_PBUFFER_HEIGHT', 'GLX_PBUFFER_WIDTH',
'GLX_RGBA_BIT', 'GLX_COLOR_INDEX_BIT', 'GLX_PBUFFER_CLOBBER_MASK',
'GLX_SAMPLE_BUFFERS', 'GLX_SAMPLES', 'GLXContext', 'GLXPixmap', 'GLXDrawable',
'GLXFBConfig', 'GLXFBConfigID', 'GLXContextID', 'GLXWindow', 'GLXPbuffer',
'GLX_PbufferClobber', 'GLX_BufferSwapComplete', 'glXChooseVisual',
'glXCreateContext', 'glXDestroyContext', 'glXMakeCurrent', 'glXCopyContext',
'glXSwapBuffers', 'glXCreateGLXPixmap', 'glXDestroyGLXPixmap',
'glXQueryExtension', 'glXQueryVersion', 'glXIsDirect', 'glXGetConfig',
'glXGetCurrentContext', 'glXGetCurrentDrawable', 'glXWaitGL', 'glXWaitX',
'glXUseXFont', 'glXQueryExtensionsString', 'glXQueryServerString',
'glXGetClientString', 'glXGetCurrentDisplay', 'glXChooseFBConfig',
'glXGetFBConfigAttrib', 'glXGetFBConfigs', 'glXGetVisualFromFBConfig',
'glXCreateWindow', 'glXDestroyWindow', 'glXCreatePixmap', 'glXDestroyPixmap',
'glXCreatePbuffer', 'glXDestroyPbuffer', 'glXQueryDrawable',
'glXCreateNewContext', 'glXMakeContextCurrent', 'glXGetCurrentReadDrawable',
'glXQueryContext', 'glXSelectEvent', 'glXGetSelectedEvent',
'GLX_ARB_get_proc_address', 'glXGetProcAddressARB', 'glXGetProcAddress',
'glXAllocateMemoryNV', 'glXFreeMemoryNV', 'GLX_ARB_render_texture',
'glXBindTexImageARB', 'glXReleaseTexImageARB', 'glXDrawableAttribARB',
'GLX_MESA_swap_frame_usage', 'glXGetFrameUsageMESA',
'glXBeginFrameTrackingMESA', 'glXEndFrameTrackingMESA',
'glXQueryFrameTrackingMESA', 'GLX_MESA_swap_control', 'glXSwapIntervalMESA',
'glXGetSwapIntervalMESA', 'GLXPbufferClobberEvent', 'GLXBufferSwapComplete',
'GLXEvent']
# END GENERATED CONTENT (do not edit above this line)
| 43.576456 | 189 | 0.734286 | [
"BSD-2-Clause"
] | frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/raw/_GLX.py | 35,907 | Python |
#!/usr/bin/python3
import gameinstance
from colour import Colour
from constants import Constants
from vec2 import vec2
from misc import Fade
import sdl2
import hud
class Menu(gameinstance.GameInstance):
"""Game menu representation."""
# Variables to control menu background.
backgrounds = []
current_bg = 0
bg_offset_at_start = None #250
bg_offset = None #bg_offset_at_start
bg_dimness_peak = None #0x88
bg_dimness_current = None #0xff
bg_diminish_rate = None #0.33
def __init__(self, renderer, highscores=None):
# Background stuff.
Menu.bg_offset_at_start = 250
Menu.bg_offset = Menu.bg_offset_at_start
Menu.bg_dimness_peak = 0x88
Menu.bg_dimness_current = 0xff
Menu.bg_diminish_rate = 0.33
		self.choice = None
		self.is_open = True
		# Flag checked in render(); self.fader (a misc.Fade instance) is assumed to be
		# assigned before fading actually starts.
		self.fading = False
self.title = hud.Text('pyNoid', renderer, Constants.TITLE_FONT_SIZE)
self.title.position = vec2(50, 50)
self.credits = hud.Text('Kacper Tonia 2017/18', renderer, Constants.TINY_FONT_SIZE)
self.credits.position = self.title.position + vec2(self.title.size[0]//2, self.title.size[1])
grey = Colour.greyscale(0.75)
sub1 = hud.Button.buildClickableText('New Game', renderer,
Colour.White, grey, grey, Constants.MENU_FONT_SIZE
)
sub2 = hud.Button.buildClickableText('Exit', renderer,
Colour.White, grey, grey, Constants.MENU_FONT_SIZE
)
self.menu = hud.VerticalContainer([sub1, sub2], Constants.WINDOW_SIZE.y//2)
if highscores:
leaderboard = []
player_name_length = max([len(x[0]) for x in highscores])
score_length = max([len(str(x[1])) for x in highscores])
s_format = '{:>%d} {}{}' % player_name_length
for idx,item in enumerate(highscores):
leaderboard.append( hud.Text(s_format.format(
item[0], item[1], ' '*(score_length-len(str(item[1])))),
renderer, Constants.FONT_SIZE_1,
Colour.greyscale((5-idx) / 5.0 ))
)
self.render_content = hud.VerticalContainer(leaderboard, Constants.WINDOW_SIZE.y*3//4)
else:
self.render_content = []
def update(self):
"""Update game state."""
Menu.bg_offset += 2
if Menu.bg_offset > 0.85 * Constants.WINDOW_SIZE.x:
Menu.bg_dimness_current += Menu.bg_diminish_rate
if Menu.bg_dimness_current >= 0xff:
Menu.current_bg = (Menu.current_bg + 1) % len(Menu.backgrounds)
Menu.bg_offset = Menu.bg_offset_at_start
Menu.bg_dimness_current = 0xff
elif Menu.bg_dimness_current > Menu.bg_dimness_peak:
Menu.bg_dimness_current -= Menu.bg_diminish_rate
def handleEvent(self, e):
"""Process relevant events."""
for i in self.menu.elem:
i.handleEvent(e)
if self.menu.elem[0].isPressed():
self.fading = True
elif self.menu.elem[1].isPressed():
self.is_open = False
def render(self, renderer):
"""Render scene."""
rect = (Constants.WINDOW_SIZE.x - Menu.bg_offset, 0, *Constants.WINDOW_SIZE)
renderer.copy(Menu.backgrounds[Menu.current_bg], None, rect)
renderer.fill((0, 0, Constants.WINDOW_SIZE.x, Constants.WINDOW_SIZE.y), (0, 0, 0, Menu.bg_dimness_current))
self.title.render(renderer)
self.credits.render(renderer)
self.menu.render(renderer)
if self.render_content:
self.render_content.render(renderer)
if self.fading:
self.fader.draw(renderer)
if self.fader.finished():
self.fading = False
self.fader.reset()
self.choice = 0
def isOpen(self):
"""Returns False if GameInstance should be no longer active."""
return self.is_open
def typeOf(self):
return 'Menu' | 31.145455 | 109 | 0.718622 | [
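# Illustrative sketch (not part of the original module): the per-frame contract a
# caller is expected to follow. 'renderer' is whatever object hud.Text/Button and
# render() accept in this project, and Menu.backgrounds is assumed to have been
# populated with textures elsewhere before render() is called.
def _example_menu_loop(renderer, highscores=None):
	import sdl2.ext
	menu = Menu(renderer, highscores)
	while menu.isOpen():
		for event in sdl2.ext.get_events():
			menu.handleEvent(event)
		menu.update()
		menu.render(renderer)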
"MIT"
] | marax27/pyNoid | menu.py | 3,426 | Python |
from .config import *
import MySQLdb
class DBReader:
def __init__(self):
self.conn = None
self.db = None
# Initalize the connection
self.db = MySQLdb.connect(user=DB_USER, passwd=DB_PASS, db=DB_NAME, host=DB_HOST,
port=DB_PORT)
if self.db == None:
raise Exception("DBLogger: Unable to connect to database.")
self.conn = self.db.cursor()
def close(self):
if self.conn != None:
self.conn.close()
self.conn = None
def query_one(self, table, columns, where):
sql = "SELECT %s FROM %s WHERE %s LIMIT 1" % (columns, table, where)
self.conn.execute(sql)
if self.conn.rowcount == 1: return self.conn.fetchone()
else: return None
def query(self, sql):
self.conn.execute(sql)
if self.conn.rowcount >= 1:
row = self.conn.fetchone()
while row != None:
yield row
row = self.conn.fetchone()
else: return
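# Illustrative sketch (not part of the original module): pulling rows back out
# with DBReader. Table and column names match the queries used elsewhere in this
# file; the WHERE clause and SQL below are placeholders for a real database.
def _example_dbreader_usage():
	reader = DBReader()
	# Fetch a single device row
	device = reader.query_one("devices", "dev_id, short_addr, pan_id", "pan_id IS NOT NULL")
	print(device)
	# Iterate over captured packets on channel 15
	for row in reader.query("SELECT id, channel, rssi FROM packets WHERE channel = 15"):
		print(row)
	reader.close()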
class DBLogger:
def __init__(self, datasource=None, channel=None, page=0):
self.conn = None
if datasource == None: #datasource must be provided if DBLogger is desired
return None
self.db = None
self.channel = channel
self.page = page
# Initalize the connection
		try:
			self.db = MySQLdb.connect(user=DB_USER, passwd=DB_PASS, db=DB_NAME, host=DB_HOST, port=DB_PORT)
		except Exception as e:
			raise Exception(("DBLogger was unable to connect to the database "
				"(note: connection values should be in config.py): %s") % e)
if self.db == None: #this backup check may be redundant
raise Exception("DBLogger: Unable to connect to database.")
self.conn = self.db.cursor()
# Set the ds_id attribute to correspond to the requested data source name
self.conn.execute("SELECT ds_id FROM datasources WHERE ds_name LIKE %s LIMIT 1", (datasource,))
if self.conn.rowcount == 1: self.ds_id = self.conn.fetchone()
else: print("No datasource found matching name:", datasource)
def close(self):
if self.conn != None:
self.conn.close()
self.conn = None
def set_channel(self, chan, page):
self.channel = chan
self.page = page
def add_packet(self, full=None, scapy=None,
bytes=None, rssi=None, location=None, datetime=None, channel=None, page=0):
if (self.conn==None): raise Exception("DBLogger requires active connection status.")
		# Use values in 'full' parameter to provide data for undefined other parameters
		if full == None: full = {}  # guard so the 'in' checks below work when no full record is given
		if bytes == None and 'bytes' in full: bytes = full['bytes']
if rssi == None and 'rssi' in full: rssi = full['rssi']
if datetime == None and 'datetime' in full: datetime = full['datetime']
if location == None and 'location' in full: location = full['location']
# Get the location ID, or create one, if GPS data is available
loc_id = self.add_location(location) if location is not None else None
# Dissect the packet's bytes, using the Scapy'd version in parameter scapy if provided
if scapy == None:
# Import Scapy extensions
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import Dot15d4
scapy = Dot15d4(bytes)
#from kbutils import hexdump
#scapy.show2()
# This try/except logic is dumb, but Scapy will just throw an exception if the field doesn't exist
try: srcaddr = scapy.src_addr
except: srcaddr = None
try: srcpan = scapy.src_panid
except: srcpan = None
srcdevid = self.add_device(srcaddr, srcpan)
try: destaddr = scapy.dest_addr
except: destaddr = None
try: destpanid = scapy.dest_panid
except: destpanid = None
destdevid = self.add_device(destaddr, destpanid)
sql = []
sql.append("ds_id=%d" % self.ds_id)
sql.append("db_datetime=NOW()")
if datetime != None: sql.append("cap_datetime='%s'" % str(datetime))
if self.channel != None: sql.append("channel=%d" % self.channel)
if self.page: sql.append("page=%d" % self.page)
if srcdevid != None: sql.append("source=%d" % srcdevid)
if destdevid != None: sql.append("dest=%d" % destdevid)
if rssi != None: sql.append("rssi=%d" % rssi)
if loc_id != None: sql.append("loc_id=%d" % loc_id)
if channel != None: sql.append("channel=%d" % channel) # TODO: bug? why is this in here twice?
if page: sql.append("page=%d" % page) # TODO: bug?
sql.append("fcf_panidcompress=%d" % scapy.fcf_panidcompress)
sql.append("fcf_ackreq=%d" % scapy.fcf_ackreq)
sql.append("fcf_pending=%d" % scapy.fcf_pending)
sql.append("fcf_security=%d" % scapy.fcf_security)
sql.append("fcf_frametype=%d" % scapy.fcf_frametype)
sql.append("fcf_srcaddrmode=%d" % scapy.fcf_srcaddrmode)
sql.append("fcf_framever=%d" % scapy.fcf_framever)
sql.append("fcf_destaddrmode=%d" % scapy.fcf_destaddrmode)
sql.append("seqnum=%d" % scapy.seqnum)
# adding the bytes of the packet are handled in the insert method b/c they are binary
return self.insert(' '.join(['INSERT INTO packets SET', ', '.join(sql)]), packetbytes=bytes)
def add_location(self, location):
if (self.conn==None): raise Exception("DBLogger requires active connection status.")
(lon, lat, alt) = location
self.conn.execute("SELECT loc_id FROM locations WHERE %s AND %s AND %s LIMIT 1" % \
( ("longitude = '%f'" % lon) if lon != None else "longitude IS NULL" , \
("latitude = '%f'" % lat) if lat != None else "latitude IS NULL" , \
("elevation = '%f'" % alt) if alt != None else "elevation IS NULL" ))
res = self.conn.fetchone()
if (res != None):
return res #location already in db, return loc_id
else:
self.conn.execute("INSERT INTO locations SET %s, %s, %s" % \
( ("longitude = '%f'" % lon) if lon != None else "longitude = NULL" , \
("latitude = '%f'" % lat) if lat != None else "latitude = NULL" , \
("elevation = '%f'" % alt) if alt != None else "elevation = NULL" ))
if self.conn.rowcount != 1: raise Exception("Location insert did not succeed.")
self.db.commit()
return self.conn.lastrowid
def add_device(self, shortaddr, panid):
if (self.conn==None): raise Exception("DBLogger requires active connection status.")
self.conn.execute("SELECT dev_id FROM devices WHERE %s AND %s LIMIT 1" % \
( ("short_addr = '%04x'" % shortaddr) if shortaddr != None else "short_addr IS NULL" , \
("pan_id = '%04x'" % panid) if panid != None else "pan_id IS NULL" ))
res = self.conn.fetchone()
if (res != None):
return res #device already exists
else:
self.conn.execute("INSERT INTO devices SET %s, %s" % \
(("short_addr = '%04x'" % shortaddr) if shortaddr != None else "short_addr = NULL" , \
("pan_id = '%04x'" % panid) if panid != None else "pan_id = NULL" ))
if self.conn.rowcount != 1: raise Exception("Device insert did not succeed.")
self.db.commit()
return self.conn.lastrowid
def insert(self, sql, packetbytes=None):
params = None
if packetbytes != None:
sql = sql + ", packetbytes=%s"
params = (MySQLdb.Binary(packetbytes), )
self.conn.execute(sql, params)
if self.conn.rowcount != 1: raise Exception("DBLogger: Insert did not succeed.")
self.db.commit()
return self.conn.lastrowid
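# Illustrative sketch (not part of the original module): a minimal DBLogger
# session. The datasource name must already exist in the datasources table, and
# the packet bytes/rssi/location/datetime values here are placeholders.
def _example_dblogger_usage(raw_packet_bytes):
	logger = DBLogger(datasource="example capture", channel=15, page=0)
	logger.add_packet(bytes=raw_packet_bytes, rssi=-60,
		location=(-80.42, 37.23, 600.0), datetime="2018-01-01 12:00:00")
	logger.close()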
| 47.508671 | 116 | 0.576956 | [
"BSD-3-Clause"
] | dmnugu4755642434/killerbee | killerbee/dblog.py | 8,219 | Python |
import asyncio
from pyrogram.types import Message
from tronx import app
from tronx.helpers import (
gen,
)
app.CMD_HELP.update(
{"spam" : (
"spam",
{
"spam [number] [text]" : "You Know The Use Of This Command.",
"dspam [delay] [count] [msg]" : "Delay spam use it to spam with a delay between spamming msg."
}
)
}
)
@app.on_message(gen("spam", allow = ["sudo"]))
async def spam(_, m: Message):
replied = m.reply_to_message
reply_to_id = replied.message_id if replied else ""
if not replied and len(m.command) > 1:
await m.delete()
times = m.command[1]
to_spam = " ".join(m.command[2:])
if m.chat.type in ["supergroup", "group"]:
for _ in range(int(times)):
await app.send_message(
m.chat.id,
to_spam,
reply_to_message_id=reply_to_id
)
await asyncio.sleep(0.20)
elif m.chat.type == "private":
await m.delete()
for _ in range(int(times)):
await app.send_message(
m.chat.id,
to_spam
)
await asyncio.sleep(0.20)
elif replied and len(m.command) > 1:
await m.delete()
times = m.command[1]
print(f"{times} messages will be sent")
cont = m.reply_to_message.message_id
if m.chat.type in ["supergroup", "group", "private"]:
for x in range(int(times)):
await app.copy_message(
m.chat.id,
m.chat.id,
m.reply_to_message.message_id
)
else:
return
@app.on_message(gen("dspam", allow = ["sudo"]))
async def delay_spam(_, m: Message):
if len(m.command) > 2 and not m.reply_to_message:
await m.delete()
msg = m.text.split(None, 3)
sec = int(msg[1])
times = int(msg[2])
text = msg[3]
for x in range(times):
await app.send_message(
m.chat.id,
text
)
await asyncio.sleep(sec)
else:
await app.send_edit(m,"Something wrong in spam command !")
| 20.976744 | 96 | 0.636364 | [
"MIT"
] | JayPatel1314/Tron | tronx/modules/spam.py | 1,804 | Python |
from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np
import geocoder
import streamlink
#import mysql.connector as con
#mydb = con.connect(
# host="localhost",
# user="yourusername",
# passwd="yourpassword",
# database="mydatabase"
#)
#mycursor = mydb.cursor()
g = geocoder.ip('me')
# parameters for loading data and images
detection_model_path = 'C:\\Users\\rajiyer\\Documents\\Test Data\\Sentiment Analysis\\Emotion-recognition-master\\haarcascade_files\\haarcascade_frontalface_default.xml'
emotion_model_path = 'C:\\Users\\rajiyer\\Documents\\Test Data\\Sentiment Analysis\\Emotion-recognition-master\\models\\_mini_XCEPTION.102-0.66.hdf5'
# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised",
"neutral"]
#feelings_faces = []
#for index, emotion in enumerate(EMOTIONS):
# feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))
# starting video streaming
url = 'https://youtu.be/Bchx0mS7XOY'
streams = streamlink.streams(url)
cv2.namedWindow('Live Footage')
camera = cv2.VideoCapture(streams["360p"].url)
f= open("C:\\Users\\rajiyer\\Documents\\Test Data\\Probability.txt","a+")
while True:
frame = camera.read()[1]
#reading the frame
frame = imutils.resize(frame,width=300)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detection.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
canvas = np.zeros((250, 300, 3), dtype="uint8")
frameClone = frame.copy()
if len(faces) > 0:
faces = sorted(faces, reverse=True,
key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
		# Extract the ROI of the face from the grayscale image, resize it to a fixed 64x64 pixels, and then prepare
		# the ROI for classification via the CNN
roi = gray[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (64, 64))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds = emotion_classifier.predict(roi)[0]
emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
else: continue
for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
# construct the label text
text = "{}: {:.2f}%".format(emotion, prob * 100)
#sql = "INSERT INTO predData (Metadata, Probability) VALUES (%s, %s)"
#val = ("Meta", prob * 100)
f.write(text)
str1 = ''.join(str(e) for e in g.latlng)
f.write(str1)
#mycursor.execute(sql, val)
#mydb.commit()
# draw the label + probability bar on the canvas
# emoji_face = feelings_faces[np.argmax(preds)]
w = int(prob * 300)
cv2.rectangle(canvas, (7, (i * 35) + 5),
(w, (i * 35) + 35), (0, 0, 255), -1)
cv2.putText(canvas, text, (10, (i * 35) + 23),
cv2.FONT_HERSHEY_SIMPLEX, 0.45,
(255, 255, 255), 2)
cv2.putText(frameClone, label, (fX, fY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
(0, 0, 255), 2)
# for c in range(0, 3):
# frame[200:320, 10:130, c] = emoji_face[:, :, c] * \
# (emoji_face[:, :, 3] / 255.0) + frame[200:320,
# 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
cv2.imshow('your_face', frameClone)
cv2.imshow("Probabilities", canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
f.close()
| 36.405405 | 169 | 0.600346 | [
"BSD-3-Clause"
] | ActuarialIntelligence/Base | src/ActuarialIntelligence.Infrastructure.PythonScripts/StreamFootageAnalyse.py | 4,041 | Python |
from collections import namedtuple
from datetime import datetime
import pprint
import pickle
import json
import sqlite3
BUILDINGS = ['AGNEW', 'LARNA', 'AJ E', 'AJPAV', 'AQUAC', 'AA', 'ADRF', 'ARMRY', 'ART C', 'BEEF', 'BFH', 'BRNCH', 'PRC', 'BURCH', 'BUR', 'CAM', 'CARNA', 'CAM M', 'CAPRI', 'CSB', 'LIBR', 'COL', 'CMMID', 'CSSER', 'CHEMP', 'COLSQ', 'COLS2', 'CEC', 'CO', 'CIC', 'DAIRY', 'DAV', 'DER', 'DTRIK', 'DBHCC', 'DB', 'DTNA', 'DURHM', 'EGG E', 'ENGEL', 'FEM', 'FST', 'FRALN', 'FSBRN', 'GBJ', 'GOODW', 'GLCDB', 'GH', 'HAHN N', 'HAHN S', 'HARP', 'HEND', 'HILL', 'HOLD', 'HORSE', 'HABB1', 'HUTCH', 'CRCIA', 'TC', 'ICTAS', 'ILSB', 'HAN', 'JCH', 'KENT', 'KAMF', 'LANE', 'STAD', 'LATH', 'WLH', 'LEE', 'LARTS', 'LFSCI', 'LITRV', 'LYRIC', 'MAJWM', 'EMPOR', 'MCB', 'MCCOM', 'MRYMN', 'MIL', 'MAC', 'NCB', 'NEB', 'NHW', 'NEW', 'NOR', 'GRNDP', 'SEC', 'OWENS', 'PK', 'PAM', 'PAT', 'PAYNE', 'PY', 'PAB', 'PWCOM', 'PRICE', 'P HSE', 'PRINT', 'PRT E', 'PSC', 'RAND', 'RCTR1', 'ROB', 'SANDY', 'SARDO', 'SAUND', 'SEITZ', 'SHANK', 'SHEEP', 'SHULT', 'SEB', 'SL TW', 'SLUSH', 'SM CC', 'SMYTH', 'SOL', 'SQUIR', 'SURGE', 'SWINE', 'HAHN', 'TESKE', 'T101', 'CPAP', 'TORG', 'UPEST', 'VTC', 'NOVAC', 'NVC', 'VMIA', 'VM 1', 'VM 2', 'VM 3', 'VM4C1', 'WAL', 'GYM', 'WHIT', 'WMS', 'BROOK', 'BFPC']
BUILDING_NAMES = ['Agnew Hall', 'Alphin-Stuart Arena', 'Ambler Johnston East', 'Animal Judging Pavilion', 'Aquaculture Facility', 'Architectural Annex', 'Architecture Demo & Res Fac', 'Armory (Art)', 'Art & Design Learning Center', 'Beef Barn', 'Bishop-Favrao Hall', 'Branch Building', 'Brooder House', 'Burchard Hall', 'Burruss Hall', 'Campbell Arena', 'Campbell Arena', 'Campbell Main', 'Capri Building', 'Career Services Building', 'Carol M. Newman Library', 'Cassell Coliseum', 'Center Molecular Med Infec Dis', 'Center for Space Sci & Engr Re', 'Chemistry Physics Building', 'Collegiate Square', 'Collegiate Square Two', 'Continuing Education Center', 'Cowgill Hall', 'Cranwell Int\'l Center', 'Dairy Barn', 'Davidson Hall', 'Derring Hall', 'Dietrick Hall', 'Donaldson Brown Hotel & Conf', 'Donaldson-Brown Hall', 'Downtown North', 'Durham Hall', 'Eggleston East', 'Engel Hall', 'Femoyer Hall', 'Food Science & Technology Lab', 'Fralin Biotechnology Center', 'Free Stall Barn', 'G. Burke Johnston Student Ctr', 'Goodwin Hall', 'Graduate Life Ctr Dnldsn Brown', 'Greenhouse', 'Hahn Hall North Wing', 'Hahn Hall South Wing', 'Harper', 'Henderson Hall', 'Hillcrest', 'Holden Hall', 'Horse Barn', 'Human & Ag Biosciences Bldg 1', 'Hutcheson Hall', 'ICTAS A', 'Indoor Tennis Courts', 'Inst for Crit Tech & Appld Sci', 'Integrated Life Sciences Bldg', 'John W. Hancock Jr. Hall', 'Julian Cheatham Hall', 'Kent Square', 'Kroehling Adv Material Foundry', 'Lane Hall', 'Lane Stadium', 'Latham Hall', 'Lavery Hall', 'Lee', 'Liberal Arts Building', 'Life Sciences I', 'Litton-Reaves Hall', 'Lyric Theater', 'Major Williams Hall', 'Math Emporium', 'McBryde Hall', 'McComas Hall', 'Merryman Athletic Facility', 'Military Building/Laundry', 'Moss Arts Center', 'New Classroom Building', 'New Engineering Building', 'New Hall West', 'Newman', 'Norris Hall', 'Old Grand Piano Building', 'Old Security Building', 'Owens Hall', 'PK', 'Pamplin Hall', 'Patton Hall', 'Payne', 'Peddrew-Yates', 'Performing Arts Building', 'Pointe West Commons', 'Price Hall', 'Price House', 'Print Shop', 'Pritchard East', 'Psychological Services Center', 'Randolph Hall', 'Riverside Center 1', 'Robeson Hall', 'Sandy Hall', 'Sardo Laboratory', 'Saunders Hall', 'Seitz Hall', 'Shanks Hall', 'Sheep Barn', 'Shultz Hall', 'Signature Engineering Building', 'Slusher Tower', 'Slusher Wing', 'Smith Career Center', 'Smyth Hall', 'Solitude', 'Squires Student Center', 'Surge Space Building', 'Swine Center', 'T. Marshall Hahn, Jr. Hall', 'Teske House', 'Theatre 101', 'Thomas Conner House', 'Torgersen Hall', 'Urban Pest Control Facility', 'VT/Carilion Medicl Sch/Res Ins', 'VT/UVA Northern VA Ctr', 'VT/UVA Northern VA Ctr', 'Vet Med Instructional Addition', 'Vet Med Phase 1', 'Vet Med Phase 2', 'Vet Med Phase 3', 'Vet Med Phase 4C-Non-Client', 'Wallace Hall', 'War Memorial Gymnasium', 'Whittemore Hall', 'Williams Hall', 'Wood Engineering Lab', 'Wood Processing Lab']
TERMS = ['201601', '201606', '201607', '201609', '201612', '201701', '201706', '201707', '201709', '201712', '201801', '201806', '201807', '201809', '201812', '201901', '201906', '201907', '201909', '201912', '202001', '202006', '202007', '202009', '202012']
TERM_NAMES = ['Spring 2016', 'Summer I 2016', 'Summer II 2016', 'Fall 2016', 'Winter 2017', 'Spring 2017', 'Summer I 2017', 'Summer II 2017', 'Fall 2017', 'Winter 2018', 'Spring 2018', 'Summer I 2018', 'Summer II 2018', 'Fall 2018', 'Winter 2019', 'Spring 2019', 'Summer I 2019', 'Summer II 2019', 'Fall 2019', 'Winter 2020', 'Spring 2020', 'Summer I 2020', 'Summer II 2020', 'Fall 2020 (tentative)', 'Winter 2021']
DAYS = ['U', 'M', 'T', 'W', 'R', 'F', 'S']
ATTRIBS = ['06', 'AV01', 'AV02', 'AV03', 'AV04', 'AV05', 'AV06', 'AV07', 'AV08', 'AV09', 'AV10', 'AV11', 'AV12', 'AV15', 'FX13', 'FX14', 'PC04', 'PC15', 'PC16', 'PC17', 'PC18', 'PC19', 'PC30', 'PC31', 'PC32', 'ST20', 'ST21', 'ST22', 'ST23', 'ST24', 'ST29', 'TY20', 'TY24', 'TY25', 'TY26', 'TY27', 'TY28', 'TY32', 'TY33']
ATTRIB_NAMES = ['DO NOT USE WHD', 'Sound System', 'Wireless Microphone', 'Slide Projector', 'Technical Support Needed', 'WiFi', 'DVD/VCR Combination Unit', 'Computer Projection', 'Document Camera', 'DVD/Blueray Player', 'Crestron Control', 'AC Outlets at seats/floor', 'Computer Installed', 'Lecture Capture', 'Chalk Board - Multiple', 'Whiteboard - Multiple', 'Do not use', 'Windows', 'Shades or Blinds', 'Handicapped Accessible', 'Do not use', 'Air Conditioning', 'General Assignment Classroom', 'Non General Assignment Clsrm', 'Gen Assignment Restricted Use', 'Tablet Arm Chairs', 'Fixed Seating', 'Tables (round)', 'Do not use', 'Sled style desk', 'Moveable Chairs and Tables', 'Do not use', 'Do not use', 'Computer Lab', 'Auditorium', 'Scale up', 'Interactive Classroom', 'Classroom', 'Tiered Classroom']
ClassBlock = namedtuple('ClassBlock', ['room', 'name', 'crn'])
EventBlock = namedtuple('EventBlock', ['room', 'times', 'name'])
RoomAttribs = namedtuple('RoomAttribs', ['capacity', 'attribs'])
with open('201801-rooms.dat', 'rb') as f:
rooms = pickle.load(f)
with open('201801-blocks.dat', 'rb') as f:
blocks = pickle.load(f)
with open('201801-attribs.dat', 'rb') as f:
attribs = pickle.load(f)
conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = conn.cursor()
c.execute('''
CREATE TABLE buildings
(code TEXT PRIMARY KEY, name TEXT, latitude DECIMAL(9,6), longitude DECIMAL(9,6))
''')
c.execute('''
CREATE TABLE rooms
(id INT PRIMARY KEY, building_code TEXT, room TEXT, capacity INT, attribs TEXT,
FOREIGN KEY(building_code) REFERENCES buildings(code))
''')
c.execute('''
CREATE TABLE classes
(id INT PRIMARY KEY, room_id INT, days TEXT, start TIMESTAMP, end TIMESTAMP, crn TEXT, name TEXT,
FOREIGN KEY(room_id) REFERENCES rooms(id))
''')
c.execute('''
CREATE TABLE events
(id INT PRIMARY KEY, room_id INT, days TEXT, start_day TIMESTAMP, end_day TIMESTAMP, start TIMESTAMP, end TIMESTAMP, name TEXT,
FOREIGN KEY(room_id) REFERENCES rooms(id))
''')
conn.commit()
# Load buildings and rooms
for building, rooms in attribs.items():
c.execute('''
INSERT INTO buildings
(code, name) values (?, ?)
''', (building, BUILDING_NAMES[BUILDINGS.index(building)]))
for room, value, in rooms.items():
c.execute('''
INSERT INTO rooms
(building_code, room, capacity, attribs) values (?, ?, ?, ?)
''', (building, room, value[0], ','.join(value[1])))
# Load classes
| 97.848101 | 2,933 | 0.66119 | [
"MIT"
] | branw/campus-cuckoo | scraper/processor.py | 7,730 | Python |
import glob
import argparse
import os
import shutil
"""
This module helps to filter only the images
that have binary masks within a dataset
"""
if __name__ == '__main__':
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Filter masked data from dataset')
parser.add_argument('--data_dir', required=True,
metavar="/path/to/dataset",
help="Path to dataset")
args = parser.parse_args()
data_dir = os.path.join(args.data_dir,"data")
output_dir = os.path.join(args.data_dir,"data2")
#Create paths if not exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    #Open base_names.txt, which lists the images that have binary masks
file_base_names = open(os.path.join(args.data_dir,"base_names.txt"), 'r')
for name in file_base_names.readlines():
name = name.strip()
extentions = [".jpg", ".npz"]
for ext in extentions:
# Copying files
src = os.path.join(data_dir,name+ext)
dst = os.path.join(output_dir,name+ext)
shutil.copy2(src,dst) | 27.8 | 78 | 0.630396 | [
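# Example invocation (sketch) -- assumes <dataset>/data holds the .jpg/.npz pairs
# and <dataset>/base_names.txt lists the base names of the masked images:
#
#   python filter_with_mask.py --data_dir /path/to/dataset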
"MIT"
] | Tubaher/grapes_project | samples/uvas/utils/filter_with_mask.py | 1,112 | Python |
"""GoldenTimes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from django.views.generic import RedirectView
urlpatterns = [
# url(r'^$', RedirectView.as_view(url='http://liujin.jios.org:8888')),
url(r'^$', RedirectView.as_view(url='/portal/')),
url(r'^admin/', admin.site.urls),
url(r'^portal/', include('portal.urls')),
url(r'^api/', include('api.urls')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.35 | 91 | 0.719236 | [
"BSD-3-Clause"
] | liuxue0905/GoldenTimes | GoldenTimes/urls.py | 1,414 | Python |
import sys
##print ("This is the name of the script: ", sys.argv[0])
##print ("Number of arguments: ", len(sys.argv))
##print ("The arguments are: " , str(sys.argv))
lemmas = []
lemmas_cleaned = []
nums = ['1','2','3','4','5','6','7','8','9','0']
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',
'p','q','r','s','t','u','v','w','x','y','z',
'ā','â','ā',
'ê','ė', 'ē',
'ô','ō',
'ū','û',
'ī','î']
POS_tags = ['as., st. V. (1)', 'as., st. V. (2)', 'as., st. V. (3a)', 'as., st. V. (3b)', 'as., st. V. (4)', 'as., st. V. (5)', 'as., st. V. (6)', 'as., st. V. (7)', \
'as., sw. V. (1a)', 'as., sw. V. (1b)', 'as., sw. V. (2)', \
'as., red. V.',\
'as., st. M. (a)']
def dict_scrape(POS, dictionaryfile='as_freq.txt'):
"""Scrapes a dictionary for a given part of speech. POS tags in POS_tags.
POS(str), dictionaryfile(str-of-filename) -> list-of-strings
"""
if POS in POS_tags:
with open(dictionaryfile) as to_scrape:
for line in to_scrape:
if POS in line:
lemmas.append(line)
for line in lemmas:
#1, bāga, as., st. F. (ō)?, sw. F. (n)?: nhd. Streit
        # Find where the headword starts by skipping the leading frequency number and punctuation
        i=0
        for char in line[:44]:
            if char not in alphabet:
                i=i+1
            else:
                break
lemmas_cleaned.append(line[i:].strip().replace('*','').replace('?','')+'\n')
#scrub line of the frequency data, begin with headword?
## print("Found " + str(len(lemmas_cleaned)) + " lemmas matching that category")
return lemmas_cleaned
dict_scrape(sys.argv[1])
# Open the output file once; re-opening it with 'w+' inside the loop would truncate it
# on every iteration and keep only the last lemma.
with open(sys.argv[1][5:], 'w') as to_write:
    for line in lemmas_cleaned:
        to_write.write(line)
| 36.142857 | 167 | 0.477132 | [
"MIT"
] | tykniess/muspilli | dictionaries/archives/dict_scrape.py | 1,785 | Python |
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
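# Illustrative sketch (not part of the original file): how the report helpers
# above fit together. The target, toolchain and id values are placeholders.
def _example_report_flow():
    report = {}
    prep_report(report, "K64F", "GCC_ARM", "example-build")
    result = create_result("K64F", "GCC_ARM", "example-build", "example build")
    result["elapsed_time"] = 1.23
    result["output"] = "build log output"
    add_result_to_report(report, result)
    return report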
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
if toolchain.config.has_regions:
_ = list(toolchain.config.regions)
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
    given version. Returns False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
    target_name - Name of the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
version) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
def add_regions_to_profile(profile, config, toolchain_class):
"""Add regions to the build profile, if there are any.
Positional Arguments:
profile - the profile to update
config - the configuration object that owns the region
toolchain_class - the class of the toolchain being used
"""
if not profile:
return
regions = list(config.regions)
for region in regions:
for define in [(region.name.upper() + "_ADDR", region.start),
(region.name.upper() + "_SIZE", region.size)]:
profile["common"].append("-D%s=0x%x" % define)
active_region = [r for r in regions if r.active][0]
for define in [("MBED_APP_START", active_region.start),
("MBED_APP_SIZE", active_region.size)]:
profile["ld"].append(toolchain_class.make_ld_define(*define))
print("Using regions in this build:")
for region in regions:
print(" Region %s size 0x%x, offset 0x%x"
% (region.name, region.size, region.start))
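# (Added note, with assumed example values) For a region named "application"
# starting at 0x8000 with size 0x78000, the loop above appends
# "-DAPPLICATION_ADDR=0x8000" and "-DAPPLICATION_SIZE=0x78000" to
# profile["common"], and MBED_APP_START/MBED_APP_SIZE linker defines to
# profile["ld"].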
def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, silent=False, verbose=False,
extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
silent - suppress printing of progress indicators
verbose - Write the actual tools command lines used if True
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a list of mergeable build profiles
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
profile = {'c': [], 'cxx': [], 'common': [], 'asm': [], 'ld': []}
for contents in build_profile or []:
for key in profile:
profile[key].extend(contents[toolchain_name][key])
if config.has_regions:
add_regions_to_profile(profile, config, cur_tc)
toolchain = cur_tc(target, notify, macros, silent, build_dir=build_dir,
extra_verbose=extra_verbose, build_profile=profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.VERBOSE = verbose
return toolchain
def merge_region_list(region_list, destination, padding=b'\xFF'):
"""Merege the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
    padding - bytes to fill gaps with
"""
merged = IntelHex()
print("Merging Regions:")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if region.filename:
print(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
print(" Padding region %s with 0x%x bytes" % (region.name, pad_size))
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
print("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
with open(destination, "wb+") as output:
merged.tofile(output, format='bin')
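# Illustrative sketch (added, not part of the original module) of the IntelHex
# padding trick used above: write pad bytes immediately after the current
# highest address so the next region starts at its expected offset.
def _example_intelhex_padding():
    ih = IntelHex()
    ih.puts(0x0, b"\x01\x02\x03\x04")       # 4 bytes of hypothetical firmware
    ih.puts(ih.maxaddr() + 1, b"\xFF" * 4)  # pad addresses 0x4..0x7 with 0xFF
    return ih.maxaddr()                     # -> 0x7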
def scan_resources(src_paths, toolchain, dependencies_paths=None,
inc_dirs=None, base_path=None, collect_ignores=False):
""" Scan resources using initialized toolcain
Positional arguments
src_paths - the paths to source directories
toolchain - valid toolchain object
dependencies_paths - dependency paths that we should scan for include dirs
inc_dirs - additional include directories which should be added to
the scanner resources
"""
# Scan src_path
resources = toolchain.scan_resources(src_paths[0], base_path=base_path,
collect_ignores=collect_ignores)
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path, base_path=base_path,
collect_ignores=collect_ignores))
# Scan dependency paths for include dirs
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
resources.inc_dirs.extend(lib_resources.inc_dirs)
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Load resources into the config system which might expand/modify resources
# based on config data
resources = toolchain.config.load_resources(resources)
# Set the toolchain's configuration data
toolchain.set_config_data(toolchain.config.get_config_data())
if (hasattr(toolchain.target, "release_versions") and
"5" not in toolchain.target.release_versions and
"rtos" in toolchain.config.lib_config_data):
if "Cortex-A" in toolchain.target.core:
raise NotSupportedException(
("%s Will be supported in mbed OS 5.6. "
"To use the %s, please checkout the mbed OS 5.4 release branch. "
"See https://developer.mbed.org/platforms/Renesas-GR-PEACH/#important-notice "
"for more information") % (toolchain.target.name, toolchain.target.name))
else:
raise NotSupportedException("Target does not support mbed OS 5")
return resources
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None, stats_depth=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
    linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with target/toolchain properties for the report
project_id - the name put in the report
project_description - the human-readable version of what this thing does
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display file/dirs
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
    # Extend src_paths with libraries_paths
if libraries_paths is not None:
src_paths.extend(libraries_paths)
inc_dirs.extend(map(dirname, libraries_paths))
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
name = (name or toolchain.config.name or
basename(normpath(abspath(src_paths[0]))))
toolchain.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
if toolchain.config.has_regions:
res, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=res) if r.active else r
for r in region_list]
res = join(build_path, name) + ".bin"
merge_region_list(region_list, res)
else:
res, _ = toolchain.link_program(resources, build_path, name)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table', stats_depth)
if not silent:
                print(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
resources.detect_duplicates(toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = memap_instance.mem_report
cur_result["bin"] = res
cur_result["elf"] = splitext(res)[0] + ".elf"
cur_result.update(toolchain.report)
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
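# Illustrative sketch (added; the paths, target and toolchain are hypothetical)
# of a minimal build_project() invocation, roughly what the command-line front
# ends issue.
def _example_build_project_call():
    return build_project(
        src_paths=["."],
        build_path="BUILD/K64F/GCC_ARM",
        target="K64F",
        toolchain_name="GCC_ARM",
        name="example_app",
        jobs=4,
    )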
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, verbose=False, macros=None,
inc_dirs=None, jobs=1, silent=False, report=None,
properties=None, extra_verbose=False, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with target/toolchain properties for the report
extra_verbose - even more output!
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent,
verbose=verbose, extra_verbose=extra_verbose, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
cur_result['type'] = 'library'
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain,
dependencies_paths=dependencies_paths,
inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
toolchain.copy_files(resources.headers, build_path, resources=resources)
toolchain.copy_files(resources.objects, build_path, resources=resources)
toolchain.copy_files(resources.libraries, build_path,
resources=resources)
toolchain.copy_files(resources.json_files, build_path,
resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path,
resources=resources)
if resources.hex_files:
toolchain.copy_files(resources.hex_files, build_path,
resources=resources)
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
######################
### Legacy methods ###
######################
def mbed2_obj_path(target_name, toolchain_name):
real_tc_name = TOOLCHAIN_CLASSES[toolchain_name].__name__
return join("TARGET_" + target_name, "TOOLCHAIN_" + real_tc_name)
def build_lib(lib_id, target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
verbose - Write the actual tools command lines used if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with target/toolchain properties for the report
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
# We need to combine macros from parameter list with macros from library
# definition
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
src_paths = [src_paths]
# The first path will give the name to the library
name = basename(src_paths[0])
if report != None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Toolchain instance
# Create the desired build directory structure
bin_path = join(build_path, mbed2_obj_path(target.name, toolchain_name))
mkdir(bin_path)
tmp_path = join(build_path, '.temp', mbed2_obj_path(target.name,
toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
src_paths, tmp_path, target, toolchain_name, macros=macros,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
toolchain.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Add extra include directories / files which are required by library
        # These files are usually not in the same directory as the source files,
        # so the previous scan will not include them
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
dependencies_include_dir.extend(map(dirname, lib_resources.inc_dirs))
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
# Add other discovered configuration data to the configuration object
for res in resources:
config.load_resources(res)
toolchain.set_config_data(toolchain.config.get_config_data())
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path,
resources=resource)
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
# We do have unique legacy conventions about how we build and package the mbed
# library
def build_mbed_libs(target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Function returns True is library was built and false if building was
skipped
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
verbose - Write the actual tools command lines used if True
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict to fill with target/toolchain properties for the report
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
if report != None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
# Check toolchain support
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print('%s target is not yet supported by toolchain %s' %
(target.name, toolchain_name))
print('%s target supports %s toolchain%s' %
(target.name, supported_toolchains_text, 's'
if len(target.supported_toolchains) > 1 else ''))
if report != None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
# Source and Build Paths
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(MBED_LIBRARIES, mbed2_obj_path(target.name, toolchain_name))
mkdir(build_toolchain)
# Toolchain
tmp_path = join(MBED_LIBRARIES, '.temp', mbed2_obj_path(target.name, toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
[""], tmp_path, target, toolchain_name, macros=macros,verbose=verbose,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
# CMSIS
toolchain.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
toolchain.copy_files(resources.bin_files, build_toolchain)
objects = toolchain.compile_sources(resources, tmp_path)
toolchain.copy_files(objects, build_toolchain)
# mbed
toolchain.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = toolchain.scan_resources(dir)
toolchain.copy_files(resources.headers, dest)
library_incdirs.append(dest)
# Target specific sources
hal_src = MBED_TARGETS_PATH
hal_implementation = toolchain.scan_resources(hal_src)
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files +
hal_implementation.libraries +
[MBED_CONFIG_FILE],
build_target, resources=hal_implementation)
toolchain.copy_files(hal_implementation.linker_script, build_toolchain)
toolchain.copy_files(hal_implementation.bin_files, build_toolchain)
incdirs = toolchain.scan_resources(build_target).inc_dirs
objects = toolchain.compile_sources(hal_implementation,
library_incdirs + incdirs)
toolchain.copy_files(objects, build_toolchain)
# Common Sources
mbed_resources = None
for dir in [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]:
mbed_resources += toolchain.scan_resources(dir)
objects = toolchain.compile_sources(mbed_resources,
library_incdirs + incdirs)
        # A number of compiled files need to be copied as objects rather than
        # archived, because of the way the linker searches for symbols in
        # archives. These are:
# - mbed_retarget.o: to make sure that the C standard lib symbols get
# overridden
# - mbed_board.o: mbed_die is weak
# - mbed_overrides.o: this contains platform overrides of various
# weak SDK functions
# - mbed_main.o: this contains main redirection
separate_names, separate_objects = ['mbed_retarget.o', 'mbed_board.o',
'mbed_overrides.o', 'mbed_main.o', 'mbed_sdk_boot.o'], []
for obj in objects:
for name in separate_names:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
for obj in separate_objects:
toolchain.copy_files(obj, build_toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
if "ARM" in unique_supported_toolchains:
unique_supported_toolchains.append("ARMC6")
return unique_supported_toolchains
def mcu_toolchain_list(release_version='5'):
""" Shows list of toolchains
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
columns = ["mbed OS %s" % x for x in RELEASE_VERSIONS] + unique_supported_toolchains
return "\n".join(columns)
def mcu_target_list(release_version='5'):
""" Shows target list
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
return "\n".join(target_names)
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
# All tests status table print
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns)
# Align table
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
if (unique_toolchain in TARGET_MAP[target].supported_toolchains or
(unique_toolchain == "ARMC6" and
"ARM" in TARGET_MAP[target].supported_toolchains)):
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates (puts together) reports obtained from self.get_memory_summary()
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
# Get 'memory_usage' record and build table with
# statistics
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
# Note that summary should be in the last record of
# 'memory_usage' section. This is why we are
# grabbing last "[-1]" record.
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
def merge_build_data(filename, toolchain_report, app_type):
path_to_file = dirname(abspath(filename))
try:
build_data = load(open(filename))
except (IOError, ValueError):
build_data = {'builds': []}
for tgt in toolchain_report.values():
for tc in tgt.values():
for project in tc.values():
for build in project:
try:
build[0]['elf'] = relpath(build[0]['elf'], path_to_file)
build[0]['bin'] = relpath(build[0]['bin'], path_to_file)
except KeyError:
pass
if 'type' not in build[0]:
build[0]['type'] = app_type
build_data['builds'].append(build[0])
dump(build_data, open(filename, "wb"), indent=4, separators=(',', ': '))
| 38.458333 | 101 | 0.635676 | [
"Apache-2.0"
] | SaiVK/BenchIoT | os-lib/mbed-os/tools/build_api.py | 54,457 | Python |
"""
Django settings for berlapan project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# for best-practices.
# SECURITY WARNING: keep the secret key used in production secret!
# Please set SECRET_KEY environment variable in your production environment
# (e.g. Heroku).
SECRET_KEY = os.getenv('SECRET_KEY', 'django-insecure-nk@v31jj#vq_xd)s9uns%nkmj^o0efdm$-bj7dm8jz=t76_q-c')
# Automatically determine the environment by detecting whether the DATABASE_URL variable is set.
# DATABASE_URL is provided by Heroku if a database add-on is added
# (e.g. Heroku Postgres).
PRODUCTION = os.getenv('DATABASE_URL') is not None
# SECURITY WARNING: don't run with debug turned on in production!
# If you want to enable debugging on Heroku for learning purposes,
# set this to True.
DEBUG = not PRODUCTION
HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME', '')
ALLOWED_HOSTS = [f'{HEROKU_APP_NAME}.herokuapp.com']
if not PRODUCTION:
ALLOWED_HOSTS += ['.localhost', '127.0.0.1', '[::1]']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'salingbantu',
'users',
'daftar_vaksinasi',
'donordarah',
'relawanvaksin',
'corsheaders',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
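# Note (added): the django-cors-headers documentation recommends placing
# 'corsheaders.middleware.CorsMiddleware' as high as possible in MIDDLEWARE,
# above any middleware that can generate responses (such as CommonMiddleware),
# so that those responses also receive CORS headers.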
ROOT_URLCONF = 'berlapan.urls'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = [
'GET',
'POST',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'berlapan.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Set database settings automatically using DATABASE_URL.
if PRODUCTION:
DATABASES['default'] = dj_database_url.config(
conn_max_age=600, ssl_require=True
)
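# Note (added, example values): with DATABASE_URL set to something like
# postgres://user:secret@host:5432/berlapan, dj_database_url.config() returns a
# dict along the lines of {'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'berlapan', 'USER': 'user', 'PASSWORD': 'secret', 'HOST': 'host',
# 'PORT': 5432, 'CONN_MAX_AGE': 600}, which replaces the sqlite3 default above.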
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# Feel free to change these according to your needs.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# This is the directory for storing `collectstatic` results.
# This shouldn't be included in your Git repository.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# You can use this directory to store project-wide static files.
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
# Make sure the directories exist to prevent errors when doing `collectstatic`.
for directory in [*STATICFILES_DIRS, STATIC_ROOT]:
directory.mkdir(exist_ok=True)
# Enable compression and caching features of whitenoise.
# You can remove this if it causes problems on your setup.
| 26.839779 | 106 | 0.70914 | [
"Unlicense"
] | rafiatha09/berlapan | berlapan/settings.py | 4,858 | Python |
from random import randint
import unicornhat as unicorn
def run(params):
width,height=unicorn.get_shape()
while True:
x = randint(0, (width-1))
y = randint(0, (height-1))
r = randint(0, 255)
g = randint(0, 255)
b = randint(0, 255)
unicorn.set_pixel(x, y, r, g, b)
unicorn.show() | 21.625 | 40 | 0.563584 | [
"MIT"
] | kfechter/unicorn-remote | app/programs/original/random_sparkles.py | 346 | Python |
'''Write a program that reads a person's sex, but only accept the values 'M' or 'F'.
If the input is invalid, keep asking until a valid value is entered.'''
sexo = str(input('Enter your sex: [M/F] ')).strip().upper()[0]
while sexo not in 'MmFf':
    sexo = str(input('Invalid input. Please enter your sex: ')).strip().upper()[0]
print('Sex {} registered successfully.'.format(sexo))
| 50 | 111 | 0.6875 | [
"MIT"
] | Roberto-Sartore/Python | exercicios/PythonExercicios/ex057.py | 415 | Python |
import typing
import sys
import numpy as np
def set_val(
a: np.array,
i: int,
x: int,
) -> typing.NoReturn:
while i < a.size:
a[i] = max(a[i], x)
i += i & -i
def get_mx(
a: np.array,
i: int,
) -> int:
mx = 0
while i > 0:
mx = max(mx, a[i])
i -= i & -i
return mx
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
fw = np.zeros(
n + 1,
dtype=np.int64,
)
mx = 0
for i in range(n):
v = get_mx(fw, h[i] - 1)
set_val(fw, h[i], v + a[i])
print(get_mx(fw, n))
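# Illustrative sketch (added, not part of the original submission): set_val and
# get_mx above form a Fenwick (binary indexed) tree over heights that answers
# prefix-maximum queries, so solve() runs the "maximum total beauty with
# strictly increasing heights" DP in O(n log n). Tiny example with made-up data:
def _example_prefix_max_dp() -> int:
  h = [3, 1, 2]                      # heights, a permutation of 1..n
  a = [10, 20, 30]                   # beauties
  fw = np.zeros(len(h) + 1, dtype=np.int64)
  for hi, ai in zip(h, a):
    best_below = get_mx(fw, hi - 1)  # best total over heights < hi
    set_val(fw, hi, best_below + ai)
  return get_mx(fw, len(h))          # -> 50 (take heights 1 then 2)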
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import njit, i8
from numba.pycc import CC
cc = CC('my_module')
fn = solve
sig = (i8, i8[:], i8[:])
get_mx = njit(get_mx)
set_val = njit(set_val)
cc.export(
fn.__name__,
sig,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main() | 13.7375 | 31 | 0.535032 | [
"MIT"
] | kagemeka/competitive-programming | src/atcoder/dp/q/sol_4.py | 1,099 | Python |
""" Solver classes for domain adaptation experiments
"""
__author__ = "Steffen Schneider"
__email__ = "[email protected]"
import os, time
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
import torch.utils.data
import torch.nn as nn
from .. import Solver, BaseClassSolver
from ... import layers, optim
import itertools
class DABaseSolver(BaseClassSolver):
""" Base Class for Unsupervised Domain Adaptation Approaches
"""
def __init__(self, *args, **kwargs):
super(DABaseSolver, self).__init__(*args, **kwargs)
def _init_losses(self, **kwargs):
super()._init_losses(**kwargs)
self.register_loss(layers.AccuracyScore(), name = 'acc_s', weight = None)
self.register_loss(layers.AccuracyScore(), name = 'acc_t', weight = None)
class DATeacher(Solver):
""" Base Class for Unsupervised Domain Adaptation Approaches using a teacher model
"""
def __init__(self, model, teacher, dataset, *args, **kwargs):
super().__init__(model, dataset, *args, **kwargs)
self.teacher = teacher
def _init_models(self, **kwargs):
super()._init_models(**kwargs)
self.register_model(self.teacher, 'teacher')
class DABaselineLoss(object):
def __init__(self, solver):
self.solver = solver
def _predict(self, x, y):
_ , y_ = self.solver.model(x)
if not self.solver.multiclass:
y_ = y_.squeeze()
y = y.float()
return y_, y
def __call__(self, batch):
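        # (Added note) `batch` is assumed to be a pair of (inputs, labels)
        # batches, source domain first and target domain second; each entry in
        # `losses` is the (prediction, target) pair consumed by the loss
        # registered under that name (e.g. 'acc_s'/'acc_t' in
        # DABaseSolver._init_losses above).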
losses = {}
(x, y) = batch[0]
losses['acc_s'] = losses['ce'] = self._predict(x,y)
with torch.no_grad():
x,y = batch[1]
losses['acc_t'] = self._predict(x,y)
return losses
class BaselineDASolver(DABaseSolver):
""" A domain adaptation solver that actually does not run any adaptation algorithm
This is useful to establish baseline results for the case of no adaptation, for measurement
of the domain shift between datasets.
"""
def _init_optims(self, lr = 3e-4, **kwargs):
super()._init_optims(**kwargs)
self.register_optimizer(torch.optim.Adam(self.model.parameters(),
lr=lr, amsgrad=True),
DABaselineLoss(self)) | 26.602273 | 95 | 0.629645 | [
"MPL-2.0"
] | artix41/salad | salad/solver/da/base.py | 2,341 | Python |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from pathlib import Path
import pytest
from abex.plotting.expected_basenames import expected_basenames_2d
from cellsig_sim.scripts.run_cell_signalling_loop import main
from psbutils.misc import find_subrepo_directory
# @pytest.mark.timeout(1800)
@pytest.mark.skip("Can cause ADO timeout on all platforms")
def test_tutorial_wetlab_simulation():
# For real use, we would want something like --num_iter 15 --num_runs 100, but to test everything is working,
# smaller values are sufficient, and reduce the compute time from hours to minutes.
num_iter = 2
num_runs = 3
subrepo_dir = find_subrepo_directory()
main(
[
"--spec_file",
f"{subrepo_dir}/tests/data/Specs/tutorial-wetlab-sim.yml",
"--num_iter",
str(num_iter),
"--num_runs",
str(num_runs),
"--enable_multiprocessing",
"--plot_simulated_slices",
]
)
results_dir = Path("Results") / "tutorial-wetlab-sim"
assert (results_dir / "config.yml").is_file()
for i_run in range(num_runs):
run_dir = results_dir / "fixed" / f"seed{i_run}"
assert (run_dir / "init_batch.csv").is_file()
for i_iter in range(1, num_iter + 1):
iter_dir = run_dir / f"iter{i_iter}"
assert iter_dir.is_dir()
basenames = [f.name for f in sorted(iter_dir.iterdir())]
assert basenames == expected_basenames_2d(4, variant=2)
| 42.093023 | 113 | 0.593923 | [
"MIT"
] | Biological-Computation/station-b-libraries | PyStationB/projects/CellSignalling/slow_tests/simulation/test_cellsig_tutorials.py | 1,810 | Python |
# Solution to Problem 8
# Program outputs today's date and time in the format "Monday, January 10th 2019 at 1:15pm"
# To start we import the Python datetime module as dt.
from datetime import datetime as dt
# now holds the current date and time.
now = dt.now()
# Copied initially from stackoverflow Reference 1 below, amended to use now.day for the day of the month.
# Suffix equals 'st' if the day is 1, 21 or 31, 'nd' if the day is 2 or 22, 'rd' if the day is 3 or 23, and 'th' for everything else.
suffix = 'st' if now.day in [1, 21, 31] else 'nd' if now.day in [2, 22] else 'rd' if now.day in [3, 23] else 'th'
# Display the heading "Today's Date and Time:" to the user.
print("Today's Date and Time:")
# Below displays today's date and time to the user in the format Day, Month Date(suffix) Year at Current Time am/pm.
# Used Reference 3 below to remove the leading 0 when displaying the time ('%#I' works on Windows; '%-I' is the Unix equivalent).
print(now.strftime('%A, %B %d%%s %Y at %#I:%M %p') % suffix)
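# Example of the output format (added): run on 21 March 2019 at 14:05 this prints
# "Thursday, March 21st 2019 at 2:05 PM" (%p gives upper-case AM/PM).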
# Reference 1: https://stackoverflow.com/a/11645978
# Reference 2: https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/
# Reference 3: https://stackoverflow.com/questions/904928/python-strftime-date-without-leading-0 ("One problem is that '{dt.hour}' uses a 24 hour clock :(. Using the second option still brings you back to using '{%#I}' on Windows and '{%-I}' on Unix." – ubomb May 24 '16 at 22:47)
# Used the week 6 lecture as a base for the problem and also looked at the Python tutorial.
# Laura Brogan 19/03/2019 | 67.869565 | 276 | 0.726457 | [
"Apache-2.0"
] | LauraBrogan/pands-problem-set-2019 | solution-8.py | 1,563 | Python |
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
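        # (Added note) Manual sub-pixel convolution / pixel shuffle: self.ps is
        # a 1x1 convolution that expands channels 4x, and the reshape/transpose
        # below rearranges (b, 4*c', h, w) -> (b, c', 2*h, 2*w), doubling the
        # spatial resolution (the same layout chainer.functions.depth2space
        # uses with r=2).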
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
                raise Exception('unknown mode: {}'.format(mode))
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
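        # Minibatch discrimination: project each sample's features to B*C values,
        # compute pairwise L1 distances between samples across the C dimension,
        # turn them into similarities with exp(-distance), and sum over the batch
        # (subtracting 1 to drop each sample's self-similarity). The resulting B
        # per-sample statistics are concatenated to the features before the final
        # spectral-normalized linear layer.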
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
| 39.899204 | 134 | 0.582103 | [
"MIT"
] | VirtualOilCake/Deep_VoiceChanger | nets/block.py | 15,042 | Python |
estado = dict()
brasil = list()
for c in range(0,3):
    estado['uf'] = str(input('State (UF): '))
    estado['sigla'] = str(input('Abbreviation: '))
brasil.append(estado.copy())
print(brasil)
for e in brasil:
for k, v in e.items():
        print(f'The field {k} has the value {v}')
| 22.583333 | 43 | 0.571956 | [
"MIT"
] | Kauan677/Projetos-Python | Python/Dicionarios.py | 271 | Python |
#
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import importlib
import os
import pkgutil
import platform
import shutil
import sys
import subprocess
import traceback
YELLOW_COLOR="\033[0;33m"
RED_COLOR="\033[0;31m"
CYAN_COLOR="\033[0;36m"
NO_COLOR="\033[0m"
SEPARATOR = "-" * 80
BUILD_GROUP_COMMON = 1
BUILD_GROUP_INSTRUMENTED = 2
BUILD_TYPE_COMMON = 'common'
BUILD_TYPE_UNINSTRUMENTED = 'uninstrumented'
BUILD_TYPE_ASAN = 'asan'
BUILD_TYPE_TSAN = 'tsan'
BUILD_TYPES = [BUILD_TYPE_COMMON, BUILD_TYPE_UNINSTRUMENTED, BUILD_TYPE_ASAN, BUILD_TYPE_TSAN]
TAR_EXTRACT = 'tar xf {}'
# -o -- force overwriting existing files
ZIP_EXTRACT = 'unzip -q -o {}'
ARCHIVE_TYPES = {
'.tar.bz2': TAR_EXTRACT,
'.tar.gz': TAR_EXTRACT,
'.tar.xz': TAR_EXTRACT,
'.tgz': TAR_EXTRACT,
'.zip': ZIP_EXTRACT,
}
def fatal(message):
log(message)
traceback.print_stack()
sys.exit(1)
def log(message=""):
sys.stderr.write(message + "\n")
def colored_log(color, message):
sys.stderr.write(color + message + NO_COLOR + "\n")
def log_output(prefix, args):
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(process.stdout.readline, ''):
log("{}{} {}{}".format(CYAN_COLOR, prefix, NO_COLOR, line.rstrip()))
process.stdout.close()
exit_code = process.wait()
if exit_code:
fatal("Execution failed with code: {}".format(exit_code))
def unset_if_set(name):
if name in os.environ:
log('Unsetting {} for third-party build (was set to "{}").'.format(name, os.environ[name]))
del os.environ[name]
def log_separator():
log("")
log(SEPARATOR)
log("")
def heading(title):
log("")
log(SEPARATOR)
log(title)
log(SEPARATOR)
log("")
def is_mac():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_jenkins_user():
return os.environ['USER'] == "jenkins"
def is_jenkins():
return 'BUILD_ID' in os.environ and 'JOB_NAME' in os.environ and is_jenkins_user()
def remove_path(path):
if not os.path.exists(path):
return
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def mkdir_if_missing(path):
if os.path.exists(path):
if not os.path.isdir(path):
fatal("Trying to create dir {}, but file with the same path already exists"
.format(path))
return
os.makedirs(path)
def make_archive_name(name, version, download_url):
if download_url is None:
return '{}-{}{}'.format(name, version, '.tar.gz')
for ext in ARCHIVE_TYPES:
if download_url.endswith(ext):
return '{}-{}{}'.format(name, version, ext)
return None
def which(exe):
return subprocess.check_output(['which', exe]).rstrip()
def import_submodules(package, recursive=True):
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
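# Usage sketch (the package name is illustrative): recursively import every module
# in a package so that any module-level side effects run, and get the modules back
# in a dict keyed by their full names:
#   modules = import_submodules('build_definitions')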
class Dependency(object):
def __init__(self, name, version, url_pattern, build_group):
self.name = name
self.version = version
self.dir = '{}-{}'.format(name, version)
self.underscored_version = version.replace('.', '_')
if url_pattern is not None:
self.download_url = url_pattern.format(version, self.underscored_version)
else:
self.download_url = None
self.build_group = build_group
self.archive_name = make_archive_name(name, version, self.download_url)
self.patch_version = 0
def should_build(self, instrumented):
return True
class ExtraDownload(object):
def __init__(self, name, version, url_pattern, dir, post_exec=None):
self.name = name
self.version = version
self.download_url = url_pattern.format(version)
self.archive_name = make_archive_name(name, version, self.download_url)
self.dir = dir
if post_exec is not None:
self.post_exec = post_exec
class PushDir:
def __init__(self, dir):
self.dir = dir
self.prev = None
def __enter__(self):
self.prev = os.getcwd()
os.chdir(self.dir)
def __exit__(self, type, value, traceback):
os.chdir(self.prev)
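# Usage sketch (the directory is hypothetical): PushDir temporarily switches the
# working directory and always restores the previous one when the block exits:
#   with PushDir('/tmp/yb-thirdparty-build'):
#       log_output('[build] ', ['make'])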
| 26.299492 | 99 | 0.663192 | [
"Apache-2.0"
] | everyonce/yugabyte-db | thirdparty/build_definitions/__init__.py | 5,181 | Python |
#
# cbpro/order_book.py
# David Caseria
#
# Live order book updated from the Coinbase Websocket Feed
from sortedcontainers import SortedDict
from decimal import Decimal
import pickle
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
class OrderBook(WebsocketClient):
def __init__(self, product_id='BTC-USD', log_to=None):
super(OrderBook, self).__init__(products=product_id)
self._asks = SortedDict()
self._bids = SortedDict()
self._client = PublicClient()
self._sequence = -1
self._log_to = log_to
if self._log_to:
assert hasattr(self._log_to, 'write')
self._current_ticker = None
@property
def product_id(self):
''' Currently OrderBook only supports a single product even though it is stored as a list of products. '''
return self.products[0]
def on_open(self):
self._sequence = -1
print("-- Subscribed to OrderBook! --\n")
def on_close(self):
print("\n-- OrderBook Socket Closed! --")
def reset_book(self):
self._asks = SortedDict()
self._bids = SortedDict()
res = self._client.get_product_order_book(product_id=self.product_id, level=3)
for bid in res['bids']:
self.add({
'id': bid[2],
'side': 'buy',
'price': Decimal(bid[0]),
'size': Decimal(bid[1])
})
for ask in res['asks']:
self.add({
'id': ask[2],
'side': 'sell',
'price': Decimal(ask[0]),
'size': Decimal(ask[1])
})
self._sequence = res['sequence']
def on_message(self, message):
if self._log_to:
pickle.dump(message, self._log_to)
sequence = message.get('sequence', -1)
if self._sequence == -1:
self.reset_book()
return
if sequence <= self._sequence:
# ignore older messages (e.g. before order book initialization from getProductOrderBook)
return
elif sequence > self._sequence + 1:
self.on_sequence_gap(self._sequence, sequence)
return
msg_type = message['type']
if msg_type == 'open':
self.add(message)
elif msg_type == 'done' and 'price' in message:
self.remove(message)
elif msg_type == 'match':
self.match(message)
self._current_ticker = message
elif msg_type == 'change':
self.change(message)
self._sequence = sequence
def on_sequence_gap(self, gap_start, gap_end):
self.reset_book()
        print('Error: messages missing ({} - {}). Re-initializing book at sequence {}.'.format(
            gap_start, gap_end, self._sequence))
def add(self, order):
order = {
'id': order.get('order_id') or order['id'],
'side': order['side'],
'price': Decimal(order['price']),
'size': Decimal(order.get('size') or order['remaining_size'])
}
if order['side'] == 'buy':
bids = self.get_bids(order['price'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['price'], bids)
else:
asks = self.get_asks(order['price'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['price'], asks)
def remove(self, order):
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is not None:
bids = [o for o in bids if o['id'] != order['order_id']]
if len(bids) > 0:
self.set_bids(price, bids)
else:
self.remove_bids(price)
else:
asks = self.get_asks(price)
if asks is not None:
asks = [o for o in asks if o['id'] != order['order_id']]
if len(asks) > 0:
self.set_asks(price, asks)
else:
self.remove_asks(price)
def match(self, order):
size = Decimal(order['size'])
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if not bids:
return
assert bids[0]['id'] == order['maker_order_id']
if bids[0]['size'] == size:
self.set_bids(price, bids[1:])
else:
bids[0]['size'] -= size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if not asks:
return
assert asks[0]['id'] == order['maker_order_id']
if asks[0]['size'] == size:
self.set_asks(price, asks[1:])
else:
asks[0]['size'] -= size
self.set_asks(price, asks)
def change(self, order):
try:
new_size = Decimal(order['new_size'])
except KeyError:
return
try:
price = Decimal(order['price'])
except KeyError:
return
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is None or not any(o['id'] == order['order_id'] for o in bids):
return
index = [b['id'] for b in bids].index(order['order_id'])
bids[index]['size'] = new_size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if asks is None or not any(o['id'] == order['order_id'] for o in asks):
return
index = [a['id'] for a in asks].index(order['order_id'])
asks[index]['size'] = new_size
self.set_asks(price, asks)
tree = self._asks if order['side'] == 'sell' else self._bids
node = tree.get(price)
if node is None or not any(o['id'] == order['order_id'] for o in node):
return
def get_current_ticker(self):
return self._current_ticker
def get_current_book(self):
result = {
'sequence': self._sequence,
'asks': [],
'bids': [],
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']])
for bid in self._bids:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']])
return result
def get_ask(self):
return self._asks.peekitem(0)[0]
def get_asks(self, price):
return self._asks.get(price)
def remove_asks(self, price):
del self._asks[price]
def set_asks(self, price, asks):
self._asks[price] = asks
def get_bid(self):
return self._bids.peekitem(-1)[0]
def get_bids(self, price):
return self._bids.get(price)
def remove_bids(self, price):
del self._bids[price]
def set_bids(self, price, bids):
self._bids[price] = bids
if __name__ == '__main__':
import sys
import time
import datetime as dt
class OrderBookConsole(OrderBook):
''' Logs real-time changes to the bid-ask spread to the console '''
def __init__(self, product_id=None):
super(OrderBookConsole, self).__init__(product_id=product_id)
# latest values of bid-ask spread
self._bid = None
self._ask = None
self._bid_depth = None
self._ask_depth = None
def on_message(self, message):
super(OrderBookConsole, self).on_message(message)
# Calculate newest bid-ask spread
bid = self.get_bid()
bids = self.get_bids(bid)
bid_depth = sum([b['size'] for b in bids])
ask = self.get_ask()
asks = self.get_asks(ask)
ask_depth = sum([a['size'] for a in asks])
if self._bid == bid and self._ask == ask and self._bid_depth == bid_depth and self._ask_depth == ask_depth:
# If there are no changes to the bid-ask spread since the last update, no need to print
pass
else:
# If there are differences, update the cache
self._bid = bid
self._ask = ask
self._bid_depth = bid_depth
self._ask_depth = ask_depth
print('{} {} bid: {:.3f} @ {:.2f}\task: {:.3f} @ {:.2f}'.format(
dt.datetime.now(), self.product_id, bid_depth, bid, ask_depth, ask))
order_book = OrderBookConsole()
order_book.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
order_book.close()
if order_book.error:
sys.exit(1)
else:
sys.exit(0)
| 32.036789 | 119 | 0.524167 | [
"MIT"
] | 1M15M3/coinbasepro-python | cbpro/order_book.py | 9,579 | Python |
import json
import boto3
import os
from helper import AwsHelper
import time
def startJob(bucketName, objectName, itemId, snsTopic, snsRole, apiName):
print("Starting job with itemId: {}, bucketName: {}, objectName: {}".format(itemId, bucketName, objectName))
response = None
client = AwsHelper().getClient('rekognition')
if(apiName == "labels"):
response = client.start_label_detection(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
elif(apiName == "text"):
response = client.start_text_detection(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
elif(apiName == "faces"):
response = client.start_face_detection(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
elif(apiName == "moderation"):
response = client.start_content_moderation(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
elif(apiName == "celebrities"):
response = client.start_celebrity_recognition(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
else:
response = client.start_label_detection(
Video={
'S3Object': {
'Bucket': bucketName,
'Name': objectName
}
},
ClientRequestToken = itemId,
NotificationChannel={
'SNSTopicArn': snsTopic,
'RoleArn': snsRole
},
JobTag=itemId
)
return response["JobId"]
def processItem(message, snsTopic, snsRole):
print('message:')
print(message)
messageBody = json.loads(message['Body'])
bucketName = messageBody['bucketName']
objectName = messageBody['objectName']
itemId = messageBody['itemId']
apiName = objectName.split("/")[0]
print('Bucket Name: ' + bucketName)
print('Object Name: ' + objectName)
print('Task ID: ' + itemId)
print('starting Rekognition job...')
jobId = startJob(bucketName, objectName, itemId, snsTopic, snsRole, apiName)
if(jobId):
print("Started Job with Id: {}".format(jobId))
return jobId
def changeVisibility(sqs, qUrl, receipt_handle):
try:
sqs.change_message_visibility(
QueueUrl=qUrl,
ReceiptHandle=receipt_handle,
VisibilityTimeout=0
)
except Exception as e:
print("Failed to change visibility for {} with error: {}".format(receipt_handle, e))
def getMessagesFromQueue(sqs, qUrl):
# Receive message from SQS queue
response = sqs.receive_message(
QueueUrl=qUrl,
MaxNumberOfMessages=1,
VisibilityTimeout=60 #14400
)
print('SQS Response Received:')
print(response)
if('Messages' in response):
return response['Messages']
else:
print("No messages in queue.")
return None
def processItems(qUrl, snsTopic, snsRole):
sqs = AwsHelper().getClient('sqs')
messages = getMessagesFromQueue(sqs, qUrl)
jc = 0
totalMessages = 0
hitLimit = False
limitException = None
if(messages):
totalMessages = len(messages)
print("Total messages: {}".format(totalMessages))
for message in messages:
receipt_handle = message['ReceiptHandle']
try:
if(hitLimit):
changeVisibility(sqs, qUrl, receipt_handle)
else:
print("starting job...")
processItem(message, snsTopic, snsRole)
print("started job...")
print('Deleting item from queue...')
# Delete received message from queue
sqs.delete_message(
QueueUrl=qUrl,
ReceiptHandle=receipt_handle
)
print('Deleted item from queue...')
jc += 1
except Exception as e:
print("Error while starting job or deleting from queue: {}".format(e))
changeVisibility(sqs, qUrl, receipt_handle)
if(e.__class__.__name__ == 'LimitExceededException'
or e.__class__.__name__ == "ProvisionedThroughputExceededException"):
hitLimit = True
limitException = e
if(hitLimit):
raise limitException()
return totalMessages, jc
def processRequest(request):
qUrl = request['qUrl']
snsTopic = request['snsTopic']
snsRole = request['snsRole']
i = 0
max = 100
totalJobsScheduled = 0
hitLimit = False
provisionedThroughputExceededCount = 0
while(i < max):
try:
tc, jc = processItems(qUrl, snsTopic, snsRole)
totalJobsScheduled += jc
if(tc == 0):
i = max
except Exception as e:
if(e.__class__.__name__ == 'LimitExceededException'):
print("Exception: Hit limit.")
hitLimit = True
i = max
elif(e.__class__.__name__ == "ProvisionedThroughputExceededException"):
print("ProvisionedThroughputExceededException.")
provisionedThroughputExceededCount += 1
if(provisionedThroughputExceededCount > 5):
i = max
else:
print("Waiting for few seconds...")
time.sleep(5)
print("Waking up...")
i += 1
output = "Started {} jobs.".format(totalJobsScheduled)
if(hitLimit):
output += " Hit limit."
print(output)
return {
'statusCode': 200,
'body': output
}
def lambda_handler(event, context):
print("event: {}".format(event))
request = {}
request["qUrl"] = os.environ['ASYNC_QUEUE_URL']
request["snsTopic"] = os.environ['SNS_TOPIC_ARN']
request["snsRole"] = os.environ['SNS_ROLE_ARN']
return processRequest(request)
| 28.02974 | 112 | 0.516844 | [
"MIT-0"
] | aspi92/amazon-rekognition-serverless-large-scale-image-and-video-processing | rekognition-pipeline/lambda/asyncprocessor/lambda_function.py | 7,540 | Python |
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Watermark(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'watermark_text': 'str',
'watermark_text_size': 'int',
'watermark_position': 'OneOfWatermarkWatermarkPosition',
'watermark_rotation_angle': 'int',
'is_watermark_mandatory': 'bool',
'watermark_intensity': 'int'
}
attribute_map = {
'watermark_text': 'watermarkText',
'watermark_text_size': 'watermarkTextSize',
'watermark_position': 'watermarkPosition',
'watermark_rotation_angle': 'watermarkRotationAngle',
'is_watermark_mandatory': 'isWatermarkMandatory',
'watermark_intensity': 'watermarkIntensity'
}
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None): # noqa: E501
"""Watermark - a model defined in Swagger""" # noqa: E501
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if watermark_text is not None:
self.watermark_text = watermark_text
if watermark_text_size is not None:
self.watermark_text_size = watermark_text_size
if watermark_position is not None:
self.watermark_position = watermark_position
if watermark_rotation_angle is not None:
self.watermark_rotation_angle = watermark_rotation_angle
if is_watermark_mandatory is not None:
self.is_watermark_mandatory = is_watermark_mandatory
if watermark_intensity is not None:
self.watermark_intensity = watermark_intensity
@property
def watermark_text(self):
"""Gets the watermark_text of this Watermark. # noqa: E501
The watermark text associated with the tag defintion. # noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
"""
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
"""Sets the watermark_text of this Watermark.
The watermark text associated with the tag defintion. # noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
"""
self._watermark_text = watermark_text
@property
def watermark_text_size(self):
"""Gets the watermark_text_size of this Watermark. # noqa: E501
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:return: The watermark_text_size of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_text_size
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
"""Sets the watermark_text_size of this Watermark.
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501
:type: int
"""
self._watermark_text_size = watermark_text_size
@property
def watermark_position(self):
"""Gets the watermark_position of this Watermark. # noqa: E501
The position of the watermark on the page. # noqa: E501
:return: The watermark_position of this Watermark. # noqa: E501
:rtype: OneOfWatermarkWatermarkPosition
"""
return self._watermark_position
@watermark_position.setter
def watermark_position(self, watermark_position):
"""Sets the watermark_position of this Watermark.
The position of the watermark on the page. # noqa: E501
:param watermark_position: The watermark_position of this Watermark. # noqa: E501
:type: OneOfWatermarkWatermarkPosition
"""
self._watermark_position = watermark_position
@property
def watermark_rotation_angle(self):
"""Gets the watermark_rotation_angle of this Watermark. # noqa: E501
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:return: The watermark_rotation_angle of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_rotation_angle
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
"""Sets the watermark_rotation_angle of this Watermark.
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501
:type: int
"""
self._watermark_rotation_angle = watermark_rotation_angle
@property
def is_watermark_mandatory(self):
"""Gets the is_watermark_mandatory of this Watermark. # noqa: E501
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:return: The is_watermark_mandatory of this Watermark. # noqa: E501
:rtype: bool
"""
return self._is_watermark_mandatory
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
"""Sets the is_watermark_mandatory of this Watermark.
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501
:type: bool
"""
self._is_watermark_mandatory = is_watermark_mandatory
@property
def watermark_intensity(self):
"""Gets the watermark_intensity of this Watermark. # noqa: E501
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:return: The watermark_intensity of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_intensity
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
"""Sets the watermark_intensity of this Watermark.
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501
:type: int
"""
self._watermark_intensity = watermark_intensity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Watermark):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 36.217391 | 314 | 0.653716 | [
"BSD-2-Clause"
] | Layer8Err/laserfiche-api | laserfiche_api/models/watermark.py | 9,163 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-02-23 19:25:39
# @Email: [email protected]
# @Last Modified by: oesteban
# @Last Modified time: 2016-02-29 11:43:16
"""
Computation of the quality assessment measures on functional MRI
"""
import os.path as op
import numpy as np
import nibabel as nb
from nitime import algorithms as nta
import scipy
def gsr(epi_data, mask, direction="y", ref_file=None, out_file=None):
"""
Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The
procedure is as follows:
#. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.
#. Rotate by :math:`N/2`
#. Remove the intersection with the original mask
#. Generate a non-ghost background
#. Calculate the :abbr:`GSR (ghost to signal ratio)`
.. warning ::
This should be used with EPI images for which the phase
encoding direction is known.
    :param numpy.ndarray epi_data: the EPI data
    :param numpy.ndarray mask: a 3D brain mask
:param str direction: the direction of phase encoding (x, y, all)
:return: the computed gsr
"""
direction = direction.lower()
    if direction[-1] not in ['x', 'y'] and direction != 'all':
raise Exception("Unknown direction %s, should be one of x, -x, y, -y, all"
% direction)
if direction == 'all':
result = []
for newdir in ['x', 'y']:
ofile = None
if out_file is not None:
                fname, ext = op.splitext(out_file)
if ext == '.gz':
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
ofile = '%s_%s%s' % (fname, newdir, ext)
result += [gsr(epi_data, mask, newdir,
ref_file=ref_file, out_file=ofile)]
return result
# Step 1
n2_mask = np.zeros_like(mask)
# Step 2
if direction == "x":
n2lim = np.floor(mask.shape[0]/2)
n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim*2), :, :]
n2_mask[n2lim:(n2lim*2), :, :] = mask[:n2lim, :, :]
elif direction == "y":
n2lim = np.floor(mask.shape[1]/2)
n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim*2), :]
n2_mask[:, n2lim:(n2lim*2), :] = mask[:, :n2lim, :]
elif direction == "z":
n2lim = np.floor(mask.shape[2]/2)
n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim*2)]
n2_mask[:, :, n2lim:(n2lim*2)] = mask[:, :, :n2lim]
# Step 3
n2_mask = n2_mask * (1-mask)
# Step 4: non-ghost background region is labeled as 2
n2_mask = n2_mask + 2 * (1 - n2_mask - mask)
# Save mask
if ref_file is not None and out_file is not None:
ref = nb.load(ref_file)
out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
out.to_filename(out_file)
# Step 5: signal is the entire foreground image
ghost = epi_data[n2_mask == 1].mean() - epi_data[n2_mask == 2].mean()
signal = epi_data[n2_mask == 0].mean()
return float(ghost/signal)
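# Usage sketch (file names are hypothetical; the EPI and mask must share the same grid):
#   epi = nb.load('mean_epi.nii.gz').get_data()
#   msk = nb.load('brain_mask.nii.gz').get_data()
#   ghost_to_signal = gsr(epi, msk, direction='y')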
def dvars(func, mask, output_all=False, out_file=None):
"""
Compute the mean :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Power2012]_.
Particularly, the *standardized* :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Nichols2013]_ are computed.
.. note:: Implementation details
Uses the implementation of the `Yule-Walker equations
from nitime
<http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html\
#nitime.algorithms.autoregressive.AR_est_YW>`_
for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.
:param numpy.ndarray func: functional data, after head-motion-correction.
:param numpy.ndarray mask: a 3D mask of the brain
:param bool output_all: write out all dvars
:param str out_file: a path to which the standardized dvars should be saved.
:return: the standardized DVARS
"""
if len(func.shape) != 4:
raise RuntimeError(
"Input fMRI dataset should be 4-dimensional" % func)
# Remove zero-variance voxels across time axis
zv_mask = zero_variance(func, mask)
idx = np.where(zv_mask > 0)
mfunc = func[idx[0], idx[1], idx[2], :]
# Robust standard deviation
func_sd = (np.percentile(mfunc, 75) -
np.percentile(mfunc, 25)) / 1.349
# Demean
mfunc -= mfunc.mean(axis=1)[..., np.newaxis]
# AR1
ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
# Predicted standard deviation of temporal derivative
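    # For an AR(1) process with lag-1 coefficient a1, var(x[t] - x[t-1]) equals
    # 2 * (1 - a1) * var(x), hence the sqrt(2 * (1 - a1)) factor applied to the
    # robust SD computed above.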
func_sd_pd = np.squeeze(np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd)
diff_sd_mean = func_sd_pd[func_sd_pd > 0].mean()
# Compute temporal difference time series
func_diff = np.diff(mfunc, axis=1)
# DVARS (no standardization)
dvars_nstd = func_diff.std(axis=0)
# standardization
dvars_stdz = dvars_nstd / diff_sd_mean
# voxelwise standardization
diff_vx_stdz = func_diff / np.array([func_sd_pd] * func_diff.shape[-1]).T
dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
if output_all:
gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
else:
gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
if out_file is not None:
np.savetxt(out_file, gendvars, fmt='%.12f')
return gendvars
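# Usage sketch (`func4d` and `brainmask` are assumed numpy arrays: the
# motion-corrected 4D series and its 3D brain mask):
#   std_dvars = dvars(func4d, brainmask)                     # shape (T-1, 1), standardized
#   all_dvars = dvars(func4d, brainmask, output_all=True)    # stacked std / raw / voxelwise-std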
def fd_jenkinson(in_file, rmax=80., out_file=None):
"""
Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_
on a 4D dataset, after ``3dvolreg`` has been executed
(generally a file named ``*.affmat12.1D``).
:param str in_file: path to epi file
:param float rmax: the default radius (as in FSL) of a sphere represents
the brain in which the angular displacements are projected.
:param str out_file: a path for the output file with the FD
    :return: the path to the output file containing the FD time series
    :rtype: str
.. note ::
:code:`infile` should have one 3dvolreg affine matrix in one row -
NOT the motion parameters
"""
import sys
import math
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
out_file = op.abspath('%s_fdfile%s' % (fname, ext))
# if in_file (coordinate_transformation) is actually the rel_mean output
# of the MCFLIRT command, forward that file
if 'rel.rms' in in_file:
return in_file
pm_ = np.genfromtxt(in_file)
original_shape = pm_.shape
pm = np.zeros((pm_.shape[0], pm_.shape[1] + 4))
pm[:, :original_shape[1]] = pm_
pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
# rigid body transformation matrix
T_rb_prev = np.matrix(np.eye(4))
flag = 0
X = [0] # First timepoint
for i in range(0, pm.shape[0]):
# making use of the fact that the order of aff12 matrix is "row-by-row"
T_rb = np.matrix(pm[i].reshape(4, 4))
if flag == 0:
flag = 1
else:
M = np.dot(T_rb, T_rb_prev.I) - np.eye(4)
A = M[0:3, 0:3]
b = M[0:3, 3]
FD_J = math.sqrt(
(rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b))
X.append(FD_J)
T_rb_prev = T_rb
np.savetxt(out_file, X)
return out_file
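# Usage sketch (the file name is hypothetical): the input is the affine-matrix file
# written by 3dvolreg, one flattened 4x4 transform per row:
#   fd_path = fd_jenkinson('sub01_rest.affmat12.1D')
#   fd_series = np.genfromtxt(fd_path)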
def gcor(func, mask):
"""
Compute the :abbr:`GCOR (global correlation)`.
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the computed GCOR value
"""
# Remove zero-variance voxels across time axis
tv_mask = zero_variance(func, mask)
idx = np.where(tv_mask > 0)
zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)
avg_ts = zscores.mean(axis=0)
return float(avg_ts.transpose().dot(avg_ts) / len(avg_ts))
def zero_variance(func, mask):
"""
Mask out voxels with zero variance across t-axis
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the 3D mask of voxels with nonzero variance across :math:`t`.
:rtype: numpy.ndarray
"""
idx = np.where(mask > 0)
func = func[idx[0], idx[1], idx[2], :]
tvariance = func.var(axis=1)
tv_mask = np.zeros_like(tvariance)
tv_mask[tvariance > 0] = 1
newmask = np.zeros_like(mask)
newmask[idx] = tv_mask
return newmask
| 31.078292 | 89 | 0.620749 | [
"Apache-2.0"
] | amakropoulos/structural-pipeline-measures | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py | 8,733 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutotrial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 | [
"BSD-3-Clause"
] | aiueocode/djangorest | tutorial/manage.py | 665 | Python |
import os
from dominate import document
import dominate.tags as tags
import shlex
import subprocess as sp
from tqdm.auto import tqdm
style = (
"""
#myInput {
background-image: url('/css/searchicon.png'); /* Add a search icon to input */
background-position: 10px 12px; /* Position the search icon */
background-repeat: no-repeat; /* Do not repeat the icon image */
width: 100%; /* Full-width */
font-size: 16px; /* Increase font-size */
padding: 12px 20px 12px 40px; /* Add some padding */
border: 1px solid #ddd; /* Add a grey border */
margin-bottom: 12px; /* Add some space below the input */
}
#myUL {
/* Remove default list styling */
list-style-type: none;
padding: 0;
margin: 0;
}
#myUL li a {
border: 1px solid #ddd; /* Add a border to all links */
margin-top: -1px; /* Prevent double borders */
background-color: #f6f6f6; /* Grey background color */
padding: 12px; /* Add some padding */
text-decoration: none; /* Remove default text underline */
font-size: 18px; /* Increase the font-size */
color: black; /* Add a black text color */
display: block; /* Make it into a block element to fill the whole list */
}
#myUL li a:hover:not(.header) {
background-color: #eee; /* Add a hover effect to all links, except for headers */
}
""")
style2 = (
"""
.row {
display: flex;
}
.column {
flex: 33.33%;
padding: 5px;
}
""")
def runcommand(cmd):
p = sp.run(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
return p.stdout, p.stderr
def generate_html(dirname, outdir, title="images"):
if not os.path.exists(outdir):
os.makedirs(outdir)
doc = document(title=title)
with doc.head:
tags.style(style)
with doc:
with tags.ul(id="myUL"):
for category in os.listdir(dirname):
tags.li(tags.a(category, href=category))
with open(os.path.join(outdir, "index.html"), 'w') as f:
f.write(doc.render())
pbar1 = tqdm(os.listdir(dirname), dynamic_ncols=False)
for category in pbar1:
pbar1.set_description(category)
if not os.path.exists(os.path.join(outdir, category)):
os.makedirs(os.path.join(outdir, category))
subdoc = document(title=category)
with subdoc.head:
tags.style(style)
with subdoc:
tags.a("back", href="..")
with tags.ul(id="myUL"):
for subcat in os.listdir(os.path.join(dirname, category)):
tags.li(tags.a(subcat, href=subcat))
with open(os.path.join(outdir, category, "index.html"), 'w') as f:
f.write(subdoc.render())
pbar2 = tqdm(os.listdir(os.path.join(dirname, category)), dynamic_ncols=False)
for subcat in pbar2:
pbar2.set_description(subcat)
if not os.path.exists(os.path.join(outdir, category, subcat)):
os.makedirs(os.path.join(outdir, category, subcat))
ssubdoc = document(title=subcat)
with ssubdoc.head:
tags.style(style2)
imgs = []
pbar3 = tqdm(os.listdir(os.path.join(dirname, category, subcat)), dynamic_ncols=False)
for img in pbar3:
pbar3.set_description(img)
imgpng = img.replace(".pdf", ".png")
imgs.append(imgpng)
runcommand(
"convert -density 150 {} -quality 100 {}".format(
os.path.join(dirname, category, subcat, img),
os.path.join(outdir, category, subcat, imgpng),
)
)
with ssubdoc:
tags.a("back", href="..")
ncols = 3
for idx in range(0, len(imgs), ncols):
with tags.div(_class="row"):
                        final = min(idx + ncols, len(imgs))
for sidx in range(idx, final):
with tags.div(_class="column"):
tags.img(
src=imgs[sidx],
alt=os.path.splitext(imgs[sidx])[0],
style="height:500px",
)
with open(os.path.join(outdir, category, subcat, "index.html"), 'w') as f:
f.write(ssubdoc.render())
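# Usage sketch (directory names are hypothetical): expects dirname/<category>/<subcat>/*.pdf,
# converts each PDF to PNG with ImageMagick's `convert`, and writes linked index.html pages
# (category list -> subcategory list -> a 3-column image grid) under outdir:
#   generate_html('figures', 'site', title='analysis plots')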
| 32.224638 | 98 | 0.54756 | [
"MIT"
] | shane-breeze/zdb-analysis | zdb/drawing/generate_html.py | 4,447 | Python |
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 2
img_height, img_width = 64, 64#572, 572
out_height, out_width = 64, 64#388, 388
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
        super(Mynet, self).__init__()  # necessary so submodules/parameters are registered
enc1 = []
enc1.append(torch.nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
self.enc1 = torch.nn.Sequential(*enc1)
        self.out = torch.nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1)
| 30.979592 | 80 | 0.656126 | [
"MIT"
] | skn047/DeepLearningMugenKnock | Question_semaseg/my_answers/bin_loss_pytorch.py | 1,518 | Python |
# -*- coding: utf-8 -*-
"""
celery.result
~~~~~~~~~~~~~
Task results/state and groups of results.
"""
from __future__ import absolute_import
import time
import warnings
from collections import deque
from contextlib import contextmanager
from copy import copy
from kombu.utils import cached_property
from kombu.utils.compat import OrderedDict
from . import current_app
from . import states
from ._state import _set_task_join_will_block, task_join_will_block
from .app import app_or_default
from .datastructures import DependencyGraph, GraphFormatter
from .exceptions import IncompleteStream, TimeoutError
from .five import items, range, string_t, monotonic
from .utils import deprecated
__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult',
'EagerResult', 'result_from_tuple']
E_WOULDBLOCK = """\
Never call result.get() within a task!
See http://docs.celeryq.org/en/latest/userguide/tasks.html\
#task-synchronous-subtasks
In Celery 3.2 this will result in an exception being
raised instead of just being a warning.
"""
def assert_will_not_block():
if task_join_will_block():
warnings.warn(RuntimeWarning(E_WOULDBLOCK))
@contextmanager
def allow_join_result():
reset_value = task_join_will_block()
_set_task_join_will_block(False)
try:
yield
finally:
_set_task_join_will_block(reset_value)
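# Usage sketch: temporarily allow joining on a result from inside a task (lifting the
# warning emitted by assert_will_not_block), e.g.:
#   with allow_join_result():
#       value = some_async_result.get(timeout=10)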
class ResultBase(object):
"""Base class for all results"""
#: Parent result (if part of a chain)
parent = None
class AsyncResult(ResultBase):
"""Query task state.
:param id: see :attr:`id`.
:keyword backend: see :attr:`backend`.
"""
app = None
#: Error raised for timeouts.
TimeoutError = TimeoutError
#: The task's UUID.
id = None
#: The task result backend to use.
backend = None
def __init__(self, id, backend=None, task_name=None,
app=None, parent=None):
self.app = app_or_default(app or self.app)
self.id = id
self.backend = backend or self.app.backend
self.task_name = task_name
self.parent = parent
self._cache = None
def as_tuple(self):
parent = self.parent
return (self.id, parent and parent.as_tuple()), None
serializable = as_tuple # XXX compat
def forget(self):
"""Forget about (and possibly remove the result of) this task."""
self._cache = None
self.backend.forget(self.id)
def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers.
Any worker receiving the task, or having reserved the
task, *must* ignore it.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from workers. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
"""
self.app.control.revoke(self.id, connection=connection,
terminate=terminate, signal=signal,
reply=wait, timeout=timeout)
def get(self, timeout=None, propagate=True, interval=0.5,
no_ack=True, follow_parents=True,
EXCEPTION_STATES=states.EXCEPTION_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES):
"""Wait until task is ready, and return its result.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please read :ref:`task-synchronous-subtasks`.
:keyword timeout: How long to wait, in seconds, before the
operation times out.
:keyword propagate: Re-raise exception if the task failed.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve the result. Note that this does not have any effect
when using the amqp result store backend, as it does not
use polling.
:keyword no_ack: Enable amqp no ack (automatically acknowledge
message). If this is :const:`False` then the message will
**not be acked**.
:keyword follow_parents: Reraise any exception raised by parent task.
:raises celery.exceptions.TimeoutError: if `timeout` is not
:const:`None` and the result does not arrive within `timeout`
seconds.
If the remote call raised an exception then that exception will
be re-raised.
"""
assert_will_not_block()
on_interval = None
if follow_parents and propagate and self.parent:
on_interval = self._maybe_reraise_parent_error
on_interval()
if self._cache:
if propagate:
self.maybe_reraise()
return self.result
meta = self.backend.wait_for(
self.id, timeout=timeout,
interval=interval,
on_interval=on_interval,
no_ack=no_ack,
)
if meta:
self._maybe_set_cache(meta)
status = meta['status']
if status in PROPAGATE_STATES and propagate:
raise meta['result']
return meta['result']
wait = get # deprecated alias to :meth:`get`.
def _maybe_reraise_parent_error(self):
for node in reversed(list(self._parents())):
node.maybe_reraise()
def _parents(self):
node = self.parent
while node:
yield node
node = node.parent
def collect(self, intermediate=False, **kwargs):
"""Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.
An example would be having the following tasks:
.. code-block:: python
from celery import group
from proj.celery import app
@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()
@app.task(trail=True)
def B(i):
return pow2.delay(i)
@app.task(trail=True)
def pow2(i):
return i ** 2
Note that the ``trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.
Calling :meth:`collect` would return:
.. code-block:: python
>>> from celery.result import ResultBase
>>> from proj.tasks import A
>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
"""
for _, R in self.iterdeps(intermediate=intermediate):
yield R, R.get(**kwargs)
def get_leaf(self):
value = None
for _, R in self.iterdeps():
value = R.get()
return value
def iterdeps(self, intermediate=False):
stack = deque([(None, self)])
while stack:
parent, node = stack.popleft()
yield parent, node
if node.ready():
stack.extend((node, child) for child in node.children or [])
else:
if not intermediate:
raise IncompleteStream()
def ready(self):
"""Returns :const:`True` if the task has been executed.
If the task is still running, pending, or is waiting
for retry then :const:`False` is returned.
"""
return self.state in self.backend.READY_STATES
def successful(self):
"""Returns :const:`True` if the task executed successfully."""
return self.state == states.SUCCESS
def failed(self):
"""Returns :const:`True` if the task failed."""
return self.state == states.FAILURE
def maybe_reraise(self):
if self.state in states.PROPAGATE_STATES:
raise self.result
def build_graph(self, intermediate=False, formatter=None):
graph = DependencyGraph(
formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
)
for parent, node in self.iterdeps(intermediate=intermediate):
graph.add_arc(node)
if parent:
graph.add_edge(parent, node)
return graph
def __str__(self):
"""`str(self) -> self.id`"""
return str(self.id)
def __hash__(self):
"""`hash(self) -> hash(self.id)`"""
return hash(self.id)
def __repr__(self):
return '<{0}: {1}>'.format(type(self).__name__, self.id)
def __eq__(self, other):
if isinstance(other, AsyncResult):
return other.id == self.id
elif isinstance(other, string_t):
return other == self.id
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __copy__(self):
return self.__class__(
self.id, self.backend, self.task_name, self.app, self.parent,
)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return self.id, self.backend, self.task_name, None, self.parent
def __del__(self):
self._cache = None
@cached_property
def graph(self):
return self.build_graph()
@property
def supports_native_join(self):
return self.backend.supports_native_join
@property
def children(self):
return self._get_task_meta().get('children')
def _maybe_set_cache(self, meta):
if meta:
state = meta['status']
if state == states.SUCCESS or state in states.PROPAGATE_STATES:
return self._set_cache(meta)
return meta
def _get_task_meta(self):
if self._cache is None:
return self._maybe_set_cache(self.backend.get_task_meta(self.id))
return self._cache
def _set_cache(self, d):
children = d.get('children')
if children:
d['children'] = [
result_from_tuple(child, self.app) for child in children
]
self._cache = d
return d
@property
def result(self):
"""When the task has been executed, this contains the return value.
If the task raised an exception, this will be the exception
instance."""
return self._get_task_meta()['result']
info = result
@property
def traceback(self):
"""Get the traceback of a failed task."""
return self._get_task_meta().get('traceback')
@property
def state(self):
"""The tasks current state.
Possible values includes:
*PENDING*
The task is waiting for execution.
*STARTED*
The task has been started.
*RETRY*
The task is to be retried, possibly because of failure.
*FAILURE*
The task raised an exception, or has exceeded the retry limit.
The :attr:`result` attribute then contains the
exception raised by the task.
*SUCCESS*
The task executed successfully. The :attr:`result` attribute
then contains the tasks return value.
"""
return self._get_task_meta()['status']
status = state
@property
def task_id(self):
"""compat alias to :attr:`id`"""
return self.id
@task_id.setter # noqa
def task_id(self, id):
self.id = id
BaseAsyncResult = AsyncResult # for backwards compatibility.
class ResultSet(ResultBase):
"""Working with more than one result.
:param results: List of result instances.
"""
app = None
    #: List of results in the set.
results = None
def __init__(self, results, app=None, **kwargs):
self.app = app_or_default(app or self.app)
self.results = results
def add(self, result):
"""Add :class:`AsyncResult` as a new member of the set.
Does nothing if the result is already a member.
"""
if result not in self.results:
self.results.append(result)
def remove(self, result):
"""Remove result from the set; it must be a member.
:raises KeyError: if the result is not a member.
"""
if isinstance(result, string_t):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)
def discard(self, result):
"""Remove result from the set if it is a member.
If it is not a member, do nothing.
"""
try:
self.remove(result)
except KeyError:
pass
def update(self, results):
"""Update set with the union of itself and an iterable with
results."""
self.results.extend(r for r in results if r not in self.results)
def clear(self):
"""Remove all results from this set."""
self.results[:] = [] # don't create new list.
def successful(self):
"""Was all of the tasks successful?
:returns: :const:`True` if all of the tasks finished
successfully (i.e. did not raise an exception).
"""
return all(result.successful() for result in self.results)
def failed(self):
"""Did any of the tasks fail?
:returns: :const:`True` if one of the tasks failed.
(i.e., raised an exception)
"""
return any(result.failed() for result in self.results)
def maybe_reraise(self):
for result in self.results:
result.maybe_reraise()
def waiting(self):
"""Are any of the tasks incomplete?
        :returns: :const:`True` if any of the tasks is still
waiting for execution.
"""
return any(not result.ready() for result in self.results)
def ready(self):
"""Did all of the tasks complete? (either by success of failure).
:returns: :const:`True` if all of the tasks has been
executed.
"""
return all(result.ready() for result in self.results)
def completed_count(self):
"""Task completion count.
:returns: the number of tasks completed.
"""
return sum(int(result.successful()) for result in self.results)
def forget(self):
"""Forget about (and possible remove the result of) all the tasks."""
for result in self.results:
result.forget()
def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers for all tasks in the set.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from worker. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
"""
self.app.control.revoke([r.id for r in self.results],
connection=connection, timeout=timeout,
terminate=terminate, signal=signal, reply=wait)
def __iter__(self):
return iter(self.results)
def __getitem__(self, index):
"""`res[i] -> res.results[i]`"""
return self.results[index]
@deprecated('3.2', '3.3')
def iterate(self, timeout=None, propagate=True, interval=0.5):
"""Deprecated method, use :meth:`get` with a callback argument."""
elapsed = 0.0
results = OrderedDict((result.id, copy(result))
for result in self.results)
while results:
removed = set()
for task_id, result in items(results):
if result.ready():
yield result.get(timeout=timeout and timeout - elapsed,
propagate=propagate)
removed.add(task_id)
else:
if result.backend.subpolling_interval:
time.sleep(result.backend.subpolling_interval)
for task_id in removed:
results.pop(task_id, None)
time.sleep(interval)
elapsed += interval
if timeout and elapsed >= timeout:
raise TimeoutError('The operation timed out')
def get(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""See :meth:`join`
This is here for API compatibility with :class:`AsyncResult`,
in addition it uses :meth:`join_native` if available for the
current result backend.
"""
return (self.join_native if self.supports_native_join else self.join)(
timeout=timeout, propagate=propagate,
interval=interval, callback=callback, no_ack=no_ack)
def join(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""Gathers the results of all tasks as a list in order.
.. note::
This can be an expensive operation for result store
backends that must resort to polling (e.g. database).
You should consider using :meth:`join_native` if your backend
supports it.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
:keyword timeout: The number of seconds to wait for results before
the operation times out.
:keyword propagate: If any of the tasks raises an exception, the
exception will be re-raised.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this
does not have any effect when using the amqp
result store backend, as it does not use polling.
:keyword callback: Optional callback to be called for every result
received. Must have signature ``(task_id, value)``
No results will be returned by this function if
a callback is specified. The order of results
is also arbitrary when a callback is used.
To get access to the result object for a particular
id you will have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
:keyword no_ack: Automatic message acknowledgement (Note that if this
is set to :const:`False` then the messages *will not be
acknowledged*).
:raises celery.exceptions.TimeoutError: if ``timeout`` is not
:const:`None` and the operation takes longer than ``timeout``
seconds.
"""
assert_will_not_block()
time_start = monotonic()
remaining = None
results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError('join operation timed out')
value = result.get(
timeout=remaining, propagate=propagate,
interval=interval, no_ack=no_ack,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
def iter_native(self, timeout=None, interval=0.5, no_ack=True):
"""Backend optimized version of :meth:`iterate`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
results = self.results
if not results:
return iter([])
return self.backend.get_many(
set(r.id for r in results),
timeout=timeout, interval=interval, no_ack=no_ack,
)
def join_native(self, timeout=None, propagate=True,
interval=0.5, callback=None, no_ack=True):
"""Backend optimized version of :meth:`join`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
assert_will_not_block()
order_index = None if callback else dict(
(result.id, i) for i, result in enumerate(self.results)
)
acc = None if callback else [None for _ in range(len(self))]
for task_id, meta in self.iter_native(timeout, interval, no_ack):
value = meta['result']
if propagate and meta['status'] in states.PROPAGATE_STATES:
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc
def _failed_join_report(self):
return (res for res in self.results
if res.backend.is_cached(res.id) and
res.state in states.PROPAGATE_STATES)
def __len__(self):
return len(self.results)
def __eq__(self, other):
if isinstance(other, ResultSet):
return other.results == self.results
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<{0}: [{1}]>'.format(type(self).__name__,
', '.join(r.id for r in self.results))
@property
def subtasks(self):
"""Deprecated alias to :attr:`results`."""
return self.results
@property
def supports_native_join(self):
try:
return self.results[0].supports_native_join
except IndexError:
pass
@property
def backend(self):
return self.app.backend if self.app else self.results[0].backend
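# A small sketch of ResultSet.join() with a callback, as described in the join
# docstring above; `results` is assumed to be a list of AsyncResult instances.
def _example_join_with_callback(app, results):
    collected = {}
    def on_result(task_id, value):
        collected[task_id] = value
    ResultSet(results, app=app).join(callback=on_result)
    return collected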
class GroupResult(ResultSet):
"""Like :class:`ResultSet`, but with an associated id.
This type is returned by :class:`~celery.group`, and the
    deprecated TaskSet's :meth:`~celery.task.TaskSet.apply_async` method.
It enables inspection of the tasks state and return values as
a single entity.
:param id: The id of the group.
:param results: List of result instances.
"""
#: The UUID of the group.
id = None
#: List/iterator of results in the group
results = None
def __init__(self, id=None, results=None, **kwargs):
self.id = id
ResultSet.__init__(self, results, **kwargs)
def save(self, backend=None):
"""Save group-result for later retrieval using :meth:`restore`.
Example::
>>> def save_and_restore(result):
... result.save()
... result = GroupResult.restore(result.id)
"""
return (backend or self.app.backend).save_group(self.id, self)
def delete(self, backend=None):
"""Remove this result if it was previously saved."""
(backend or self.app.backend).delete_group(self.id)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return self.id, self.results
def __eq__(self, other):
if isinstance(other, GroupResult):
return other.id == self.id and other.results == self.results
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
', '.join(r.id for r in self.results))
def as_tuple(self):
return self.id, [r.as_tuple() for r in self.results]
serializable = as_tuple # XXX compat
@property
def children(self):
return self.results
@classmethod
def restore(self, id, backend=None):
"""Restore previously saved group result."""
return (
backend or (self.app.backend if self.app else current_app.backend)
).restore_group(id)
class TaskSetResult(GroupResult):
"""Deprecated version of :class:`GroupResult`"""
def __init__(self, taskset_id, results=None, **kwargs):
# XXX supports the taskset_id kwarg.
# XXX previously the "results" arg was named "subtasks".
if 'subtasks' in kwargs:
results = kwargs['subtasks']
GroupResult.__init__(self, taskset_id, results, **kwargs)
def itersubtasks(self):
"""Deprecated. Use ``iter(self.results)`` instead."""
return iter(self.results)
@property
def total(self):
"""Deprecated: Use ``len(r)``."""
return len(self)
@property
def taskset_id(self):
"""compat alias to :attr:`self.id`"""
return self.id
@taskset_id.setter # noqa
def taskset_id(self, id):
self.id = id
class EagerResult(AsyncResult):
"""Result that we know has already been executed."""
task_name = None
def __init__(self, id, ret_value, state, traceback=None):
self.id = id
self._result = ret_value
self._state = state
self._traceback = traceback
def _get_task_meta(self):
return {'task_id': self.id, 'result': self._result, 'status':
self._state, 'traceback': self._traceback}
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return (self.id, self._result, self._state, self._traceback)
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def ready(self):
return True
def get(self, timeout=None, propagate=True, **kwargs):
if self.successful():
return self.result
elif self.state in states.PROPAGATE_STATES:
if propagate:
raise self.result
return self.result
wait = get
def forget(self):
pass
def revoke(self, *args, **kwargs):
self._state = states.REVOKED
def __repr__(self):
return '<EagerResult: {0.id}>'.format(self)
@property
def result(self):
"""The tasks return value"""
return self._result
@property
def state(self):
"""The tasks state."""
return self._state
status = state
@property
def traceback(self):
"""The traceback if the task failed."""
return self._traceback
@property
def supports_native_join(self):
return False
def result_from_tuple(r, app=None):
# earlier backends may just pickle, so check if
# result is already prepared.
app = app_or_default(app)
Result = app.AsyncResult
if not isinstance(r, ResultBase):
res, nodes = r
if nodes:
return app.GroupResult(
res, [result_from_tuple(child, app) for child in nodes],
)
# previously did not include parent
id, parent = res if isinstance(res, (list, tuple)) else (res, None)
if parent:
parent = result_from_tuple(parent, app)
return Result(id, parent=parent)
return r
from_serializable = result_from_tuple # XXX compat
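# A small illustrative round-trip: serialize a result to a tuple and rebuild an
# equivalent result object from it; `app` and `async_result` are assumed to be
# provided by the calling code.
def _example_tuple_roundtrip(app, async_result):
    as_tuple = async_result.as_tuple()           # e.g. ((id, parent), None)
    rebuilt = result_from_tuple(as_tuple, app)   # AsyncResult with the same id
    return rebuilt.id == async_result.id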
| 30.976139 | 79 | 0.591071 | [
"MIT"
] | CharleyFarley/ovvio | venv/lib/python2.7/site-packages/celery/result.py | 28,560 | Python |
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.urls import reverse_lazy
from .models import Pokemon
class PokemonListView(ListView):
template_name = "pages/pokemon_list.html"
model = Pokemon
class PokemonDetailView(DetailView):
template_name = "pages/pokemon_detail.html"
model = Pokemon
class PokemonCreateView(CreateView):
template_name = "pages/pokemon_create.html"
model = Pokemon
fields = ['name', 'description', 'owner']
class PokemonUpdateView(UpdateView):
template_name = "pages/pokemon_update.html"
model = Pokemon
fields = ['name', 'description', 'owner']
class PokemonDeleteView(DeleteView):
template_name = "pages/pokemon_delete.html"
model = Pokemon
success_url = reverse_lazy("pokemon_list")
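# A minimal sketch of how these views could be routed; the path strings and URL
# names below are assumptions for illustration, not taken from the project.
from django.urls import path
example_urlpatterns = [
    path("", PokemonListView.as_view(), name="pokemon_list"),
    path("<int:pk>/", PokemonDetailView.as_view(), name="pokemon_detail"),
    path("create/", PokemonCreateView.as_view(), name="pokemon_create"),
    path("<int:pk>/update/", PokemonUpdateView.as_view(), name="pokemon_update"),
    path("<int:pk>/delete/", PokemonDeleteView.as_view(), name="pokemon_delete"),
]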
| 25.545455 | 47 | 0.725979 | [
"MIT"
] | danieldills/pokemon-django | pokemon/views.py | 843 | Python |
import tensorflow as tf
from tensorflow.keras.models import Model
import pandas as pd
import matplotlib.pyplot as plt
import os
import logging
from .common import create_directories
def get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> \
Model:
"""Function creates ANN model and compile.
Args:
stage ([str]): stage of experiment
        no_classes ([int]): Number of classes for classification
input_shape ([int, int]): Input shape for model's input layer
loss ([str]): Loss function for model
optimizer ([str]): Optimizer for model
metrics ([str]): Metrics to watch while training
Returns:
model: ANN demo model
"""
# Define layers
LAYERS = []
BASE_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
KERNEL_INIT_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
BN_BEFORE_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
BN_AFTER_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
logging.info("Creating Model..")
if stage == 'BASE_MODEL':
LAYERS = BASE_LAYERS
elif stage == 'KERNEL_INIT_MODEL':
LAYERS = KERNEL_INIT_LAYERS
elif stage == 'BN_BEFORE_MODEL':
LAYERS = BN_BEFORE_LAYERS
elif stage == 'BN_AFTER_MODEL':
LAYERS = BN_AFTER_LAYERS
model_ann = tf.keras.models.Sequential(LAYERS)
logging.info("Compiling Model..")
model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model_ann
def save_model(model_dir: str, model: Model, model_suffix: str) -> None:
"""
args:
model_dir: directory to save the model
model: model object to save
model_suffix: Suffix to save the model
"""
create_directories([model_dir])
model_file = os.path.join(model_dir, f"{model_suffix}.h5")
model.save(model_file)
logging.info(f"Saved model: {model_file}")
def save_history_plot(history, plot_dir: str, stage: str) -> None:
"""
Args:
history: History object for plotting loss/accuracy curves
plot_dir: Directory to save plot files
stage: Stage name for training
"""
pd.DataFrame(history.history).plot(figsize=(10, 8))
plt.grid(True)
create_directories([plot_dir])
plot_file = os.path.join(plot_dir, stage + "_loss_accuracy.png")
plt.savefig(plot_file)
logging.info(f"Loss accuracy plot saved: {plot_file}")
def get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:
"""
Args:
checkpoint_dir: Directory to save the model at checkpoint
tensorboard_logs: Directory to save tensorboard logs
stage: Stage name for training
Returns:
callback_list: List of created callbacks
"""
create_directories([checkpoint_dir, tensorboard_logs])
tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
ckpt_file_path = os.path.join(checkpoint_dir, f"{stage}_ckpt_model.h5")
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)
callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]
logging.info(f"Callbacks created: {callback_list}")
return callback_list
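# A minimal end-to-end sketch of how these helpers fit together, assuming
# MNIST-shaped data (28x28 inputs, 10 classes) and the artifact directory names
# below; these values are illustrative, not taken from the project config.
def example_training_run():
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    x_train = x_train / 255.0
    model = get_prepared_model(stage='BASE_MODEL', no_classes=10, input_shape=[28, 28],
                               loss='sparse_categorical_crossentropy',
                               optimizer='adam', metrics=['accuracy'])
    callbacks = get_callbacks('artifacts/checkpoints', 'artifacts/tensorboard', 'BASE_MODEL')
    history = model.fit(x_train, y_train, epochs=1, validation_split=0.1, callbacks=callbacks)
    save_history_plot(history, 'artifacts/plots', 'BASE_MODEL')
    save_model('artifacts/models', model, 'BASE_MODEL_final')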
| 40.217054 | 117 | 0.680802 | [
"MIT"
] | iDataAstro/MNIST_CLASSIFICATION | src/utils/model.py | 5,188 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** Thomas Larsson, Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Geometry export
"""
import math
import numpy as np
import log
from progress import Progress
#----------------------------------------------------------------------
# library_geometry
#----------------------------------------------------------------------
def writeLibraryGeometry(fp, meshes, config, shapes=None):
progress = Progress(len(meshes), None)
fp.write('\n <library_geometries>\n')
for mIdx,mesh in enumerate(meshes):
if shapes is None:
shape = None
else:
shape = shapes[mIdx]
writeGeometry(fp, mesh, config, shape)
progress.step()
fp.write(' </library_geometries>\n')
# TODO make shared function, config.getTransform() and mesh.clone(transform)
def rotateCoord(coord, config):
if config.meshOrientation == 'yUpFaceZ':
pass
elif config.meshOrientation == 'yUpFaceX':
# z,y,-x
coord = np.dstack((coord[:,2],coord[:,1],-coord[:,0]))[0]
elif config.meshOrientation == 'zUpFaceNegY':
# x,z,-y
coord = np.dstack((coord[:,0],-coord[:,2],coord[:,1]))[0]
elif config.meshOrientation == 'zUpFaceX':
# z,x,y
coord = np.dstack((coord[:,2],coord[:,0],coord[:,1]))[0]
return coord
def writeGeometry(fp, mesh, config, shapes=None):
progress = Progress()
progress(0)
coord = mesh.coord + config.offset
coord = rotateCoord(coord, config)
nVerts = len(coord)
fp.write('\n' +
' <geometry id="%sMesh" name="%s">\n' % (mesh.name,mesh.name) +
' <mesh>\n' +
' <source id="%s-Position">\n' % mesh.name +
' <float_array count="%d" id="%s-Position-array">\n' % (3*nVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in coord]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Position-array" stride="3">\n' % (nVerts,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.2)
# Normals
if config.useNormals:
mesh.calcNormals()
vnorm = rotateCoord(mesh.vnorm, config)
nNormals = len(mesh.vnorm)
fp.write(
' <source id="%s-Normals">\n' % mesh.name +
' <float_array count="%d" id="%s-Normals-array">\n' % (3*nNormals,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(no)) for no in vnorm]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Normals-array" stride="3">\n' % (nNormals,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.35)
# UV coordinates
nUvVerts = len(mesh.texco)
fp.write(
' <source id="%s-UV">\n' % mesh.name +
' <float_array count="%d" id="%s-UV-array">\n' % (2*nUvVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f " % tuple(uv)) for uv in mesh.texco]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-UV-array" stride="2">\n' % (nUvVerts,mesh.name) +
' <param type="float" name="S"></param>\n' +
' <param type="float" name="T"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.5, 0.7)
# Faces
fp.write(
' <vertices id="%s-Vertex">\n' % mesh.name +
' <input semantic="POSITION" source="#%s-Position"/>\n' % mesh.name +
' </vertices>\n')
checkFaces(mesh, nVerts, nUvVerts)
progress(0.7, 0.9)
writePolylist(fp, mesh, config)
progress(0.9, 0.99)
fp.write(
' </mesh>\n' +
' </geometry>\n')
if shapes is not None:
shaprog = Progress(len(shapes))
for name,shape in shapes:
writeShapeKey(fp, name, shape, mesh, config)
shaprog.step()
progress(1)
def writeShapeKey(fp, name, shape, mesh, config):
if len(shape.verts) == 0:
log.debug("Shapekey %s has zero verts. Ignored" % name)
return
progress = Progress()
# Verts
progress(0)
target = mesh.coord.copy()
target[:] += config.offset
target[shape.verts] += shape.data[np.s_[...]]
target = rotateCoord(config.scale*target, config)
nVerts = len(target)
fp.write(
' <geometry id="%sMeshMorph_%s" name="%s">\n' % (mesh.name, name, name) +
' <mesh>\n' +
' <source id="%sMeshMorph_%s-positions">\n' % (mesh.name, name) +
' <float_array id="%sMeshMorph_%s-positions-array" count="%d">\n' % (mesh.name, name, 3*nVerts) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in target]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor source="#%sMeshMorph_%s-positions-array" count="%d" stride="3">\n' % (mesh.name, name, nVerts) +
' <param name="X" type="float"/>\n' +
' <param name="Y" type="float"/>\n' +
' <param name="Z" type="float"/>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.3)
# Polylist
nFaces = len(mesh.fvert)
fp.write(
' <vertices id="%sMeshMorph_%s-vertices">\n' % (mesh.name, name) +
' <input semantic="POSITION" source="#%sMeshMorph_%s-positions"/>\n' % (mesh.name, name) +
' </vertices>\n' +
' <polylist count="%d">\n' % nFaces +
' <input semantic="VERTEX" source="#%sMeshMorph_%s-vertices" offset="0"/>\n' % (mesh.name, name) +
#' <input semantic="NORMAL" source="#%sMeshMorph_%s-normals" offset="1"/>\n' % (mesh.name, name) +
' <vcount>')
fp.write( ''.join(["4 " for fv in mesh.fvert]) )
fp.write('\n' +
' </vcount>\n' +
' <p>')
fp.write( ''.join([("%d %d %d %d " % tuple(fv)) for fv in mesh.fvert]) )
fp.write('\n' +
' </p>\n' +
' </polylist>\n' +
' </mesh>\n' +
' </geometry>\n')
progress(1)
#
# writePolylist(fp, mesh, config):
#
def writePolylist(fp, mesh, config):
progress = Progress(2)
nFaces = len(mesh.fvert)
fp.write(
' <polylist count="%d">\n' % nFaces +
' <input offset="0" semantic="VERTEX" source="#%s-Vertex"/>\n' % mesh.name)
if config.useNormals:
fp.write(
' <input offset="1" semantic="NORMAL" source="#%s-Normals"/>\n' % mesh.name +
' <input offset="2" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
else:
fp.write(
' <input offset="1" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
vc = ''
p = ''
# get number of vertices per face
r = mesh.vertsPerFaceForExport
for fn,fv in enumerate(mesh.fvert):
fuv = mesh.fuvs[fn]
vc += str(r) + ' '
if config.useNormals:
p += ''.join([("%d %d %d " % (fv[n], fv[n], fuv[n])) for n in range(r)])
else:
p += ''.join([("%d %d " % (fv[n], fuv[n])) for n in range(r)])
fp.write(
' <vcount>' + vc + '\n' +
' </vcount>\n' +
' <p>' + p + '\n' +
' </p>\n' +
' </polylist>\n')
progress.step()
#
# checkFaces(mesh, nVerts, nUvVerts):
#
def checkFaces(mesh, nVerts, nUvVerts):
# TODO document: what does this do (apart from slowing down export)?
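    # In short: this is a sanity check that every face's vertex index and UV index
    # stays within the given vertex/UV counts, raising NameError otherwise before
    # the polylist is written.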
for fn,fvs in enumerate(mesh.fvert):
for n,vn in enumerate(fvs):
uv = mesh.fuvs[fn][n]
if vn > nVerts:
raise NameError("v %d > %d" % (vn, nVerts))
if uv > nUvVerts:
raise NameError("uv %d > %d" % (uv, nUvVerts))
| 32.69967 | 127 | 0.495458 | [
"MIT"
] | Phantori/Radiian-Arts-BioSource | makehuman-master/makehuman/plugins/9_export_collada/dae_geometry.py | 9,908 | Python |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.data import CacheDataset, DataLoader, Dataset
from monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd
from monai.utils import set_determinism
TEST_CASE_1 = [[{"image": np.asarray([1, 2, 3])}, {"image": np.asarray([4, 5])}]]
TEST_CASE_2 = [[{"label": torch.as_tensor([[3], [2]])}, {"label": np.asarray([[1], [2]])}]]
class TestDataLoader(unittest.TestCase):
def test_values(self):
datalist = [
{"image": "spleen_19.nii.gz", "label": "spleen_label_19.nii.gz"},
{"image": "spleen_31.nii.gz", "label": "spleen_label_31.nii.gz"},
]
transform = Compose(
[
DataStatsd(keys=["image", "label"], data_shape=False, value_range=False, data_value=True),
SimulateDelayd(keys=["image", "label"], delay_time=0.1),
]
)
dataset = CacheDataset(data=datalist, transform=transform, cache_rate=0.5, cache_num=1)
n_workers = 0 if sys.platform == "win32" else 2
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=n_workers)
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_exception(self, datalist):
dataset = Dataset(data=datalist, transform=None)
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
with self.assertRaisesRegex((TypeError, RuntimeError), "Collate error on the key"):
for _ in dataloader:
pass
class _RandomDataset(torch.utils.data.Dataset, Randomizable):
def __getitem__(self, index):
return self.R.randint(0, 1000, (1,))
def __len__(self):
return 8
class TestLoaderRandom(unittest.TestCase):
"""
Testing data loader working with the randomizable interface
"""
def setUp(self):
set_determinism(0)
def tearDown(self):
set_determinism(None)
def test_randomize(self):
dataset = _RandomDataset()
dataloader = DataLoader(dataset, batch_size=2, num_workers=3)
output = []
for _ in range(2):
for batch in dataloader:
output.extend(batch.data.numpy().flatten().tolist())
self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])
if __name__ == "__main__":
unittest.main()
| 37.202247 | 117 | 0.657807 | [
"Apache-2.0"
] | Borda/MONAI | tests/test_dataloader.py | 3,311 | Python |
# qubit number=5
# total number=45
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = pi (180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.x(input_qubit[0]) # number=29
prog.h(input_qubit[0]) # number=42
prog.cz(input_qubit[1],input_qubit[0]) # number=43
prog.h(input_qubit[0]) # number=44
prog.cx(input_qubit[0],input_qubit[1]) # number=32
prog.cx(input_qubit[0],input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=40
prog.cx(input_qubit[0],input_qubit[1]) # number=41
prog.cx(input_qubit[0],input_qubit[1]) # number=34
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=35
prog.h(input_qubit[2]) # number=27
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.z(input_qubit[1]) # number=31
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1005.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 30.901515 | 82 | 0.602844 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startQiskit1005.py | 4,079 | Python |
#from mq import *
import sys, time
import urllib3
#networking library
import json
try:
print("Press CTRL+C to abort.")
#mq = MQ();
while True:
http = urllib3.PoolManager()
#perc = mq.MQPercentage()
sys.stdout.write("\r")
sys.stdout.write("\033[K")
data = {
"error":False,
"device_id":"device123",
"fuse_stat":["0","1","0","1","0"]
}
encoded_data = json.dumps(data).encode('utf-8')#create JSON object
http.request(
'POST',
'http://192.168.43.156/smartdbbox/api/public/api/device/db/update',#IP add server
body=encoded_data,
headers={'Content-Type': 'application/json'} )
sys.stdout.flush()
time.sleep(0.1)
except KeyboardInterrupt:
print("\nAbort by user") | 25.65625 | 93 | 0.544458 | [
"Apache-2.0"
] | haziquehaikal/smartdb | hardware/testing/fusecontrol.py | 821 | Python |
import pytest
from mitmproxy.contentviews import protobuf
from . import full_eval
datadir = "mitmproxy/contentviews/test_protobuf_data/"
def test_view_protobuf_request(tdata):
v = full_eval(protobuf.ViewProtobuf())
p = tdata.path(datadir + "protobuf01")
with open(p, "rb") as f:
raw = f.read()
content_type, output = v(raw)
assert content_type == "Protobuf"
assert output == [[('text', '1: 3bbc333c-e61c-433b-819a-0b9a8cc103b8')]]
with pytest.raises(ValueError, match="Failed to parse input."):
v(b'foobar')
@pytest.mark.parametrize("filename", ["protobuf02", "protobuf03"])
def test_format_pbuf(filename, tdata):
path = tdata.path(datadir + filename)
with open(path, "rb") as f:
input = f.read()
with open(path + "-decoded") as f:
expected = f.read()
assert protobuf.format_pbuf(input) == expected
| 28.483871 | 76 | 0.670442 | [
"MIT"
] | 0x7c48/mitmproxy | test/mitmproxy/contentviews/test_protobuf.py | 883 | Python |
import click
from ...runner import events
from . import default
def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
context.endpoints_processed += 1
default.display_execution_result(context, event)
if context.endpoints_processed == event.schema.endpoints_count:
click.echo()
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
"""Short output style shows single symbols in the progress bar.
Otherwise, identical to the default output style.
"""
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
| 36.285714 | 99 | 0.748031 | [
"MIT"
] | RonnyPfannschmidt/schemathesis | src/schemathesis/cli/output/short.py | 1,016 | Python |
from ..utils import sortkey, capitalize_first
FIGURE_TEX_TEMPLATE = r'\hwgraphic{{{path}}}{{{headword}}}{{{attribution}}}'
# change to {filename} if you want to specify full paths.
FIGURE_PATH_TEMPLATE = r'figures/ill-{filename}'
class Image(object):
type = 'img'
def sk(self):
return sortkey(self.hw)
def __init__(self, hw='', img_src='', img_attrib=''):
super().__init__()
self.hw = hw
self.img_src = img_src
self.img_attrib = img_attrib
def __repr__(self):
return "(Image of '{headword}')".format(
headword=self.hw
)
def render(self, settings={}):
figure_path = FIGURE_PATH_TEMPLATE.format(filename=self.img_src)
return FIGURE_TEX_TEMPLATE.format(
headword=capitalize_first(self.hw),
path=figure_path,
attribution=self.img_attrib
)
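# A minimal usage sketch; the headword, filename and attribution below are made
# up, and the expected output assumes capitalize_first() uppercases the first letter.
def _example_render():
    img = Image(hw='aardvark', img_src='aardvark.png', img_attrib='Photo: J. Doe')
    # -> \hwgraphic{figures/ill-aardvark.png}{Aardvark}{Photo: J. Doe}
    return img.render()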
| 27.8125 | 76 | 0.622472 | [
"MIT"
] | redmer/sfm2latex | sfm2latex/dictionary/Image.py | 890 | Python |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
def console_namespace():
import console_python
get_consoles = console_python.get_console
consoles = getattr(get_consoles, "consoles", None)
if consoles:
for console, stdout, stderr in get_consoles.consoles.values():
return console.locals
return {}
def is_display_list(listvar):
from mathutils import Vector
for var in listvar:
if type(var) is not Vector:
return False
return True
class VarStates:
@staticmethod
def store_states():
        # Store the display states, called upon unregistering the Add-on
# This is useful when you press F8 to reload the Addons.
# Then this function preserves the display states of the
# console variables.
state_props = bpy.context.window_manager.MathVisStatePropList
variables = get_math_data()
for key, ktype in variables.items():
if key and key not in state_props:
prop = state_props.add()
prop.name = key
prop.ktype = ktype.__name__
prop.state = [True, False]
@staticmethod
def get_index(key):
index = bpy.context.window_manager.MathVisStatePropList.find(key)
return index
@staticmethod
def delete(key):
state_props = bpy.context.window_manager.MathVisStatePropList
index = state_props.find(key)
if index != -1:
state_props.remove(index)
@staticmethod
def toggle_display_state(key):
state_props = bpy.context.window_manager.MathVisStatePropList
if key in state_props:
state_props[key].state[0] = not state_props[key].state[0]
else:
print("Odd: Can not find key %s in MathVisStateProps" % (key))
@staticmethod
def toggle_lock_state(key):
state_props = bpy.context.window_manager.MathVisStatePropList
if key in state_props:
state_props[key].state[1] = not state_props[key].state[1]
else:
print("Odd: Can not find key %s in MathVisStateProps" % (key))
def get_math_data():
from mathutils import Matrix, Vector, Quaternion, Euler
locals = console_namespace()
if not locals:
return {}
variables = {}
for key, var in locals.items():
if len(key) == 0 or key[0] == "_":
continue
type_var = type(var)
# Rules out sets/dicts.
# It's also possible the length check below is slow
# for data with underlying linked-list structure.
if not hasattr(type_var, "__getitem__"):
continue
# Don't do a truth test on the data because this causes an error with some
# array types, see T66107.
len_fn = getattr(type_var, "__len__", None)
if len_fn is None:
continue
if len_fn(var) == 0:
continue
if type_var in {Matrix, Vector, Quaternion, Euler} or \
type_var in {tuple, list} and is_display_list(var):
variables[key] = type_var
return variables
def cleanup_math_data():
locals = console_namespace()
if not locals:
return
variables = get_math_data()
for key in variables.keys():
index = VarStates.get_index(key)
if index == -1:
continue
state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)
if state_prop.state[1]:
continue
del locals[key]
bpy.context.window_manager.MathVisStatePropList.remove(index)
def console_math_data():
from mathutils import Matrix, Vector, Quaternion, Euler
data_matrix = {}
data_quat = {}
data_euler = {}
data_vector = {}
data_vector_array = {}
for key, var in console_namespace().items():
if key[0] == "_":
continue
state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)
if state_prop:
disp, lock = state_prop.state
if not disp:
continue
var_type = type(var)
if var_type is Matrix:
if len(var.col) != 4 or len(var.row) != 4:
if len(var.col) == len(var.row):
var = var.to_4x4()
else: # todo, support 4x3 matrix
continue
data_matrix[key] = var
elif var_type is Vector:
if len(var) < 3:
var = var.to_3d()
data_vector[key] = var
elif var_type is Quaternion:
data_quat[key] = var
elif var_type is Euler:
data_euler[key] = var
elif var_type in {list, tuple} and is_display_list(var):
data_vector_array[key] = var
return data_matrix, data_quat, data_euler, data_vector, data_vector_array
| 30.22043 | 82 | 0.62053 | [
"MIT"
] | calculusrobotics/RNNs-for-Bayesian-State-Estimation | Blender 2.91/2.91/scripts/addons/space_view3d_math_vis/utils.py | 5,621 | Python |
from django_unicorn.components import QuerySetType, UnicornView
from example.coffee.models import Flavor, Taste
class AddFlavorView(UnicornView):
is_adding = False
flavors = None
flavor_qty = 1
flavor_id = None
def __init__(self, *args, **kwargs):
super().__init__(**kwargs) # calling super is required
self.flavor_id = kwargs.get('flavor_id')
self.is_adding = False
def create(self):
if int(self.flavor_qty) > 0:
for i in range(int(self.flavor_qty)):
flavor = Flavor.objects.create(id = self.flavor_id)
flavor.save()
print("create flavor")
self.is_adding = False
self.show_table()
def add_flavor(self):
self.is_adding = True
self.show_table()
def cancel(self):
self.is_adding = False
self.show_table()
def show_table(self):
self.flavors = Flavor.objects.all()
def mount(self):
        self.show_table()
| 24.853659 | 67 | 0.60157 | [
"MIT"
] | Franziskhan/django-unicorn | example/unicorn/components/add_flavor.py | 1,019 | Python |
""" Measure stent migration relative to renals
Option to visualize 2 longitudinal scans
"""
import sys, os
import numpy as np
import visvis as vv
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel, loadmesh
from stentseg.stentdirect.stentgraph import create_mesh
from stentseg.utils.visualization import show_ctvolume
from stentseg.utils import _utils_GUI, PointSet
from stentseg.utils.picker import pick3d
from stentseg.utils.centerline import find_centerline, points_from_mesh, smooth_centerline, dist_over_centerline
from lspeas.analysis.utils_analysis import ExcelAnalysis
from stentseg.utils.utils_graphs_pointsets import point_in_pointcloud_closest_to_p
#sys.path.insert(0, os.path.abspath('..')) # parent, 2 folders further in pythonPath
#import utils_analysis
#from utils_analysis import ExcelAnalysis
#import get_anaconda_ringparts
from lspeas.utils.get_anaconda_ringparts import _get_model_hooks,get_midpoints_peaksvalleys,identify_peaks_valleys
#todo: from outline to script:
## Initialize
# select the ssdf basedir
basedir = select_dir(r'F/LSPEAS\LSPEAS_ssdf',
r'F/LSPEAS_ssdf_backup')
basedirstl = select_dir(r'D:\Profiles\koenradesma\Dropbox\UTdrive\MedDataMimics\LSPEAS_Mimics\Tests')
# select dataset
ptcode = 'LSPEAS_003'
ctcodes = ctcode1, ctcode2 = 'discharge', '12months' # ctcode2 = None if no second code
cropname = 'ring'
modelname = 'modelavgreg'
vesselname1 = 'LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_001.stl'
# LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_noRenals 7_001
vesselname2 = 'LSPEAS_003_12M_MK Smoothed_Wrapped1.0_smart 3_copy_001.stl'
sheet_renals_obs = 'renal locations obs1'
showAxis = True # True or False
showVol = 'ISO' # MIP or ISO or 2D or None
ringpart = True # True; False
clim0 = (0,2500)
# clim0 = -550,500
isoTh = 250
meshradius = 0.7
# create class object for excel analysis
foo = ExcelAnalysis() # excel locations initialized in class
## Renal origin coordinates: input by user/read excel
# coordinates, left and right most caudal renal
# ctcode1
xrenal1, yrenal1, zrenal1 = 132.7, 89.2, 85.5
renal1 = PointSet(list((xrenal1, yrenal1, zrenal1)))
# ctcode2
if ctcode2:
xrenal2, yrenal2, zrenal2 = 171, 165.1, 39.5
renal2 = PointSet(list((xrenal2, yrenal2, zrenal2)))
# renal_left, renal_right = foo.readRenalsExcel(sheet_renals_obs, ptcode, ctcode1)
# renal1 = renal_left
## Load (dynamic) stent models, vessel, ct
# Load static CT image to add as reference
s = loadvol(basedir, ptcode, ctcode1, cropname, 'avgreg')
vol1 = s.vol
if ctcode2:
s = loadvol(basedir, ptcode, ctcode2, cropname, 'avgreg')
vol2 = s.vol
# load stent model
s2 = loadmodel(basedir, ptcode, ctcode1, cropname, modelname)
model1 = s2.model
modelmesh1 = create_mesh(model1, meshradius)
if ctcode2:
s2 = loadmodel(basedir, ptcode, ctcode2, cropname, modelname)
model2 = s2.model
modelmesh2 = create_mesh(model2, meshradius)
# Load vessel mesh (output Mimics)
vessel1 = loadmesh(basedirstl,ptcode,vesselname1) #inverts Z
if ctcode2:
vessel2 = loadmesh(basedirstl,ptcode,vesselname2) #inverts Z
# get pointset from STL
ppvessel1 = points_from_mesh(vessel1, invertZ = False) # removes duplicates
if ctcode2:
ppvessel2 = points_from_mesh(vessel2, invertZ = False) # removes duplicates
## Create centerline: input start/end
# ctcode1
c1_start1 = (153, 86, 104.5) # distal end
c1_ends = [(142, 94, 64.5)] # either single point or multiple
centerline1 = find_centerline(ppvessel1, c1_start1, c1_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline1 = smooth_centerline(centerline1, 30) # 20 iterations for stepsize 0.5 is reasonable
# ctcode2
if ctcode2:
c2_start1 = (190, 165, 60) # distal end
c2_ends = [(179, 169, 17)] # either single point or multiple
centerline2 = find_centerline(ppvessel2, c2_start1, c2_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline2 = smooth_centerline(centerline2, 30)
# scipy.ndimage.interpolation.zoom
# scipy.interpolate.interpn
## Get peak and valley points
if False:
# ===== OPTION automated detection =====
# get midpoints peaks valleys
midpoints_peaks_valleys = get_midpoints_peaksvalleys(model1)
# from peaks valley pointcloud identiy peaks and valleys
R1_left,R2_left,R1_right,R2_right,R1_ant,R2_ant,R1_post,R2_post = identify_peaks_valleys(
midpoints_peaks_valleys, model1, vol1,vis=True)
# ===== OPTION excel =====
R1 = foo.readRingExcel(ptcode, ctcode1, ring='R1')
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
##
#todo: determine the aorta orientation from the 4 hooks -> average angle
# use this to correct the z distance
R2 = foo.readRingExcel(ptcode, ctcode1, ring='R2')
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
def get_stent_orientation(R1, R2):
R1, R2 = np.asarray(R1), np.asarray(R2)
R1, R2 = PointSet(R1), PointSet(R2) # turn array ndim2 into PointSet
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
refvector = [0,0,10] # z-axis
    angle = (R1_ant-R2_ant).angle(refvector) # order does not matter
    return angle
## Calculate distance ring peaks and valleys to renal
# ===== in Z =====
# proximal to renal is positive; origin is proximal
z_dist_R1_ant = list(renal1.flat)[2]-R1_ant[2]
z_dist_R1_post = list(renal1.flat)[2]-R1_post[2]
z_dist_R1_left = list(renal1.flat)[2]-R1_left[2]
z_dist_R1_right = list(renal1.flat)[2]-R1_right[2]
# ===== along centerline =====
# point of centerline closest to renal
renal1_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, renal1)
if ctcode2:
renal2_and_cl_point = point_in_pointcloud_closest_to_p(centerline2, renal2)
# point of centerline closest to peaks valleys
R1_left_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_left)
R1_right_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_right)
R1_ant_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_ant)
R1_post_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_post)
# calculate distance over centerline
dist_for_R1_left = dist_over_centerline(centerline1, R1_left_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_right = dist_over_centerline(centerline1, R1_right_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_ant = dist_over_centerline(centerline1, R1_ant_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_post = dist_over_centerline(centerline1, R1_post_and_cl_point[0], renal1_and_cl_point[0])
# Main outcome 1: distance 2nd ring valleys to renal
# Main outcome 2: migration 2nd ring valleys from discharge to 1, 6, 12 months
## Visualize
f = vv.figure(2); vv.clf()
f.position = 0.00, 22.00, 1920.00, 1018.00
alpha = 0.5
if ctcode2:
a1 = vv.subplot(121)
else:
a1 = vv.gca()
show_ctvolume(vol1, model1, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol1)
model1.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh1)
vm.faceColor = 'g'
# m = vv.mesh(vessel1)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel1, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a1) # vessel
vv.plot(PointSet(list(c1_start1)), ms='.', ls='', mc='g', mw=18, axes = a1) # start1
vv.plot([e[0] for e in c1_ends], [e[1] for e in c1_ends], [e[2] for e in c1_ends], ms='.', ls='', mc='b', mw=18, axes = a1) # ends
vv.plot(centerline1, ms='.', ls='', mw=8, mc='y', axes = a1)
vv.plot(renal1, ms='.', ls='', mc='m', mw=18, axes = a1)
vv.plot(renal1_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a1)
# vv.plot(R1_left_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_right_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_ant_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_post_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode1))
a1.axis.axisColor= 1,1,1
a1.bgcolor= 0,0,0
a1.daspect= 1, 1, -1 # z-axis flipped
a1.axis.visible = showAxis
if ctcode2:
a2 = vv.subplot(122)
show_ctvolume(vol2, model2, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol2)
model2.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh2)
vm.faceColor = 'g'
# m = vv.mesh(vessel2)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel2, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a2) # vessel
vv.plot(PointSet(list(c2_start1)), ms='.', ls='', mc='g', mw=18, axes = a2) # start1
vv.plot([e[0] for e in c2_ends], [e[1] for e in c2_ends], [e[2] for e in c2_ends], ms='.', ls='', mc='b', mw=18, axes = a2) # ends
vv.plot(centerline2, ms='.', ls='', mw=8, mc='y', axes = a2)
vv.plot(renal2, ms='.', ls='', mc='m', mw=18, axes = a2)
vv.plot(renal2_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a2)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode2))
a2.axis.axisColor= 1,1,1
a2.bgcolor= 0,0,0
a2.daspect= 1, 1, -1 # z-axis flipped
a2.axis.visible = showAxis
| 40.776316 | 136 | 0.713994 | [
"BSD-3-Clause"
] | almarklein/stentseg | lspeas/analysis/stent_migration.py | 9,297 | Python |
"""
Django settings for my_site project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '98!@@ullqs8&yxhj7as31h-$lhdu691dnz@ch$(tsj@pe)ak&7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'testapp.apps.TestappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATICFILES_DIRS = (
os.path.join(STATIC_ROOT, 'static/'),
)
| 25.57037 | 91 | 0.702202 | [
"MIT"
] | garylwatson/mysteamlist | my_site/settings.py | 3,452 | Python |
import py
from pypy.rlib.rsdl import RSDL
from pypy.rlib.rarithmetic import r_uint
from pypy.rpython.lltypesystem import rffi
def test_sdl_init():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
RSDL.Quit()
def test_surface_basic():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
surface = RSDL.CreateRGBSurface(0, 150, 50, 32,
r_uint(0x000000FF),
r_uint(0x0000FF00),
r_uint(0x00FF0000),
r_uint(0xFF000000))
assert surface
assert rffi.getintfield(surface, 'c_w') == 150
assert rffi.getintfield(surface, 'c_h') == 50
RSDL.FreeSurface(surface)
RSDL.Quit()
def test_get_keyname():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
assert RSDL.GetKeyName(RSDL.K_PLUS)[0] == '+'
assert RSDL.GetKeyName(RSDL.K_RIGHTPAREN)[0] == ')'
assert RSDL.GetKeyName(RSDL.K_z)[0] == 'z'
def test_delay_getticks():
assert RSDL.Init(RSDL.INIT_VIDEO) >= 0
RSDL.Delay(10)
i = RSDL.GetTicks()
assert i >= 10
RSDL.Quit()
| 29.918919 | 55 | 0.591689 | [
"MIT"
] | benoitc/pypy | pypy/rlib/rsdl/test/test_basic.py | 1,107 | Python |
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import time, math
import numpy as np
from pnc.interface import Interface
from config.manipulator_config import ManipulatorConfig
from pnc.robot_system.pinocchio_robot_system import PinocchioRobotSystem
class ManipulatorInterface(Interface):
def __init__(self):
super(ManipulatorInterface, self).__init__()
self._robot = PinocchioRobotSystem(
cwd + "/robot_model/manipulator/three_link_manipulator.urdf",
cwd + "/robot_model/manipulator", True,
ManipulatorConfig.PRINT_ROBOT_INFO)
def get_command(self, sensor_data):
# Update Robot
self._robot.update_system(
sensor_data["base_com_pos"], sensor_data["base_com_quat"],
sensor_data["base_com_lin_vel"], sensor_data["base_com_ang_vel"],
sensor_data["base_joint_pos"], sensor_data["base_joint_quat"],
sensor_data["base_joint_lin_vel"],
sensor_data["base_joint_ang_vel"], sensor_data["joint_pos"],
sensor_data["joint_vel"])
# Operational Space Control
jtrq_cmd = self._compute_osc_command()
jpos_cmd = np.zeros_like(jtrq_cmd)
jvel_cmd = np.zeros_like(jtrq_cmd)
# Compute Cmd
command = self._robot.create_cmd_ordered_dict(jpos_cmd, jvel_cmd,
jtrq_cmd)
# Increase time variables
self._count += 1
self._running_time += ManipulatorConfig.DT
return command
def _compute_osc_command(self):
## TODO : Implement Operational Space Control
jtrq = np.zeros(self._robot.n_a)
return jtrq
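# A generic, self-contained sketch of the torque mapping used in operational
# space control (a placeholder illustration, not tied to the robot API above):
# a task-space PD force is mapped to joint torques via the Jacobian transpose,
# tau = J^T * f. All argument names are illustrative assumptions.
def _osc_torque_sketch(jac, pos_err, vel_err, kp, kd):
    f_task = kp * pos_err + kd * vel_err  # desired task-space force
    return np.dot(jac.T, f_task)          # map to joint torques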
| 32.09434 | 77 | 0.662551 | [
"MIT"
] | junhyeokahn/ASE389 | pnc/manipulator_pnc/manipulator_interface.py | 1,701 | Python |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import collections
import copy
import time
from datetime import timedelta
from sklearn.cluster import DBSCAN, KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
sys.path.append(".")
from reid import datasets
from reid import models
# from reid.models.dsbn import convert_dsbn, convert_bn
# from reid.models.csbn import convert_csbn
# from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm
# from reid.models.xbm import XBM
from reid.trainers import RSCTrainer
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import CommDataset
from reid.utils.data import IterLoader
from reid.utils.data import transforms as T
from reid.utils.data.sampler import RandomMultipleGallerySampler
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from reid.utils.rerank import compute_jaccard_distance
start_epoch = best_mAP = 0
def get_data(name, data_dir, combineall=False):
# data_dir = '/data/datasets'
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, combineall=combineall)
return dataset
def get_train_loader(args, dataset, height, width, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
# T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
train_set = sorted(dataset.train) if trainset is None else sorted(trainset)
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout,
num_classes=args.nclass)
# use CUDA
model.cuda()
model = nn.DataParallel(model)
return model
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
start_time = time.monotonic()
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create datasets
iters = args.iters if (args.iters>0) else None
print("==> Load source-domain dataset")
train_items = []
for src in args.dataset_source.split(','):
dataset = get_data(src, args.data_dir, args.combine_all)
train_items.extend(dataset.train)
dataset_source = CommDataset(train_items)
print("==> Load target-domain dataset")
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
source_classes = dataset_source.num_train_pids
args.nclass = source_classes
# Create model
model = create_model(args)
print(model)
# Evaluator
evaluator = Evaluator(model)
# Optimizer
params = [{"params": [value]} for _, value in model.named_parameters() if value.requires_grad]
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
# Trainer
trainer = RSCTrainer(model, args.nclass, margin=args.margin)
for epoch in range(args.epochs):
train_loader_source.new_epoch()
# train_loader_target.new_epoch()
trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Test on target: ', args.dataset_target)
_, mAP = evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP>best_mAP)
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
lr_scheduler.step()
print ('==> Test with the best model on the target domain:')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
end_time = time.monotonic()
print('Total running time: ', timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Self-paced contrastive learning on UDA re-ID")
# data
parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')
parser.add_argument('--combine-all', action='store_true',
help="if True: combinall train, query, gallery for training;")
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# cluster
parser.add_argument('--eps', type=float, default=0.6,
help="max neighbor distance for DBSCAN")
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--nclass', type=int, default=1000,
help="number of classes (source+target)")
parser.add_argument('--s-class', type=int, default=1000,
help="number of classes (source)")
parser.add_argument('--t-class', type=int, default=1000,
help="number of classes (target)")
# loss
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--mu1', type=float, default=0.5,
help="weight for loss_bridge_pred")
parser.add_argument('--mu2', type=float, default=0.1,
help="weight for loss_bridge_feat")
parser.add_argument('--mu3', type=float, default=1,
help="weight for loss_div")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# xbm parameters
parser.add_argument('--memorySize', type=int, default=8192,
help='meomory bank size')
parser.add_argument('--ratio', type=float, default=1,
help='memorySize=ratio*data_size')
parser.add_argument('--featureSize', type=int, default=2048)
parser.add_argument('--use-xbm', action='store_true', help="if True: strong baseline; if False: naive baseline")
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--step-size', type=int, default=30)
# training configs
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--eval-step', type=int, default=10)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, default='/data/datasets')
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
# hbchen
parser.add_argument('--csdn', type=bool, default=False)
main()
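# -----------------------------------------------------------------------------
# Example invocation (illustrative only; the dataset names, directories and the
# script location are assumptions based on the argparse defaults above, not a
# documented command):
#
#   python examples/rsc_baseline.py \
#       -ds dukemtmc,msmt17 -dt market1501 \
#       -b 64 --iters 200 --epochs 60 --eval-step 10 \
#       --data-dir /data/datasets --logs-dir logs/rsc_baseline
# -----------------------------------------------------------------------------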
| 39.464945 | 120 | 0.651426 | [
"MIT"
] | ZhaoChuyang/dgreid | examples/rsc_baseline.py | 10,695 | Python |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains support for various built-in output mechanisms.
Here, a base OutputToFile class is implemented to provide simple output to
a file via the pickle serialization mechanism. It can be subclassed to implement
alternative serialization schemes, see json_factory.py and mfg_inspector.py for
examples.
"""
import contextlib
try:
import cPickle as pickle
except ImportError:
import pickle
import shutil
import tempfile
from openhtf import util
from openhtf.util import data
import six
# TODO(wallacbe): Switch to util
class Atomic(object):
"""Class that does atomic write in a contextual manner."""
def __init__(self, filename):
self.filename = filename
self.temp = tempfile.NamedTemporaryFile(delete=False)
def write(self, write_data):
if hasattr(write_data, 'decode'):
return self.temp.write(write_data)
return self.temp.write(write_data.encode())
def close(self):
self.temp.close()
shutil.move(self.temp.name, self.filename)
class OutputToFile(object):
"""Output the given TestRecord to a file.
Instances of this class are intended to be used as an output callback
(see Test.add_output_callbacks) to output TestRecord results to a file.
This base implementation outputs the TestRecord by serializing it via
the pickle module. Subclasses may change this by overriding the
serialize_test_record() method. Additionally, subclasses may implement
more complex file naming mechanisms by overriding the open_file() method.
Args:
test_record: The TestRecord to write out to a file.
"""
def __init__(self, filename_pattern):
self.filename_pattern = filename_pattern
@staticmethod
def serialize_test_record(test_record):
"""Override method to alter how test records are serialized to file data."""
return pickle.dumps(test_record, -1)
@staticmethod
def open_file(filename):
"""Override method to alter file open behavior or file types."""
return Atomic(filename)
@contextlib.contextmanager
def open_output_file(self, test_record):
"""Open file based on pattern."""
# Ignore keys for the log filename to not convert larger data structures.
record_dict = data.convert_to_base_types(
test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if isinstance(pattern, six.string_types) or callable(pattern):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
yield output_file
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
yield self.filename_pattern
else:
raise ValueError(
'filename_pattern must be string, callable, or File-like object')
def __call__(self, test_record):
with self.open_output_file(test_record) as outfile:
outfile.write(self.serialize_test_record(test_record))
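# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the shipped callbacks): a subclass that swaps
# the pickle serialization for JSON by overriding serialize_test_record(). The
# call to data.convert_to_base_types() with default arguments is an assumption
# about how a record could be made JSON-friendly; see json_factory.py for the
# real implementation. The '{dut_id}' filename placeholder is also illustrative.
# ------------------------------------------------------------------------------
# import json
#
# class OutputToJson(OutputToFile):
#   """Write TestRecords as JSON instead of pickled bytes."""
#
#   @staticmethod
#   def serialize_test_record(test_record):
#     return json.dumps(data.convert_to_base_types(test_record), indent=2)
#
# Hypothetical usage: test.add_output_callbacks(OutputToJson('./{dut_id}.json'))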
| 33.796117 | 80 | 0.747774 | [
"Apache-2.0"
] | airdeng/openhtf | openhtf/output/callbacks/__init__.py | 3,481 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CustomPurchaseReceiptItem(Document):
pass
| 24.272727 | 49 | 0.790262 | [
"MIT"
] | MdAlAmin-aol/optic_store | optic_store/optic_store/doctype/custom_purchase_receipt_item/custom_purchase_receipt_item.py | 267 | Python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
target = target.resize(image.size)
return image, target
class FixedResize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size[0]
if w < h:
return (self.max_size, size)
else:
return (size, self.max_size)
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
target = target.resize(image.size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
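# -----------------------------------------------------------------------------
# Illustrative usage sketch (not in the original file). DummyTarget stands in
# for a BoxList-like object with resize()/transpose() methods, and the mean/std
# values are examples only.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    class DummyTarget(object):
        def resize(self, size):
            return self

        def transpose(self, method):
            return self

    transform = Compose([
        Resize(min_size=800, max_size=1333),
        RandomHorizontalFlip(prob=0.5),
        ToTensor(),
        Normalize(mean=[102.9801, 115.9465, 122.7717],
                  std=[1.0, 1.0, 1.0], to_bgr255=True),
    ])
    image, target = transform(Image.new("RGB", (640, 480)), DummyTarget())
    print(image.shape)  # torch.Size([3, 800, 1066])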
| 28.72807 | 83 | 0.589618 | [
"BSD-2-Clause"
] | zhoulw13/FCOS | fcos_core/data/transforms/transforms.py | 3,275 | Python |
# -*- coding:utf-8 -*-
"""
FTX Trade module.
https://docs.ftx.com/
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import time
import zlib
import json
import copy
import hmac
import base64
from urllib.parse import urljoin
from collections import defaultdict, deque
from typing import DefaultDict, Deque, List, Dict, Tuple, Optional, Any
from itertools import zip_longest
from requests import Request
from quant.gateway import ExchangeGateway
from quant.state import State
from quant.order import Order, Fill, SymbolInfo
from quant.tasks import SingleTask, LoopRunTask
from quant.position import Position, MARGIN_MODE_CROSSED
from quant.asset import Asset
from quant.const import MARKET_TYPE_KLINE, INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
from quant.utils import tools, logger
from quant.utils.websocket import Websocket
from quant.utils.http_client import AsyncHttpRequests
from quant.utils.decorator import async_method_locker
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from quant.order import LIQUIDITY_TYPE_MAKER, LIQUIDITY_TYPE_TAKER
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED
from quant.market import Kline, Orderbook, Trade, Ticker
__all__ = ("FTXRestAPI", "FTXTrader", )
class FTXRestAPI:
"""
"""
def __init__(self, host, api_key=None, api_secret=None, subaccount_name=None) -> None:
self._host = host
self._api_key = api_key
self._api_secret = api_secret
self._subaccount_name = subaccount_name
async def _request(self, method: str, path: str, **kwargs) -> Any:
url = self._host + "/api/" + path
request = Request(method, url, **kwargs)
if self._api_key and self._api_secret:
self._sign_request(request)
_, success, error = await AsyncHttpRequests.fetch(method, url, headers=request.headers, timeout=10, **kwargs)
return success, error
def _sign_request(self, request: Request) -> None:
ts = int(time.time() * 1000)
prepared = request.prepare()
signature_payload = f'{ts}{prepared.method}{prepared.path_url}'.encode()
if prepared.body:
signature_payload += prepared.body
signature = hmac.new(self._api_secret.encode(), signature_payload, 'sha256').hexdigest()
request.headers['FTX-KEY'] = self._api_key
request.headers['FTX-SIGN'] = signature
request.headers['FTX-TS'] = str(ts)
if self._subaccount_name:
request.headers['FTX-SUBACCOUNT'] = self._subaccount_name
async def list_futures(self) -> List[dict]:
return await self._request('GET', 'futures')
async def get_future(self, market: str) -> dict:
return await self._request('GET', f'futures/{market}')
async def list_markets(self) -> List[dict]:
return await self._request('GET', 'markets')
async def get_orderbook(self, market: str, depth: int = None) -> dict:
return await self._request('GET', f'markets/{market}/orderbook', params={'depth': depth})
async def get_trades(self, market: str) -> dict:
return await self._request('GET', f'markets/{market}/trades')
async def get_account_info(self) -> dict:
return await self._request('GET', 'account')
async def get_open_orders(self, market: str = None) -> List[dict]:
#return await self._request('GET', 'orders', params={'market': market})
return await self._request('GET', 'orders?market={}'.format(market))
async def get_conditional_orders(self, market: str = None) -> List[dict]:
#return await self._request('GET', 'conditional_orders', params={'market': market})
return await self._request('GET', 'conditional_orders?market={}'.format(market))
async def place_order(self, market: str, side: str, price: float, size: float, type: str = 'limit',
reduce_only: bool = False, ioc: bool = False, post_only: bool = False,
client_id: str = None) -> dict:
return await self._request('POST', 'orders', json={'market': market,
'side': side,
'price': price,
'size': size,
'type': type,
'reduceOnly': reduce_only,
'ioc': ioc,
'postOnly': post_only,
'clientId': client_id})
async def place_conditional_order(
self, market: str, side: str, size: float, type: str = 'stop',
limit_price: float = None, reduce_only: bool = False, cancel: bool = True,
trigger_price: float = None, trail_value: float = None) -> dict:
"""
To send a Stop Market order, set type='stop' and supply a trigger_price
To send a Stop Limit order, also supply a limit_price
To send a Take Profit Market order, set type='trailing_stop' and supply a trigger_price
To send a Trailing Stop order, set type='trailing_stop' and supply a trail_value
"""
assert type in ('stop', 'take_profit', 'trailing_stop')
assert type not in ('stop', 'take_profit') or trigger_price is not None, 'Need trigger prices for stop losses and take profits'
assert type not in ('trailing_stop') or (trigger_price is None and trail_value is not None), 'Trailing stops need a trail value and cannot take a trigger price'
return await self._request('POST', 'conditional_orders', json={'market': market,
'side': side,
'triggerPrice': trigger_price,
'size': size,
'reduceOnly': reduce_only,
'type': 'stop',
'cancelLimitOnTrigger': cancel,
'orderPrice': limit_price})
async def cancel_order(self, order_id: str) -> dict:
return await self._request('DELETE', f'orders/{order_id}')
async def cancel_orders(self, market_name: str = None, conditional_orders: bool = False, limit_orders: bool = False) -> dict:
return await self._request('DELETE', 'orders', json={'market': market_name,
'conditionalOrdersOnly': conditional_orders,
'limitOrdersOnly': limit_orders})
async def get_fills(self) -> List[dict]:
return await self._request('GET', 'fills')
async def get_balances(self) -> List[dict]:
return await self._request('GET', 'wallet/balances')
async def get_deposit_address(self, ticker: str) -> dict:
return await self._request('GET', f'wallet/deposit_address/{ticker}')
async def get_positions(self, show_avg_price: bool = False) -> List[dict]:
return await self._request('GET', 'positions', params={'showAvgPrice': str(show_avg_price)})
async def get_kline(self, market_name: str, resolution: int, limit: int = None, start_time: int = None, end_time: int = None) -> dict:
#GET /markets/{market_name}/candles?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
params = {'resolution': resolution}
if limit:
params["limit"] = limit
if start_time:
params["start_time"] = start_time
if end_time:
params["end_time"] = end_time
return await self._request('GET', f'markets/{market_name}/candles', params=params)
class FTXTrader(Websocket, ExchangeGateway):
""" FTX Trade module. You can initialize trader object with some attributes in kwargs.
"""
def __init__(self, **kwargs):
"""Initialize."""
self.cb = kwargs["cb"]
state = None
self._platform = kwargs.get("platform")
self._symbols = kwargs.get("symbols")
self._strategy = kwargs.get("strategy")
self._account = kwargs.get("account")
self._access_key = kwargs.get("access_key")
self._secret_key = kwargs.get("secret_key")
self._subaccount_name = kwargs.get("subaccount_name")
if not self._platform:
state = State(self._platform, self._account, "param platform miss")
elif self._account and (not self._access_key or not self._secret_key):
state = State(self._platform, self._account, "param access_key or secret_key miss")
elif not self._strategy:
state = State(self._platform, self._account, "param strategy miss")
elif not self._symbols:
state = State(self._platform, self._account, "param symbols miss")
if state:
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
self._host = "https://ftx.com"
self._wss = "wss://ftx.com"
url = self._wss + "/ws"
super(FTXTrader, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {"op": "ping"}
# Initializing our REST API client.
self._rest_api = FTXRestAPI(self._host, self._access_key, self._secret_key, self._subaccount_name)
        # Order book depth data
self._orderbooks: DefaultDict[str, Dict[str, DefaultDict[float, float]]] = defaultdict(lambda: {side: defaultdict(float) for side in {'bids', 'asks'}})
self._assets: DefaultDict[str: Dict[str, float]] = defaultdict(lambda: {k: 0.0 for k in {'free', 'locked', 'total'}})
self._syminfo:DefaultDict[str: Dict[str, Any]] = defaultdict(dict)
if self._account != None:
self.initialize()
        # If all four market-data callbacks are None, there is no need to run any market-data code at all
if (self.cb.on_kline_update_callback or
self.cb.on_orderbook_update_callback or
self.cb.on_trade_update_callback or
self.cb.on_ticker_update_callback):
            # Market data feed
FTXMarket(**kwargs)
@property
def rest_api(self):
return self._rest_api
async def create_order(self, symbol, action, price, quantity, order_type=ORDER_TYPE_LIMIT, *args, **kwargs):
""" Create an order.
Args:
symbol: Trade target
action: Trade direction, `BUY` or `SELL`.
price: Price of each contract.
quantity: The buying or selling quantity.
order_type: Order type, `MARKET` or `LIMIT`.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": {"avgFillPrice": null, "clientId": null, "createdAt": "2019-11-16T11:08:37.726313+00:00", "filledSize": 0.0, "future": "ETH-PERP", "id": 871282987, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 251.0, "reduceOnly": false, "remainingSize": 0.02, "side": "sell", "size": 0.02, "status": "new", "type": "limit"}, "success": true}
if action == ORDER_ACTION_BUY:
side = "buy"
else:
side = "sell"
size = abs(float(quantity))
price = float(price)
if order_type == ORDER_TYPE_LIMIT:
ot = "limit"
elif order_type == ORDER_TYPE_MARKET:
ot = "market"
price = None
else:
raise NotImplementedError
success, error = await self._rest_api.place_order(symbol, side, price, size, ot)
if error:
return None, error
if not success["success"]:
return None, "place_order error"
result = success["result"]
return str(result["id"]), None
async def revoke_order(self, symbol, *order_nos):
""" Revoke (an) order(s).
Args:
symbol: Trade target
            order_nos: Order id list; you can pass zero, one or multiple order ids. If you pass no ids, all orders for
                       this symbol are cancelled. If you pass one or more ids, those specific orders are cancelled.
Returns:
            When cancelling all orders: success = (True, None), failure = (False, error information)
            When cancelling one or more specific orders: (ids of orders cancelled successfully [], ids of orders that failed with error info []); e.g. if all three deletions succeed, the result is ([1xx, 2xx, 3xx], [])
"""
# If len(order_nos) == 0, you will cancel all orders for this symbol.
if len(order_nos) == 0:
success, error = await self._rest_api.cancel_orders(symbol)
if error:
return False, error
if not success["success"]:
return False, "cancel_orders error"
return True, None
# If len(order_nos) > 0, you will cancel an or multiple orders.
else:
result = []
for order_no in order_nos:
_, e = await self._rest_api.cancel_order(order_no)
if e:
result.append((order_no, e))
else:
result.append((order_no, None))
return tuple(result), None
async def get_assets(self):
""" 获取交易账户资产信息
Args:
None
Returns:
assets: Asset if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": {"backstopProvider": false, "collateral": 110.094266926, "freeCollateral": 109.734306926, "initialMarginRequirement": 0.2, "leverage": 5.0, "liquidating": false, "maintenanceMarginRequirement": 0.03, "makerFee": 0.0002, "marginFraction": 61.1703338848761, "openMarginFraction": 61.170278323147016, "positionLimit": null, "positionLimitUsed": 2.15976, "positions": [{"collateralUsed": 0.35996, "cost": -1.7999, "entryPrice": 179.99, "estimatedLiquidationPrice": 11184.0172926, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01723393, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": 0.0001}], "takerFee": 0.0007, "totalAccountValue": 110.094366926, "totalPositionSize": 1.7998, "useFttCollateral": true, "username": "[email protected]"}, "success": true}
success, error = await self._rest_api.get_account_info()
if error:
return None, error
if not success["success"]:
return None, "get_account_info error"
data = success["result"]
assets = {}
total = float(data["collateral"])
free = float(data["freeCollateral"])
locked = total - free
assets["USD"] = {
"total": total,
"free": free,
"locked": locked
}
if assets == self._assets:
update = False
else:
update = True
self._assets = assets
timestamp = tools.get_cur_timestamp_ms()
ast = Asset(self._platform, self._account, self._assets, timestamp, update)
return ast, None
def _convert_order_format(self, o):
"""将交易所订单结构转换为本交易系统标准订单结构格式
"""
order_no = str(o["id"])
state = o["status"]
remain = float(o["remainingSize"])
filled = float(o["filledSize"])
size = float(o["size"])
price = None if o["price"]==None else float(o["price"])
avg_price = None if o["avgFillPrice"]==None else float(o["avgFillPrice"])
if state == "new":
status = ORDER_STATUS_SUBMITTED
elif state == "open":
if remain < size:
status = ORDER_STATUS_PARTIAL_FILLED
else:
status = ORDER_STATUS_SUBMITTED
elif state == "closed":
if filled < size:
status = ORDER_STATUS_CANCELED
else:
status = ORDER_STATUS_FILLED
else:
return None
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": ORDER_ACTION_BUY if o["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": o["market"],
"price": price,
"quantity": size,
"order_type": ORDER_TYPE_LIMIT if o["type"] == "limit" else ORDER_TYPE_MARKET,
"remain": remain, #size-filled会更好
"status": status,
"avg_price": avg_price
}
order = Order(**info)
return order
async def get_orders(self, symbol):
""" 获取当前挂单列表
Args:
symbol: Trade target
Returns:
orders: Order list if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": [{"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769622011, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 152.0, "reduceOnly": false, "remainingSize": 0.002, "side": "buy", "size": 0.002, "status": "open", "type": "limit"}, {"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769620713, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 150.0, "reduceOnly": false, "remainingSize": 0.001, "side": "buy", "size": 0.001, "status": "open", "type": "limit"}], "success": true}
orders:List[Order] = []
success, error = await self._rest_api.get_open_orders(symbol)
if error:
return None, error
if not success["success"]:
return None, "get_open_orders error"
data = success["result"]
for o in data:
order = self._convert_order_format(o)
if order == None:
return None, "get_open_orders error"
orders.append(order)
return orders, None
async def get_position(self, symbol):
""" 获取当前持仓
Args:
symbol: Trade target
Returns:
position: Position if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": [{"collateralUsed": 0.35986, "cost": -1.7984, "entryPrice": 179.84, "estimatedLiquidationPrice": 11184.0123266, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01866927, "recentAverageOpenPrice": 179.84, "recentPnl": -0.0009, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": -0.0009}], "success": true}
success, error = await self._rest_api.get_positions(True)
if error:
return None, error
if not success["success"]:
return None, "get_position error"
p = next(filter(lambda x: x['future'] == symbol, success["result"]), None)
if p == None:
return Position(self._platform, self._account, self._strategy, symbol), None
if p["netSize"] == 0:
return Position(self._platform, self._account, self._strategy, symbol), None
pos = Position(self._platform, self._account, self._strategy, symbol)
        pos.margin_mode = MARGIN_MODE_CROSSED # FTX only has cross margin; isolated margin can be emulated with sub-accounts
pos.utime = tools.get_cur_timestamp_ms()
if p["netSize"] < 0: #空头仓位
pos.long_quantity = 0
pos.long_avail_qty = 0
pos.long_open_price = 0
pos.long_hold_price = 0
pos.long_liquid_price = 0
pos.long_unrealised_pnl = 0
pos.long_leverage = 0
pos.long_margin = 0
#
pos.short_quantity = abs(p["netSize"])
pos.short_avail_qty = pos.short_quantity-p["longOrderSize"] if p["longOrderSize"]<pos.short_quantity else 0
pos.short_open_price = p["recentAverageOpenPrice"]
pos.short_hold_price = p["entryPrice"]
pos.short_liquid_price = p["estimatedLiquidationPrice"]
pos.short_unrealised_pnl = p["unrealizedPnl"]
pos.short_leverage = int(1/p["initialMarginRequirement"])
pos.short_margin = p["collateralUsed"]
        else: # long position
pos.long_quantity = abs(p["netSize"])
pos.long_avail_qty = pos.long_quantity-p["shortOrderSize"] if p["shortOrderSize"]<pos.long_quantity else 0
pos.long_open_price = p["recentAverageOpenPrice"]
pos.long_hold_price = p["entryPrice"]
pos.long_liquid_price = p["estimatedLiquidationPrice"]
pos.long_unrealised_pnl = p["unrealizedPnl"]
pos.long_leverage = int(1/p["initialMarginRequirement"])
pos.long_margin = p["collateralUsed"]
#
pos.short_quantity = 0
pos.short_avail_qty = 0
pos.short_open_price = 0
pos.short_hold_price = 0
pos.short_liquid_price = 0
pos.short_unrealised_pnl = 0
pos.short_leverage = 0
pos.short_margin = 0
return pos, None
async def get_symbol_info(self, symbol):
""" 获取指定符号相关信息
Args:
symbol: Trade target
Returns:
symbol_info: SymbolInfo if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
"""
{
"success": true,
"result": [
{
"name": "BTC-0628",
"baseCurrency": null,
"quoteCurrency": null,
"type": "future",
"underlying": "BTC",
"enabled": true,
"ask": 3949.25,
"bid": 3949,
"last": 10579.52,
"priceIncrement": 0.25,
"sizeIncrement": 0.001
}
]
}
"""
info = self._syminfo[symbol]
if not info:
return None, "Symbol not exist"
price_tick = float(info["priceIncrement"])
size_tick = float(info["sizeIncrement"])
        size_limit = None # not present in the raw data
        value_tick = None # not present in the raw data
        value_limit = None # not present in the raw data
if info["type"] == "future":
base_currency = info["underlying"]
quote_currency = "USD"
settlement_currency = "USD"
else: #"spot"
base_currency = info["baseCurrency"]
quote_currency = info["quoteCurrency"]
settlement_currency = info["quoteCurrency"]
symbol_type = info["type"]
is_inverse = False
multiplier = 1
syminfo = SymbolInfo(self._platform, symbol, price_tick, size_tick, size_limit, value_tick, value_limit, base_currency, quote_currency, settlement_currency, symbol_type, is_inverse, multiplier)
return syminfo, None
async def invalid_indicate(self, symbol, indicate_type):
""" update (an) callback function.
Args:
symbol: Trade target
indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
Returns:
success: If execute successfully, return True, otherwise it's False.
error: If execute failed, return error information, otherwise it's None.
"""
async def _task():
if indicate_type == INDICATE_ORDER and self.cb.on_order_update_callback:
success, error = await self.get_orders(symbol)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
for order in success:
SingleTask.run(self.cb.on_order_update_callback, order)
elif indicate_type == INDICATE_ASSET and self.cb.on_asset_update_callback:
success, error = await self.get_assets()
if error:
state = State(self._platform, self._account, "get_assets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_asset_update_callback, success)
elif indicate_type == INDICATE_POSITION and self.cb.on_position_update_callback:
success, error = await self.get_position(symbol)
if error:
state = State(self._platform, self._account, "get_position error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_position_update_callback, success)
if indicate_type == INDICATE_ORDER or indicate_type == INDICATE_ASSET or indicate_type == INDICATE_POSITION:
SingleTask.run(_task)
return True, None
else:
logger.error("indicate_type error! indicate_type:", indicate_type, caller=self)
return False, "indicate_type error"
async def _login(self):
"""FTX的websocket接口真是逗逼,验证成功的情况下居然不会返回任何消息"""
ts = int(time.time() * 1000)
signature = hmac.new(self._secret_key.encode(), f'{ts}websocket_login'.encode(), 'sha256').hexdigest()
args = {
'key': self._access_key,
'sign': signature,
'time': ts
}
        # If this is a sub-account, add the corresponding field
if self._subaccount_name:
args["subaccount"] = self._subaccount_name
data = {'op': 'login', 'args': args}
await self.send_json(data)
async def connected_callback(self):
"""网络链接成功回调
"""
if self._account != None:
            # If an account is configured, authenticate first, then subscribe to the two private channels that
            # require login: order updates and fill updates (the only two private channels FTX supports).
            await self._login() # authenticate
success, error = await self._rest_api.list_markets()
if error:
state = State(self._platform, self._account, "list_markets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                # An error occurred during initialization; close the connection to trigger the reconnect mechanism
await self.socket_close()
return
for info in success["result"]:
self._syminfo[info["name"]] = info #符号信息一般不变,获取一次保存好,其他地方要用直接从这个变量获取就可以了
if self.cb.on_order_update_callback != None:
for sym in self._symbols:
orders, error = await self.get_orders(sym)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                        # An error occurred during initialization; close the connection to trigger the reconnect mechanism
await self.socket_close()
return
for o in orders:
SingleTask.run(self.cb.on_order_update_callback, o)
if self.cb.on_position_update_callback != None:
for sym in self._symbols:
pos, error = await self.get_position(sym)
if error:
state = State(self._platform, self._account, "get_position error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                        # An error occurred during initialization; close the connection to trigger the reconnect mechanism
await self.socket_close()
return
SingleTask.run(self.cb.on_position_update_callback, pos)
if self.cb.on_asset_update_callback != None:
ast, error = await self.get_assets()
if error:
state = State(self._platform, self._account, "get_assets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                    # An error occurred during initialization; close the connection to trigger the reconnect mechanism
await self.socket_close()
return
SingleTask.run(self.cb.on_asset_update_callback, ast)
            # If the order-update callback is set, subscribe to the private orders channel
if self.cb.on_order_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'orders'})
            # If the fill-update callback is set, subscribe to the private fills channel
if self.cb.on_fill_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'fills'})
            # Initialize the subscription-response counter to 0
self._subscribe_response_count = 0
async def process(self, msg):
""" Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
"""
if not isinstance(msg, dict):
return
logger.debug("msg:", json.dumps(msg), caller=self)
#{"type": "error", "code": 400, "msg": "Invalid login credentials"}
if msg["type"] == "error":
state = State(self._platform, self._account, "Websocket connection failed: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "pong":
return
elif msg["type"] == "info":
if msg["code"] == 20001:
                # The exchange is restarting; disconnect and the websocket will reconnect automatically
@async_method_locker("FTXTrader._ws_close.locker")
async def _ws_close():
await self.socket_close()
SingleTask.run(_ws_close)
elif msg["type"] == "unsubscribed":
return
#{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}
elif msg["type"] == "subscribed":
            self._subscribe_response_count = self._subscribe_response_count + 1 # increment once per subscription response
            if self._subscribe_response_count == 2: # all subscriptions succeeded; tell the upper layer everything is ready
state = State(self._platform, self._account, "Environment ready", State.STATE_CODE_READY)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "update":
channel = msg['channel']
if channel == 'orders':
self._update_order(msg)
elif channel == 'fills':
self._update_fill(msg)
def _update_order(self, order_info):
""" Order update.
Args:
order_info: Order information.
Returns:
None.
"""
#new (accepted but not processed yet), open, or closed (filled or cancelled)
        # Opening a position
#{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, "size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
        # 150 -> amended -> 151
#{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
#{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
        # 151 -> amended -> 187 -> filled
#{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
#{"id": 742862380, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.0, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 186.96, "postOnly": false, "ioc": false}
        # Close the whole position at market price
#{"id": 742875876, "clientId": null, "market": "ETH-PERP", "type": "market", "side": "sell", "price": null, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": true, "avgFillPrice": 186.79, "postOnly": false, "ioc": true}
o = order_info["data"]
order = self._convert_order_format(o)
if order == None:
return
SingleTask.run(self.cb.on_order_update_callback, order)
def _update_fill(self, fill_info):
""" Fill update.
Args:
fill_info: Fill information.
Returns:
None.
"""
#{"channel": "orders", "type": "update", "data": {"id": 751733812, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.93, "size": 0.001, "status": "closed", "filledSize": 0.001, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 184.25, "postOnly": false, "ioc": false}}
#{"channel": "fills", "type": "update", "data": {"id": 5741311, "market": "ETH-PERP", "future": "ETH-PERP", "baseCurrency": null, "quoteCurrency": null, "type": "order", "side": "buy", "price": 184.25, "size": 0.001, "orderId": 751733812, "time": "2019-11-08T09:52:27.366467+00:00", "feeRate": 0.0007, "fee": 0.000128975, "liquidity": "taker"}}
data = fill_info["data"]
fill_no = str(data["id"])
order_no = str(data["orderId"])
price = float(data["price"])
size = float(data["size"])
fee = float(data["fee"])
ts = tools.utctime_str_to_mts(data["time"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
liquidity = LIQUIDITY_TYPE_TAKER if data["liquidity"]=="taker" else LIQUIDITY_TYPE_MAKER
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"fill_no": fill_no,
"order_no": order_no,
"side": ORDER_ACTION_BUY if data["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": data["market"],
"price": price,
"quantity": size,
"liquidity": liquidity,
"fee": fee,
"ctime": ts
}
fill = Fill(**info)
SingleTask.run(self.cb.on_fill_update_callback, fill)
@staticmethod
def mapping_layer():
""" 获取符号映射关系.
Returns:
layer: 符号映射关系
"""
return None #FTX不需要符号映射
class FTXMarket(Websocket):
""" FTX Trade module. You can initialize trader object with some attributes in kwargs.
"""
def __init__(self, **kwargs):
"""Initialize."""
self._platform = kwargs["platform"]
self._symbols = kwargs["symbols"]
self._host = "https://ftx.com"
self._wss = "wss://ftx.com"
url = self._wss + "/ws"
super(FTXMarket, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {"op": "ping"}
self._rest_api = FTXRestAPI(self._host, None, None, None)
        # Order book depth data
self._orderbooks: DefaultDict[str, Dict[str, DefaultDict[float, float]]] = defaultdict(lambda: {side: defaultdict(float) for side in {'bids', 'asks'}})
self.initialize()
async def _kline_loop_query(self, symbol, *args, **kwargs):
#{"result": [{"close": 7088.5, "high": 7090.0, "low": 7085.75, "open": 7090.0, "startTime": "2019-11-26T16:44:00+00:00", "time": 1574786640000.0, "volume": 0.70885}, {"close": 7088.0, "high": 7088.75, "low": 7088.0, "open": 7088.5, "startTime": "2019-11-26T16:45:00+00:00", "time": 1574786700000.0, "volume": 0.708875}], "success": true}
        success, error = await self._rest_api.get_kline(symbol, 60, 2) # fetch the 2 most recent 1-minute windows
if error:
return None, error
if not success["success"]:
return None, "_kline_loop_query error"
result = success["result"]
        k = result[0] # this entry holds the complete data for the previous minute
self._update_kline(k, symbol)
async def connected_callback(self):
"""网络链接成功回调
"""
        # Subscribe to public channels; no authentication required
for sym in self._symbols:
if self.cb.on_trade_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'trades', 'market': sym})
if self.cb.on_orderbook_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': sym})
if self.cb.on_ticker_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'ticker', 'market': sym})
if self.cb.on_kline_update_callback != None:
LoopRunTask.register(self._kline_loop_query, 60, sym)
async def process(self, msg):
""" Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
"""
if not isinstance(msg, dict):
return
logger.debug("msg:", json.dumps(msg), caller=self)
#{"type": "pong"}
if msg.get("type") == "pong":
return
#{"type": "error", "code": 400, "msg": "Invalid login credentials"}
elif msg["type"] == "error":
state = State(self._platform, self._account, "Websocket connection failed: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "info":
if msg["code"] == 20001:
                # The exchange is restarting; disconnect and the websocket will reconnect automatically
@async_method_locker("FTXMarket._ws_close.locker")
async def _ws_close():
await self.socket_close()
SingleTask.run(_ws_close)
elif msg["type"] == "unsubscribed":
return
#{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}
elif msg["type"] == "subscribed":
return
elif msg["type"] == "update" or msg["type"] == "partial":
channel = msg['channel']
if channel == 'orderbook':
self._update_orderbook(msg)
elif channel == 'trades':
self._update_trades(msg)
elif channel == 'ticker':
self._update_ticker(msg)
def _update_ticker(self, ticker_info):
""" ticker update.
Args:
ticker_info: ticker information.
Returns:
"""
#{"channel": "ticker", "market": "BTC-PERP", "type": "update", "data": {"bid": 9320.0, "ask": 9323.0, "bidSize": 78.506, "askSize": 101.2467, "last": 9333.5, "time": 1573014477.9969265}}
        ts = int(float(ticker_info["data"]["time"])*1000) # convert to milliseconds
p = {
"platform": self._platform,
"symbol": ticker_info["market"],
"ask": ticker_info["data"]["ask"],
"bid": ticker_info["data"]["bid"],
"last": ticker_info["data"]["last"],
"timestamp": ts
}
ticker = Ticker(**p)
SingleTask.run(self.cb.on_ticker_update_callback, ticker)
def _update_trades(self, trades_info):
""" trades update.
Args:
trades_info: trades information.
Returns:
"""
#{"channel": "trades", "market": "BTC-PERP", "type": "update", "data": [{"id": 2616562, "price": 9333.25, "size": 0.2143, "side": "sell", "liquidation": false, "time": "2019-11-06T05:19:51.187372+00:00"}]}
for t in trades_info["data"]:
ts = tools.utctime_str_to_mts(t["time"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
p = {
"platform": self._platform,
"symbol": trades_info["market"],
"action": ORDER_ACTION_BUY if t["side"] == "buy" else ORDER_ACTION_SELL,
"price": t["price"],
"quantity": t["size"],
"timestamp": ts
}
trade = Trade(**p)
SingleTask.run(self.cb.on_trade_update_callback, trade)
def _reset_orderbook(self, market: str) -> None:
if market in self._orderbooks:
del self._orderbooks[market]
def _get_orderbook(self, market: str) -> Dict[str, List[Tuple[float, float]]]:
return {
side: sorted(
[(price, quantity) for price, quantity in list(self._orderbooks[market][side].items()) if quantity],
key=lambda order: order[0] * (-1 if side == 'bids' else 1)
)
for side in {'bids', 'asks'}
}
def _update_orderbook(self, orderbook_info):
""" orderbook update.
Args:
orderbook_info: orderbook information.
Returns:
"""
market = orderbook_info['market']
data = orderbook_info['data']
if data['action'] == 'partial':
self._reset_orderbook(market)
for side in {'bids', 'asks'}:
book = self._orderbooks[market][side]
for price, size in data[side]:
if size:
book[price] = size
else:
del book[price]
#end for
checksum = data['checksum']
orderbook = self._get_orderbook(market)
checksum_data = [
':'.join([f'{float(order[0])}:{float(order[1])}' for order in (bid, offer) if order])
for (bid, offer) in zip_longest(orderbook['bids'][:100], orderbook['asks'][:100])
]
computed_result = int(zlib.crc32(':'.join(checksum_data).encode()))
if computed_result != checksum:
            # Checksum mismatch: re-subscribe to the order book channel
@async_method_locker("FTXMarket._re_subscribe.locker")
async def _re_subscribe():
await self.send_json({'op': 'unsubscribe', 'channel': 'orderbook', 'market': market})
await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': market})
SingleTask.run(_re_subscribe)
            # bail out on checksum mismatch
return
logger.debug("orderbook:", json.dumps(orderbook), caller=self)
        ts = int(float(data['time'])*1000) # convert to milliseconds
p = {
"platform": self._platform,
"symbol": market,
"asks": orderbook['asks'],
"bids": orderbook['bids'],
"timestamp": ts
}
ob = Orderbook(**p)
SingleTask.run(self.cb.on_orderbook_update_callback, ob)
def _update_kline(self, kline_info, symbol):
""" kline update.
Args:
kline_info: kline information.
Returns:
None.
"""
info = {
"platform": self._platform,
"symbol": symbol,
"open": kline_info["open"],
"high": kline_info["high"],
"low": kline_info["low"],
"close": kline_info["close"],
"volume": kline_info["volume"],
"timestamp": tools.utctime_str_to_mts(kline_info["startTime"], "%Y-%m-%dT%H:%M:%S+00:00"),
"kline_type": MARKET_TYPE_KLINE
}
kline = Kline(**info)
SingleTask.run(self.cb.on_kline_update_callback, kline)
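# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): querying public FTX
# endpoints with FTXRestAPI outside the framework's task loop. Driving it with
# plain asyncio is an assumption; within alphahunter these calls normally run
# inside SingleTask/LoopRunTask.
# ------------------------------------------------------------------------------
# import asyncio
#
# async def _demo():
#     api = FTXRestAPI("https://ftx.com")  # public endpoints need no API keys
#     success, error = await api.get_orderbook("BTC-PERP", depth=5)
#     print("orderbook:", success if not error else error)
#     success, error = await api.get_kline("BTC-PERP", 60, limit=3)
#     print("kline:", success if not error else error)
#
# asyncio.get_event_loop().run_until_complete(_demo())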
| 44.262048 | 912 | 0.571873 | [
"MIT"
] | a04512/alphahunter | quant/platform/ftx.py | 45,427 | Python |
import os
import matplotlib.pyplot as plt
plt.style.use("seaborn")
import numpy as np
from lib.utils import read_csv, find_cargo_root
from lib.blocking import block
data_folder = os.path.join(find_cargo_root(), "data")
save_folder = os.path.join(os.path.dirname(find_cargo_root()), "report", "assets")
if not os.path.isdir(save_folder):
os.mkdir(save_folder)
N = 10
true_val = 15
bruteforce = read_csv(os.path.join(data_folder, "E_vs_MCs_BruteForceMetropolis.csv"))
importance = read_csv(os.path.join(data_folder, "E_vs_MCs_ImportanceMetropolis.csv"))
x = [100, 1000, 3000, 5000, 7000, 10000]
#bruteforce_std = [np.sqrt(block(np.array(vals))[1]) for vals in [bruteforce["energy[au]"][1:up_to] for up_to in x]]
#importance_std = [np.sqrt(block(np.array(vals))[1]) for vals in [importance["energy[au]"][1:up_to] for up_to in x]]
#plt.plot(x, bruteforce_std, "-o", label="Brute-force")
#plt.plot(x, importance_std, "-o", label="Importance")
plt.plot(range(len(bruteforce["energy[au]"][1:])), bruteforce["energy[au]"][1:], "-o", label="Brute-force")
plt.plot(range(len(importance["energy[au]"][1:])), importance["energy[au]"][1:], "-o", label="Importance")
plt.xlabel("Monte Carlo cycles")
plt.ylabel(r"Energy")
plt.legend()
plt.savefig(os.path.join(save_folder, "E_vs_MCs_all.png"))
plt.show() | 43.266667 | 116 | 0.724191 | [
"MIT"
] | kmaasrud/vmc | vmc/result_analysis/E_vs_MCs.py | 1,298 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-22 11:53
from __future__ import unicode_literals
import company.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Company name', max_length=100, unique=True)),
('description', models.CharField(max_length=500)),
('website', models.URLField(blank=True, help_text='Company website URL')),
('address', models.CharField(blank=True, help_text='Company address', max_length=200)),
('phone', models.CharField(blank=True, max_length=50)),
('email', models.EmailField(blank=True, max_length=254)),
('contact', models.CharField(blank=True, max_length=100)),
('image', models.ImageField(blank=True, max_length=255, null=True, upload_to=company.models.rename_company_image)),
('notes', models.TextField(blank=True)),
],
),
]
| 38.969697 | 131 | 0.61353 | [
"MIT"
] | ksanchezcld/InvenTree | InvenTree/company/migrations/0001_initial.py | 1,286 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
sys.path.append(os.path.dirname(__file__))
# -- Project information -----------------------------------------------------
project = "DoubletDetection"
copyright = "2019, Adam Gayoso and Jonathan Shor"
author = "Adam Gayoso and Jonathan Shor"
# The full version, including alpha/beta/rc tags
release = "2.5.2"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "m2r"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
master_doc = "index"
| 36 | 88 | 0.672414 | [
"MIT"
] | dpeerlab/DoubletDetection | docs/conf.py | 2,088 | Python |
from collections import namedtuple
Kit = namedtuple('Kit', 'name clip_r1_5 clip_r1_3 clip_r2_5 clip_r2_3 is_directional')
_KITS = [
Kit("truseq", 8, 8, 8, 8, False),
Kit("accelngs", 10, 10, 19, 5, True),
Kit("nebemseq", 5, 5, 11, 5, True)
]
KITS = {x.name: x for x in _KITS}
SUPPORTED_KITS = {x.name for x in _KITS}
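# Example lookup (illustrative): trimming parameters for the NEB EM-seq kit.
#   kit = KITS["nebemseq"]
#   (kit.clip_r1_5, kit.clip_r2_5, kit.is_directional)  -> (5, 11, True)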
| 25.461538 | 86 | 0.655589 | [
"MIT"
] | ahmedelhosseiny/bcbio-nextgen | bcbio/wgbsseq/kits.py | 331 | Python |
"""
This module lets you experience the POWER of FUNCTIONS and PARAMETERS.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Morgan Brown.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
"""
Calls the other functions in this module to test and/or demonstrate them.
"""
drawing_speed = 10 # Bigger numbers mean faster drawing
window = rg.TurtleWindow()
window.tracer(drawing_speed)
# -------------------------------------------------------------------------
# When the _TODO_s ask you to test YOUR code,
# comment-out the following two statements and replace them
# by calls to better_draw_circles et al as needed.
# -------------------------------------------------------------------------
draw_circles(rg.Point(100, 50))
draw_circles(rg.Point(-200, 0))
window.update()
window.close_on_mouse_click()
###############################################################################
# DONE: 2.
# First, RUN this program. You will see that it draws concentric circles
# whose radii vary by 15.
#
# Next, READ:
# -- main.
# Note that it constructs a TurtleWindow and then calls the function
# draw_circles
# twice, sending draw_circles one Point the first time
# and another Point the second time.
# -- The function draw_circles is defined immediately below this _TODO_.
# Be sure that you understand its code! Ask questions as needed!
#
# After you have done the above, change the above _TODO_ to DONE
# and continue to the next _TODO_ below.
#
###############################################################################
def draw_circles(point):
"""
Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles
such that:
-- Each is centered at the given Point, and
-- They have radii: 15 30 45 60 75 ..., respectively.
"""
turtle = rg.SimpleTurtle()
# -------------------------------------------------------------------------
# Draw circles centered at the given Point, by telling the SimpleTurtle to:
# Step 1: Go to the given Point and point east (towards the right).
# Step 2: Go 15 pixels DOWN, with its Pen up.
# Then draw a radius R circle.
# Note: The circle will be centered at the given Point,
# because of the way that the SimpleTurtle draw_circle method works.
# Step 3: Repeat Step 2, but using 30 pixels instead of 15, in both places
# Step 4: Repeat Step 2, but using 45 pixels instead of 15
# Step 5: Repeat Step 2, but using 60 pixels instead of 15
# etc.
# -------------------------------------------------------------------------
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0) # Point "east" (towards the right)
for k in range(1, 11): # k becomes 1, 2, 3, ... 10
turtle.pen_up()
# Go DOWN 15 pixels, ending up pointing east again
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
turtle.draw_circle(15 * k) # Radius 15, 30, 45, 60, ...
###############################################################################
# DONE: 3a.
# The function
# better_draw_circles
# defined below this _TODO_ starts out exactly the same as the code for
# draw_circles
# that you read above.
#
# Your job is to make
# better_draw_circles
# "better" than draw_circles by adding a PARAMETER for the amount
# by which the radii of the concentric circles increase, as described below.
#
# The new better_draw_circles function can do the same thing as
# the draw_circles function, but additionally allows for the radii to
# vary by ANY desired amount. Hence, the new version will be MORE POWERFUL.
#
# So, modify the better_draw_circles function defined BELOW so that
# it has a single ADDITIONAL PARAMETER that is the amount
# by which the radii of the circles increase.
#
# For example, if that new parameter is given the value 15,
# then the circles should have radii: 15 30 45 60 75 ..., respectively,
# just as in draw_circles. But if that new parameter is given the value 3,
# then the circles should have radii: 3 6 9 12 15 18 ..., respectively.
#
# DONE: 3b.
# In main at the place indicated, comment-out the two existing calls
# to draw_circles and add at least two calls to the improved
# better_draw_circles function, to TEST that your modified code is correct
# and does indeed allow for different amounts by which the radii can vary.
#
# #############################################################################
def better_draw_circles(point):
"""
Starts out the same as the draw_circles function defined ABOVE.
    You will make it an IMPROVED, MORE POWERFUL function per the above _TODO_.
"""
turtle = rg.SimpleTurtle()
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0) # Point "east" (towards the right)
for k in range(1, 11): # k becomes 1, 2, 3, ... 10
turtle.pen_up()
# Go DOWN 15 pixels, ending up pointing east again
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
        turtle.draw_circle(15 * k)  # Radius 15, 30, 45, 60, ...
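# One possible sketch of the parameterized version described in _TODO_ 3a,
# assuming the extra parameter is the amount by which the radii grow.
# (The name and structure below are just an illustration, not the required solution.)
def better_draw_circles_sketch(point, radius_increment):
    """
    Draws 10 circles centered at the given Point, whose radii are:
    radius_increment, 2 * radius_increment, ... 10 * radius_increment.
    """
    turtle = rg.SimpleTurtle()
    turtle.pen_up()
    turtle.go_to(point)
    turtle.set_heading(0)  # Point "east" (towards the right)
    for k in range(1, 11):
        turtle.pen_up()
        # Go DOWN radius_increment pixels, ending up pointing east again
        turtle.right(90)
        turtle.forward(radius_increment)
        turtle.left(90)
        turtle.pen_down()
        turtle.draw_circle(radius_increment * k)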
###############################################################################
# TODO: 4a.
# In the previous _TODO_, you made a MORE POWERFUL version
# of draw_circles by introducing a new PARAMETER for the amount
# by which the radii of the concentric circles increase.
#
# In this _TODO_, you will implement a function called
# even_better_draw_circles
# that has FIVE parameters, for:
# -- The center of the concentric circles (as it started with)
# -- The amount by which the radii vary (as you did above)
# -- The number of concentric circles drawn
# -- The pen color of each of the concentric circles
# -- The pen thickness of each of the concentric circles
#
# Hence, this even_better_draw_circles function will be
# even more POWERFUL than the previous functions,
# in that it can draw LOTS of different kinds of circles.
#
# Start by copy-and-pasting the code from better_draw_circles above
# to the body of the even_better_draw_circles function defined below.
# Then add parameters and modify the code to make them work!
#
# TODO: 4b.
# In main at the place indicated, comment-out the existing calls
# to better_draw_circles and add at least two calls to the improved
# even_better_draw_circles function, to TEST that your modified code is
# correct and does indeed use its parameters per their descriptions above.
#
###############################################################################
def even_better_draw_circles(point):
""" An improved version of draw_circles, per the _TODO_ above. """
    # READ the above _TODO_ and then copy-paste code from better_draw_circles here:
###############################################################################
# TODO: 5.
#
# Finally, comment-out the existing calls to even_better_draw_circles and
# add code in main to draw various circles that form a BEAUTIFUL picture!
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 38.878173 | 79 | 0.58206 | [
"MIT"
] | brownme1/02-ObjectsFunctionsAndMethods | src/m5_why_parameters_are_powerful.py | 7,659 | Python |
"""
ETNA School API Wrapper
~~~~~~~~~~~~~~~~~~~~~~~
A python wrapper to help make python3 apps/bots using the ETNA API.
:copyright: (c) 2019 Yohann MARTIN
:license: MIT, see LICENSE for more details.
"""
__title__ = 'etnapy'
__author__ = 'Yohann MARTIN'
__license__ = 'MIT'
__version__ = "1.0.0"
from .user import User
from .promo import Promo
from .trophy import Trophy
from .etnapy import Intra
| 20.05 | 67 | 0.698254 | [
"MIT"
] | Astropilot/etnapy | etnapy/__init__.py | 401 | Python |
import pytest
import numpy as np
import random
from cem.backend import backend, NumpyBackend
try:
from cem.backend import CuPyBackend
import cupy as cp
skip_cupy_test = False
except ImportError:
skip_cupy_test = True
def test_numpy_backend():
X = random.randint(0, 10) * 10
Y = random.randint(0, 10) * 10
zeros = backend.zeros((X, Y))
ones = backend.ones((X, Y))
assert isinstance(backend, NumpyBackend)
assert isinstance(zeros, np.ndarray)
assert isinstance(ones, np.ndarray)
assert backend.int == np.int64
assert backend.float == np.float64
assert zeros.shape == (X, Y)
assert ones.shape == (X, Y)
assert backend.sin(ones).any() == np.sin(ones).any()
assert backend.cos(ones).any() == np.cos(ones).any()
@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')
def test_cupy_backend():
backend.set_backend('cupy')
X = random.randint(0, 10) * 10
Y = random.randint(0, 10) * 10
zeros = backend.zeros((X, Y))
ones = backend.ones((X, Y))
assert isinstance(backend, CuPyBackend)
assert isinstance(zeros, cp.ndarray)
assert isinstance(ones, cp.ndarray)
assert backend.int == cp.int64
assert backend.float == cp.float64
assert zeros.shape == (X, Y)
assert ones.shape == (X, Y)
assert backend.sin(ones).all() == cp.sin(ones).all()
assert backend.cos(ones).all() == cp.cos(ones).all()
@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')
def test_set_backend():
backend.set_backend('numpy')
assert isinstance(backend, NumpyBackend)
backend.set_backend('cupy')
assert isinstance(backend, CuPyBackend)
| 28.355932 | 68 | 0.674836 | [
"MIT"
] | dantehustg/cem | tests/test_backend.py | 1,673 | Python |
""" compatibility OpenTimelineIO 0.12.0 and older
"""
import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique
self = sys.modules[__name__]
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def create_otio_rational_time(frame, fps):
return otio.opentime.RationalTime(
float(frame),
float(fps)
)
def create_otio_time_range(start_frame, frame_duration, fps):
return otio.opentime.TimeRange(
start_time=create_otio_rational_time(start_frame, fps),
duration=create_otio_rational_time(frame_duration, fps)
)
def create_otio_reference(media_pool_item):
metadata = _get_metadata_media_pool_item(media_pool_item)
mp_clip_property = media_pool_item.GetClipProperty()
path = mp_clip_property["File Path"]
reformat_path = utils.get_reformated_path(path, padded=True)
padding = utils.get_padding_from_path(path)
if padding:
metadata.update({
"isSequence": True,
"padding": padding
})
    # get clip property according to type
mp_clip_property = media_pool_item.GetClipProperty()
fps = float(mp_clip_property["FPS"])
if mp_clip_property["Type"] == "Video":
frame_start = int(mp_clip_property["Start"])
frame_duration = int(mp_clip_property["Frames"])
else:
audio_duration = str(mp_clip_property["Duration"])
frame_start = 0
frame_duration = int(utils.timecode_to_frames(
audio_duration, float(fps)))
otio_ex_ref_item = None
if padding:
        # if it is a file sequence, try to create `ImageSequenceReference`;
        # the installed OTIO might not support it, so fall back to the old way below
try:
dirname, filename = os.path.split(path)
collection = clique.parse(filename, '{head}[{ranges}]{tail}')
padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
otio_ex_ref_item = otio.schema.ImageSequenceReference(
target_url_base=dirname + os.sep,
name_prefix=collection.format("{head}"),
name_suffix=collection.format("{tail}"),
start_frame=frame_start,
frame_zero_padding=padding_num,
rate=fps,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
except AttributeError:
pass
if not otio_ex_ref_item:
# in case old OTIO or video file create `ExternalReference`
otio_ex_ref_item = otio.schema.ExternalReference(
target_url=reformat_path,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
# add metadata to otio item
add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)
return otio_ex_ref_item
def create_otio_markers(track_item, fps):
track_item_markers = track_item.GetMarkers()
markers = []
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
if "{" in note and "}" in note:
metadata = json.loads(note)
else:
metadata = {"note": note}
markers.append(
otio.schema.Marker(
name=track_item_markers[marker_frame]["name"],
marked_range=create_otio_time_range(
marker_frame,
track_item_markers[marker_frame]["duration"],
fps
),
color=track_item_markers[marker_frame]["color"].upper(),
metadata=metadata
)
)
return markers
def create_otio_clip(track_item):
media_pool_item = track_item.GetMediaPoolItem()
mp_clip_property = media_pool_item.GetClipProperty()
if not self.project_fps:
fps = mp_clip_property["FPS"]
else:
fps = self.project_fps
name = track_item.GetName()
media_reference = create_otio_reference(media_pool_item)
source_range = create_otio_time_range(
int(track_item.GetLeftOffset()),
int(track_item.GetDuration()),
fps
)
if mp_clip_property["Type"] == "Audio":
return_clips = list()
audio_chanels = mp_clip_property["Audio Ch"]
for channel in range(0, int(audio_chanels)):
clip = otio.schema.Clip(
name=f"{name}_{channel}",
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return_clips.append(clip)
return return_clips
else:
clip = otio.schema.Clip(
name=name,
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return clip
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
return otio.schema.Gap(
source_range=create_otio_time_range(
gap_start,
(clip_start - tl_start_frame) - gap_start,
fps
)
)
def _create_otio_timeline(project, timeline, fps):
metadata = _get_timeline_metadata(project, timeline)
start_time = create_otio_rational_time(
timeline.GetStartFrame(), fps)
otio_timeline = otio.schema.Timeline(
name=timeline.GetName(),
global_start_time=start_time,
metadata=metadata
)
return otio_timeline
def _get_timeline_metadata(project, timeline):
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
timeline = project.GetCurrentTimeline()
timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
return _get_metadata_media_pool_item(tl)
def _get_metadata_media_pool_item(media_pool_item):
data = dict()
data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
property = media_pool_item.GetClipProperty() or {}
for name, value in property.items():
if "Resolution" in name and "" != value:
width, height = value.split("x")
data.update({
"width": int(width),
"height": int(height)
})
if "PAR" in name and "" != value:
try:
data.update({"pixelAspect": float(value)})
except ValueError:
if "Square" in value:
data.update({"pixelAspect": float(1)})
else:
data.update({"pixelAspect": float(1)})
return data
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
)
def add_otio_gap(clip_start, otio_track, track_item, timeline):
# if gap between track start and clip start
if clip_start > otio_track.available_range().duration.value:
# create gap and add it to track
otio_track.append(
create_otio_gap(
otio_track.available_range().duration.value,
track_item.GetStart(),
timeline.GetStartFrame(),
self.project_fps
)
)
def add_otio_metadata(otio_item, media_pool_item, **kwargs):
mp_metadata = media_pool_item.GetMetadata()
# add additional metadata from kwargs
if kwargs:
mp_metadata.update(kwargs)
# add metadata to otio item metadata
for key, value in mp_metadata.items():
otio_item.metadata.update({key: value})
def create_otio_timeline(resolve_project):
# get current timeline
self.project_fps = resolve_project.GetSetting("timelineFrameRate")
timeline = resolve_project.GetCurrentTimeline()
# convert timeline to otio
otio_timeline = _create_otio_timeline(
resolve_project, timeline, self.project_fps)
# loop all defined track types
for track_type in list(self.track_types.keys()):
# get total track count
track_count = timeline.GetTrackCount(track_type)
# loop all tracks by track indexes
for track_index in range(1, int(track_count) + 1):
# get current track name
track_name = timeline.GetTrackName(track_type, track_index)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
# get all track items in current track
current_track_items = timeline.GetItemListInTrack(
track_type, track_index)
# loop available track items in current track items
for track_item in current_track_items:
# skip offline track items
if track_item.GetMediaPoolItem() is None:
continue
# calculate real clip start
clip_start = track_item.GetStart() - timeline.GetStartFrame()
add_otio_gap(
clip_start, otio_track, track_item, timeline)
# create otio clip and add it to track
otio_clip = create_otio_clip(track_item)
if not isinstance(otio_clip, list):
otio_track.append(otio_clip)
else:
for index, clip in enumerate(otio_clip):
if index == 0:
otio_track.append(clip)
else:
                            # add previous otio track to timeline
otio_timeline.tracks.append(otio_track)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
add_otio_gap(
clip_start, otio_track,
track_item, timeline)
otio_track.append(clip)
# add track to otio timeline
otio_timeline.tracks.append(otio_track)
return otio_timeline
def write_to_file(otio_timeline, path):
otio.adapters.write_to_file(otio_timeline, path)
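# Minimal usage sketch, assuming `resolve_project` is the current project
# object from DaVinci Resolve's scripting API (the same object that
# create_otio_timeline above expects); the output path is a placeholder.
def export_current_timeline(resolve_project, otio_path="/tmp/timeline.otio"):
    otio_timeline = create_otio_timeline(resolve_project)
    write_to_file(otio_timeline, otio_path)
    return otio_timeline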
| 32.489231 | 78 | 0.605834 | [
"MIT"
] | dangerstudios/OpenPype | openpype/hosts/resolve/otio/davinci_export.py | 10,559 | Python |
""" generators for the neuron project """
# general imports
import sys
import os
import zipfile
# third party imports
import numpy as np
import nibabel as nib
import scipy
import keras
from keras.utils import np_utils
from keras.models import Model
# local packages
import pynd.ndutils as nd
import pytools.patchlib as pl
import pytools.timer as timer
# reload patchlib (it's often updated right now...)
from imp import reload
reload(pl)
# other neuron (this project) packages
from . import dataproc as nrn_proc
from . import models as nrn_models
class Vol(object):
    def __init__(self,
                 volpath,
                 ext='.npz',
                 nb_restart_cycle=None, # number of files to restart after
                 name='single_vol', # name
                 fixed_vol_size=True, # assumes each volume is fixed size
                 data_proc_fn=None, # processing function that takes in one arg (the volume)
                 patch_size=None, # split the volume in patches? if so, get patch_size
                 patch_stride=1, # split the volume in patches? if so, get patch_stride
                 vol_rand_seed=None, # random seed for file shuffling
                 ):
# get filenames at given paths
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert nb_files > 0, "Could not find any files at %s with extension %s" % (volpath, ext)
# set up restart cycle for volume files --
# i.e. after how many volumes do we restart
if nb_restart_cycle is None:
nb_restart_cycle = nb_files
# compute subvolume split
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
        # (the volume shape could also be read from the npz header via _npz_headers, without loading the array)
nb_patches_per_vol = 1
if fixed_vol_size and (patch_size is not None) and all(f is not None for f in patch_size):
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \
'%s restart cycle (%s) too big (%s) in %s' % \
(name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)
def vol(volpath,
ext='.npz',
batch_size=1,
expected_nb_files=-1,
expected_files=None,
data_proc_fn=None, # processing function that takes in one arg (the volume)
relabel=None, # relabeling array
nb_labels_reshape=0, # reshape to categorial format for keras, need # labels
keep_vol_size=False, # whether to keep the volume size on categorical resizing
name='single_vol', # name, optional
nb_restart_cycle=None, # number of files to restart after
patch_size=None, # split the volume in patches? if so, get patch_size
patch_stride=1, # split the volume in patches? if so, get patch_stride
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_feats=1,
patch_rand=False,
patch_rand_seed=None,
vol_rand_seed=None,
binary=False,
yield_incomplete_final_batch=True,
verbose=False):
"""
generator for single volume (or volume patches) from a list of files
simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,
and prepares it for keras model formats
if a patch size is passed, breaks the volume into patches and generates those
"""
# get filenames at given paths
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert nb_files > 0, "Could not find any files at %s with extension %s" % (volpath, ext)
# compute subvolume split
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
nb_patches_per_vol = 1
if patch_size is not None and all(f is not None for f in patch_size):
if relabel is None and len(patch_size) == (len(vol_data.shape) - 1):
tmp_patch_size = [f for f in patch_size]
patch_size = [*patch_size, vol_data.shape[-1]]
patch_stride = [f for f in patch_stride]
patch_stride = [*patch_stride, vol_data.shape[-1]]
assert len(vol_data.shape) == len(patch_size), "Vol dims %d are not equal to patch dims %d" % (len(vol_data.shape), len(patch_size))
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
if nb_restart_cycle is None:
print("setting restart cycle to", nb_files)
nb_restart_cycle = nb_files
assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \
'%s restart cycle (%s) too big (%s) in %s' % \
(name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)
# check the number of files matches expected (if passed)
if expected_nb_files >= 0:
assert nb_files == expected_nb_files, \
"number of files do not match: %d, %d" % (nb_files, expected_nb_files)
if expected_files is not None:
if not (volfiles == expected_files):
print('file lists did not match. You should probably stop execution.', file=sys.stderr)
print(len(volfiles), len(expected_files))
if verbose:
print('nb_restart_cycle:', nb_restart_cycle)
# iterate through files
fileidx = -1
batch_idx = -1
feat_idx = 0
batch_shape = None
while 1:
fileidx = np.mod(fileidx + 1, nb_restart_cycle)
if verbose and fileidx == 0:
print('starting %s cycle' % name)
# read next file (circular)
try:
if verbose:
print('opening %s' % os.path.join(volpath, volfiles[fileidx]))
file_name = os.path.join(volpath, volfiles[fileidx])
vol_data = _load_medical_volume(file_name, ext, verbose)
# print(file_name, " was loaded", vol_data.shape)
except:
debug_error_msg = "#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s"
print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))
raise
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
        # the original segmentation files have non-sequential labels (i.e. some labels are
        # missing). To avoid exploding our model, we only care about the labels that exist.
if relabel is not None:
vol_data = _relabel(vol_data, relabel)
# split volume into patches if necessary and yield
if patch_size is None:
this_patch_size = vol_data.shape
patch_stride = [1 for f in this_patch_size]
else:
this_patch_size = [f for f in patch_size]
for pi, p in enumerate(this_patch_size):
if p is None:
this_patch_size[pi] = vol_data.shape[pi]
patch_stride[pi] = 1
assert ~np.any(np.isnan(vol_data)), "Found a nan for %s" % volfiles[fileidx]
assert np.all(np.isfinite(vol_data)), "Found a inf for %s" % volfiles[fileidx]
patch_gen = patch(vol_data, this_patch_size,
patch_stride=patch_stride,
nb_labels_reshape=nb_labels_reshape,
batch_size=1,
infinite=False,
collapse_2d=collapse_2d,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
keep_vol_size=keep_vol_size)
empty_gen = True
patch_idx = -1
for lpatch in patch_gen:
empty_gen = False
patch_idx += 1
# add to feature
if np.mod(feat_idx, nb_feats) == 0:
vol_data_feats = lpatch
else:
vol_data_feats = np.concatenate([vol_data_feats, lpatch], np.ndim(lpatch)-1)
feat_idx += 1
if binary:
vol_data_feats = vol_data_feats.astype(bool)
if np.mod(feat_idx, nb_feats) == 0:
feats_shape = vol_data_feats[1:]
# yield previous batch if the new volume has different patch sizes
if batch_shape is not None and (feats_shape != batch_shape):
batch_idx = -1
batch_shape = None
print('switching patch sizes')
yield np.vstack(vol_data_batch)
# add to batch of volume data, unless the batch is currently empty
if batch_idx == -1:
vol_data_batch = [vol_data_feats]
batch_shape = vol_data_feats[1:]
else:
vol_data_batch = [*vol_data_batch, vol_data_feats]
# yield patch
batch_idx += 1
batch_done = batch_idx == batch_size - 1
files_done = np.mod(fileidx + 1, nb_restart_cycle) == 0
final_batch = yield_incomplete_final_batch and files_done and patch_idx == (nb_patches_per_vol-1)
if final_batch: # verbose and
print('last batch in %s cycle %d. nb_batch:%d' % (name, fileidx, len(vol_data_batch)))
if batch_done or final_batch:
batch_idx = -1
q = np.vstack(vol_data_batch)
yield q
if empty_gen:
raise ValueError('Patch generator was empty for file %s', volfiles[fileidx])
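def _example_vol_usage(volpath):
    # Minimal usage sketch (volpath and the patch geometry are placeholders):
    # yields batches of patches cut from the '.npz' volumes found under volpath.
    gen = vol(volpath, ext='.npz', batch_size=4,
              patch_size=[32, 32, 32], patch_stride=[16, 16, 16])
    return next(gen)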
def patch(vol_data, # the volume
patch_size, # patch size
patch_stride=1, # patch stride (spacing)
nb_labels_reshape=1, # number of labels for categorical resizing. 0 if no resizing
keep_vol_size=False, # whether to keep the volume size on categorical resizing
batch_size=1, # batch size
collapse_2d=None,
patch_rand=False,
patch_rand_seed=None,
variable_batch_size=False,
infinite=False): # whether the generator should continue (re)-generating patches
"""
generate patches from volume for keras package
Yields:
patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape
"""
# some parameter setup
assert batch_size >= 1, "batch_size should be at least 1"
if patch_size is None:
patch_size = vol_data.shape
for pi,p in enumerate(patch_size):
if p is None:
patch_size[pi] = vol_data.shape[pi]
batch_idx = -1
if variable_batch_size:
batch_size = yield
# do while. if not infinite, will break at the end
while True:
# create patch generator
gen = pl.patch_gen(vol_data, patch_size,
stride=patch_stride,
rand=patch_rand,
rand_seed=patch_rand_seed)
# go through the patch generator
empty_gen = True
for lpatch in gen:
empty_gen = False
# reshape output layer as categorical and prep proper size
# print(lpatch.shape, nb_labels_reshape, keep_vol_size, patch_size)
lpatch = _categorical_prep(lpatch, nb_labels_reshape, keep_vol_size, patch_size)
if collapse_2d is not None:
lpatch = np.squeeze(lpatch, collapse_2d + 1) # +1 due to batch in first dim
# add this patch to the stack
if batch_idx == -1:
if batch_size == 1:
patch_data_batch = lpatch
else:
patch_data_batch = np.zeros([batch_size, *lpatch.shape[1:]])
patch_data_batch[0, :] = lpatch
else:
patch_data_batch[batch_idx+1, :] = lpatch
# yield patch
batch_idx += 1
if batch_idx == batch_size - 1:
batch_idx = -1
batch_size_y = yield patch_data_batch
if variable_batch_size:
batch_size = batch_size_y
assert not empty_gen, 'generator was empty. vol size was %s' % ''.join(['%d '%d for d in vol_data.shape])
# if not infinite generation, yield the last batch and break the while
if not infinite:
if batch_idx >= 0:
patch_data_batch = patch_data_batch[:(batch_idx+1), :]
yield patch_data_batch
break
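def _example_patch_usage():
    # Minimal sketch of patch() on a synthetic volume (sizes are arbitrary):
    # with nb_labels_reshape=0 each yielded batch has shape [batch_size, 8, 8, 8].
    vol_data = np.zeros((16, 16, 16))
    return [b.shape for b in patch(vol_data, [8, 8, 8], patch_stride=[8, 8, 8],
                                   nb_labels_reshape=0, batch_size=1)]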
def vol_seg(volpath,
segpath,
proc_vol_fn=None,
proc_seg_fn=None,
verbose=False,
name='vol_seg', # name, optional
ext='.npz',
nb_restart_cycle=None, # number of files to restart after
nb_labels_reshape=-1,
collapse_2d=None,
force_binary=False,
nb_input_feats=1,
relabel=None,
vol_rand_seed=None,
seg_binary=False,
vol_subname='norm', # subname of volume
seg_subname='aseg', # subname of segmentation
**kwargs):
"""
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
"""
# get vol generator
vol_gen = vol(volpath, **kwargs, ext=ext,
nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,
relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',
verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
# get seg generator, matching nb_files
# vol_files = [f.replace('norm', 'aseg') for f in _get_file_list(volpath, ext)]
# vol_files = [f.replace('orig', 'aseg') for f in vol_files]
vol_files = [f.replace(vol_subname, seg_subname) for f in _get_file_list(volpath, ext, vol_rand_seed)]
seg_gen = vol(segpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d,
force_binary=force_binary, relabel=relabel, vol_rand_seed=vol_rand_seed,
data_proc_fn=proc_seg_fn, nb_labels_reshape=nb_labels_reshape, keep_vol_size=True,
expected_files=vol_files, name=name+' seg', binary=seg_binary, verbose=False)
# on next (while):
while 1:
# get input and output (seg) vols
input_vol = next(vol_gen).astype('float16')
output_vol = next(seg_gen).astype('float16') # was int8. Why? need float possibility...
# output input and output
yield (input_vol, output_vol)
def vol_cat(volpaths, # expect two folders in here
crop=None, resize_shape=None, rescale=None, # processing parameters
verbose=False,
name='vol_cat', # name, optional
ext='.npz',
nb_labels_reshape=-1,
vol_rand_seed=None,
**kwargs): # named arguments for vol(...), except verbose, data_proc_fn, ext, nb_labels_reshape and name (which this function will control when calling vol())
"""
generator with (volume, binary_bit) (random order)
ONLY works with abtch size of 1 for now
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
"""
folders = [f for f in sorted(os.listdir(volpaths))]
# compute processing function
proc_vol_fn = lambda x: nrn_proc.vol_proc(x, crop=crop, resize_shape=resize_shape,
interp_order=2, rescale=rescale)
# get vol generators
generators = ()
generators_len = ()
for folder in folders:
vol_gen = vol(os.path.join(volpaths, folder), **kwargs, ext=ext, vol_rand_seed=vol_rand_seed,
data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=folder, verbose=False)
generators_len += (len(_get_file_list(os.path.join(volpaths, folder), '.npz')), )
generators += (vol_gen, )
bake_data_test = False
if bake_data_test:
print('fake_data_test', file=sys.stderr)
# on next (while):
while 1:
# build the random order stack
order = np.hstack((np.zeros(generators_len[0]), np.ones(generators_len[1]))).astype('int')
np.random.shuffle(order) # shuffle
for idx in order:
gen = generators[idx]
# for idx, gen in enumerate(generators):
z = np.zeros([1, 2]) #1,1,2 for categorical binary style
z[0,idx] = 1 #
# z[0,0,0] = idx
data = next(gen).astype('float32')
if bake_data_test and idx == 0:
# data = data*idx
data = -data
yield (data, z)
def add_prior(gen,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
verbose=False,
patch_rand=False,
patch_rand_seed=None):
"""
#
# add a prior generator to a given generator
# with the number of patches in batch matching output of gen
"""
# get prior
if prior_type == 'location':
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model
elif prior_type == 'file': # assumes a npz filename passed in prior_file
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
else: # assumes a volume
with timer.Timer('loading prior', True):
prior_vol = prior_file.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[-1]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[-1]
if extract_slice is not None:
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else: # assume slices
prior_vol = prior_vol[:, :, extract_slice, :]
# get the prior to have the right volume [x, y, z, nb_channels]
assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, "prior is the wrong size"
# prior generator
if patch_size is None:
patch_size = prior_vol.shape[0:3]
assert len(patch_size) == len(patch_stride)
prior_gen = patch(prior_vol, [*patch_size, nb_channels],
patch_stride=[*patch_stride, nb_channels],
batch_size=batch_size,
collapse_2d=collapse_2d,
keep_vol_size=True,
infinite=True,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
variable_batch_size=True,
nb_labels_reshape=0)
assert next(prior_gen) is None, "bad prior gen setup"
# generator loop
while 1:
# generate input and output volumes
gen_sample = next(gen)
# generate prior batch
gs_sample = _get_shape(gen_sample)
prior_batch = prior_gen.send(gs_sample)
yield (gen_sample, prior_batch)
def vol_prior(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
patch_rand=False,
**kwargs): # anything else you'd like to pass to vol()
"""
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
"""
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
# prepare the vol_seg
vol_gen = vol(*args,
**kwargs,
collapse_2d=collapse_2d,
force_binary=False,
verbose=verbose,
vol_rand_seed=vol_rand_seed)
gen = vol(*args, **kwargs,
proc_vol_fn=None,
proc_seg_fn=None,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_size=patch_size,
patch_stride=patch_stride,
batch_size=batch_size,
vol_rand_seed=vol_rand_seed,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
nb_input_feats=nb_input_feats)
# add prior to output
pgen = add_prior(gen,
proc_vol_fn=proc_vol_fn,
proc_seg_fn=proc_seg_fn,
prior_type=prior_type,
prior_file=prior_file,
prior_feed=prior_feed,
patch_stride=patch_stride,
patch_size=patch_size,
batch_size=batch_size,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
vol_rand_seed=vol_rand_seed)
# generator loop
while 1:
gen_sample, prior_batch = next(pgen)
input_vol, output_vol = gen_sample
if prior_feed == 'input':
yield ([input_vol, prior_batch], output_vol)
else:
assert prior_feed == 'output'
yield (input_vol, [output_vol, prior_batch])
def vol_seg_prior(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
patch_rand=None,
**kwargs):
"""
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
"""
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
# prepare the vol_seg
gen = vol_seg(*args, **kwargs,
proc_vol_fn=None,
proc_seg_fn=None,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_size=patch_size,
patch_stride=patch_stride,
batch_size=batch_size,
vol_rand_seed=vol_rand_seed,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
nb_input_feats=nb_input_feats)
# add prior to output
pgen = add_prior(gen,
proc_vol_fn=proc_vol_fn,
proc_seg_fn=proc_seg_fn,
prior_type=prior_type,
prior_file=prior_file,
prior_feed=prior_feed,
patch_stride=patch_stride,
patch_size=patch_size,
batch_size=batch_size,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed)
# generator loop
while 1:
gen_sample, prior_batch = next(pgen)
input_vol, output_vol = gen_sample
if prior_feed == 'input':
yield ([input_vol, prior_batch], output_vol)
else:
assert prior_feed == 'output'
yield (input_vol, [output_vol, prior_batch])
def vol_prior_hack(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
**kwargs):
"""
"""
# prepare the vol_seg
gen = vol_seg_hack(*args, **kwargs,
proc_vol_fn=None,
proc_seg_fn=None,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_size=patch_size,
patch_stride=patch_stride,
batch_size=batch_size,
vol_rand_seed=vol_rand_seed,
nb_input_feats=nb_input_feats)
# get prior
if prior_type == 'location':
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model
elif prior_type == 'file': # assumes a npz filename passed in prior_file
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
else : # assumes a volume
with timer.Timer('astyping prior', verbose):
prior_vol = prior_file
if not (prior_vol.dtype == 'float16'):
prior_vol = prior_vol.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[-1]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[-1]
if extract_slice is not None:
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else: # assume slices
prior_vol = prior_vol[:, :, extract_slice, :]
# get the prior to have the right volume [x, y, z, nb_channels]
assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, "prior is the wrong size"
# prior generator
if patch_size is None:
patch_size = prior_vol.shape[0:3]
assert len(patch_size) == len(patch_stride)
prior_gen = patch(prior_vol, [*patch_size, nb_channels],
patch_stride=[*patch_stride, nb_channels],
batch_size=batch_size,
collapse_2d=collapse_2d,
keep_vol_size=True,
infinite=True,
#variable_batch_size=True, # this
nb_labels_reshape=0)
# assert next(prior_gen) is None, "bad prior gen setup"
# generator loop
while 1:
# generate input and output volumes
input_vol = next(gen)
if verbose and np.all(input_vol.flat == 0):
print("all entries are 0")
# generate prior batch
# with timer.Timer("with send?"):
# prior_batch = prior_gen.send(input_vol.shape[0])
prior_batch = next(prior_gen)
if prior_feed == 'input':
yield ([input_vol, prior_batch], input_vol)
else:
assert prior_feed == 'output'
yield (input_vol, [input_vol, prior_batch])
def vol_seg_hack(volpath,
segpath,
proc_vol_fn=None,
proc_seg_fn=None,
verbose=False,
name='vol_seg', # name, optional
ext='.npz',
nb_restart_cycle=None, # number of files to restart after
nb_labels_reshape=-1,
collapse_2d=None,
force_binary=False,
nb_input_feats=1,
relabel=None,
vol_rand_seed=None,
seg_binary=False,
vol_subname='norm', # subname of volume
seg_subname='aseg', # subname of segmentation
**kwargs):
"""
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
"""
# get vol generator
vol_gen = vol(volpath, **kwargs, ext=ext,
nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,
relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',
verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
# on next (while):
while 1:
# get input and output (seg) vols
input_vol = next(vol_gen).astype('float16')
# output input and output
yield input_vol
def vol_sr_slices(volpath,
nb_input_slices,
nb_slice_spacing,
batch_size=1,
ext='.npz',
vol_rand_seed=None,
nb_restart_cycle=None,
name='vol_sr_slices',
rand_slices=True, # randomize init slice order (i.e. across entries per batch) given a volume
simulate_whole_sparse_vol=False,
verbose=False
):
"""
default generator for slice-wise super resolution
"""
def indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing):
idx = start_indices[0]
output_batch = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)
input_batch = np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)
for idx in start_indices[1:]:
out_sel = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)
output_batch = np.vstack([output_batch, out_sel])
input_batch = np.vstack([input_batch, np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)])
output_batch = np.reshape(output_batch, [batch_size, -1, output_batch.shape[-1]])
return (input_batch, output_batch)
    print('vol_sr_slices: SHOULD PROPERLY RANDOMIZE across different subjects', file=sys.stderr)
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
if nb_restart_cycle is None:
nb_restart_cycle = nb_files
# compute the number of slices we'll need in a subvolume
nb_slices_in_subvol = (nb_input_slices - 1) * (nb_slice_spacing + 1) + 1
# iterate through files
fileidx = -1
while 1:
fileidx = np.mod(fileidx + 1, nb_restart_cycle)
if verbose and fileidx == 0:
print('starting %s cycle' % name)
try:
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[fileidx]), ext, verbose)
except:
debug_error_msg = "#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s"
print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))
raise
# compute some random slice
nb_slices = vol_data.shape[2]
nb_start_slices = nb_slices - nb_slices_in_subvol + 1
# prepare batches
if simulate_whole_sparse_vol: # if essentially simulate a whole sparse volume for consistent inputs, and yield slices like that:
init_slice = 0
if rand_slices:
init_slice = np.random.randint(0, high=nb_start_slices-1)
all_start_indices = list(range(init_slice, nb_start_slices, nb_slice_spacing+1))
for batch_start in range(0, len(all_start_indices), batch_size*(nb_input_slices-1)):
start_indices = [all_start_indices[s] for s in range(batch_start, batch_start + batch_size)]
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
# if just random slices, get a batch of random starts from this volume and that's it.
elif rand_slices:
assert not simulate_whole_sparse_vol
start_indices = np.random.choice(range(nb_start_slices), size=batch_size, replace=False)
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
# go slice by slice (overlapping regions)
else:
for batch_start in range(0, nb_start_slices, batch_size):
start_indices = list(range(batch_start, batch_start + batch_size))
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
def img_seg(volpath,
segpath,
batch_size=1,
verbose=False,
nb_restart_cycle=None,
name='img_seg', # name, optional
ext='.png',
vol_rand_seed=None,
**kwargs):
"""
generator for (image, segmentation)
"""
def imggen(path, ext, nb_restart_cycle=None):
"""
TODO: should really use the volume generators for this
"""
files = _get_file_list(path, ext, vol_rand_seed)
if nb_restart_cycle is None:
nb_restart_cycle = len(files)
idx = -1
while 1:
idx = np.mod(idx+1, nb_restart_cycle)
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
yield im.reshape((1,) + im.shape)
img_gen = imggen(volpath, ext, nb_restart_cycle)
seg_gen = imggen(segpath, ext)
# on next (while):
while 1:
input_vol = np.vstack([next(img_gen).astype('float16')/255 for i in range(batch_size)])
input_vol = np.expand_dims(input_vol, axis=-1)
output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]
output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])
# output input and output
yield (input_vol, output_vol)
# Some internal use functions
def _get_file_list(volpath, ext=None, vol_rand_seed=None):
"""
get a list of files at the given path with the given extension
"""
files = [f for f in sorted(os.listdir(volpath)) if ext is None or f.endswith(ext)]
if vol_rand_seed is not None:
np.random.seed(vol_rand_seed)
files = np.random.permutation(files).tolist()
return files
def _load_medical_volume(filename, ext, verbose=False):
"""
load a medical volume from one of a number of file types
"""
with timer.Timer('load_vol', verbose >= 2):
if ext == '.npz':
vol_file = np.load(filename)
vol_data = vol_file['vol_data']
        elif ext == '.npy':
vol_data = np.load(filename)
elif ext == '.mgz' or ext == '.nii' or ext == '.nii.gz':
vol_med = nib.load(filename)
vol_data = vol_med.get_data()
else:
raise ValueError("Unexpected extension %s" % ext)
return vol_data
def _categorical_prep(vol_data, nb_labels_reshape, keep_vol_size, patch_size):
if nb_labels_reshape > 1:
lpatch = _to_categorical(vol_data, nb_labels_reshape, keep_vol_size)
# if keep_vol_size:
# lpatch = np.reshape(lpatch, [*patch_size, nb_labels_reshape])
elif nb_labels_reshape == 1:
lpatch = np.expand_dims(vol_data, axis=-1)
else:
assert nb_labels_reshape == 0
lpatch = vol_data
lpatch = np.expand_dims(lpatch, axis=0)
return lpatch
def _to_categorical(y, num_classes=None, reshape=True):
"""
# copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
"""
oshape = y.shape
y = np.array(y, dtype='int').ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), bool)
categorical[np.arange(n), y] = 1
if reshape:
categorical = np.reshape(categorical, [*oshape, num_classes])
return categorical
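def _example_to_categorical():
    # Tiny illustration (values are arbitrary): a (2, 2) label array with
    # classes 0..2 becomes a boolean one-hot array of shape (2, 2, 3).
    return _to_categorical(np.array([[0, 1], [2, 1]]), num_classes=3).shape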
def _relabel(vol_data, labels, forcecheck=False):
if forcecheck:
vd = np.unique(vol_data.flat)
assert len(vd) == len(labels), "number of given labels does not match number of actual labels"
# by doing zeros, any label not in labels gets left to 0
new_vol_data = np.zeros(vol_data.shape, vol_data.dtype)
for idx, val in np.ndenumerate(labels):
new_vol_data[vol_data == val] = idx
return new_vol_data
def _npz_headers(npz, namelist=None):
"""
taken from https://stackoverflow.com/a/43223420
Takes a path to an .npz file, which is a Zip archive of .npy files.
Generates a sequence of (name, shape, np.dtype).
namelist is a list with variable names, ending in '.npy'.
e.g. if variable 'var' is in the file, namelist could be ['var.npy']
"""
with zipfile.ZipFile(npz) as archive:
if namelist is None:
namelist = archive.namelist()
for name in namelist:
if not name.endswith('.npy'):
continue
npy = archive.open(name)
version = np.lib.format.read_magic(npy)
shape, fortran, dtype = np.lib.format._read_array_header(npy, version)
yield name[:-4], shape, dtype
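def _example_npz_headers(npz_path):
    # Minimal sketch (npz_path is a placeholder for an existing .npz file):
    # reads the stored shape of 'vol_data' without loading the array itself.
    for name, shape, dtype in _npz_headers(npz_path, namelist=['vol_data.npy']):
        return shape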
def _get_shape(x):
if isinstance(x, (list, tuple)):
return _get_shape(x[0])
else:
return x.shape[0]
| 37.097836 | 171 | 0.587676 | [
"MIT"
] | adriaan16/brainstorm | ext/neuron/neuron/generators.py | 39,435 | Python |
import fibra
import fibra.net
import fibra.event
import cPickle as pickle
import exceptions
import json
import time
import types
import zlib
schedule = fibra.schedule()
class NULL(object): pass
class Timeout(Exception): pass
class Disconnect(Exception): pass
class Connection(fibra.event.Connection):
COMPRESSION_THRESHOLD = 1024
REQUEST_TIMEOUT = 10.0
def __init__(self, *args, **kw):
fibra.event.Connection.__init__(self, *args, **kw)
self.request_id = 0
self.requests = {}
schedule.install(self.check_timeouts())
self.rpc = None
def check_timeouts(self):
while self.running:
now = time.time()
for request_id, (T, task) in self.requests.items():
if now - T > self.REQUEST_TIMEOUT:
self.requests.pop(request_id)
schedule.install(task, Timeout(self.protocol.transport.address))
yield 0.25
def serialize(self, response, obj, headers={}):
if 'accept' in headers:
if headers['accept'] == 'text/json':
response['content-type'] = 'text/json'
body = json.dumps(obj)
else:
response['content-type'] = 'text/pickle'
body = pickle.dumps(obj)
if len(body) > self.COMPRESSION_THRESHOLD:
body = zlib.compress(body)
response['compression'] = 'zlib'
return body
def deserialize(self, headers, body):
if headers.get('compression', None) == 'zlib':
body = zlib.decompress(body)
content_type = headers.get('content-type', None)
if content_type == 'text/pickle':
body = pickle.loads(body)
elif content_type == 'text/json':
body = json.loads(body)
return body
def request(self, name, args, kw):
headers = {}
body = args, kw
request_id = headers["request-id"] = str(self.request_id)
headers['method'] = name
self.request_id += 1
task = yield fibra.Self()
self.requests[request_id] = time.time(), task
yield self.send('request', headers, body)
response = yield fibra.Suspend()
yield fibra.Return(response)
def dispatch(self, top, headers, body):
if body:
body = self.deserialize(headers, body)
yield fibra.event.Connection.dispatch(self, top, headers, body)
def do_request(self, headers, body):
response = {}
response['request-id'] = headers['request-id']
result = NULL
try:
if headers["method"][0] == "_": raise AttributeError('cannot access private methods.')
method = getattr(self.rpc, headers['method'])
except AttributeError, e:
response['exception'] = 'AttributeError'
response['msg'] = str(e)
else:
args, kw = body
try:
result = method(*args, **kw)
while type(result) is types.GeneratorType:
result = yield result
result = self.serialize(response, result, headers)
except Exception, e:
response['exception'] = e.__class__.__name__
response['msg'] = str(e)
result = NULL
yield self.send('response', response, result)
def send(self, top, response, body=NULL):
if body is NULL:
body = ""
elif 'content-type' not in response:
body = self.serialize(response, body)
try:
yield fibra.event.Connection.send(self, top, response, body)
except fibra.ClosedTube:
raise Disconnect()
def do_response(self, headers, body):
request_id = headers["request-id"]
if request_id in self.requests:
T, task = self.requests.pop(request_id)
if "exception" in headers:
schedule.install(task, getattr(exceptions, headers['exception'])(headers['msg']))
else:
schedule.install(task, body)
yield None
else:
print "Expired request:", request_id
class RPC(object):
def __init__(self, connection):
self.connection = connection
def __getattr__(self, key):
return lambda *args, **kw: self.connection.request(key, args, kw)
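# Minimal usage sketch inside a fibra task. The method name `status` and the
# `connection` object are placeholders; RPC forwards attribute access to
# Connection.request, so results must be obtained with `yield`.
def example_task(connection):
    rpc = RPC(connection)
    result = yield rpc.status()
    print result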
| 32.264706 | 98 | 0.573154 | [
"Unlicense"
] | simonwittber/fibra | fibra/msg.py | 4,388 | Python |
from adminsortable2.admin import SortableAdminMixin
from decimal import Decimal
from django.contrib import admin
from django.contrib.gis import admin as geo_admin
from import_export import fields
from import_export import widgets
from import_export.admin import ImportExportModelAdmin
from import_export.resources import ModelResource as ImportExportModelResource
from solo.admin import SingletonModelAdmin
from .models import Client
from .models import Contact
from .models import Contract
from .models import DashboardItem
from .models import Estimate
from .models import Invoice
from .models import Location
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import SettingsApp
from .models import SettingsCompany
from .models import SettingsContract
from .models import Task
from .models import Testimonial
from .models import Time
class BooleanWidget(widgets.Widget):
"""
Convert strings to boolean values
"""
def clean(self, value):
if value == 'Yes':
return True
else:
return False
class DecimalWidget(widgets.Widget):
"""
Convert strings to decimal values
"""
def clean(self, value):
if value:
return Decimal(value.replace(',', ''))
else:
return Decimal(0)
class UserWidget(widgets.Widget):
"""
"""
def clean(self, value):
return value
# Register your models here.
class ClientResource(ImportExportModelResource):
"""
"""
class Meta:
model = Client
# auto fill id? #295
# https://github.com/django-import-export/django-import-export/issues/295
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Client)
class ClientAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ClientResource
class ContactResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
class Meta:
model = Contact
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Contact)
class ContactAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ContactResource
@admin.register(Contract)
class ContractAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(DashboardItem)
class DashboardItemAdmin(SortableAdminMixin, admin.ModelAdmin):
"""
"""
class EstimateResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
amount = fields.Field(
column_name='estimate_amount',
attribute='amount',
widget=DecimalWidget())
subtotal = fields.Field(
column_name='subtotal', attribute='subtotal', widget=DecimalWidget())
document_id = fields.Field(
column_name='estimate_id',
attribute='document_id',
widget=DecimalWidget())
class Meta:
model = Estimate
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Estimate)
class EstimateAdmin(ImportExportModelAdmin):
"""
"""
resource_class = EstimateResource
class InvoiceResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
amount = fields.Field(
column_name='amount', attribute='amount', widget=DecimalWidget())
paid_amount = fields.Field(
column_name='paid_amount',
attribute='paid_amount',
widget=DecimalWidget())
subtotal = fields.Field(
column_name='subtotal', attribute='subtotal', widget=DecimalWidget())
balance = fields.Field(
column_name='balance', attribute='balance', widget=DecimalWidget())
document_id = fields.Field(
column_name='invoice_id',
attribute='document_id',
widget=DecimalWidget())
class Meta:
model = Invoice
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Invoice)
class InvoiceAdmin(ImportExportModelAdmin):
"""
"""
resource_class = InvoiceResource
@admin.register(Location)
class LocationAdmin(geo_admin.OSMGeoAdmin):
"""
"""
search_fields = ('name', )
@admin.register(Log)
class LogAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Newsletter)
class NewsletterAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Note)
class NoteAdmin(ImportExportModelAdmin):
"""
"""
class ProjectResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
billable_amount = fields.Field(
column_name='billable_amount',
attribute='billable_amount',
widget=DecimalWidget())
budget = fields.Field(
column_name='budget', attribute='budget', widget=DecimalWidget())
budget_spent = fields.Field(
column_name='budget_spent',
attribute='budget_spent',
widget=DecimalWidget())
team_costs = fields.Field(
column_name='team_costs',
attribute='team_costs',
widget=DecimalWidget())
total_costs = fields.Field(
column_name='total_costs',
attribute='total_costs',
widget=DecimalWidget())
class Meta:
model = Project
exclude = ('task', 'team')
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Profile)
class ProfileAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Project)
class ProjectAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ProjectResource
@admin.register(Proposal)
class ProposalAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Report)
class ReportAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Service)
class ServiceAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(SettingsApp)
class SettingsAppAdmin(SingletonModelAdmin):
"""
"""
@admin.register(SettingsCompany)
class SettingsCompanyAdmin(SingletonModelAdmin):
"""
"""
@admin.register(SettingsContract)
class SettingsContractAdmin(SingletonModelAdmin):
"""
"""
@admin.register(Testimonial)
class TestimonialAdmin(ImportExportModelAdmin):
"""
"""
prepopulated_fields = {"slug": ("name", )}
class TaskResource(ImportExportModelResource):
"""
"""
class Meta:
model = Task
exclude = ('unit', 'billable', 'active')
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Task)
class TaskAdmin(ImportExportModelAdmin):
"""
"""
resource_class = TaskResource
class TimeResource(ImportExportModelResource):
"""
"""
billable = fields.Field(
column_name='billable', attribute='billable', widget=BooleanWidget())
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
invoiced = fields.Field(
column_name='invoiced', attribute='invoiced', widget=BooleanWidget())
project = fields.Field(
column_name='project',
attribute='project',
widget=widgets.ForeignKeyWidget(Project, 'name'))
task = fields.Field(
column_name='task',
attribute='task',
widget=widgets.ForeignKeyWidget(Task, 'name'))
user = fields.Field(
column_name='user', attribute='user', widget=UserWidget())
class Meta:
model = Time
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Time)
class TimeAdmin(ImportExportModelAdmin):
"""
"""
resource_class = TimeResource
| 23.951049 | 78 | 0.649635 | [
"MIT"
] | ACLARKNET/aclarknet-database | aclarknet/database/admin.py | 10,275 | Python |
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000146"
stations_name = "parl.2019-12-12/Version 1/west-norfolk.gov.uk-1572885849000-.tsv"
addresses_name = "parl.2019-12-12/Version 1/west-norfolk.gov.uk-1572885849000-.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "10024107639":
rec["postcode"] = ""
rec["accept_suggestion"] = False
if record.addressline1 == "8 Lions Close":
rec["postcode"] = "PE38 0AT"
return rec
def station_record_to_dict(self, record):
# Dersingham Village Centre
if record.polling_place_id == "17923":
rec = super().station_record_to_dict(record)
rec["location"] = Point(0.512389, 52.843528, srid=4326)
return rec
# Windsor Park, KING`S LYNN
if record.polling_place_id == "17867":
rec = super().station_record_to_dict(record)
rec["location"] = Point(0.404833, 52.748556, srid=4326)
return rec
if record.polling_place_id == "18049":
record = record._replace(polling_place_postcode="PE14 9QH")
record = record._replace(polling_place_easting="0")
record = record._replace(polling_place_northing="0")
return super().station_record_to_dict(record)
| 36.630435 | 87 | 0.654599 | [
"BSD-3-Clause"
] | alexdutton/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_kings_lynn.py | 1,685 | Python |
# -*- coding: utf-8 -*-
# Django settings for basic pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "[email protected]"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": "dev.db", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/site_media/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "media"),
os.path.join(PINAX_ROOT, "media", PINAX_THEME),
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = ""
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django_openid.consumer.SessionConsumer",
"django.contrib.messages.middleware.MessageMiddleware",
"pinax.apps.account.middleware.LocaleMiddleware",
"pagination.middleware.PaginationMiddleware",
"pinax.middleware.security.HideSensistiveFieldsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "basic_project.urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"staticfiles.context_processors.static_url",
"pinax.core.context_processors.pinax_settings",
"pinax.apps.account.context_processors.account",
"notification.context_processors.notification",
"announcements.context_processors.site_wide_announcements",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.humanize",
"pinax.templatetags",
# external
"notification", # must be first
"staticfiles",
"debug_toolbar",
"mailer",
"uni_form",
"django_openid",
"ajax_validation",
"timezones",
"emailconfirmation",
"announcements",
"pagination",
"idios",
# Pinax
"pinax.apps.account",
"pinax.apps.signup_codes",
"pinax.apps.analytics",
# project
"about",
"profiles",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
EMAIL_BACKEND = "mailer.backend.DbBackend"
ABSOLUTE_URL_OVERRIDES = {
"auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
AUTH_PROFILE_MODULE = "profiles.Profile"
NOTIFICATION_LANGUAGE_MODULE = "account.Account"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
AUTHENTICATION_BACKENDS = [
"pinax.apps.account.auth_backends.AuthenticationBackend",
]
LOGIN_URL = "/account/login/" # @@@ any way this can be a url name?
LOGIN_REDIRECT_URLNAME = "what_next"
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
# URCHIN_ID = "ua-..."
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
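# For illustration only (all values below are hypothetical), a local_settings.py
# override typically redefines the environment-specific pieces above, e.g.:
#
#     DEBUG = False
#     DATABASES = {
#         "default": {
#             "ENGINE": "django.db.backends.postgresql_psycopg2",
#             "NAME": "pinax_production",
#             "USER": "pinax",
#             "PASSWORD": "change-me",
#             "HOST": "localhost",
#             "PORT": "",
#         }
#     }
#     EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"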
| 29.773585 | 122 | 0.71071 | [
"MIT"
] | amarandon/pinax | pinax/projects/basic_project/settings.py | 6,312 | Python |
"""Generates faucet config for given number of switches and number of devices per switch"""
import getopt
import sys
import yaml
from forch.utils import proto_dict
from forch.proto.faucet_configuration_pb2 import Interface, StackLink, Datapath, \
Vlan, FaucetConfig, LLDPBeacon, Stack
CORP_DP_ID = 273
T1_DP_ID_START = 177
T2_DP_ID_START = 1295
FLAT_DP_ID_START = 513
SETUP_VLAN = 171
TEST_VLAN = 272
FLAT_LINK_PORT_START = 6
T1_STACK_PORT_START = 100
T2_STACK_PORT_START = 50
ACCESS_PORT_START_DEFAULT = 101
FLAT_ACCESS_PORT_START = 1
TAP_PORT = 4
FAUCET_EGRESS_PORT = 28
FLAT_EGRESS_PORT = 50
CORP_EGRESS_PORT = 10
LACP_MODE = 3
T1_DP_MAC_PREFIX = '0e:00:00:00:01:'
T2_DP_MAC_PREFIX = '0e:00:00:00:02:'
FLAT = 'flat'
CORP = 'corp'
STACK = 'stack'
T1_DP = 't1'
T2_DP = 't2'
# pylint: disable=protected-access
# pylint: disable=too-many-arguments
class FaucetConfigGenerator():
"""Class for generating faucet config for given switches and devices per switch"""
def _build_dp_interfaces(self, dp_index, **kwargs):
interfaces = {}
# add egress interface
egress_port = kwargs.get('egress_port')
tagged_vlans = kwargs.get('tagged_vlans')
lacp = kwargs.get('lacp')
if egress_port:
self._add_egress_interface(interfaces, egress_port, tagged_vlans, lacp)
# add tap interface
tap_vlan = kwargs.get('tap_vlan')
if tap_vlan:
self._add_tap_interface(interfaces, tap_vlan)
# add flat link interfaces
dps = kwargs.get('dps')
if dps:
self._add_flat_link_interfaces(interfaces, dps, dp_index)
# add stack interfaces linking from t1 to t2 switches
t2_dps = kwargs.get('t2_dps')
if t2_dps:
self._add_t1_stack_interfaces(interfaces, dp_index, t2_dps)
# add stack interfaces linking from t2 to t1 switches
t1_dps = kwargs.get('t1_dps')
if t1_dps:
self._add_t2_stack_interfaces(interfaces, dp_index, t1_dps)
# add access interfaces
access_ports = kwargs.get('access_ports')
access_port_start = kwargs.get('access_port_start', ACCESS_PORT_START_DEFAULT)
native_vlan = kwargs.get('native_vlan')
port_acl = kwargs.get('port_acl')
if access_ports:
self._add_access_interfaces(
interfaces, access_ports, access_port_start, native_vlan, port_acl)
return interfaces
def _add_egress_interface(self, interfaces, egress_port, tagged_vlans, lacp):
if lacp:
interfaces[egress_port] = Interface(
description='egress', lacp=LACP_MODE, tagged_vlans=tagged_vlans)
else:
interfaces[egress_port] = Interface(
description='egress', tagged_vlans=tagged_vlans)
def _add_tap_interface(self, interfaces, tap_vlan):
interfaces[TAP_PORT] = Interface(description='tap', tagged_vlans=[tap_vlan])
def _add_flat_link_interfaces(self, interfaces, dps, dp_index):
if dp_index < len(dps) - 1:
next_dp = dps[dp_index + 1]
next_port = FLAT_LINK_PORT_START + dp_index
description = ("to %s port %s" % (next_dp, next_port))
interfaces[next_port] = Interface(
description=description, stack=StackLink(dp=next_dp, port=next_port))
if dp_index > 0:
prev_dp = dps[dp_index - 1]
prev_port = FLAT_LINK_PORT_START + (len(dps) + dp_index - 1) % len(dps)
description = ("to %s port %s" % (prev_dp, prev_port))
interfaces[prev_port] = Interface(
description=description, stack=StackLink(dp=prev_dp, port=prev_port))
def _add_t1_stack_interfaces(self, interfaces, dp_index, t2_dps):
t2_port = T2_STACK_PORT_START + dp_index * 2
for index, t2_dp in enumerate(t2_dps):
port = T1_STACK_PORT_START + index
description = ("to %s port %s" % (t2_dp, t2_port))
interfaces[port] = Interface(
description=description, stack=StackLink(dp=t2_dp, port=t2_port))
def _add_t2_stack_interfaces(self, interfaces, dp_index, t1_dps):
t1_port = T1_STACK_PORT_START + dp_index
for index, t1_dp in enumerate(t1_dps):
port = T2_STACK_PORT_START + index * 2
description = ('to %s port %s' % (t1_dp, t1_port))
interfaces[port] = Interface(
description=description, stack=StackLink(dp=t1_dp, port=t1_port))
def _add_access_interfaces(self, interfaces, access_ports, access_port_start, native_vlan,
port_acl):
for index in range(access_ports):
interfaces[index + access_port_start] = Interface(
description='IoT Device', native_vlan=native_vlan, acl_in=port_acl,
max_hosts=1)
def _build_datapath_config(self, dp_id, interfaces, mac=None):
lldp_beacon = LLDPBeacon(max_per_interval=5, send_interval=5)
stack = Stack(priority=1)
return Datapath(
dp_id=dp_id, faucet_dp_mac=mac, hardware='Generic',
lacp_timeout=5, lldp_beacon=lldp_beacon, interfaces=interfaces, stack=stack)
def _generate_dp_mac(self, dp_type, dp_index):
if dp_type == T1_DP:
return T1_DP_MAC_PREFIX + "{:02x}".format(dp_index+1)
if dp_type == T2_DP:
return T2_DP_MAC_PREFIX + "{:02x}".format(dp_index+1)
raise Exception('Unknown dp_type: %s' % dp_type)
def create_scale_faucet_config(self, t1_switches, t2_switches, access_ports):
"""Create Faucet config with stacking topology"""
setup_vlan = SETUP_VLAN
test_vlan = TEST_VLAN
vlans = {
setup_vlan: Vlan(description='Faucet IoT'),
test_vlan: Vlan(description='Orchestrated Testing')
}
t1_dps = [('nz-kiwi-t1sw%s' % (dp_index + 1)) for dp_index in range(t1_switches)]
t2_dps = [('nz-kiwi-t2sw%s' % (dp_index + 1)) for dp_index in range(t2_switches)]
dps = {}
for dp_index, dp_name in enumerate(t1_dps):
tap_vlan = test_vlan if not dp_index else None
interfaces = self._build_dp_interfaces(
dp_index, dps=t1_dps, t2_dps=t2_dps, tagged_vlans=[setup_vlan],
tap_vlan=tap_vlan, egress_port=FAUCET_EGRESS_PORT, lacp=True)
dps[dp_name] = self._build_datapath_config(
T1_DP_ID_START + dp_index, interfaces, self._generate_dp_mac(T1_DP, dp_index))
for dp_index, dp_name in enumerate(t2_dps):
interfaces = self._build_dp_interfaces(
dp_index, t1_dps=t1_dps, access_ports=access_ports, native_vlan=setup_vlan,
port_acl='uniform_acl', lacp=True)
dps[dp_name] = self._build_datapath_config(
T2_DP_ID_START + dp_index, interfaces, self._generate_dp_mac(T2_DP, dp_index))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
def create_flat_faucet_config(self, num_switches, num_access_ports):
"""Create Faucet config with flat topology"""
setup_vlan = SETUP_VLAN
switches = [('sw%s' % (sw_num + 1)) for sw_num in range(num_switches)]
dps = {}
vlans = {setup_vlan: Vlan(description='Faucet IoT')}
for sw_num, sw_name in enumerate(switches):
interfaces = self._build_dp_interfaces(
sw_num, dps=switches, egress_port=FAUCET_EGRESS_PORT, tagged_vlans=[setup_vlan],
access_ports=num_access_ports, native_vlan=setup_vlan, port_acl='uniform_acl',
access_port_start=FLAT_ACCESS_PORT_START, lacp=True)
dps[sw_name] = self._build_datapath_config(
FLAT_DP_ID_START + sw_num, interfaces, self._generate_dp_mac(T2_DP, sw_num))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
def create_corp_faucet_config(self):
"""Create Faucet config for corp network"""
setup_vlan = SETUP_VLAN
switch = 'corp'
dps = {}
interfaces = self._build_dp_interfaces(
CORP_DP_ID, tagged_vlans=[setup_vlan], access_ports=1, access_port_start=1,
native_vlan=setup_vlan, egress_port=CORP_EGRESS_PORT)
dps[switch] = self._build_datapath_config(CORP_DP_ID, interfaces)
return FaucetConfig(dps=dps, version=2)
def main(argv):
"""main method for standalone run"""
config_generator = FaucetConfigGenerator()
filepath = '/tmp/faucet_config_dump'
egress = 2
access = 3
devices = 1
topo_type = STACK
argv = argv[1:]
help_msg = """
<python3> build_config.py -e <egress_switches> -a <access_switches> -d <devices per switch>
-p <config path> -t <topology type (flat, corp, stack)>
"""
try:
opts, _ = getopt.getopt(
argv, 'he:a:d:p:t:', ['egress=', 'access=', 'devices=', 'path=', 'type='])
except getopt.GetoptError:
print(help_msg)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(help_msg)
sys.exit()
elif opt in ('-e', '--egress'):
egress = int(arg)
elif opt in ('-a', '--access'):
access = int(arg)
elif opt in ('-d', '--devices'):
devices = int(arg)
elif opt in ('-p', '--path'):
filepath = arg
elif opt in ('-t', '--type'):
topo_type = arg
if topo_type == FLAT:
faucet_config = config_generator.create_flat_faucet_config(access, devices)
elif topo_type == CORP:
faucet_config = config_generator.create_corp_faucet_config()
elif topo_type == STACK:
faucet_config = config_generator.create_scale_faucet_config(egress, access, devices)
else:
        raise Exception('Unknown topology type: %s' % topo_type)
config_map = proto_dict(faucet_config)
with open(filepath, 'w') as config_file:
yaml.dump(config_map, config_file)
if __name__ == '__main__':
main(sys.argv)
| 39.414063 | 96 | 0.644896 | [
"Apache-2.0"
] | henry54809/forch | testing/python_lib/build_config.py | 10,090 | Python |
import logging
import yaml
from scanapi.config_loader import load_config_file
from scanapi.errors import (
BadConfigurationError,
EmptyConfigFileError,
InvalidKeyError,
InvalidPythonCodeError,
)
from scanapi.exit_code import ExitCode
from scanapi.reporter import Reporter
from scanapi.session import session
from scanapi.settings import settings
from scanapi.tree import EndpointNode
logger = logging.getLogger(__name__)
def scan():
"""Caller function that tries to scans the file and write the report."""
spec_path = settings["spec_path"]
try:
api_spec = load_config_file(spec_path)
except FileNotFoundError as e:
error_message = f"Could not find API spec file: {spec_path}. {str(e)}"
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
except EmptyConfigFileError as e:
error_message = f"API spec file is empty. {str(e)}"
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
except yaml.YAMLError as e:
error_message = "Error loading specification file."
error_message = "{}\nPyYAML: {}".format(error_message, str(e))
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
try:
root_node = EndpointNode(api_spec)
results = root_node.run()
except (InvalidKeyError, KeyError, InvalidPythonCodeError,) as e:
error_message = "Error loading API spec."
error_message = "{} {}".format(error_message, str(e))
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
try:
write_report(results)
except (BadConfigurationError, InvalidPythonCodeError) as e:
logger.error(e)
raise SystemExit(ExitCode.USAGE_ERROR)
session.exit()
def write_report(results):
"""Constructs a Reporter object and calls the write method of Reporter to
push the results to a file.
"""
reporter = Reporter(settings["output_path"], settings["template"])
reporter.write(results)
| 30.984848 | 78 | 0.706112 | [
"MIT"
] | hebertjulio/scanapi | scanapi/scan.py | 2,045 | Python |
print("\079") | 13 | 13 | 0.615385 | [
"MIT"
] | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | Chapter 02/ch2_17.py | 13 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox', oks_sigmas=None):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,
`keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if oks_sigmas is not None:
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Computes detection/keypoint metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
      all_metrics_per_category: If true, include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true if
        you have more than a handful of categories, because it will pollute
        your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/[email protected]': mean average precision at 50% IOU
'Precision/[email protected]': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels). NOTE: not present for 'keypoints'
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections. NOTE: not present for 'keypoints'
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if self._iou_type in ['bbox', 'segm']:
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]),
('Precision/[email protected]', self.stats[1]),
('Precision/[email protected]', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])])
elif self._iou_type == 'keypoints':
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(
category_name)] = self.stats[0]
summary_metrics['Precision/[email protected] ByCategory/{}'.format(
category_name)] = self.stats[1]
summary_metrics['Precision/[email protected] ByCategory/{}'.format(
category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(
category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(
category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(
category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(
category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(
category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(
category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(
category_name)] = self.stats[9]
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
      # Kept for backward compatibility
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision [email protected] ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision [email protected] ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
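# Example (illustrative values): a box [ymin=10., xmin=20., ymax=30., xmax=60.]
# is converted to [20.0, 10.0, 40.0, 20.0], i.e. [xmin, ymin, width, height].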
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_keypoints=None,
groundtruth_keypoint_visibilities=None,
groundtruth_masks=None,
groundtruth_is_crowd=None,
groundtruth_area=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
  ExportSingleImageDetectionBoxesToCoco. We assume that boxes and classes are in
  correspondence - that is: groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
      as an enum with 0=not labeled, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
has_keypoints = groundtruth_keypoints is not None
has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None
if has_keypoints and not has_keypoint_visibilities:
groundtruth_keypoint_visibilities = np.full(
(num_boxes, groundtruth_keypoints.shape[1]), 2)
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
export_dict = {
'id':
next_annotation_id + i,
'image_id':
image_id,
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area': area,
'iscrowd':
iscrowd
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
if has_keypoints:
keypoints = groundtruth_keypoints[i]
visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])
coco_keypoints = []
num_valid_keypoints = 0
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
if int(visibility) > 0:
num_valid_keypoints = num_valid_keypoints + 1
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_valid_keypoints
groundtruth_list.append(export_dict)
return groundtruth_list
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes,
detection_keypoints=None,
detection_keypoint_visibilities=None):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
  provided here must match the ones given to
  ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in
  correspondence - that is: boxes[i, :] and classes[i]
  are associated with the same detection.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
      treated as an enum with 0=not labeled, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
export_dict = {
'image_id':
image_id,
'category_id':
int(detection_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score':
float(detection_scores[i]),
}
if detection_keypoints is not None:
keypoints = detection_keypoints[i]
num_keypoints = keypoints.shape[0]
if detection_keypoint_visibilities is None:
detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),
2)
visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
coco_keypoints = []
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_keypoints
detections_list.append(export_dict)
return detections_list
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
'compatible lengths and shapes '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
Note this function is recommended to use for small dataset.
For large dataset, it should be used with a merge function
(e.g. in map reduce), otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in segment_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
| 45.346639 | 80 | 0.668937 | [
"Apache-2.0"
] | 1911590204/models | research/object_detection/metrics/coco_tools.py | 43,170 | Python |
import os
from PySide2 import QtWidgets
from mapclientplugins.filechooserstep.ui_configuredialog import Ui_ConfigureDialog
INVALID_STYLE_SHEET = 'background-color: rgba(239, 0, 0, 50)'
DEFAULT_STYLE_SHEET = ''
class ConfigureDialog(QtWidgets.QDialog):
"""
Configure dialog to present the user with the options to configure this step.
"""
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self._ui = Ui_ConfigureDialog()
self._ui.setupUi(self)
self._workflow_location = None
# Keep track of the previous identifier so that we can track changes
# and know how many occurrences of the current identifier there should
# be.
self._previousIdentifier = ''
# Set a place holder for a callable that will get set from the step.
# We will use this method to decide whether the identifier is unique.
self.identifierOccursCount = None
self._previousLocation = ''
self._makeConnections()
def _makeConnections(self):
self._ui.lineEdit0.textChanged.connect(self.validate)
self._ui.lineEditFileLocation.textChanged.connect(self.validate)
self._ui.pushButtonFileChooser.clicked.connect(self._fileChooserClicked)
def _fileChooserClicked(self):
# Second parameter returned is the filter chosen
location, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Select File Location', self._previousLocation)
if location:
self._previousLocation = location
display_location = self._output_location(location)
self._ui.lineEditFileLocation.setText(display_location)
def _output_location(self, location=None):
if location is None:
display_path = self._ui.lineEditFileLocation.text()
else:
display_path = location
if self._workflow_location and os.path.isabs(display_path):
display_path = os.path.relpath(display_path, self._workflow_location)
return display_path
def setWorkflowLocation(self, location):
self._workflow_location = location
def accept(self):
"""
Override the accept method so that we can confirm saving an
invalid configuration.
"""
result = QtWidgets.QMessageBox.Yes
if not self.validate():
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration',
                                                   'This configuration is invalid. '
                                                   'Unpredictable behaviour may result if you choose \'Yes\'; '
                                                   'are you sure you want to save this configuration?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
QtWidgets.QDialog.accept(self)
def validate(self):
"""
Validate the configuration dialog fields. For any field that is not valid
set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the
overall validity of the configuration.
"""
# Determine if the current identifier is unique throughout the workflow
# The identifierOccursCount method is part of the interface to the workflow framework.
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = (value == 0) or (value == 1 and self._previousIdentifier == self._ui.lineEdit0.text())
self._ui.lineEdit0.setStyleSheet(DEFAULT_STYLE_SHEET if valid else INVALID_STYLE_SHEET)
non_empty = len(self._ui.lineEditFileLocation.text())
file_path = self._output_location()
if self._workflow_location:
file_path = os.path.join(self._workflow_location, file_path)
location_valid = non_empty and os.path.isfile(file_path)
self._ui.lineEditFileLocation.setStyleSheet(DEFAULT_STYLE_SHEET if location_valid else INVALID_STYLE_SHEET)
return valid and location_valid
def getConfig(self):
"""
Get the current value of the configuration from the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
"""
self._previousIdentifier = self._ui.lineEdit0.text()
config = {'identifier': self._ui.lineEdit0.text(), 'File': self._output_location()}
if self._previousLocation:
config['previous_location'] = os.path.relpath(self._previousLocation, self._workflow_location)
else:
config['previous_location'] = ''
return config
def setConfig(self, config):
"""
Set the current value of the configuration for the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
"""
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
self._ui.lineEditFileLocation.setText(config['File'])
if 'previous_location' in config:
self._previousLocation = os.path.join(self._workflow_location, config['previous_location'])
| 41.569231 | 115 | 0.65544 | [
"Apache-2.0"
] | mapclient-plugins/mapclientplugins.filechooserstep | mapclientplugins/filechooserstep/configuredialog.py | 5,404 | Python |
from problem import Problem
class DistinctPowers(Problem, name="Distinct powers", expected=9183):
@Problem.solution()
def brute_force(self):
        # Good ol' fashioned set comprehension
return len({a ** b for a in range(2, 101) for b in range(2, 101)})
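# Illustrative check (added for clarity, not part of the original solution):
# distinct powers collide, e.g. 2 ** 4 == 4 ** 2 == 16, which is why the set
# comprehension above yields fewer than the 99 * 99 raw combinations.
# assert 2 ** 4 == 4 ** 2 == 16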
| 30 | 74 | 0.677778 | [
"MIT"
] | davisschenk/Project-Euler-Rewrite | problems/p029.py | 270 | Python |
import cv2
import sys
cwd = sys.path[0]
if __name__ == '__main__':
    cap = cv2.VideoCapture(cwd + '/face.avi')
    i = 0
    # Read frames until the stream is exhausted; only write frames that were
    # actually decoded, otherwise imwrite would fail once cap.read() returns None.
    success, img = cap.read()
    while success:
        cv2.imwrite(cwd + '/out/frame' + str(i) + '.jpg', img)
        i = i + 1
        success, img = cap.read()
    cap.release()
| 17.5 | 62 | 0.539286 | [
"MIT"
] | yo1995/Daily_Python_Tasks | HLWD_opencv_py/dissect-video-to-img-sequence.py | 280 | Python |
# terrascript/data/mrcrilly/awx.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:44 UTC)
import terrascript
class awx_credential(terrascript.Data):
pass
class awx_credential_azure_key_vault(terrascript.Data):
pass
class awx_credentials(terrascript.Data):
pass
__all__ = [
"awx_credential",
"awx_credential_azure_key_vault",
"awx_credentials",
]
| 17.26087 | 73 | 0.753149 | [
"BSD-2-Clause"
] | mjuenema/python-terrascript | terrascript/data/mrcrilly/awx.py | 397 | Python |
#
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import requests
from urllib.parse import urlencode
logger = logging.getLogger(__name__)
class IBMIAMClient:
def __init__(self, iam_config, cf_endpoint, cf_namespace):
self.iam_api_key = iam_config.get('api_key', None)
self.iam_auth_endpoint = iam_config['ibm_auth_endpoint']
self.cf_endpoint = cf_endpoint
self.cf_namespace = cf_namespace
def get_iam_token(self):
data = urlencode({'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': self.iam_api_key})
headers = {
'content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
res = requests.post(self.iam_auth_endpoint, data=data, headers=headers)
if res.status_code != 200:
raise RuntimeError("Error: http code {} while retrieving IAM token for API key.".format(res.status_code))
bearer_response = res.json()
bearer_token = bearer_response['access_token']
logger.debug(bearer_token)
return bearer_token
def get_function_namespace_id(self, iam_token):
logger.debug("Getting name space id for {}".format(self.cf_namespace))
headers = {
'content-type': 'application/json',
'Accept': 'application/json',
'Authorization': iam_token
}
url = '/'.join([self.cf_endpoint, 'api', 'v1', 'namespaces'])
res = requests.get(url, headers=headers)
if res.status_code != 200:
raise RuntimeError("Error: http code {} while listing namespaces.".format(res.status_code))
namespaces = res.json()
for current_namespace in namespaces['namespaces']:
if 'name' in current_namespace and current_namespace['name'] == self.cf_namespace:
logger.debug("Found name space id {} for {}".format(current_namespace['id'], self.cf_namespace))
return current_namespace['id']
raise Exception("No IBM Cloud Functions namespace \"{}\" found.".format(self.cf_namespace))
| 38.289855 | 117 | 0.671461 | [
"Apache-2.0"
] | class-euproject/lithops | pywren_ibm_cloud/libs/ibm_cloudfunctions/iam.py | 2,642 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2015, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy as np
from .normalise import normaliselabels
from .base import supervised_model
__all__ = ['normaliselabels', 'ctransforms']
class threshold_model(object):
'''
threshold_model
Attributes
----------
threshold : float
threshold value
'''
def __init__(self, threshold=.5):
        self.threshold = threshold
def apply(self, f):
return f >= self.threshold
def __repr__(self):
return 'threshold_model({})'.format(self.threshold)
__str__ = __repr__
class fixed_threshold_learner(object):
def __init__(self, threshold=.5):
self.threshold = threshold
def train(self, features, labels, **kwargs):
return threshold_model(self.threshold)
def __repr__(self):
return 'fixed_threshold_learner({})'.format(self.threshold)
__str__ = __repr__
class ctransforms_model(supervised_model):
'''
model = ctransforms_model(models)
A model that consists of a series of transformations.
See Also
--------
ctransforms
'''
def __init__(self, models):
self.models = models
def apply_many(self, features):
if len(features) == 0:
return features
for m in self.models:
features = m.apply_many(features)
return features
def __repr__(self):
return 'ctransforms_model({})'.format(self.models)
__str__ = __repr__
def __getitem__(self, ix):
return self.models[ix]
def apply(self,features):
for T in self.models:
features = T.apply(features)
return features
class ctransforms(object):
'''
ctransf = ctransforms(c0, c1, c2, ...)
Concatenate transforms.
'''
def __init__(self,*args):
self.transforms = args
def train(self, features, labels, **kwargs):
models = []
model = None
for T in self.transforms:
if model is not None:
features = np.array([model.apply(f) for f in features])
model = T.train(features, labels, **kwargs)
models.append(model)
return ctransforms_model(models)
def __repr__(self):
return 'ctransforms(*{})'.format(self.transforms)
__str__ = __repr__
def set_option(self, opt, val):
idx, opt = opt
self.transforms[idx].set_option(opt,val)
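# Illustrative usage sketch (added for clarity; chaining two threshold learners
# is purely for demonstration -- in practice the stages would be feature
# transforms followed by a classifier learner, and features/labels are assumed
# to be given):
#   learner = ctransforms(fixed_threshold_learner(.5), fixed_threshold_learner(.5))
#   model = learner.train(features, labels)
#   predictions = [model.apply(f) for f in features]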
| 29.727273 | 80 | 0.666389 | [
"MIT"
] | cumeadi/milk | milk/supervised/classifier.py | 3,597 | Python |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Verify that we execute TeX in a subdirectory (if that's where the document
resides) by checking that all the auxiliary files get created there and
not in the top-level directory. Test this when variantDir is used
Add use of \include and \includegraphics from within the included file
Also check that we find files
Test case courtesy Joel B. Mohler.
"""
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find 'latex'; skipping test.\n")
pdflatex = test.where_is('pdflatex')
if not pdflatex:
test.skip_test("Could not find 'pdflatex'; skipping test.\n")
test.subdir('docs')
test.subdir(['docs','content'])
test.subdir(['docs','fig'])
test.write('SConstruct', """\
import os
env = Environment(TOOLS = ['tex', 'pdftex'])
env.VariantDir('build', 'docs', duplicate=0)
pdf = env.PDF('build/main.tex')
""")
test.write(['docs','main.tex'],
r"""\documentclass{article}
\usepackage{makeidx}
\makeindex
\begin{document}
Hi there.
\index{info}
\include{content/chapter}
\printindex{}
\end{document}
""")
test.write(['docs','content','chapter.tex'],
r"""Sub-document 1
\input{content/subchap}
""")
test.write(['docs','content','subchap.tex'], """\
Sub-chapter 2
""")
#test.run(arguments = '.')
#test.run(arguments = '.', stderr=None, stdout=None)
# next line tests that side effect nodes get disambiguated
# and their directories created in a variantDir before
# the builder tries to populate them and fails
test.run(arguments = 'build/main.pdf', stderr=None, stdout=None)
test.must_exist(['build', 'main.aux'])
test.must_exist(['build', 'main.fls'])
test.must_exist(['build', 'main.idx'])
test.must_exist(['build', 'main.ilg'])
test.must_exist(['build', 'main.ind'])
test.must_exist(['build', 'main.log'])
test.must_exist(['build', 'main.pdf'])
test.must_exist(['build', 'content', 'chapter.aux'])
test.must_not_exist('main.aux')
test.must_not_exist('main.dvi')
test.must_not_exist('main.idx')
test.must_not_exist('main.ilg')
test.must_not_exist('main.ind')
test.must_not_exist('main.log')
test.must_not_exist('main.pdf')
test.must_not_exist(['docs', 'main.aux'])
test.must_not_exist(['docs', 'main.dvi'])
test.must_not_exist(['docs', 'main.idx'])
test.must_not_exist(['docs', 'main.ilg'])
test.must_not_exist(['docs', 'main.ind'])
test.must_not_exist(['docs', 'main.log'])
test.must_not_exist(['docs', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'main.aux'])
test.must_not_exist(['docs', 'content', 'main.dvi'])
test.must_not_exist(['docs', 'content', 'main.idx'])
test.must_not_exist(['docs', 'content', 'main.ilg'])
test.must_not_exist(['docs', 'content', 'main.ind'])
test.must_not_exist(['docs', 'content', 'main.log'])
test.must_not_exist(['docs', 'content', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'chapter.aux'])
test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.write(['docs','content', 'subchap.tex'], """\
Sub-document 2a
""")
test.not_up_to_date(arguments = '.')
#test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 30.034483 | 74 | 0.723766 | [
"MIT"
] | Boris-de/scons | test/TEX/subdir_variantdir_include2.py | 4,355 | Python |
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "AttrDecl/name"
@dataclass
class Root:
class Meta:
name = "root"
namespace = "AttrDecl/name"
value_00: Optional[int] = field(
default=None,
metadata={
"name": "ପ00",
"type": "Attribute",
}
)
value_01: Optional[int] = field(
default=None,
metadata={
"name": "ଭ01",
"type": "Attribute",
}
)
value_02: Optional[int] = field(
default=None,
metadata={
"name": "ର02",
"type": "Attribute",
}
)
value_10: Optional[int] = field(
default=None,
metadata={
"name": "ଲ10",
"type": "Attribute",
}
)
value_11: Optional[int] = field(
default=None,
metadata={
"name": "ଲ11",
"type": "Attribute",
}
)
value_12: Optional[int] = field(
default=None,
metadata={
"name": "ଳ12",
"type": "Attribute",
}
)
value_20: Optional[int] = field(
default=None,
metadata={
"name": "ଶ20",
"type": "Attribute",
}
)
value_21: Optional[int] = field(
default=None,
metadata={
"name": "ଷ21",
"type": "Attribute",
}
)
value_22: Optional[int] = field(
default=None,
metadata={
"name": "ହ22",
"type": "Attribute",
}
)
value_30: Optional[int] = field(
default=None,
metadata={
"name": "ଽ30",
"type": "Attribute",
}
)
value_40: Optional[int] = field(
default=None,
metadata={
"name": "ଡ଼40",
"type": "Attribute",
}
)
value_41: Optional[int] = field(
default=None,
metadata={
"name": "ଡ଼41",
"type": "Attribute",
}
)
value_42: Optional[int] = field(
default=None,
metadata={
"name": "ଢ଼42",
"type": "Attribute",
}
)
value_50: Optional[int] = field(
default=None,
metadata={
"name": "ୟ50",
"type": "Attribute",
}
)
value_51: Optional[int] = field(
default=None,
metadata={
"name": "ୠ51",
"type": "Attribute",
}
)
value_52: Optional[int] = field(
default=None,
metadata={
"name": "ୡ52",
"type": "Attribute",
}
)
value_60: Optional[int] = field(
default=None,
metadata={
"name": "அ60",
"type": "Attribute",
}
)
value_61: Optional[int] = field(
default=None,
metadata={
"name": "இ61",
"type": "Attribute",
}
)
value_62: Optional[int] = field(
default=None,
metadata={
"name": "ஊ62",
"type": "Attribute",
}
)
value_70: Optional[int] = field(
default=None,
metadata={
"name": "எ70",
"type": "Attribute",
}
)
value_71: Optional[int] = field(
default=None,
metadata={
"name": "ஏ71",
"type": "Attribute",
}
)
value_72: Optional[int] = field(
default=None,
metadata={
"name": "ஐ72",
"type": "Attribute",
}
)
value_80: Optional[int] = field(
default=None,
metadata={
"name": "ஒ80",
"type": "Attribute",
}
)
value_81: Optional[int] = field(
default=None,
metadata={
"name": "ஓ81",
"type": "Attribute",
}
)
value_82: Optional[int] = field(
default=None,
metadata={
"name": "க82",
"type": "Attribute",
}
)
value_90: Optional[int] = field(
default=None,
metadata={
"name": "ங90",
"type": "Attribute",
}
)
value_91: Optional[int] = field(
default=None,
metadata={
"name": "ங91",
"type": "Attribute",
}
)
value_92: Optional[int] = field(
default=None,
metadata={
"name": "ச92",
"type": "Attribute",
}
)
| 21.598086 | 40 | 0.421799 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/sun_data/attr_decl/ad_name/ad_name00104m/ad_name00104m10_xsd/ad_name00104m10.py | 4,570 | Python |
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read()
setup_requirements = ['setuptools_scm', ]
test_requirements = ['pytest>=3', 'pytest-runner']
setup(
author="USDA ARS Northwest Watershed Research Center",
author_email='[email protected]',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Take 2 for pysnobal in pure python",
entry_points={
'console_scripts': [
'pysnobal=pysnobal.cli:main',
],
},
install_requires=requirements,
license="CC0 1.0",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords='pysnobal',
name='pysnobal',
packages=find_packages(include=['pysnobal', 'pysnobal.*']),
package_data={
'pysnobal': [
'./pysnobal_core_config.ini'
]
},
use_scm_version={
'local_scheme': 'node-and-date',
},
setup_requires=setup_requirements,
test_suite='pysnobal.tests',
tests_require=test_requirements,
url='https://github.com/scotthavens/pysnobal',
zip_safe=False,
)
| 28.741935 | 74 | 0.645903 | [
"CC0-1.0"
] | scotthavens/pysnobal | setup.py | 1,782 | Python |
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-bitcoinlib.
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ambkh_key(self, other_pubkey):
ambkh_keybuffer = ctypes.create_string_buffer(32)
        r = ssl.ECDH_compute_key(ctypes.pointer(ambkh_keybuffer), 32,
                                 ssl.EC_KEY_get0_public_key(other_pubkey.k),
                                 self.k, 0)
        if r != 32:
            raise Exception('CKey.get_ambkh_key(): ECDH_compute_key() failed')
return ambkh_keybuffer.raw
def get_ambkh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ambkh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
        sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
        result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
        return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
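# Illustrative usage sketch (added for clarity, not part of the original module;
# the secret bytes and message below are arbitrary placeholders):
#   key = CECKey()
#   key.set_secretbytes(b'\x01' * 32)
#   digest = hashlib.sha256(b'message').digest()
#   sig = key.sign(digest)
#   assert key.verify(digest, sig)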
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| 36.480687 | 130 | 0.688235 | [
"MIT"
] | Alonewolf-123/AmbankCoin-Core | test/functional/test_framework/key.py | 8,500 | Python |
# -*- coding: utf-8 -*-
import errno
import os
import re
import hashlib
import tempfile
import sys
import shutil
import logging
import click
import crayons
import delegator
import parse
import requests
import six
import stat
import warnings
try:
from weakref import finalize
except ImportError:
try:
from .vendor.backports.weakref import finalize
except ImportError:
class finalize(object):
def __init__(self, *args, **kwargs):
logging.warn('weakref.finalize unavailable, not cleaning...')
def detach(self):
return False
from time import time
logging.basicConfig(level=logging.ERROR)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
except ImportError:
try:
from .vendor.pathlib2 import Path
except ImportError:
pass
from distutils.spawn import find_executable
from contextlib import contextmanager
from .patched.piptools.resolver import Resolver
from .patched.piptools.repositories.pypi import PyPIRepository
from .patched.piptools.scripts.compile import get_pip_command
from .patched.piptools import logging as piptools_logging
from .patched.piptools.exceptions import NoCandidateFound
from .vendor.pip9.download import is_archive_file
from .vendor.pip9.exceptions import DistributionNotFound
from .vendor.pip9.index import Link
from .vendor.pip9._vendor.requests.exceptions import HTTPError, ConnectionError
from .pep508checker import lookup
from .environments import PIPENV_MAX_ROUNDS, PIPENV_CACHE_DIR
if six.PY2:
class ResourceWarning(Warning):
pass
specifiers = [k for k in lookup.keys()]
# List of version control systems we support.
VCS_LIST = ('git', 'svn', 'hg', 'bzr')
SCHEME_LIST = ('http://', 'https://', 'ftp://', 'ftps://', 'file://')
requests = requests.Session()
def get_requirement(dep):
    """Pre-clean requirement strings passed to the requirements parser.
    Ensures that we can accept both local and relative paths, file and VCS URIs,
    remote URIs, and package names, and that we pass only valid requirement strings
    to the requirements parser. Performs necessary modifications to requirements
    object if the user input was a local relative path.
    :param str dep: A requirement line
    :returns: :class:`requirements.Requirement` object
    """
    from .vendor.pip9.req.req_install import _strip_extras, Wheel
    from .vendor import requirements
path = None
uri = None
cleaned_uri = None
editable = False
dep_link = None
# check for editable dep / vcs dep
if dep.startswith('-e '):
editable = True
# Use the user supplied path as the written dependency
dep = dep.split(' ', 1)[1]
# Split out markers if they are present - similar to how pip does it
# See pip9.req.req_install.InstallRequirement.from_line
if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
marker_sep = ';'
else:
marker_sep = '; '
if marker_sep in dep:
dep, markers = dep.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
# Strip extras from the requirement so we can make a properly parseable req
dep, extras = _strip_extras(dep)
# Only operate on local, existing, non-URI formatted paths which are installable
if is_installable_file(dep):
dep_path = Path(dep)
dep_link = Link(dep_path.absolute().as_uri())
if dep_path.is_absolute() or dep_path.as_posix() == '.':
path = dep_path.as_posix()
else:
path = get_converted_relative_path(dep)
dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment
elif is_vcs(dep):
# Generate a Link object for parsing egg fragments
dep_link = Link(dep)
# Save the original path to store in the pipfile
uri = dep_link.url
# Construct the requirement using proper git+ssh:// replaced uris or names if available
cleaned_uri = clean_git_uri(dep)
dep = cleaned_uri
if editable:
dep = '-e {0}'.format(dep)
req = [r for r in requirements.parse(dep)][0]
# if all we built was the requirement name and still need everything else
if req.name and not any([req.uri, req.path]):
if dep_link:
if dep_link.scheme.startswith('file') and path and not req.path:
req.path = path
req.local_file = True
req.uri = None
else:
req.uri = dep_link.url_without_fragment
# If the result is a local file with a URI and we have a local path, unset the URI
# and set the path instead -- note that local files may have 'path' set by accident
elif req.local_file and path and not req.vcs:
req.path = path
req.uri = None
if dep_link and dep_link.is_wheel and not req.name:
req.name = os.path.basename(Wheel(dep_link.path).name)
elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri:
req.uri = strip_ssh_from_git_uri(req.uri)
req.line = strip_ssh_from_git_uri(req.line)
req.editable = editable
if markers:
req.markers = markers
if extras:
# Bizarrely this is also what pip does...
req.extras = [
r for r in requirements.parse('fakepkg{0}'.format(extras))
][
0
].extras
return req
def cleanup_toml(tml):
toml = tml.split('\n')
new_toml = []
# Remove all empty lines from TOML.
for line in toml:
if line.strip():
new_toml.append(line)
toml = '\n'.join(new_toml)
new_toml = []
# Add newlines between TOML sections.
for i, line in enumerate(toml.split('\n')):
# Skip the first line.
if line.startswith('['):
if i > 0:
# Insert a newline before the heading.
new_toml.append('')
new_toml.append(line)
# adding new line at the end of the TOML file
new_toml.append('')
toml = '\n'.join(new_toml)
return toml
def parse_python_version(output):
"""Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
"""
version_pattern = re.compile(r'''
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
(?P<micro>\d+) # Micro = one or more digit.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
''', re.VERBOSE)
match = version_pattern.match(output)
if not match:
return None
return match.groupdict(default='0')
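# Illustrative examples (added for clarity, derived from the pattern above):
#   parse_python_version('Python 3.6.5')   -> {'major': '3', 'minor': '6', 'micro': '5'}
#   parse_python_version('Python 2.7')     -> {'major': '2', 'minor': '7', 'micro': '0'}
#   parse_python_version('not a version')  -> None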
def python_version(path_to_python):
if not path_to_python:
return None
try:
c = delegator.run([path_to_python, '--version'], block=False)
except Exception:
return None
c.block()
version = parse_python_version(c.out.strip() or c.err.strip())
try:
version = u'{major}.{minor}.{micro}'.format(**version)
except TypeError:
return None
return version
def escape_grouped_arguments(s):
"""Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
"""
if s is None:
return None
# Additional escaping for windows paths
if os.name == 'nt':
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"'
def clean_pkg_version(version):
"""Uses pip to prepare a package version string, from our internal version."""
return six.u(pep440_version(str(version).replace('==', '')))
class HackedPythonVersion(object):
"""A Beautiful hack, which allows us to tell pip which version of Python we're using."""
def __init__(self, python_version, python_path):
self.python_version = python_version
self.python_path = python_path
def __enter__(self):
os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)
os.environ['PIP_PYTHON_PATH'] = str(self.python_path)
def __exit__(self, *args):
        # Restore original Python version information.
        del os.environ['PIP_PYTHON_VERSION']
        del os.environ['PIP_PYTHON_PATH']
def prepare_pip_source_args(sources, pip_args=None):
if pip_args is None:
pip_args = []
if sources:
# Add the source to pip9.
pip_args.extend(['-i', sources[0]['url']])
# Trust the host if it's not verified.
if not sources[0].get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(sources[0]['url']).netloc.split(':')[0],
]
)
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(['--extra-index-url', source['url']])
# Trust the host if it's not verified.
if not source.get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(source['url']).hostname,
]
)
return pip_args
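# Illustrative example (added for clarity; the index URL is a placeholder):
#   prepare_pip_source_args([{'url': 'https://pypi.org/simple', 'verify_ssl': True}])
#   -> ['-i', 'https://pypi.org/simple']
# Additional sources become '--extra-index-url' entries, and '--trusted-host'
# is appended for any source with verify_ssl set to False.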
def actually_resolve_reps(
deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre
):
from pip9 import basecommand, req
from pip9._vendor import requests as pip_requests
class PipCommand(basecommand.Command):
"""Needed for pip-tools."""
name = 'PipCommand'
constraints = []
req_dir = tempfile.mkdtemp(prefix='pipenv-', suffix='-requirements')
for dep in deps:
if dep:
if dep.startswith('-e '):
constraint = req.InstallRequirement.from_editable(
dep[len('-e '):]
)
else:
fd, t = tempfile.mkstemp(
prefix='pipenv-', suffix='-requirement.txt', dir=req_dir
)
with os.fdopen(fd, 'w') as f:
f.write(dep)
constraint = [
c for c in req.parse_requirements(t, session=pip_requests)
][
0
]
# extra_constraints = []
if ' -i ' in dep:
index_lookup[constraint.name] = project.get_source(
url=dep.split(' -i ')[1]
).get(
'name'
)
if constraint.markers:
markers_lookup[constraint.name] = str(
constraint.markers
).replace(
'"', "'"
)
constraints.append(constraint)
rmtree(req_dir)
pip_command = get_pip_command()
pip_args = []
if sources:
pip_args = prepare_pip_source_args(sources, pip_args)
if verbose:
print('Using pip: {0}'.format(' '.join(pip_args)))
pip_options, _ = pip_command.parse_args(pip_args)
session = pip_command._build_session(pip_options)
pypi = PyPIRepository(
pip_options=pip_options, use_json=False, session=session
)
if verbose:
logging.log.verbose = True
piptools_logging.log.verbose = True
resolved_tree = set()
resolver = Resolver(
constraints=constraints,
repository=pypi,
clear_caches=clear,
prereleases=pre,
)
# pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages
try:
resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))
except (NoCandidateFound, DistributionNotFound, HTTPError) as e:
click.echo(
'{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n '
'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'
''.format(
crayons.red('Warning', bold=True),
crayons.red('$ pipenv install --skip-lock'),
crayons.red('$ pipenv graph'),
),
err=True,
)
click.echo(crayons.blue(str(e)), err=True)
if 'no version found at all' in str(e):
click.echo(
crayons.blue(
'Please check your version specifier and version number. See PEP440 for more information.'
)
)
raise RuntimeError
return resolved_tree, resolver
def venv_resolve_deps(
deps, which, project, pre=False, verbose=False, clear=False, allow_global=False
):
from . import resolver
import json
resolver = escape_grouped_arguments(resolver.__file__.rstrip('co'))
cmd = '{0} {1} {2} {3} {4} {5}'.format(
escape_grouped_arguments(which('python')),
resolver,
'--pre' if pre else '',
'--verbose' if verbose else '',
'--clear' if clear else '',
'--system' if allow_global else '',
)
os.environ['PIPENV_PACKAGES'] = '\n'.join(deps)
c = delegator.run(cmd, block=True)
del os.environ['PIPENV_PACKAGES']
try:
assert c.return_code == 0
except AssertionError:
if verbose:
click.echo(c.out, err=True)
click.echo(c.err, err=True)
else:
click.echo(c.err[int(len(c.err) / 2) - 1:], err=True)
sys.exit(c.return_code)
if verbose:
click.echo(c.out.split('RESULTS:')[0], err=True)
try:
return json.loads(c.out.split('RESULTS:')[1].strip())
except IndexError:
raise RuntimeError('There was a problem with locking.')
def resolve_deps(
deps,
which,
project,
sources=None,
verbose=False,
python=False,
clear=False,
pre=False,
allow_global=False,
):
"""Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip9.
"""
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
# First (proper) attempt:
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
# Don't exit here, like usual.
resolved_tree = None
# Second (last-resort) attempt:
if resolved_tree is None:
with HackedPythonVersion(
python_version='.'.join([str(s) for s in sys.version_info[:3]]),
python_path=backup_python_path,
):
try:
# Attempt to resolve again, with different Python version information,
# particularly for particularly particular packages.
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if not result.editable:
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
if not markers_lookup.get(result.name):
markers = str(
result.markers
) if result.markers and 'extra' not in str(
result.markers
) else None
else:
markers = markers_lookup.get(result.name)
collected_hashes = []
if any('python.org' in source['url'] or 'pypi.org' in source['url']
for source in sources):
try:
# Grab the hashes from the new warehouse API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(name),
timeout=10,
)
api_releases = r.json()['releases']
cleaned_releases = {}
for api_version, api_info in api_releases.items():
cleaned_releases[
clean_pkg_version(api_version)
] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [
'sha256:' + s for s in collected_hashes
]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo(
'{0}: Error generating hash for {1}'.format(
crayons.red('Warning', bold=True), name
)
)
# Collect un-collectable hashes (should work with devpi).
try:
collected_hashes = collected_hashes + list(
list(resolver.resolve_hashes([result]).items())[0][1]
)
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
def multi_split(s, split):
"""Splits on multiple given separators."""
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if len(i) > 0]
def convert_deps_from_pip(dep):
""""Converts a pip-formatted dependency to a Pipfile-formatted one."""
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
# File installs.
if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs:
# Assign a package name to the file, last 7 of it's sha256 hex digest.
if not req.uri and not req.path:
req.path = os.path.abspath(req.name)
hashable_path = req.uri if req.uri else req.path
if not req.name:
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[len(req.name) - 7:]
# {path: uri} TOML (spec 4 I guess...)
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
# Add --editable if applicable
if req.editable:
dependency[req.name].update({'editable': True})
# VCS Installs.
elif req.vcs:
if req.name is None:
raise ValueError(
'pipenv requires an #egg fragment for version controlled '
'dependencies. Please install remote dependency '
'in the form {0}#egg=<package-name>.'.format(req.uri)
)
# Crop off the git+, etc part.
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[len(req.vcs) + 1:]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
# Add --editable, if it's there.
if req.editable:
dependency[req.name].update({'editable': True})
# Add subdirectory, if it's there
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
# Add the specifier, if it was provided.
if req.revision:
dependency[req.name].update({'ref': req.revision})
# Extras: e.g. #egg=requests[security]
if req.extras:
dependency[req.name].update({'extras': req.extras})
elif req.extras or req.specs or hasattr(req, 'markers'):
specs = None
# Comparison operators: e.g. Django>1.10
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
# Extras: e.g. requests[socks]
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
# Bare dependencies: e.g. requests
else:
dependency[dep] = '*'
# Cleanup when there's multiple values, e.g. -e.
if len(dependency) > 1:
for key in dependency.copy():
if not hasattr(dependency[key], 'keys'):
del dependency[key]
return dependency
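# Illustrative examples (added for clarity; package names are placeholders and
# the exact dictionaries depend on the vendored requirements parser):
#   convert_deps_from_pip('requests')     -> {'requests': '*'}
#   convert_deps_from_pip('Django>1.10')  -> {'Django': '>1.10'}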
def is_star(val):
return isinstance(val, six.string_types) and val == '*'
def is_pinned(val):
return isinstance(val, six.string_types) and val.startswith('==')
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
""""Converts a Pipfile-formatted dependency to a pip-formatted one."""
dependencies = []
for dep in deps.keys():
# Default (e.g. '>1.10').
extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''
version = ''
index = ''
# Get rid of '*'.
if is_star(deps[dep]) or str(extra) == '{}':
extra = ''
hash = ''
# Support for single hash (spec 1).
if 'hash' in deps[dep]:
hash = ' --hash={0}'.format(deps[dep]['hash'])
# Support for multiple hashes (spec 2).
if 'hashes' in deps[dep]:
hash = '{0} '.format(
''.join(
[' --hash={0} '.format(h) for h in deps[dep]['hashes']]
)
)
# Support for extras (e.g. requests[socks])
if 'extras' in deps[dep]:
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if 'version' in deps[dep]:
if not is_star(deps[dep]['version']):
version = deps[dep]['version']
# For lockfile format.
if 'markers' in deps[dep]:
specs = '; {0}'.format(deps[dep]['markers'])
else:
# For pipfile format.
specs = []
for specifier in specifiers:
if specifier in deps[dep]:
if not is_star(deps[dep][specifier]):
specs.append(
'{0} {1}'.format(specifier, deps[dep][specifier])
)
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs = ''
if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]):
pip_src_args = []
if 'index' in deps[dep]:
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
# Support for version control
maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]
vcs = maybe_vcs[0] if maybe_vcs else None
# Support for files.
if 'file' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
# Support for paths.
elif 'path' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
# Support for @refs.
if 'ref' in deps[dep]:
extra += '@{0}'.format(deps[dep]['ref'])
extra += '#egg={0}'.format(dep)
# Support for subdirectory
if 'subdirectory' in deps[dep]:
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
# Support for editable.
if 'editable' in deps[dep]:
# Support for --egg.
dep = '-e '
else:
dep = ''
s = '{0}{1}{2}{3}{4} {5}'.format(
dep, extra, version, specs, hash, index
).strip()
dependencies.append(s)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
def mkdir_p(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError(
"a file with the same name as the desired dir, '{0}', already exists.".format(
newdir
)
)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir_p(head)
if tail:
os.mkdir(newdir)
def is_required_version(version, specified_version):
"""Check to see if there's a hard requirement for version
number provided in the Pipfile.
"""
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', '')
if specified_version.startswith('=='):
return version.strip() == specified_version.split('==')[1].strip()
return True
def strip_ssh_from_git_uri(uri):
"""Return git+ssh:// formatted URI to git+git@ format"""
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
def clean_git_uri(uri):
"""Cleans VCS uris from pip9 format"""
if isinstance(uri, six.string_types):
# Add scheme for parsing purposes, this is also what pip does
if uri.startswith('git+') and '://' not in uri:
uri = uri.replace('git+', 'git+ssh://')
return uri
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, 'get'):
return pipfile_entry.get('editable', False) and any(
pipfile_entry.get(key) for key in ('file', 'path') + VCS_LIST
)
return False
def is_vcs(pipfile_entry):
    """Determine if dictionary entry from Pipfile is for a vcs dependency."""
    from .vendor import requirements
if hasattr(pipfile_entry, 'keys'):
return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
elif isinstance(pipfile_entry, six.string_types):
return bool(
requirements.requirement.VCS_REGEX.match(
clean_git_uri(pipfile_entry)
)
)
return False
def is_installable_file(path):
"""Determine if a path can potentially be installed"""
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if hasattr(path, 'keys') and any(
key for key in path.keys() if key in ['file', 'path']
):
path = urlparse(path['file']).path if 'file' in path else path['path']
if not isinstance(path, six.string_types) or path == '*':
return False
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in '!=<>~'):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
if not os.path.exists(os.path.abspath(path)):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if lookup_path.is_dir() and is_installable_dir(absolute_path):
return True
elif lookup_path.is_file() and is_archive_file(absolute_path):
return True
return False
def is_file(package):
"""Determine if a package name is for a File dependency."""
if hasattr(package, 'keys'):
return any(key for key in package.keys() if key in ['file', 'path'])
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
def pep440_version(version):
"""Normalize version to PEP 440 standards"""
from .vendor.pip9.index import parse_version
# Use pip built-in version parser.
return str(parse_version(version))
def pep423_name(name):
"""Normalize package name to PEP 423 style standard."""
name = name.lower()
if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
return name.replace('_', '-')
else:
return name
def proper_case(package_name):
"""Properly case project name from pypi.org."""
# Hit the simple API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(package_name),
timeout=0.3,
stream=True,
)
if not r.ok:
raise IOError(
'Unable to find package {0} in PyPI repository.'.format(
package_name
)
)
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
def split_section(input_file, section_suffix, test_function):
"""
Split a pipfile or a lockfile section out by section name and test function
:param dict input_file: A dictionary containing either a pipfile or lockfile
:param str section_suffix: A string of the name of the section
:param func test_function: A test function to test against the value in the key/value pair
>>> split_section(my_lockfile, 'vcs', is_vcs)
{
'default': {
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
}
},
'default-vcs': {
"e1839a8": {
"editable": true,
"path": "."
}
}
}
"""
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(section in input_file for section in pipfile_sections):
sections = pipfile_sections
elif any(section in input_file for section in lockfile_sections):
sections = lockfile_sections
else:
# return the original file if we can't find any pipfile or lockfile sections
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
def split_file(file_dict):
"""Split VCS and editable dependencies out from file."""
sections = {
'vcs': is_vcs,
'editable': lambda x: hasattr(x, 'keys') and x.get('editable'),
}
for k, func in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
def merge_deps(
file_dict,
project,
dev=False,
requirements=False,
ignore_hashes=False,
blocking=False,
only=False,
):
"""
Given a file_dict, merges dependencies and converts them to pip dependency lists.
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
:param :class:`pipenv.project.Project` project: Pipenv project
:param bool dev=False: Flag indicating whether dev dependencies are to be installed
:param bool requirements=False: Flag indicating whether to use a requirements file
:param bool ignore_hashes=False:
:param bool blocking=False:
:param bool only=False:
:return: Pip-converted 3-tuples of [deps, requirements_deps]
"""
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
# Turn develop-vcs into ['develop', 'vcs']
section_name, suffix = section.rsplit(
'-', 1
) if '-' in section and not section == 'dev-packages' else (
section, None
)
if not file_dict[section] or section_name not in (
'dev-packages', 'packages', 'default', 'develop'
):
continue
is_dev = section_name in ('dev-packages', 'develop')
if is_dev and not dev:
continue
if ignore_hashes:
for k, v in file_dict[section]:
if 'hash' in v:
del v['hash']
# Block and ignore hashes for all suffixed sections (vcs/editable)
no_hashes = True if suffix else ignore_hashes
block = True if suffix else blocking
include_index = True if not suffix else False
converted = convert_deps_to_pip(
file_dict[section], project, r=False, include_index=include_index
)
deps.extend((d, no_hashes, block) for d in converted)
if dev and is_dev and requirements:
requirements_deps.extend((d, no_hashes, block) for d in converted)
return deps, requirements_deps
def recase_file(file_dict):
"""Recase file before writing to output."""
if 'packages' in file_dict or 'dev-packages' in file_dict:
sections = ('packages', 'dev-packages')
elif 'default' in file_dict or 'develop' in file_dict:
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
# Try to properly case each key if we can.
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
def get_windows_path(*args):
"""Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path"""
return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name):
"""Given an executable name, search the given location for an executable"""
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
# Ensure we aren't adding two layers of file extensions
exe_name = os.path.splitext(exe_name)[0]
files = [
'{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']
]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [
filename for filename in exec_paths if os.path.isfile(filename)
]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
def path_to_url(path):
return Path(normalize_drive(os.path.abspath(path))).as_uri()
def get_converted_relative_path(path, relative_to=os.curdir):
"""Given a vague relative path, return the path relative to the given location"""
return os.path.join('.', os.path.relpath(path, start=relative_to))
def walk_up(bottom):
"""Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
"""
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, '..'))
# See if we are at the top.
if new_path == bottom:
return
for x in walk_up(new_path):
yield x
def find_requirements(max_depth=3):
"""Returns the path of a Pipfile in parent directories."""
i = 0
for c, d, f in walk_up(os.getcwd()):
i += 1
if i < max_depth:
            if 'requirements.txt' in f:
r = os.path.join(c, 'requirements.txt')
if os.path.isfile(r):
return r
raise RuntimeError('No requirements.txt found!')
# Borrowed from pew to avoid importing pew which imports psutil
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
def is_valid_url(url):
"""Checks if a given string is an url"""
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
def download_file(url, filename):
"""Downloads file from url to a path with filename"""
r = requests.get(url, stream=True)
if not r.ok:
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
def need_update_check():
"""Determines whether we need to check for updates."""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if not os.path.exists(p):
return True
out_of_date_time = time() - (24 * 60 * 60)
if os.path.isfile(p) and os.path.getmtime(p) <= out_of_date_time:
return True
else:
return False
def touch_update_stamp():
"""Touches PIPENV_CACHE_DIR/.pipenv_update_check"""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
fh.write('')
def normalize_drive(path):
"""Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
"""
if os.name != 'nt' or not isinstance(path, six.string_types):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ':':
return '{}{}'.format(drive.upper(), tail)
return path
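# Example (Windows-only behaviour; a sketch): the drive letter is upper-cased and the rest
# of the path is left untouched, while UNC mounts pass through unchanged.
#
#     normalize_drive('c:\\Users\\dev\\project')  # -> 'C:\\Users\\dev\\project'
#     normalize_drive('\\\\server\\share')        # -> '\\\\server\\share'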
def is_readonly_path(fn):
"""Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
"""
if os.path.exists(fn):
return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access(
fn, os.W_OK
)
return False
def set_write_bit(fn):
if os.path.exists(fn):
os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR)
return
def rmtree(directory, ignore_errors=False):
shutil.rmtree(
directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly
)
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion."""
# Check for read-only attribute
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(
default_warning_message.format(path), ResourceWarning
)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix, prefix, dir=None):
if 'RAM_DISK' in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ['RAM_DISK'].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
self.name = tempfile.mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _cleanup(cls, name, warn_message):
rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
rmtree(self.name)
| 33.712393 | 116 | 0.586711 | [
"MIT"
] | bryant1410/pipenv | pipenv/utils.py | 43,253 | Python |
from ui import *
startUI()
# # - read the input data:
# import MnistLoader
# training_data, validation_data, test_data = MnistLoader.load_data_wrapper()
# training_data = list(training_data)
# # ---------------------
# # - network.py example:
# from Network import Network, vectorized_result
# from NetworkLoader import save, load
# # netPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\Lab\\Models\\model_5epochs.json";
# # net = load(netPath)
# # # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # # predict(imgPath, 7, net)
# # # net = Network([784, 30, 10])
# # # net.run(training_data, 5, 10, 3.0, test_data=test_data, monitor_evaluation_cost=True,
# # # monitor_evaluation_accuracy=True,
# # # monitor_training_cost=True,
# # # monitor_training_accuracy=True)
# # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # #predict(imgPath, net)
# # save(net, "E:\ITMO University\Интеллектуальные системы и технологии\Lab5\Lab\Models\model_5epochs.json")
# from ui import *
# net = ""
# startUI()
# # ----------------------
# # - network2.py example:
# # import network2
# # net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# # #net.large_weight_initializer()
# # net.SGD(training_data, 30, 10, 0.1, lmbda = 5.0,evaluation_data=validation_data,
# # monitor_evaluation_accuracy=True)
| 10.374194 | 113 | 0.631219 | [
"MIT"
] | YuriyAksenov/ImageRecognition | Test.py | 1,744 | Python |
#!/usr/bin/env python
import unittest
from math import pi # , isnan
from random import random
import gemmi
from gemmi import Position, UnitCell
class TestMath(unittest.TestCase):
def test_SMat33_transformed_by(self):
tensor = gemmi.SMat33f(random(), random(), random(),
random(), random(), random())
mat = gemmi.Mat33()
mat.fromlist([[random() for _ in range(3)] for _ in range(3)])
t1 = tensor.transformed_by(mat).as_mat33().tolist()
t2 = mat.multiply(tensor.as_mat33()).multiply(mat.transpose()).tolist()
for i in range(3):
for j in range(3):
self.assertAlmostEqual(t1[i][j], t2[i][j])
class TestUnitCell(unittest.TestCase):
def test_dummy_cell(self):
cell = UnitCell()
self.assertEqual([cell.a, cell.b, cell.c], [1, 1, 1])
self.assertEqual([cell.alpha, cell.beta, cell.gamma], [90, 90, 90])
self.assertEqual(cell.volume, 1.0)
def test_ortho_cell(self):
cell = UnitCell(25.14, 39.50, 45.07, 90, 90, 90)
pos = Position(5, -6, 7)
frac = cell.fractionalize(pos)
self.assertAlmostEqual(frac.x, 0.198886, delta=1e-6)
self.assertAlmostEqual(frac.y, -0.151899, delta=1e-6)
self.assertAlmostEqual(frac.z, 0.155314, delta=1e-6)
pos2 = cell.orthogonalize(frac)
self.assertAlmostEqual(pos.x, pos2.x, delta=1e-12)
self.assertAlmostEqual(pos.y, pos2.y, delta=1e-12)
self.assertAlmostEqual(pos.z, pos2.z, delta=1e-12)
corner = cell.orthogonalize(gemmi.Fractional(1, 1, 1))
self.assertAlmostEqual(corner.x, cell.a, delta=1e-12)
self.assertAlmostEqual(corner.y, cell.b, delta=1e-12)
self.assertAlmostEqual(corner.z, cell.c, delta=1e-12)
rec = cell.reciprocal()
self.assertEqual([rec.alpha, rec.beta, rec.gamma], [90, 90, 90])
self.assertAlmostEqual(rec.a, 1 / cell.a, delta=1e-17)
def test_triclinic_cell(self):
cell = UnitCell(35.996, 41.601, 45.756, 67.40, 66.90, 74.85)
pos = Position(-15, -17, 190)
frac = cell.fractionalize(pos)
pos2 = cell.orthogonalize(frac)
self.assertAlmostEqual(pos.x, pos2.x, delta=1e-12)
self.assertAlmostEqual(pos.y, pos2.y, delta=1e-12)
self.assertAlmostEqual(pos.z, pos2.z, delta=1e-12)
# tested against values from uctbx:
# from cctbx import uctbx
# uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85))
# uc.d_star_sq((-3, -2, 1))
# uc.d((3, 4, 5))
self.assertAlmostEqual(cell.calculate_1_d2([-3, -2, 1]),
0.0128229081865688, delta=1e-17)
self.assertAlmostEqual(cell.calculate_d([3, 4, 5]),
7.7319559244298, delta=1e-13)
# uc.metrical_matrix()
cctbx_mm = [1295.712016, 1730.643201, 2093.611536,
391.3591013825865, 646.1921687548228, 731.5043620154578]
mt = cell.metric_tensor()
for a, b in zip(mt.elements(), cctbx_mm):
self.assertAlmostEqual(a, b, delta=1e-12)
# uc.reciprocal_metrical_matrix()
cctbx_rmm = [0.00092792089082916, 0.000689632633981, 0.0006277651322979,
-0.000104162588996, -0.000250008091601, -0.000208806754807]
rmt = cell.reciprocal_metric_tensor()
for a, b in zip(rmt.elements(), cctbx_rmm):
self.assertAlmostEqual(a, b, delta=1e-15)
def test_atom_to_site(self):
cell = UnitCell(35.996, 41.601, 45.756, 67.40, 66.90, 74.85)
atom = gemmi.Atom()
atom.aniso = gemmi.SMat33f(13.1, 20.1, 11.1, -3.5, 5.5, -0.4)
site = gemmi.SmallStructure.Site(atom, cell)
# tested against values from cctbx:
# from cctbx import uctbx, adptbx
# uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85))
# aniso = (13.1, 20.1, 11.1, -3.5, 5.5, -0.4)
# ucif = adptbx.u_cart_as_u_cif(uc, aniso)
ucif = [11.537759976524049, 19.43436271641311, 11.1,
-8.078683096677723, 1.4787260755519491, -3.9018967241279157]
for a, b in zip(site.aniso.elements(), ucif):
self.assertAlmostEqual(a, b, delta=1e-6)
class TestAngles(unittest.TestCase):
def test_dihedral_special_cases(self):
a = Position(random(), random(), random())
# not sure what it should be in such undefined cases
#self.assertTrue(isnan(gemmi.calculate_dihedral(a, a, a, a)))
self.assertEqual(gemmi.calculate_dihedral(a, a, a, a), 0.0)
# Special cases from scitbx tst_math.py
# atan2 is guaranteed to give exact values (I think)
p000 = Position(0, 0, 0)
p100 = Position(1, 0, 0)
p010 = Position(0, 1, 0)
def xy_dihedral(last_point):
return gemmi.calculate_dihedral(p100, p000, p010, last_point)
self.assertEqual(xy_dihedral(Position(1, 1, 0)), 0.0)
self.assertEqual(xy_dihedral(Position(-1, 1, 0)), pi)
p01_ = Position(0, 1, -1)
self.assertEqual(xy_dihedral(p01_), pi/2)
p01_.z = 1
self.assertEqual(xy_dihedral(p01_), -pi/2)
def test_dihedral(self):
# based on from https://stackoverflow.com/questions/20305272/
p0 = Position(24.969, 13.428, 30.692) # N
p1 = Position(24.044, 12.661, 29.808) # CA
p2 = Position(22.785, 13.482, 29.543) # C
p3 = Position(21.951, 13.670, 30.431) # O
p4 = Position(23.672, 11.328, 30.466) # CB
p5 = Position(22.881, 10.326, 29.620) # CG
p6 = Position(23.691, 9.935, 28.389) # CD1
p7 = Position(22.557, 9.096, 30.459) # CD2
def check_dihedral(a, b, c, d, angle):
deg = gemmi.calculate_dihedral(a, b, c, d) * 180 / pi
self.assertAlmostEqual(deg, angle, places=4)
check_dihedral(p0, p1, p2, p3, -71.21515)
check_dihedral(p0, p1, p4, p5, -171.94319)
check_dihedral(p1, p4, p5, p6, 60.82226)
check_dihedral(p1, p4, p5, p7, -177.63641)
if __name__ == '__main__':
unittest.main()
| 46.045113 | 80 | 0.601241 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | ConorFWild/gemmi_pandda | tests/test_unitcell.py | 6,124 | Python |
# Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.2"
from typed_python.internals import Class, Member, Function, UndefinedBehaviorException, makeNamedTuple
from typed_python.type_function import TypeFunction
from typed_python.hash import sha_hash
from typed_python.SerializationContext import SerializationContext
from typed_python.type_filter import TypeFilter
from typed_python._types import (
Forward, TupleOf, ListOf, Tuple, NamedTuple, OneOf, ConstDict,
Alternative, Value, serialize, deserialize, serializeStream, deserializeStream,
PointerTo, Dict, validateSerializedObject, validateSerializedObjectStream, decodeSerializedObject,
getOrSetTypeResolver
)
import typed_python._types as _types
# In the C module, these are functions, but because they're not parametrized,
# we want them to be actual values. Otherwise, we'll have 'Float64()'
# where we would have written 'Float64' etc.
Bool = _types.Bool()
Int8 = _types.Int8()
Int16 = _types.Int16()
Int32 = _types.Int32()
Int64 = _types.Int64()
UInt8 = _types.UInt8()
UInt16 = _types.UInt16()
UInt32 = _types.UInt32()
UInt64 = _types.UInt64()
Float32 = _types.Float32()
Float64 = _types.Float64()
NoneType = _types.NoneType()
String = _types.String()
Bytes = _types.Bytes()
EmbeddedMessage = _types.EmbeddedMessage()
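# Added note (hedged): because the names above are already instances rather than callables,
# they are meant to be used directly wherever a type is expected, e.g. TupleOf(Int64) or
# OneOf(None, String), rather than being instantiated as Int64() at each use site.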
| 38.916667 | 102 | 0.774625 | [
"Apache-2.0"
] | szymonlipinski/nativepython | typed_python/__init__.py | 1,868 | Python |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reddit_disentanglement dataset."""
import collections
import csv
import itertools
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{zhu2019did,
title={Who did They Respond to? Conversation Structure Modeling using Masked Hierarchical Transformer},
author={Zhu, Henghui and Nan, Feng and Wang, Zhiguo and Nallapati, Ramesh and Xiang, Bing},
journal={arXiv preprint arXiv:1911.10666},
year={2019}
}
"""
_DESCRIPTION = """
This dataset contains ~3M messages from reddit.
Every message is labeled with metadata. The task is to predict the id of its
parent message in the corresponding thread.
Each record contains a list of messages from one thread.
Duplicated and broken records are removed from the dataset.
Features are:
- id - message id
- text - message text
- author - message author
- created_utc - message UTC timestamp
- link_id - id of the post that the comment relates to
Target:
- parent_id - id of the parent message in the current thread
"""
_THREAD_KEY = "thread"
_MESSAGE_ID = "id"
_MESSAGE_TEXT = "text"
_MESSAGE_TIMESTAMP = "created_utc"
_MESSAGE_AUTHOR = "author"
_MESSAGE_LINK_ID = "link_id"
_MESSAGE_PARENT_ID = "parent_id"
def _read_csv(path):
with tf.io.gfile.GFile(path) as f:
reader = csv.DictReader(f)
for row in reader:
if row["id"]: # Filter out broken lines in the original dataset
yield row
def _deduplicate(data):
"""Remove duplicated records."""
cnt = collections.Counter(row["id"] for row in data)
nonuniq_ids = set(id for id, count in cnt.items() if count > 1)
nonuniq_data = [row for row in data if row["id"] in nonuniq_ids]
unique_data = [row for row in data if row["id"] not in nonuniq_ids]
# Make sure same id records are next to each other for itertools.groupby
nonuniq_data = sorted(nonuniq_data, key=lambda row: row["id"])
for _, same_id_data in itertools.groupby(nonuniq_data, lambda row: row["id"]):
same_id_data = list(same_id_data)
if all(same_id_data[0] == x for x in same_id_data):
unique_data.append(same_id_data[0])
else:
non_deleted_same_id_data = [row for row in same_id_data
if row["author"] != "[deleted]"]
if len(non_deleted_same_id_data) != 1:
raise ValueError("Found several message with id {} in the original"
" data".format(non_deleted_same_id_data[0]["id"]))
unique_data.append(non_deleted_same_id_data[0])
return sorted(unique_data,
key=lambda row: (row["link_id"], row["created_utc"]))
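# Worked illustration of the rule above (hypothetical ids, not real data):
#   - id "abc" appears twice with byte-identical rows        -> keep a single copy
#   - id "xyz" appears twice, one copy authored by
#     "[deleted]"                                             -> keep the non-deleted copy
#   - id "pqr" appears twice with two distinct non-deleted
#     copies                                                  -> ValueError (broken data)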
class RedditDisentanglement(tfds.core.GeneratorBasedBuilder):
"""Reddit Disentanglement dataset."""
VERSION = tfds.core.Version("2.0.0")
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
Download https://github.com/henghuiz/MaskedHierarchicalTransformer, decompress
raw_data.zip and run generate_dataset.py with your reddit api credentials.
Then put train.csv, val.csv and test.csv from the output directory into the
manual folder.
"""
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
_THREAD_KEY: tfds.features.Sequence(
tfds.features.FeaturesDict({
_MESSAGE_ID: tfds.features.Text(),
_MESSAGE_TEXT: tfds.features.Text(),
_MESSAGE_TIMESTAMP: tfds.features.Text(),
_MESSAGE_AUTHOR: tfds.features.Text(),
_MESSAGE_LINK_ID: tfds.features.Text(),
_MESSAGE_PARENT_ID: tfds.features.Text()
}))}),
homepage="https://github.com/henghuiz/MaskedHierarchicalTransformer",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "train.csv")},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "val.csv")},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "test.csv")},
),
]
def _generate_examples(self, path):
"""Yields examples."""
data = list(_read_csv(path))
data = _deduplicate(data)
for link_id, one_topic_data in itertools.groupby(
data, lambda row: row["link_id"]):
one_topic_data = list(one_topic_data)
for row in one_topic_data:
row["text"] = row.pop("body")
yield link_id, {_THREAD_KEY: one_topic_data}
| 35.253247 | 105 | 0.675447 | [
"Apache-2.0"
] | Ak0303/datasets | tensorflow_datasets/text/reddit_disentanglement.py | 5,429 | Python |
"""Copyright (c) 2005-2017, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This file contains various classes supporting modifications to CellML models.
"""
import pycml
from pycml import *
import validator
class ModelModificationError(ValueError):
"""Error thrown if a model modification is invalid."""
pass
class ModelModifier(object):
"""Base class supporting common model modification functionality.
This class contains the logic to deal with adding/deleting variables, components, equations, etc.
and connecting things up. It also handles re-analysing the model when modifications have been
completed to ensure that PyCml's internal data structures are up-to-date.
Instances should be created with the model to modify as a single parameter. Once all
modifications have been completed, the finalize method must be called to ensure later
processing of the model (e.g. code generation) will succeed.
"""
def __init__(self, model):
"""Constructor."""
self.model = model
self._units_converter = None
self.connections_made = set()
def finalize(self, error_handler, pre_units_check_hook=None, check_units=True):
"""Re-do the model validation steps needed for further processing of the model.
Checks connections, etc. and builds up the dependency graph again, then performs
a topological sort.
If any errors are found during re-validation, the error_handler will be called with the
list. Warnings are ignored.
TODO: figure out how to determine how much of this is actually needed - InterfaceGenerator
can probably get away with less work.
"""
self._clear_model_caches()
# We want to see any errors
logging_info = validator.CellMLValidator.setup_logging(show_errors=True, show_warnings=False)
# Re-run validation & analysis
self.model._check_variable_mappings()
if not self.model._cml_validation_errors:
assignment_exprs = self.model.search_for_assignments()
self.model._check_assigned_vars(assignment_exprs)
if not self.model._cml_validation_errors:
self.model._classify_variables(assignment_exprs)
self.model._order_variables(assignment_exprs)
if not self.model._cml_validation_errors and check_units:
if callable(pre_units_check_hook):
pre_units_check_hook()
self.model._check_connection_units(check_for_units_conversions=False)
self.model._check_dimensional_consistency(assignment_exprs,
xml_context=False,
warn_on_units_errors=self.model.get_option('warn_on_units_errors'),
check_for_units_conversions=False)
if self.model._cml_validation_errors:
error_handler(self.model._cml_validation_errors)
# Clear up logging
validator.CellMLValidator.cleanup_logging(logging_info)
def _clear_model_caches(self):
"""
Clear cached links in the model, since we'll need to recompute many of them
once we've finished modifying it. Also clears dependency information.
"""
for comp in getattr(self.model, u'component', []):
for math in getattr(comp, u'math', []):
math._unset_cached_links()
for var in self.model.get_all_variables():
var.clear_dependency_info()
assignment_exprs = self.model.search_for_assignments()
for expr in assignment_exprs:
expr.clear_dependency_info()
def create_new_component(self, cname):
"""Create a new component in the model, ensuring the name is unique.
If a component with name cname already exists,
underscores will be added to the component name to make it unique.
"""
while True:
try:
self.model.get_component_by_name(cname)
cname += u'_'
except KeyError:
# Component with this name doesn't exist
break
# Create the component
comp = cellml_component.create_new(self.model, cname)
self.model._add_component(comp)
return comp
def connect_variables(self, source, target):
"""Create a connection between the given source and target variables.
The variables are both specified either by a pair (cname,vname), or as cellml_variable objects.
The source variable must exist within the model, whereas the target might not, in
which case it will be created.
Note that in the case that both source and target exist, it might NOT be the case that
target obtains its value from source. They might already be connected, and source obtains
its value from target. Or they might both obtain their value from a common source.
If the variable names are not identical, any variables created will have the same name as the
target, if possible. If there's an existing variable with that name, not connected to the
source, then underscores will be appended to the name to avoid conflicts. Note that we do
check for variables in intermediate components that have the same name as the source and are
connected to it, to avoid adding unnecessary variables.
Returns the target variable object.
"""
if isinstance(source, cellml_variable):
src_cname, src_vname = source.component.name, source.name
else:
src_cname, src_vname = source
if isinstance(target, cellml_variable):
target_cname, target_vname = target.component.name, target.name
else:
target_cname, target_vname = target
src_comp = self.model.get_component_by_name(src_cname)
target_comp = self.model.get_component_by_name(target_cname)
# print "connect_variables(", src_cname, src_vname, "to", target_cname, target_vname, ")"
if src_comp == target_comp:
return target_comp.get_variable_by_name(target_vname)
# Determine encapsulation paths from target & source to the root
src_path = self._parent_path(src_comp)
target_path = self._parent_path(target_comp)
# print "paths: src=", map(lambda c: c and c.name, src_path), map(lambda c: c and c.name, target_path)
# At some point these will share a common path, even if it's just the root itself
meeting_index = self._find_common_tail(src_path, target_path)
# Construct path from source to target, leaving out the root (None)
path = src_path[:meeting_index]
if src_path[meeting_index]:
path.append(src_path[meeting_index])
path.extend(reversed(target_path[:meeting_index]))
# Traverse this path, adding connections at each step
next_src_var = src_comp.get_variable_by_name(src_vname)
# print "conn", map(lambda c: c.name, path), next_src_var, src_vname, target_vname
for i, src_comp in enumerate(path[:-1]):
target_comp = path[i+1]
# print "step", i, "from", next_src_var, "to", target_comp.name, target_vname
next_src_var = self._make_connection(next_src_var, target_comp, target_vname)
# print "step", i, "made", next_src_var
return next_src_var
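    # Usage sketch (component and variable names are hypothetical, not from any particular
    # CellML model): ask for 'V' in component 'interface' to be driven by 'membrane','V';
    # any intermediate variables and connections through the encapsulation hierarchy are
    # created automatically.
    #
    #     modifier = ModelModifier(model)
    #     v_iface = modifier.connect_variables(('membrane', 'V'), ('interface', 'V'))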
def _make_connection(self, src_var, target_comp, target_vname):
"""Make a connection from a source variable to a given component and suggested local name.
Note that in the case that both variables already exist and are connected, the existing
connection is allowed to flow in either direction.
"""
src_comp = src_var.component
target_var = self._find_or_create_variable(target_comp.name, target_vname, src_var)
# print "_make_conn", src_var, target_var, target_comp.name, target_vname
# Sanity check the target variable
if (target_var.get_type() == VarTypes.Mapped
and target_var.get_source_variable(recurse=True) is src_var.get_source_variable(recurse=True)):
# print "Connection exists between", src_var, "and target", target_var
return target_var
elif target_var.get_type() == VarTypes.Unknown:
# We've created this variable, so should be ok, but check for gotchas
assert not(hasattr(target_var, u'initial_value'))
if src_comp is target_comp.parent():
src_if = u'private'
target_if = u'public'
elif src_comp.parent() is target_comp:
src_if = u'public'
target_if = u'private'
else:
assert src_comp.parent() is target_comp.parent()
src_if = u'public'
target_if = u'public'
# One special case: if the src_var is actually obtained from a different
# component at this level or above, in which case we should use the real
# source, not that given.
if getattr(src_var, src_if + u'_interface', u'none') == u'in':
src_var = src_var.get_source_variable()
# Check and set the interface attributes
# print "Connecting source", src_var, src_if, getattr(src_var, src_if + u'_interface', u'none'), src_var.units,
# print "to", target_var, target_if, getattr(target_var, target_if + u'_interface', u'none'), target_var.units
assert getattr(src_var, src_if + u'_interface', u'none') != u'in'
assert getattr(target_var, target_if + u'_interface', u'none') != u'out'
src_var.xml_set_attribute((src_if + u'_interface', None), u'out')
target_var.xml_set_attribute((target_if + u'_interface', None), u'in')
# Create the connection element
self._create_connection_element(src_var, target_var)
self.connections_made.add(frozenset([src_var, target_var]))
# Ensure we handle a later connection attempt between these variables correctly
target_var._set_source_variable(src_var)
else:
# Naming conflict; try again with a different target name
return self._make_connection(src_var, target_comp, target_vname + u'_')
return target_var
def _find_connection_element(self, var1, var2):
"""Find any connection element containing a connection of the given variables.
Returns a pair, the first element of which is either the element or None, and the
second of which is a boolean indicating whether the variables need to be swapped
in order to match the order of the components in the connection.
"""
cn1, cn2 = var1.component.name, var2.component.name
cnames = set([cn1, cn2])
for conn in getattr(self.model, u'connection', []):
mc = conn.map_components
if set([mc.component_1, mc.component_2]) == cnames:
break
else:
conn = None
if conn:
swap = conn.map_components.component_1 == cn2
else:
swap = False
return conn, swap
def _create_connection_element(self, var1, var2):
"""Create a connection element connecting the given variables and add to the model.
If there's already a connection element for the relevant pair of components,
we just add another map_variables element to that.
"""
conn, swap = self._find_connection_element(var1, var2)
if conn:
if swap:
var1, var2 = var2, var1
else:
conn = var1.xml_create_element(u'connection', NSS[u'cml'])
mapc = var1.xml_create_element(u'map_components', NSS[u'cml'],
attributes={u'component_1': var1.component.name,
u'component_2': var2.component.name})
conn.xml_append(mapc)
self.model.xml_append(conn)
mapv = var1.xml_create_element(u'map_variables', NSS[u'cml'],
attributes={u'variable_1': var1.name,
u'variable_2': var2.name})
conn.xml_append(mapv)
def remove_connection(self, var1, var2):
"""Remove a connection between two variables.
Removes the relevant map_variables element.
If this results in an empty connection element, removes that as well.
"""
conn, swap = self._find_connection_element(var1, var2)
if not conn:
raise ModelModificationError("Cannot remove non-existent connection.")
if swap:
var1, var2 = var2, var1
# Find the relevant map_variables element
mapv = conn.xml_xpath(u'cml:map_variables[@variable_1="%s" and @variable_2="%s"]'
% (var1.name, var2.name))
if not mapv:
raise ModelModificationError("Cannot remove non-existent connection.")
conn.xml_remove_child(mapv[0])
if not hasattr(conn, u'map_variables'):
conn.xml_parent.xml_remove_child(conn)
def remove_connections(self, var):
"""Remove all connection elements for the given variable.
Removes each relevant map_variables element.
If this results in an empty connection element, removes that as well.
"""
cname, vname = var.component.name, var.name
for conn in list(getattr(self.model, u'connection', [])):
if cname == conn.map_components.component_1:
vid = u'variable_1'
elif cname == conn.map_components.component_2:
vid = u'variable_2'
else:
continue
for mapv in conn.map_variables:
if vname == getattr(mapv, vid, ''):
# Found a connection
conn.xml_remove_child(mapv)
if not hasattr(conn, u'map_variables'):
conn.xml_parent.xml_remove_child(conn)
# There can't be any more matching map_variables in this connection
break
def _update_connections(self, oldVar, newVar):
"""Change all variables connected to oldVar to be mapped to newVar instead."""
vars = [v for v in self.model.get_all_variables() if v.get_source_variable(True) is oldVar]
# Remove old connections, including interfaces and types so creating the new connection works
for v in vars:
self.remove_connections(v)
self.del_attr(v, u'public_interface')
self.del_attr(v, u'private_interface')
v.clear_dependency_info()
# Create new connections
for v in vars:
self.connect_variables(newVar, v)
def _find_common_tail(self, l1, l2):
"""Find the first element at which both lists are identical from then on."""
i = -1
try:
while l1[i] == l2[i]:
i -= 1
except IndexError:
# One list is the tail of the other
pass
# i now gives the last differing element
assert i < -1
return i+1
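    # Worked example (illustration only): with l1 = [A, B, X, None] and l2 = [C, X, None]
    # the loop compares from the right: None == None, X == X, then B != C stops it with
    # i == -3, so the method returns -2, the index of the first element of the shared
    # tail (X). The assert holds because both parent paths end at the same root (None).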
def _parent_path(self, comp):
"""Return a path of components from that given to the encapsulation root.
The root is specified by None, since we're really dealing with a forest,
not a tree.
"""
path = [comp]
while comp:
path.append(comp.parent())
comp = comp.parent()
return path
def _process_operator(self, expr, operator, func, *args, **kwargs):
"""Apply func to any application of the given operator within the given tree."""
for elt in self.model.xml_element_children(expr):
self._process_operator(elt, operator, func, *args, **kwargs)
if isinstance(expr, mathml_apply) and expr.operator().localName == operator:
func(expr, *args, **kwargs)
def _find_or_create_variable(self, cname, vname, source):
"""Find a given variable in the model, creating it if necessary.
We look for a variable in the component named cname with the same name as the source.
If it doesn't exist, a variable named vname will be created in that component (unless
it already exists).
The variable will become a mapped variable with the given source.
Hence if it is created it will have the same units.
"""
try:
var = self.model.get_variable_by_name(cname, source.name)
except KeyError:
# Have we created it already?
try:
var = self.model.get_variable_by_name(cname, vname)
except KeyError:
# Create it and add to model
units = source.component.get_units_by_name(source.units)
var = self.add_variable(cname, vname, units)
return var
def add_variable(self, comp, vname, units, **kwargs):
"""Add a new variable to the given component.
Remaining arguments are as for cellml_variable.create_new.
Returns the new variable object.
"""
if not isinstance(comp, cellml_component):
comp = self.model.get_component_by_name(comp)
units = self.add_units(units)
var = cellml_variable.create_new(comp, vname, units.name, **kwargs)
comp._add_variable(var)
return var
def _get_units_object(self, units):
"""Helper function to convert a units specification into a cellml_units object.
The input can be a cellml_units object, in which case we just return it.
However, it can also be a serialised CellML units definition, in which case it
will be parsed to create the object.
"""
if isinstance(units, cellml_units):
# We're done
pass
else:
units = amara_parse_cellml(unicode(units))
assert isinstance(units, cellml_units)
return units
def add_units(self, units):
"""Add a units definition to the model, if it doesn't already exist.
If the definition isn't in the model, at whole-model level, it will be added. If the same
definition is already available, however, that definition should be used by preference.
Will return the actual units object to use.
"""
units = self.model._get_units_obj(units)
try:
model_units = self.model.get_units_by_name(units.name)
except KeyError:
model_units = None
if model_units:
assert units.uniquify_tuple == model_units.uniquify_tuple
units = model_units
else:
units.name = self._uniquify_name(units.name, self.model.get_units_by_name)
self.model.add_units(units.name, units)
self.model.xml_append(units)
# Ensure referenced units exist
for unit in getattr(units, u'unit', []):
unit._set_units_element(self.add_units(unit.get_units_element()), override=True)
unit.units = unit.get_units_element().name
return units
def add_expr_to_comp(self, comp, expr):
"""Add an expression to the mathematics in the given component.
comp may be a cellml_component instance or a component name.
"""
if not isinstance(comp, cellml_component):
comp = self.model.get_component_by_name(comp)
if not hasattr(comp, u'math'):
# Create the math element
math = comp.xml_create_element(u'math', NSS[u'm'])
comp.xml_append(math)
# Append this expression
comp.math.xml_append(expr)
def remove_expr(self, expr):
"""Remove an expression (ODE or assignment) from its parent."""
assert isinstance(expr, mathml_apply)
if expr.xml_parent:
expr.xml_parent.safe_remove_child(expr)
expr.xml_parent = None # Not done by Amara...
return expr
def remove_definition(self, var, keep_initial_value=False):
"""Remove any existing definition (as an equation) of the given variable.
If keep_initial_value is False, then also remove any initial_value attribute.
If the variable is Mapped, throw a ModelModificationError.
"""
if var.get_type() == VarTypes.Mapped:
raise ModelModificationError("Cannot remove the equation defining a mapped variable - remove the definition of its source instead")
if not keep_initial_value:
self.del_attr(var, u'initial_value')
# Note: if this is a variable added by a protocol, then it shouldn't have
# any dependencies set up yet, so this is a no-op.
for dep in var.get_all_expr_dependencies():
self.remove_expr(dep)
        # We no longer know how it will be defined
var.clear_dependency_info()
def del_attr(self, elt, localName, ns=None):
"""Delete an XML attribute from an element, if it exists."""
for (pyname, (qname, ns_)) in elt.xml_attributes.items():
_, name = SplitQName(qname)
if ns_ == ns and name == localName:
delattr(elt, pyname)
def _uniquify_var_name(self, varname, comp):
"""Ensure varname is unique within the given component.
Underscores will be appended to the name until it is unique. The unique name will be returned.
"""
return self._uniquify_name(varname, comp.get_variable_by_name)
def _uniquify_name(self, name, callable):
"""Ensure the given name is unique within a particular context.
The context is determined by the given function: it will be passed candidate names to test
for existence, and is expected to throw iff the name is not already used. Underscores will
be appended to the given name until callable throws, and the resulting unique name returned.
"""
while True:
try:
callable(name)
name += u'_'
except:
break
return name
def set_units_converter(self, converter):
"""Set the object used to units-convert variable initial values."""
self._units_converter = converter
def get_units_converter(self):
"""Get the units converter object, if any has been set."""
if not self._units_converter:
raise ModelModificationError("No units converter has been set.")
return self._units_converter
def _convert_initial_value(self, var, units, do_conversion=True):
"""Convert any initial value of the given variable into the given units.
If there is no initial value, returns None.
If there is no units converter, leaves the initial_value unchanged.
"""
if not hasattr(var, u'initial_value'):
return None
value = var.initial_value
if value and self._units_converter and do_conversion:
if not var.get_units().equals(units):
try:
value = self._units_converter.convert_constant(value, var.get_units(), units, var.component)
except EvaluationError, e:
raise ModelModificationError("Cannot units-convert initial value as requires run-time information:\n"
+ str(e))
return unicode(value)
class InterfaceGenerator(ModelModifier):
"""Class for generating an interface between a CellML model and external code.
This contains functionality for users to describe the interface desired by the external code, i.e.
which variables are inputs and/or outputs, and expected units. It will then create a new component
within the CellML model containing these variables, and add units conversions where required. The
external code then only needs to interact with this new component.
"""
def __init__(self, model, name='interface', units_converter=None):
super(InterfaceGenerator, self).__init__(model)
self._interface_component = None
self._interface_component_name = name
self.set_units_converter(units_converter)
def add_input(self, var, units, annotate=True, convert_initial_value=True):
"""Specify a variable as an input to the model.
var should be a cellml_variable object already existing in the model.
units should be a suitable input to self._get_units_object.
If adding both State and Free variables as inputs, make sure to add the Free variable first,
otherwise you will not be able to specify units for it.
Set annotate to False if you do not wish a Constant variable to be annotated as a modifiable
parameter.
If a units converter has been supplied, we will also try to units-convert initial values.
This may not be possible if special conversions are used, since they may involve variables
whose values are not known at this time. If this is the case, set convert_initial_value to
False to avoid applying the conversion. A proper solution requires CellML 1.1 features.
The new variable added to the interface component is returned.
"""
assert isinstance(var, cellml_variable)
units = self._get_units_object(units)
var = var.get_source_variable(recurse=True) # Ensure we work with source variables only
var_name = var.fullname(cellml=True)
# Check that the variable has a suitable type to be an input
t = var.get_type()
if t == VarTypes.Computed:
raise ModelModificationError("Cannot specify computed variable " + var.fullname() + " as an input")
elif t not in [VarTypes.Constant, VarTypes.Free, VarTypes.State]:
raise ModelModificationError("Variable " + var.fullname() + " has unexpected type " + str(t))
# Add a new variable with desired units to the interface component
comp = self.get_interface_component()
newvar = self.add_variable(comp, var_name, units, id=var.cmeta_id,
initial_value=self._convert_initial_value(var, units, convert_initial_value),
interfaces={u'public': u'out'})
newvar._set_type(t)
# Remove initial value and id from the original, if they exist
self.del_attr(var, u'initial_value')
self.del_attr(var, u'id', NSS['cmeta'])
# If the original variable was a state variable, split the defining equation
if t == VarTypes.State:
self._split_ode(newvar, var)
# Annotate the new variable as a parameter if the original was a constant
if t == VarTypes.Constant and annotate:
newvar.set_is_modifiable_parameter(True)
self._update_connections(var, newvar)
return newvar
def add_output(self, var, units, annotate=True):
"""Specify a variable as an output of the model.
var should be a cellml_variable object already existing in the model.
units should be a suitable input to self._get_units_object.
The new variable will take the cmeta:id of the original, and hence existing metadata
annotations will refer to the new variable.
If annotate is set to True, the new variable will also be annotated as a derived quantity.
The new variable added to the interface component is returned.
"""
assert isinstance(var, cellml_variable)
units = self._get_units_object(units)
var = var.get_source_variable(recurse=True)
var_name = var.fullname(cellml=True)
comp = self.get_interface_component()
newvar = self.add_variable(comp, var_name, units, id=var.cmeta_id)
self.del_attr(var, u'id', NSS['cmeta'])
self.connect_variables(var, newvar)
if annotate:
newvar.set_is_derived_quantity(True)
return newvar
def add_output_function(self, resultName, operator, argVars, units):
"""Add an output that's defined as a (MathML) function of existing model variables.
The desired units are those of the function's result. The function arguments will be
imported with their units as given by the model, and the function calculated. This result
will then be units-converted if necessary.
The new variable added to the interface component is returned.
"""
# Add the result variable
comp = self.get_interface_component()
units = self._get_units_object(units)
result_var = self.add_variable(comp, resultName, units)
result_var.set_pe_keep(True)
# Map the argument variables
operands = []
for var in argVars:
operands.append(self.add_output(var, var.get_units(), annotate=False).name)
# Create the new function and assign it to result_var
expr = mathml_apply.create_new(self.model, operator, operands)
assign = mathml_apply.create_new(self.model, u'eq', [result_var.name, expr])
self.add_expr_to_comp(comp, assign)
return result_var
def make_var_constant(self, var, value):
"""Turn a variable into a constant."""
self.remove_definition(var)
var.clear_dependency_info()
var.initial_value = unicode(str(value))
var._set_type(VarTypes.Constant)
def make_var_computed_constant(self, var, value):
"""Turn a variable into a Computed variable with constant value definition."""
self.remove_definition(var)
var.clear_dependency_info()
defn = mathml_apply.create_new(self.model, u'eq',
[var.name, (unicode(str(value)), var.get_units().name)])
self.add_expr_to_comp(var.component, defn)
var._set_type(VarTypes.Computed)
def finalize(self, *args, **kwargs):
"""Override finalize to also set up standard interface elements not defined individually."""
self._add_all_odes_to_interface()
self._transform_derivatives_on_rhs()
super(InterfaceGenerator, self).finalize(*args, **kwargs)
def _transform_derivatives_on_rhs(self):
"""Transform any equations with derivatives on the RHS to use the variable defining it instead.
self._split_ode must have been used for all derivatives before calling this method. This means
that each ODE now has a variable to which the RHS is assigned. Rather than using the derivative
directly, which could break the dependency chain if units conversions are used for time, equations
should refer to this new variable instead.
"""
for expr in self.model.search_for_assignments():
self._process_operator(list(expr.operands())[1], u'diff', self._transform_derivative_on_rhs)
def _transform_derivative_on_rhs(self, expr):
"""Transform a derivative on the RHS of an equation to refer to the defining variable.
Helper method used by self._transform_derivatives_on_rhs to do the actual transformation.
"""
# Find the variable to use
dep_var = expr.diff.dependent_variable.get_source_variable(recurse=True)
indep_var = expr.diff.independent_variable.get_source_variable(recurse=True)
ode = dep_var.get_ode_dependency(indep_var)
rhs_var = ode.eq.rhs.variable.get_source_variable(recurse=True)
# Ensure there's something mapped to it in this component
rhs_var = self.connect_variables(rhs_var, (expr.component.name, rhs_var.name))
# Update this expression
parent = expr.xml_parent
parent.xml_insert_after(expr, mathml_ci.create_new(parent, rhs_var.name))
parent.safe_remove_child(expr)
def _split_ode(self, newVar, oldVar):
"""Split an ODE definition so the derivative goes into the interface component.
The RHS stays where it is, and is assigned to a new variable, which is connected to the interface
component and assigned to the new derivative. newVar is the new state variable in the interface
component, and oldVar will soon be mapped to it by the caller.
Any other equations in the model which use the derivative are transformed to use the new variable
instead.
"""
# Get the free variable in the interface component
free_var = self.model.find_free_vars()[0]
if free_var.component is not newVar.component:
free_var = self.add_input(free_var, free_var.get_units())
# Add a new variable to assign the RHS to, with units of the original derivative
deriv_name = self._uniquify_var_name(u'd_%s_d_%s' % (oldVar.name, free_var.name), oldVar.component)
orig_ode = oldVar.get_all_expr_dependencies()[0]
orig_rhs_var = self.add_variable(oldVar.component, deriv_name, orig_ode.eq.lhs.get_units().extract())
# Add an output version of this in the interface, with desired units
desired_units = newVar.get_units().quotient(free_var.get_units())
mapped_rhs_var = self.add_output(orig_rhs_var, desired_units, annotate=False)
# Replace the original ODE with an assignment
orig_rhs = orig_ode.eq.rhs
orig_ode.safe_remove_child(orig_rhs)
self.remove_expr(orig_ode)
self.add_expr_to_comp(oldVar.component,
mathml_apply.create_new(self.model, u'eq',
[orig_rhs_var.name, orig_rhs]))
# Create a new ODE in the interface component
new_ode = mathml_diff.create_new(self.model, free_var.name, newVar.name, mapped_rhs_var.name)
self.add_expr_to_comp(newVar.component, new_ode)
new_ode.classify_variables(root=True, dependencies_only=True)
def _add_all_odes_to_interface(self):
"""All the derivatives should be considered as model outputs, and state variables as model inputs.
For any that haven't been done explicitly, this method will add the corresponding state variable
as an input, with its original units, which has the desired effect.
"""
comp = self.get_interface_component()
for var in self.model.find_state_vars():
if var.component is not comp:
self.add_input(var, var.get_units())
def get_interface_component(self):
"""Get the new component that will contain the interface.
The name will be self._interface_component_name, unless a component with that name already exists,
in which case underscores will be added to the component name to make it unique.
"""
if self._interface_component is None:
self._interface_component = self.create_new_component(unicode(self._interface_component_name))
self.model.interface_component_name = unicode(self._interface_component_name)
assert not self._interface_component.ignore_component_name
return self._interface_component
class UnitsConverter(ModelModifier):
"""Top-level interface to the units conversion code in PyCml.
"""
def __init__(self, model, warn_only=None, show_xml_context_only=False):
super(UnitsConverter, self).__init__(model)
if warn_only is None:
warn_only = model.get_option('warn_on_units_errors')
self.warn_only = warn_only
self.show_xml_context_only = show_xml_context_only
self.special_conversions = {}
self._setup_logger()
self._converted_mappings = set()
def __del__(self):
self._cleanup_logger()
def _setup_logger(self):
logger = logging.getLogger('units-converter')
logger.setLevel(logging.WARNING)
formatter = logging.Formatter(fmt="%(name)s: %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
self._log_handler = handler
def _cleanup_logger(self):
"""Flush logger & remove handler."""
logger = logging.getLogger('units-converter')
self._log_handler.flush()
logger.removeHandler(self._log_handler)
def try_convert(self, func, *args, **kwargs):
"""Call the given function, and log any units errors produced."""
try:
func(*args, **kwargs)
except UnitsError, e:
if self.show_xml_context_only:
e.show_xml_context_only()
if self.warn_only:
e.warn = True
e.level = logging.WARNING
logging.getLogger('units-converter').log(e.level, unicode(e).encode('UTF-8'))
def _apply_special_conversion_for_nested_expr(self, expr, defn_units, desired_units):
"""Apply a special conversion to the given (sub-)expression.
This will get called by mathml_units_mixin._add_units_conversion if a special conversion is required by a nested sub-expression.
"""
for from_units, to_units in self.special_conversions.iterkeys():
if (from_units.dimensionally_equivalent(defn_units)
and to_units.dimensionally_equivalent(desired_units)):
# We can apply this conversion
expr = self.special_conversions[(from_units, to_units)](expr)
DEBUG('units-converter', "Used nested special conversion from", repr(from_units), "to", repr(to_units))#, "giving", expr.xml())
break
# else:
# print "No on nested conv from", repr(from_units), "to", repr(to_units)
return expr
def _check_special_conversion(self, expr):
"""Check whether a special conversion applies to the given assignment.
Special conversions allow us to do units conversion between dimensionally non-equivalent
quantities, by utilising biological knowledge. Available special conversions are added
using the add_special_conversion method.
"""
lhs_units = expr.eq.lhs.get_units()
rhs_units = expr.eq.rhs.get_units()
if lhs_units.dimensionally_equivalent(rhs_units):
return
for from_units, to_units in self.special_conversions.iterkeys():
if (from_units.dimensionally_equivalent(rhs_units)
and to_units.dimensionally_equivalent(lhs_units)):
# We can apply this conversion
self.special_conversions[(from_units, to_units)](expr)
DEBUG('units-converter', "Used special conversion from", repr(from_units), "to", repr(to_units))#, "giving", expr.xml())
break
# else:
# print "No on conv from", repr(from_units), "to", repr(to_units)
def add_special_conversion(self, from_units, to_units, converter):
"""Add a new special conversion to the list available.
Special conversions allow us to do units conversion between dimensionally non-equivalent
quantities, by utilising biological knowledge. The function "converter" will be called with
an assignment (top-level mathml_apply instance) that has RHS units equivalent to from_units,
and LHS units equivalent to to_units. It should alter the equation in-place (i.e. the
object passed to it must contain the final equation) to do an appropriate units conversion,
at least so that LHS and RHS dimensions match.
"""
self.special_conversions[(from_units, to_units)] = converter
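    # Hedged example (the units objects and variable below are hypothetical, not taken from
    # any specific model): register a conversion between dimensionally incompatible current
    # units by multiplying the right-hand side by a model variable, reusing the
    # times_rhs_by helper defined below.
    #
    #     converter.add_special_conversion(
    #         current_per_area_units, current_units,
    #         lambda expr: converter.times_rhs_by(expr, membrane_area_var))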
def modify_rhs(self, expr, operator, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, replacing the RHS by an application of the given operator.
The operands will be the existing RHS and a ci element referencing the supplied variable object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
assert isinstance(var, cellml_variable)
# Ensure var is available in expr's component
local_var_name = var.name
source_comp = var.component
expr_comp = expr.component
if source_comp != expr_comp:
local_var = self.connect_variables(var, (expr_comp.name, var.fullname(cellml=True)))
local_var_name = local_var.name
# Change expr
rhs = expr.eq.rhs
expr.safe_remove_child(rhs)
new_rhs = mathml_apply.create_new(var.model, operator, [rhs, local_var_name])
expr.xml_append(new_rhs)
return expr
def times_rhs_by(self, expr, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, post-multiplying the RHS by a reference to the given variable object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
return self.modify_rhs(expr, u'times', var)
def divide_rhs_by(self, expr, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, post-dividing the RHS by a reference to the given variable
object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
return self.modify_rhs(expr, u'divide', var)
def convert_assignments(self, exprs):
"""Apply conversions to any assignments in the given iterable."""
boolean = self.model.get_units_by_name('cellml:boolean')
for expr in exprs:
if isinstance(expr, mathml_apply):
# print 'Converting? assignment', element_xpath(expr)
if self.special_conversions:
self.try_convert(self._check_special_conversion, expr)
self.try_convert(expr._set_in_units, boolean)
def convert_constant(self, value, from_units, to_units, comp):
"""Convert a constant value into desired units."""
from_units = self.add_units(from_units)
to_units = self.add_units(to_units)
expr = mathml_apply.create_new(self.model, u'eq', [(u'0', to_units.name),
(unicode(value), from_units.name)])
self.add_expr_to_comp(comp, expr)
# Nasty hack to make expr.is_top_level return True
expr._cml_assigns_to = expr.operands().next()
if self.special_conversions:
self.try_convert(self._check_special_conversion, expr)
self.try_convert(expr.eq.rhs._set_in_units, to_units)
self.remove_expr(expr)
return expr.eq.rhs.evaluate()
def convert_mapping(self, mapping, comp1, comp2, var1, var2):
"""Apply conversions to a mapping between two variables."""
model = self.model
# Check for being already converted
var_pair = frozenset([var1, var2])
if var_pair in self._converted_mappings:
DEBUG('units-converter', 'Skipping already converted mapping', var1, '<->', var2)
return
else:
self._converted_mappings.add(var_pair)
# Ensure mapping is var1 := var2; swap vars if needed
swapped = False
try:
if var2.get_source_variable() is var1:
swapped = True
var1, var2 = var2, var1
comp1, comp2 = comp2, comp1
except TypeError:
pass
# Get units
u1 = var1.get_units()
u2 = var2.get_units()
DEBUG('units-converter', "Converting mapping of", var1, ":=", var2,
"(units:", repr(u1), repr(u2), ")")
if not u1.equals(u2):
# We need a conversion
# Add a copy of var1 to comp1, with units as var2
if getattr(var1, u'public_interface', '') == u'in':
in_interface = u'public'
else:
in_interface = u'private'
var1_converter = self.add_variable(comp1, var1.name + u'_converter', u2, interfaces={in_interface: u'in'})
var1._cml_var_type = VarTypes.Computed
var1._cml_source_var = None
delattr(var1, in_interface + u'_interface')
var1_converter._set_source_variable(var2)
# Add assignment maths for var1 := var1_converter
app = mathml_apply.create_new(model, u'eq', [var1.name, var1_converter.name])
self.add_expr_to_comp(comp1, app)
var1._cml_depends_on = [app]
app._cml_assigns_to = var1
# Update mapping to var1_converter := var2
if swapped:
mapping.variable_2 = var1_converter.name
else:
mapping.variable_1 = var1_converter.name
# Fix usage counts - var1_converter is only used by app, and so var2 usage decreases
var1_converter._used()
for _ in xrange(var1.get_usage_count()):
var2._decrement_usage_count()
# Apply units conversion to the assignment
self.convert_assignments([app])
# Add the assignment into the sorted list
assignments = model.get_assignments()
idx = assignments.index(var1)
assignments[idx:idx+1] = [var1_converter, app]
def convert_connections(self, connections):
"""Add units conversions for all connections in the given set.
:param connections: a set of variable pairs representing connections. For each pair of variables a units conversion
will be added if needed and not already performed.
"""
model = self.model
for conn in getattr(model, u'connection', []):
comp1 = model.get_component_by_name(conn.map_components.component_1)
comp2 = model.get_component_by_name(conn.map_components.component_2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)
var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)
if frozenset([var1, var2]) in connections:
self.convert_mapping(mapping, comp1, comp2, var1, var2)
def add_conversions_for_component(self, comp):
"""Add all units conversions required by the given component.
This allows us to only apply the conversions required by an interface component created
by an InterfaceGenerator.
"""
model = self.model
if self.special_conversions:
self.model._cml_special_units_converter = self._apply_special_conversion_for_nested_expr
assignments = model.search_for_assignments(comp)
self.convert_assignments(assignments)
if self.special_conversions:
del self.model._cml_special_units_converter
for conn in getattr(model, u'connection', []):
cname1 = conn.map_components.component_1
cname2 = conn.map_components.component_2
if comp.name in [cname1, cname2]:
comp1 = model.get_component_by_name(cname1)
comp2 = model.get_component_by_name(cname2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(cname1, mapping.variable_1)
var2 = model.get_variable_by_name(cname2, mapping.variable_2)
self.convert_mapping(mapping, comp1, comp2, var1, var2)
def add_all_conversions(self):
"""Add all units conversions required in the given model."""
model = self.model
# Mathematical expressions
self.convert_assignments(model.get_assignments())
# Connections
for conn in getattr(model, u'connection', []):
comp1 = model.get_component_by_name(conn.map_components.component_1)
comp2 = model.get_component_by_name(conn.map_components.component_2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)
var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)
self.convert_mapping(mapping, comp1, comp2, var1, var2)
return
| 49.387003 | 143 | 0.647885 | [
"Apache-2.0",
"BSD-3-Clause"
] | gonayl/Chaste | python/pycml/processors.py | 50,918 | Python |
#!/usr/bin/env python
#
# Run wasm benchmarks in various configurations and report the times.
# Run with -h for help.
#
# Note: this is a copy of wasm-bench.py adapted for d8.
#
# In the default mode which is "turbofan+liftoff", runs a single shell with
# `--no-wasm-tier-up --liftoff` and `--no-wasm-tier-up --no-liftoff`
# and prints three tab-separated columns:
#
#    TurboFan-result   Liftoff-result   TurboFan/Liftoff
#
# In other benchmarking modes, runs one or two shells with the same argument
# (depending on the mode) and prints three tab-separated columns:
#
# shell1-result shell2-result shell1-result/shell2-result
#
# When measuring compile times (argument = 0) results are compile
# times in ms.
#
# When measuring run times (argument > 0) results are mostly running
# times in ms, except that linpack is reported as 10000000/mflops and scimark
# as 100000/score, always as integer values.
#
# A lower result is always better. Linpack and SciMark outputs are
# inverted to make this consistent.
#
# We measure the running time only for the already-compiled wasm code,
# not the end-to-end time including startup and compilation. The
# difference in ratios is actually not large, but running time is the
# best measure.
#
# TODO: Annotate results with - and +, derived from
# the variance maybe. Switch -s / --significance.
#
# TODO: catch any exception from the subprocess and print the log if
# there was one.
#
# TODO: Also check the output for other arguments than the default.
# Easy: use a list of results for the problems, indexed by problem size
#
# TODO: In several cases below we'd like to check the entire output,
# not just one line of it. Easy: lists of lines, match in order.
#
# TODO: We might like for the output not to contain any other lines than
# the ones we are grepping for. Not very hard - just a flag.
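#
# Example invocations (illustrative only; the d8 paths are assumptions, not
# something this script ships with):
#
#   JS_SHELL=/path/to/d8 ./wasm_bench-d8.py -n 5 fib zlib
#       compare TurboFan vs Liftoff on the tests matching "fib" and "zlib",
#       reporting the median of 5 runs
#   JS_SHELL1=/path/to/d8-old JS_SHELL2=/path/to/d8-new ./wasm_bench-d8.py -m turbofan
#       compare two different d8 builds, both restricted to TurboFan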
import argparse, os, re, subprocess, sys
def main():
(mode, numruns, argument, isVerbose, noThreads, dumpData, dumpVariance, dumpRange, patterns) = parse_args()
(shell1, shell2) = get_shells(mode)
print "# mode=%s, runs=%d, problem size=%s" % (mode, numruns, (str(argument) if argument != None else "default"))
if not is_check(mode):
print "# Lower score is better"
for test in tests:
(name, _, fn, _) = test
found = len(patterns) == 0
for p in patterns:
found = found or re.search(p, name)
if not found:
continue
msg = name + "\t" + ("\t" if len(name) < 8 else "")
if is_check(mode):
fn(test, isVerbose, noThreads, shell1, get_system1(mode), argument)
msg += "did not crash today"
else:
# Run back-to-back for each shell to reduce caching noise
t1 = []
for i in range(numruns):
(c, r) = fn(test, isVerbose, noThreads, shell1, get_system1(mode), argument)
t1.append(c if argument == 0 else r)
t1.sort()
t2 = []
if not is_only(mode):
for i in range(numruns):
(c, r) = fn(test, isVerbose, noThreads, shell2, get_system2(mode), argument)
t2.append(c if argument == 0 else r)
t2.sort()
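            # Report the median of the sorted measurements (numruns is forced to be odd)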
n1 = t1[len(t1)/2]
n2 = 1
if not is_only(mode):
n2 = t2[len(t2)/2]
score = three_places(n1, n2)
msg += str(n1) + "\t"
if not is_only(mode):
msg += str(n2) + "\t"
msg += score
if dumpVariance:
lo1 = t1[1]
hi1 = t1[len(t1)-2]
msg += "\t[" + three_places(lo1, n1) + ", " + three_places(hi1, n1) + "]"
if not is_only(mode):
lo2 = t2[1]
hi2 = t2[len(t2)-2]
msg += "\t[" + three_places(lo2, n2) + ", " + three_places(hi2, n2) + "]"
if dumpRange:
lo1 = t1[1]
hi1 = t1[len(t1)-2]
msg += "\t[" + str(lo1) + ", " + str(hi1) + "]"
if not is_only(mode):
lo2 = t2[1]
hi2 = t2[len(t2)-2]
msg += "\t[" + str(lo2) + ", " + str(hi2) + "]"
if dumpData:
msg += "\t" + str(t1)
if not is_only(mode):
msg += "\t" + str(t2)
print msg
def three_places(a, b):
if b == 0:
return "-----"
return str(round(float(a)/float(b)*1000)/1000)
def run_std(test, isVerbose, noThreads, shell, mode, argument):
(name, program, _, correct) = test
if program == None:
program = "wasm_" + name + ".js"
text = run_test(isVerbose, noThreads, shell, program, mode, argument)
return parse_output(text, argument, correct)
def run_linpack(test, isVerbose, noThreads, shell, mode, argument):
text = run_test(isVerbose, noThreads, shell, "wasm_linpack_float.c.js", mode, argument)
if argument == 0:
return parse_output(text, 0, None)
mflops = float(parse_line(text, r"Unrolled +Single +Precision.*Mflops", 4))
score = int(10000000.0/mflops)
return (0,score)
def run_scimark(test, isVerbose, noThreads, shell, mode, argument):
text = run_test(isVerbose, noThreads, shell, "wasm_lua_scimark.c.js", mode, argument)
if argument == 0:
return parse_output(text, 0, None)
mark = float(parse_line(text, r"SciMark.*small", 2))
score = int(100000.0/mark)
return (0,score)
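# Each test entry is (name, program, runner, correct): program may be None to use
# the default "wasm_<name>.js", and correct is a regex the output must match (or None).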
tests = [ ("box2d", None, run_std, r"frame averages:.*, range:.* to "),
("bullet", None, run_std, r"ok.*"),
("conditionals", None, run_std, r"ok 144690090"),
("copy", None, run_std, r"sum:2836"),
("corrections", None, run_std, r"final: 40006013:10225."),
("fannkuch", None, run_std, r"4312567891011"),
("fasta", None, run_std, r"CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT"),
("fib", "fib.js", run_std, r"fib.40. = 102334155"),
("ifs", None, run_std, r"ok"),
#("linpack", None, run_linpack, None),
("binarytrees", "wasm_lua_binarytrees.c.js", run_std, "843\t trees of depth 10\t check: -842"),
#("scimark", None, run_scimark, None),
("memops", None, run_std, r"final: 400."),
("primes", None, run_std, r"lastprime: 3043739."),
("raybench", "raybench.js", run_std, r"Render time: .*"),
("rust-fannkuch", "rust-fannkuch.js", run_std, r"fannkuch\(11\) = 556355"),
("skinning", None, run_std, r"blah=0.000000"),
("zlib", "wasm_zlib.c.js", run_std, r"sizes: 100000,25906") ]
def run_test(isVerbose, noThreads, shell, program, mode, argument):
cmd = [shell]
if mode == "liftoff":
cmd.append("--no-wasm-tier-up")
# Flag --liftoff is implied by --single_threaded
cmd.append("--liftoff")
if mode == "turbofan":
cmd.append("--no-wasm-tier-up")
cmd.append("--no-liftoff")
if noThreads:
cmd.append("----wasm_num_compilation_tasks=1")
cmd.append(program)
if argument != None:
cmd.append("--")
cmd.append(str(argument))
if isVerbose:
print "# %s" % str(cmd)
log = open('output.tmp', 'w')
text = subprocess.check_output(cmd, stderr=log, universal_newlines=True).split("\n")
log.close()
return text
def parse_output(text, argument, correct):
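    # Scan the shell output for the "WASM COMPILE TIME:" / "WASM RUN TIME:" lines
    # emitted by the benchmark harness and, for a default-size run with a known
    # correct answer, check that the expected output line is present.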
compileTime = 0
runTime = 0
found = False
do_check = argument == None and correct
for t in text:
if do_check and not found:
found = re.match(correct, t)
if re.match("WASM COMPILE TIME: ", t):
compileTime = int(t[19:])
elif re.match("WASM RUN TIME: ", t):
runTime = int(t[15:])
if do_check and not found:
print text
panic("Did not match expected output " + correct)
return (compileTime, runTime)
def parse_line(text, correct, fieldno):
for t in text:
if re.match(correct, t):
return re.split(r" +", t)[fieldno-1]
panic("Did not match expected output " + correct)
def get_shells(mode):
shell1 = None
shell2 = None
if uses_one_shell(mode):
shell1 = get_shell("JS_SHELL")
shell2 = shell1
else:
shell1 = get_shell("JS_SHELL1")
shell2 = get_shell("JS_SHELL2")
return (shell1, shell2)
def get_shell(name):
probe = os.getenv(name)
if not (probe and os.path.isfile(probe) and os.access(probe, os.X_OK)):
panic(name + " does not name an executable shell")
return probe
def is_check(mode):
return mode == "ion_check" or mode == "baseline_check" or mode == "cranelift_check"
def uses_one_shell(mode):
if is_check(mode) or is_only(mode):
return True
if get_system1(mode) != get_system2(mode):
return True
return False
def get_system1(mode):
if re.search(r"_|\+", mode):
return re.split(r"_|\+", mode)[0]
return mode
def get_system2(mode):
if re.search(r"\+", mode):
return re.split(r"\+", mode)[1]
panic("Mode does not have a second system: " + mode)
def is_only(mode):
return mode == "liftoff_only" or mode == "turbofan_only"
def panic(msg):
sys.exit("Error: " + msg)
def parse_args():
parser = argparse.ArgumentParser(description=
"""Run wasm benchmarks in various configurations.
        When a single JS shell is needed it must be named by the environment
        variable JS_SHELL.
When two shells are needed they must be named by the environment
variables JS_SHELL1 and JS_SHELL2.""")
parser.add_argument("-a", "--problem", metavar="argument", type=int, help=
"""The problem size argument. The default is 3. With argument=0 we
effectively only compile the code and compilation time is reported
instead. The max is 5.""")
parser.add_argument("-c", "--check", metavar="mode", choices=["liftoff", "turbofan", "turbofan+liftoff"], help=
"""Run only one shell a single run, to see if it works. `mode` must
be "ion" or "baseline" or "cranelift".""")
parser.add_argument("-d", "--data", action="store_true", help=
"""Print the measurement data as two comma-separated lists following
the normal results.""")
parser.add_argument("-i", "--variance", action="store_true", help=
"""For five or more runs, discard the high and low measurements and
print low/median and high/median following the standard columns.""")
parser.add_argument("-j", "--range", action="store_true", help=
"""For five or more runs, discard the high and low measurements and
print low and high following the standard columns.""")
parser.add_argument("-m", "--mode", metavar="mode",
choices=["liftoff", "turbofan", "turbofan+liftoff"],
help=
"""Compare the output of two different shells.
`mode` must be "liftoff", "turbofan", or "turbofan+liftoff"
where a and b are one of those systems. A single system a means a+a.""")
parser.add_argument("-n", "--numruns", metavar="numruns", type=int, help=
"""The number of iterations to run. The default is 1. The value
should be odd. We report the median time.""")
parser.add_argument("-o", "--only", metavar="mode", choices=["liftoff", "turbofan", "turbofan+liftoff"], help=
"""Run only the one shell in the normal manner, and report results
according to any other switches""")
parser.add_argument("-v", "--verbose", action="store_true", help=
"""Verbose. Echo commands and other information on stderr.""")
parser.add_argument("-t", "--no-threads", action="store_true", help=
"""Disable threads in the shell, for added timing stability.
This will significantly impact compile times, and may impact running
time since eg GC runs on the remaining thread with everything else.""")
parser.add_argument("pattern", nargs="*", help=
"""Regular expressions to match against test names""")
    args = parser.parse_args()
if args.check and args.mode:
panic("--check and --mode are incompatible")
if args.check and args.only:
panic("--check and --only are incompatible")
if args.mode and args.only:
panic("--mode and --only are incompatible")
mode = "turbofan+liftoff"
if args.mode:
if re.search(r"\+", args.mode):
mode = args.mode
else:
mode = args.mode + "+" + args.mode
if args.check:
mode = args.check + "_check"
if args.only:
mode = args.only + "_only"
if args.check and args.variance:
panic("--check and --variance are incompatible")
if args.check and args.range:
panic("--check and --range are incompatible")
numruns = 1
if args.numruns != None:
if args.numruns <= 0:
panic("--numruns requires a nonnegative integer")
numruns = args.numruns
if is_check(mode):
numruns = 1
if not (numruns % 2):
panic("The number of runs must be odd")
if args.variance and numruns < 5:
panic("At least five runs required for --variance")
if args.range and numruns < 5:
panic("At least five runs required for --range")
argument = None
if args.problem != None:
if args.problem < 0 or args.problem > 5:
panic("--problem requires an integer between 0 and 5")
argument = args.problem
if args.verbose:
args.data = True
return (mode, numruns, argument, args.verbose, args.no_threads, args.data, args.variance, args.range, args.pattern)
if __name__ == '__main__':
main()
| 40.00831 | 119 | 0.574742 | [
"MIT"
] | julian-seward1/embenchen | asm_v_wasm/wasm_bench-d8.py | 14,443 | Python |
from django.db.backends.postgresql.base import DatabaseWrapper as PostgresqlDatabaseWrapper
from db.backends.postgresql.creation import DatabaseCreation
from db.backends.postgresql.schema import DatabaseSchemaEditor
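# A thin wrapper around Django's stock PostgreSQL backend that swaps in this
# project's custom creation and schema-editor classes.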
class DatabaseWrapper(PostgresqlDatabaseWrapper):
creation_class = DatabaseCreation
SchemaEditorClass = DatabaseSchemaEditor
| 35.2 | 91 | 0.863636 | [
"Apache-2.0"
] | aaxelb/SHARE | db/backends/postgresql/base.py | 352 | Python |
from typing import Dict, List, Any
import numpy as np
import cv2
from vcap import (
DetectionNode,
DETECTION_NODE_TYPE,
OPTION_TYPE,
BaseStreamState,
BaseBackend,
rect_to_coords)
from vcap_utils import (
BaseOpenVINOBackend,
)
SOS_INDEX = 0
EOS_INDEX = 1
MAX_SEQ_LEN = 28
ALPHABET = ' 0123456789abcdefghijklmnopqrstuvwxyz'
# We have to do this because we need there to be a process_frame to use it
class OpenVINOModel(BaseOpenVINOBackend):
def process_frame(self,
frame: np.ndarray,
detection_node: DETECTION_NODE_TYPE,
options: Dict[str, OPTION_TYPE],
state: BaseStreamState) -> DETECTION_NODE_TYPE:
        raise NotImplementedError('This backend is not for processing frames. '
                                  'It is only used for storing a model.')
class Backend(BaseBackend):
label_map: Dict[int, str] = {1: "text"}
def __init__(self, detector: OpenVINOModel,
recognizer_encoder: OpenVINOModel,
recognizer_decoder: OpenVINOModel):
super().__init__()
self.detector = detector
self.recognizer_encoder = recognizer_encoder
self.recognizer_decoder = recognizer_decoder
@property
def workload(self) -> float:
return (self.detector.workload +
self.recognizer_encoder.workload +
self.recognizer_decoder.workload)
def process_frame(self, frame: np.ndarray,
detection_node: DETECTION_NODE_TYPE,
options: Dict[str, OPTION_TYPE],
state: BaseStreamState) -> DETECTION_NODE_TYPE:
n, c, h, w = self.detector.net.inputs['im_data'].shape
hidden_shape = self.recognizer_decoder.net.inputs['prev_hidden'].shape
input_dict, resize = self.detector.prepare_inputs(
frame,
frame_input_name="im_data"
)
input_dict["im_data"] = (input_dict["im_data"]
.reshape((n, c, h, w)).astype(np.float32))
input_image_size = self.detector.net.inputs['im_data'].shape[-2:]
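        # The detector also takes an 'im_info' input; as built here it holds
        # [input height, input width, 1], the trailing 1 presumably a scale factor.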
input_image_info = np.asarray(
[[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
input_dict["im_info"] = input_image_info
prediction = self.detector.send_to_batch(input_dict).result()
scores = prediction["scores"]
detections_filter = scores > options["threshold"]
scores = scores[detections_filter]
rects = prediction["boxes"][detections_filter]
text_features = prediction["text_features"][detections_filter]
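        # Queue an encoder request for every surviving detection up front so they
        # can be batched; results are awaited one by one in the decoding loop below.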
feature_queues = []
for text_feature in text_features:
feature_queues.append(
self.recognizer_encoder.send_to_batch({'input': text_feature}))
detections = []
for score, rect, feature_queue in zip(scores, rects, feature_queues):
feature = feature_queue.result()['output']
feature = np.reshape(feature,
(feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
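            # Greedy decoding: feed the previously emitted symbol and hidden state
            # back into the decoder until it emits EOS or MAX_SEQ_LEN symbols.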
for _ in range(MAX_SEQ_LEN):
decoder_output = self.recognizer_decoder.send_to_batch({
'prev_symbol': prev_symbol_index,
'prev_hidden': hidden,
'encoder_outputs': feature
}).result()
symbols_distr = decoder_output['output']
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += ALPHABET[prev_symbol_index]
hidden = decoder_output['hidden']
detections.append(DetectionNode(
name="text",
coords=rect_to_coords(rect.tolist()),
extra_data={
"detection_confidence": float(score),
"text": text
},
))
return resize.scale_and_offset_detection_nodes(detections)
| 37 | 79 | 0.592479 | [
"BSD-3-Clause"
] | aotuai/capsule-zoo | capsules/detector_text_openvino/backend.py | 4,255 | Python |
import dlib
from termcolor import colored
from face_cropper.core import DLIB_FACE_DETECTING_MIN_SCORE
def detect(image: str, verbose: bool = False):
"""Detects faces on a given image using dlib and returns matches.
:param image: Path to access the image to be searched
:type image: [string]
:param verbose: Wether or not command should output informations
:type image: [bool], default to False
:raises RuntimeError: When the provided image_path is invalid
:return: The detected faces
:rtype: [list of dlib.rectangle]
"""
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(image)
dets = detector.run(img, 1, DLIB_FACE_DETECTING_MIN_SCORE)[0]
verbose and print(
colored(
f"Number of faces detected: {len(dets)}\n",
"yellow"
)
)
detections = []
# Avoiding circular imports
from face_cropper.cli.output import colored_detection_output
for index, detection in enumerate(dets):
detections.append(detection)
verbose and print(colored(f"Detection {index + 1}:", "green"))
verbose and colored_detection_output(detection)
return detections
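
# Example usage (illustrative; the import path and image path are assumptions):
#
#   from face_cropper.core.detector import detect
#   faces = detect("path/to/portrait.jpg", verbose=True)
#   print(f"Found {len(faces)} face(s)")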
| 30.692308 | 70 | 0.691729 | [
"MIT"
] | Dave-Lopper/face_cropper | face_cropper/core/detector.py | 1,197 | Python |