# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from deepracer_msgs/SetVisualColorRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class SetVisualColorRequest(genpy.Message):
_md5sum = "c993776acc4e7a226360c9194290bf99"
_type = "deepracer_msgs/SetVisualColorRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string link_name
string visual_name
std_msgs/ColorRGBA ambient
std_msgs/ColorRGBA diffuse
std_msgs/ColorRGBA specular
std_msgs/ColorRGBA emissive
bool block
================================================================================
MSG: std_msgs/ColorRGBA
float32 r
float32 g
float32 b
float32 a
"""
__slots__ = ['link_name','visual_name','ambient','diffuse','specular','emissive','block']
_slot_types = ['string','string','std_msgs/ColorRGBA','std_msgs/ColorRGBA','std_msgs/ColorRGBA','std_msgs/ColorRGBA','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
link_name,visual_name,ambient,diffuse,specular,emissive,block
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetVisualColorRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.link_name is None:
self.link_name = ''
if self.visual_name is None:
self.visual_name = ''
if self.ambient is None:
self.ambient = std_msgs.msg.ColorRGBA()
if self.diffuse is None:
self.diffuse = std_msgs.msg.ColorRGBA()
if self.specular is None:
self.specular = std_msgs.msg.ColorRGBA()
if self.emissive is None:
self.emissive = std_msgs.msg.ColorRGBA()
if self.block is None:
self.block = False
else:
self.link_name = ''
self.visual_name = ''
self.ambient = std_msgs.msg.ColorRGBA()
self.diffuse = std_msgs.msg.ColorRGBA()
self.specular = std_msgs.msg.ColorRGBA()
self.emissive = std_msgs.msg.ColorRGBA()
self.block = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.visual_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_16fB().pack(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.ambient is None:
self.ambient = std_msgs.msg.ColorRGBA()
if self.diffuse is None:
self.diffuse = std_msgs.msg.ColorRGBA()
if self.specular is None:
self.specular = std_msgs.msg.ColorRGBA()
if self.emissive is None:
self.emissive = std_msgs.msg.ColorRGBA()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.link_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.link_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.visual_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.visual_name = str[start:end]
_x = self
start = end
end += 65
(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block,) = _get_struct_16fB().unpack(str[start:end])
self.block = bool(self.block)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.visual_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_16fB().pack(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.ambient is None:
self.ambient = std_msgs.msg.ColorRGBA()
if self.diffuse is None:
self.diffuse = std_msgs.msg.ColorRGBA()
if self.specular is None:
self.specular = std_msgs.msg.ColorRGBA()
if self.emissive is None:
self.emissive = std_msgs.msg.ColorRGBA()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.link_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.link_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.visual_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.visual_name = str[start:end]
_x = self
start = end
end += 65
(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block,) = _get_struct_16fB().unpack(str[start:end])
self.block = bool(self.block)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_16fB = None
def _get_struct_16fB():
global _struct_16fB
if _struct_16fB is None:
_struct_16fB = struct.Struct("<16fB")
return _struct_16fB
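# ----------------------------------------------------------------------------
# Usage sketch (not part of the genpy-generated code): how the request message
# above can be built with keyword arguments and round-tripped through its own
# serialize/deserialize methods. The link and visual names are hypothetical.
if __name__ == '__main__':
    import io
    _req = SetVisualColorRequest(
        link_name='chassis',            # hypothetical link name
        visual_name='chassis_visual',   # hypothetical visual name
        ambient=std_msgs.msg.ColorRGBA(r=0.1, g=0.1, b=0.1, a=1.0),
        diffuse=std_msgs.msg.ColorRGBA(r=1.0, g=0.0, b=0.0, a=1.0),
        specular=std_msgs.msg.ColorRGBA(r=0.5, g=0.5, b=0.5, a=1.0),
        emissive=std_msgs.msg.ColorRGBA(r=0.0, g=0.0, b=0.0, a=1.0),
        block=True,
    )
    _buff = io.BytesIO()
    _req.serialize(_buff)  # two length-prefixed strings + 16 floats + 1 byte
    _decoded = SetVisualColorRequest().deserialize(_buff.getvalue())
    assert _decoded.diffuse.r == 1.0 and _decoded.block is True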
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from deepracer_msgs/SetVisualColorResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetVisualColorResponse(genpy.Message):
_md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
_type = "deepracer_msgs/SetVisualColorResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool success
string status_message
"""
__slots__ = ['success','status_message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetVisualColorResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
"""Class Client and method multithread_upload.
:class:`Client` can send POST, PUT, and GET requests to the TensorBay Dataset Open API.
:meth:`multithread_upload` creates a multi-thread framework for uploading.
"""
import logging
import os
from collections import defaultdict
from concurrent.futures import FIRST_EXCEPTION, ThreadPoolExecutor, wait
from itertools import repeat, zip_longest
from typing import (
Any,
Callable,
DefaultDict,
Generator,
Generic,
Iterable,
Iterator,
List,
MutableSequence,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
from urllib.parse import urljoin
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
from requests.models import PreparedRequest, Response
from tqdm import tqdm
from urllib3.util.retry import Retry
from ..__verison__ import __version__
from ..exception import ResponseError, ResponseErrorDistributor
from ..utility import ReprMixin, ReprType, locked
from .log import RequestLogging, ResponseLogging
logger = logging.getLogger(__name__)
class Config: # pylint: disable=too-few-public-methods
"""This is a base class defining the concept of Request Config.
Attributes:
max_retries: Maximum retry times of the request.
allowed_retry_methods: The allowed methods for retrying the request.
allowed_retry_status: The allowed status codes for retrying the request.
Retrying is applied only when both the method and the status code match.
timeout: Timeout value of the request in seconds.
is_internal: Whether the request is from internal.
"""
def __init__(self) -> None:
self.max_retries = 3
self.allowed_retry_methods = ["HEAD", "OPTIONS", "POST", "PUT"]
self.allowed_retry_status = [429, 500, 502, 503, 504]
self.timeout = 30
self.is_internal = False
self._x_source = "PYTHON-SDK"
config = Config()
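# A minimal sketch (values are illustrative): the module-level ``config`` can be
# adjusted before any ``UserSession`` is created, since the retry strategy and
# the default timeout are read from it at session construction time, e.g.:
#
#   config.max_retries = 5
#   config.timeout = 60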
class TimeoutHTTPAdapter(HTTPAdapter):
"""This class defines the http adapter for setting the timeout value.
Arguments:
*args: Extra arguments to initialize TimeoutHTTPAdapter.
timeout: Timeout value of the post request in seconds.
**kwargs: Extra keyword arguments to initialize TimeoutHTTPAdapter.
"""
def __init__(self, *args: Any, timeout: Optional[int] = None, **kwargs: Any) -> None:
self.timeout = timeout if timeout is not None else config.timeout
super().__init__(*args, **kwargs)
def send( # pylint: disable=too-many-arguments
self,
request: PreparedRequest,
stream: Any = False,
timeout: Any = None,
verify: Any = True,
cert: Any = None,
proxies: Any = None,
) -> Any:
"""Send the request.
Arguments:
request: The PreparedRequest being sent.
stream: Whether to stream the request content.
timeout: Timeout value of the post request in seconds.
verify: A path string to a CA bundle to use or
a boolean which controls whether to verify the server's TLS certificate.
cert: User-provided SSL certificate.
proxies: Proxies dict applying to the request.
Returns:
Response object.
"""
if timeout is None:
timeout = self.timeout
return super().send(request, stream, timeout, verify, cert, proxies)
class UserSession(Session): # pylint: disable=too-few-public-methods
"""This class defines UserSession."""
def __init__(self) -> None:
super().__init__()
# self.session.hooks["response"] = [logging_hook]
retry_strategy = Retry(
total=config.max_retries,
status_forcelist=config.allowed_retry_status,
method_whitelist=config.allowed_retry_methods,
raise_on_status=False,
)
self.mount("http://", TimeoutHTTPAdapter(20, 20, retry_strategy))
self.mount("https://", TimeoutHTTPAdapter(20, 20, retry_strategy))
def request( # type: ignore[override] # pylint: disable=signature-differs
self, method: str, url: str, *args: Any, **kwargs: Any
) -> Response: # noqa: DAR401
"""Make the request.
Arguments:
method: Method for the request.
url: URL for the request.
*args: Extra arguments to make the request.
**kwargs: Extra keyword arguments to make the request.
Returns:
Response of the request.
Raises:
ResponseError: If post response error.
"""
try:
response = super().request(method, url, *args, **kwargs)
if response.status_code not in (200, 201):
logger.error(
"Unexpected status code(%d)!%s", response.status_code, ResponseLogging(response)
)
raise ResponseError(response)
logger.debug(ResponseLogging(response))
return response
except RequestException as error:
logger.error(
"%s.%s: %s%s",
error.__class__.__module__,
error.__class__.__name__,
error,
RequestLogging(error.request),
)
raise
class Client:
"""This class defines :class:`Client`.
:class:`Client` defines the client that saves the user and URL information
and supplies basic call methods that will be used by derived clients,
such as sending GET, PUT and POST requests to TensorBay Open API.
Arguments:
access_key: User's access key.
url: The URL of the graviti gas website.
"""
_DEFAULT_URL_CN = "https://gas.graviti.cn/"
_DEFAULT_URL_COM = "https://gas.graviti.com/"
def __init__(self, access_key: str, url: str = "") -> None:
if access_key.startswith("Accesskey-"):
url = url if url else Client._DEFAULT_URL_CN
elif access_key.startswith("ACCESSKEY-"):
url = url if url else Client._DEFAULT_URL_COM
else:
raise TypeError("Wrong accesskey format!")
if not url.startswith("https://"):
raise TypeError("Invalid url, only support url starts with 'https://'")
self.gateway_url = urljoin(url, "gatewayv2/")
self.access_key = access_key
self._sessions: DefaultDict[int, UserSession] = defaultdict(UserSession)
self._open_api = urljoin(self.gateway_url, "tensorbay-open-api/v1/")
def _url_make(self, section: str, dataset_id: str = "") -> str:
"""Generate Open API URL.
Arguments:
section: The section of the request.
dataset_id: Dataset ID.
Returns:
Open API URL.
"""
if dataset_id:
dataset_url = urljoin(self._open_api, "datasets/")
if section:
url = urljoin(urljoin(dataset_url, dataset_id + "/"), section)
else:
url = urljoin(dataset_url, dataset_id)
else:
if section:
url = urljoin(self._open_api, section)
else:
url = urljoin(self._open_api, "datasets")
return url
@property
def session(self) -> UserSession:
"""Create and return a session per PID so each sub-processes will use their own session.
Returns:
The session corresponding to the process.
"""
return self._sessions[os.getpid()]
def open_api_do(
self, method: str, section: str, dataset_id: str = "", **kwargs: Any
) -> Response:
"""Send a request to the TensorBay Open API.
Arguments:
method: The method of the request.
section: The section of the request.
dataset_id: Dataset ID.
**kwargs: Extra keyword arguments to send in the POST request.
Raises:
ResponseError: When the status code OpenAPI returns is unexpected.
Returns:
Response of the request.
"""
kwargs.setdefault("headers", {})["X-Token"] = self.access_key
kwargs["headers"][
"X-Source"
] = f"{config._x_source}/{__version__}" # pylint: disable=protected-access
try:
return self.do(method=method, url=self._url_make(section, dataset_id), **kwargs)
except ResponseError as error:
response = error.response
error_code = response.json()["code"]
raise ResponseErrorDistributor.get(error_code, ResponseError)(response) from None
def do(self, method: str, url: str, **kwargs: Any) -> Response: # pylint: disable=invalid-name
"""Send a request.
Arguments:
method: The method of the request.
url: The URL of the request.
**kwargs: Extra keyword arguments to send in the GET request.
Returns:
Response of the request.
"""
return self.session.request(method=method, url=url, **kwargs)
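# A minimal usage sketch (the access key and dataset id below are placeholders,
# not real credentials):
#
#   client = Client("Accesskey-************")
#   response = client.open_api_do("GET", "", dataset_id="<dataset-id>")
#   print(response.json())
#
# With an empty ``section`` and a non-empty ``dataset_id``, ``_url_make`` points
# the request at ``.../tensorbay-open-api/v1/datasets/<dataset-id>``.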
_T = TypeVar("_T")
class Tqdm(tqdm): # type: ignore[misc]
"""A wrapper class of tqdm for showing the process bar.
Arguments:
total: The number of expected iterations.
disable: Whether to disable the entire progress bar.
"""
def __init__(self, total: int, disable: bool = False) -> None:
super().__init__(desc="Uploading", total=total, disable=disable)
def update_callback(self, _: Any) -> None:
"""Callback function for updating process bar when multithread task is done."""
self.update()
def update_for_skip(self, condition: bool) -> bool:
"""Update process bar for the items which are skipped in builtin filter function.
Arguments:
condition: The filter condition; the progress bar will be updated if the condition is False.
Returns:
The input condition.
"""
if not condition:
self.update()
return condition
def multithread_upload(
function: Callable[[_T], None],
arguments: Iterable[_T],
*,
jobs: int = 1,
pbar: Tqdm,
) -> None:
"""Multi-thread upload framework.
Arguments:
function: The upload function.
arguments: The arguments of the upload function.
jobs: The maximum number of workers in the multi-thread uploading process.
pbar: The :class:`Tqdm` instance for showing the upload progress bar.
"""
with ThreadPoolExecutor(jobs) as executor:
futures = [executor.submit(function, argument) for argument in arguments]
for future in futures:
future.add_done_callback(pbar.update_callback)
done, not_done = wait(futures, return_when=FIRST_EXCEPTION)
for future in not_done:
future.cancel()
for future in done:
future.result()
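# A minimal sketch of driving ``multithread_upload`` (the upload callable and the
# file paths are hypothetical placeholders):
#
#   def _upload_one(path: str) -> None:
#       ...  # send a single file somewhere
#
#   paths = ["a.png", "b.png", "c.png"]
#   with Tqdm(total=len(paths), disable=False) as pbar:
#       multithread_upload(_upload_one, paths, jobs=4, pbar=pbar)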
PagingGenerator = Callable[[int, int], Generator[_T, None, int]]
class LazyItem(Generic[_T]): # pylint: disable=too-few-public-methods
"""In paging lazy evaluation system, a LazyItem instance represents an element in a pagination.
If user wants to access the elememt, LazyItem will trigger the paging request to pull a page of
elements and return the required element. All the pulled elements will be stored in different
LazyItem instances and will not be requested again.
Arguments:
page: The page the item belongs to.
Attributes:
page: The parent :class:`LazyPage` of this item.
data: The actual element stored in this item.
"""
_S = TypeVar("_S", bound="LazyItem[_T]")
__slots__ = ("page", "data")
def __init__(self, page: "LazyPage[_T]", data: _T):
self.page = page
self.data = data
@classmethod
def from_page(cls, page: "LazyPage[_T]") -> "LazyItem[_T]":
"""Create a LazyItem instance from page.
Arguments:
page: The page of the element.
Returns:
The LazyItem instance which stores the input page.
"""
obj: "LazyItem[_T]" = object.__new__(cls)
obj.page = page
return obj
@classmethod
def from_data(cls, data: _T) -> "LazyItem[_T]":
"""Create a LazyItem instance from data.
Arguments:
data: The actual data needs to be stored in LazyItem.
Returns:
The LazyItem instance which stores the input data.
"""
obj: "LazyItem[_T]" = object.__new__(cls)
obj.data = data
return obj
def get(self) -> _T:
"""Access the actual element represented by LazyItem.
If the element has already been pulled from the web, it is returned directly; otherwise this
function will request the page of elements the item belongs to and return the required element.
"""
'''
Functions for loading and slicing data
'''
from datetime import datetime
from pkg_resources import resource_filename
import numpy as np
from scipy.io import loadmat
__all__ = ['get_data',
'get_samplerate_mstimer',
'get_samplerate_datetime',
'_sliding_window',
'rolling_mean',
'outliers_iqr_method',
'outliers_modified_z',
'MAD',
'load_exampledata']
def get_data(filename, delim=',', column_name='None', encoding=None,
ignore_extension=False):
'''load data from file
Function to load data from a .CSV or .MAT file into numpy array.
File can be accessed from local disk or url.
Parameters
----------
filename : string
absolute or relative path to the file object to read
delim : string
the delimiter used if CSV file passed
default : ','
column_name : string
for CSV files with header: specify column that contains the data
for matlab files it specifies the table name that contains the data
default : 'None'
ignore_extension : bool
if True, extension is not tested, use for example for files where
the extension is not .csv or .txt but the data is formatted as if
it is.
default : False
Returns
-------
out : 1-d numpy array
array containing the data from the requested column of the specified file
Examples
--------
As an example, let's load two example data files included in the package
For this we use pkg_resources for automated testing purposes, you don't need
this when using the function.
>>> from pkg_resources import resource_filename
>>> filepath = resource_filename(__name__, 'data/data.csv')
So, assuming your file lives at 'filepath', you open it as such:
>>> get_data(filepath)
array([530., 518., 506., ..., 492., 493., 494.])
Files with multiple columns can be opened by specifying the 'column_name' where
the data resides:
>>> filepath = resource_filename(__name__, 'data/data2.csv')
Again you don't need the above. It is there for automated testing.
>>> get_data(filepath, column_name='timer')
array([0.00000000e+00, 8.54790319e+00, 1.70958064e+01, ...,
1.28192904e+05, 1.28201452e+05, 1.28210000e+05])
You can open matlab files in much the same way by specifying the column
where the data lives:
>>> filepath = resource_filename(__name__, 'data/data2.mat')
Again you don't need the above. It is there for automated testing.
Open matlab file by specifying the column name as well:
>>> get_data(filepath, column_name='hr')
array([515., 514., 514., ..., 492., 494., 496.])
You can open any csv-formatted text file, no matter the extension, if you
set ignore_extension to True:
>>> filepath = resource_filename(__name__, 'data/data.log')
>>> get_data(filepath, ignore_extension = True)
array([530., 518., 506., ..., 492., 493., 494.])
You can specify column names in the same way when using ignore_extension
>>> filepath = resource_filename(__name__, 'data/data2.log')
>>> data = get_data(filepath, column_name = 'hr', ignore_extension = True)
'''
file_ext = filename.split('.')[-1]
if file_ext == 'csv' or file_ext == 'txt':
if column_name != 'None':
hrdata = np.genfromtxt(filename, delimiter=delim, names=True, dtype=None, encoding=None)
try:
hrdata = hrdata[column_name]
except Exception as error:
raise LookupError('\nError loading column "%s" from file "%s". \
Is column name specified correctly?\n The following error was provided: %s'
%(column_name, filename, error))
elif column_name == 'None':
hrdata = np.genfromtxt(filename, delimiter=delim, dtype=np.float64)
else: # pragma: no cover
raise LookupError('\nError: column name "%s" not found in header of "%s".\n'
%(column_name, filename))
elif file_ext == 'mat':
data = loadmat(filename)
if column_name != "None":
hrdata = np.array(data[column_name][:, 0], dtype=np.float64)
else: # pragma: no cover
raise LookupError('\nError: column name required for Matlab .mat files\n\n')
else:
if ignore_extension:
if column_name != 'None':
hrdata = np.genfromtxt(filename, delimiter=delim, names=True, dtype=None, encoding=None)
try:
hrdata = hrdata[column_name]
except Exception as error:
raise LookupError('\nError loading column "%s" from file "%s". \
Is column name specified correctly?\n'
%(column_name, filename))
elif column_name == 'None': # pragma: no cover
hrdata = np.genfromtxt(filename, delimiter=delim, dtype=np.float64)
else: # pragma: no cover
raise LookupError('\nError: column name "%s" not found in header of "%s".\n'
%(column_name, filename))
else:
raise IncorrectFileType('unknown file format')
return None
return hrdata
def get_samplerate_mstimer(timerdata):
'''determine sample rate based on ms timer
Function to determine sample rate of data from ms-based timer list or array.
Parameters
----------
timerdata : 1d numpy array or list
sequence containing values of a timer, in ms
Returns
-------
out : float
the sample rate as determined from the timer sequence provided
Examples
--------
first we load a provided example dataset
>>> data, timer = load_exampledata(example = 1)
since it's a timer that counts milliseconds, we use this function.
Let's also round to three decimals
>>> round(get_samplerate_mstimer(timer), 3)
116.996
of course if another time unit is used, converting it to ms-based
should be trivial.
'''
sample_rate = ((len(timerdata) / (timerdata[-1]-timerdata[0]))*1000)
return sample_rate
def get_samplerate_datetime(datetimedata, timeformat='%H:%M:%S.%f'):
'''determine sample rate based on datetime
Function to determine sample rate of data from datetime-based timer
list or array.
Parameters
----------
datetimedata : 1-d numpy array or list
sequence containing datetime strings
timeformat : string
the format of the datetime-strings in datetimedata
default : '%H:%M:%S.%f' (24-hour based time including ms: e.g. 21:43:12.569)
Returns
-------
out : float
the sample rate as determined from the timer sequence provided
Examples
--------
We load the data like before
>>> data, timer = load_exampledata(example = 2)
>>> timer[0]
'2016-11-24 13:58:58.081000'
Note that we need to specify the timeformat used so that datetime understands
what it's working with:
>>> round(get_samplerate_datetime(timer, timeformat = '%Y-%m-%d %H:%M:%S.%f'), 3)
100.42
'''
datetimedata = np.asarray(datetimedata, dtype='str') #cast as str in case of np.bytes type
elapsed = ((datetime.strptime(datetimedata[-1], timeformat) -
datetime.strptime(datetimedata[0], timeformat)).total_seconds())
sample_rate = (len(datetimedata) / elapsed)
return sample_rate
def _sliding_window(data, windowsize):
'''segments data into windows
Function to segment data into windows for rolling mean function.
Function returns the data segmented into sections.
Parameters
----------
data : 1d array or list
array or list containing data over which sliding windows are computed
windowsize : int
size of the windows to be created by the function
Returns
-------
out : array of arrays
data segmented into separate windows.
Examples
--------
>>> import numpy as np
>>> data = np.array([1, 2, 3, 4, 5])
>>> windows = _sliding_window(data, windowsize = 3)
>>> windows.shape
(3, 3)
'''
shape = data.shape[:-1] + (data.shape[-1] - windowsize + 1, windowsize)
strides = data.strides + (data.strides[-1],)
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
def rolling_mean(data, windowsize, sample_rate):
'''calculates rolling mean
Function to calculate the rolling mean (also: moving average) over the passed data.
Parameters
----------
data : 1-dimensional numpy array or list
sequence containing data over which rolling mean is to be computed
windowsize : int or float
the window size to use, in seconds
calculated as windowsize * sample_rate
sample_rate : int or float
the sample rate of the data set
Returns
-------
out : 1-d numpy array
sequence containing computed rolling mean
Examples
--------
>>> data, _ = load_exampledata(example = 1)
>>> rmean = rolling_mean(data, windowsize=0.75, sample_rate=100)
>>> rmean[100:110]
array([514.49333333, 514.49333333, 514.49333333, 514.46666667,
514.45333333, 514.45333333, 514.45333333, 514.45333333,
514.48 , 514.52 ])
'''
# calculate rolling mean
data_arr = np.array(data)
rol_mean = np.mean(_sliding_window(data_arr, int(windowsize*sample_rate)), axis=1)
# need to fill 1/2 windowsize gap at the start and end
n_missvals = int(abs(len(data_arr) - len(rol_mean))/2)
missvals_a = np.array([rol_mean[0]]*n_missvals)
missvals_b = np.array([rol_mean[-1]]*n_missvals)
rol_mean = np.concatenate((missvals_a, rol_mean, missvals_b))
#only to catch length errors that sometimes inexplicably occur.
##Generally not executed, excluded from testing and coverage
if len(rol_mean) != len(data): # pragma: no cover
lendiff = len(rol_mean) - len(data)
if lendiff < 0:
rol_mean = np.append(rol_mean, 0)
else:
rol_mean = rol_mean[:-1]
return rol_mean
def outliers_iqr_method(hrvalues):
'''removes outliers
Function that removes outliers based on the interquartile range method and
substitutes them for the median
see: https://en.wikipedia.org/wiki/Interquartile_range
Parameters
----------
hrvalues : 1-d numpy array or list
sequence of values, from which outliers need to be identified
Returns
-------
out : tuple
[0] cleaned sequence with identified outliers substituted for the median
[1] list of indices that have been replaced in the original array or list
Examples
--------
>>> x = [2, 4, 3, 4, 6, 7, 35, 2, 3, 4]
>>> outliers_iqr_method(x)
([2, 4, 3, 4, 6, 7, 4.0, 2, 3, 4], [6])
'''
med = np.median(hrvalues)
q1, q3 = np.percentile(hrvalues, [25, 75])
iqr = q3 - q1
lower = q1 - (1.5 * iqr)
upper = q3 + (1.5 * iqr)
output = []
replaced_indices = []
for i in range(0,len(hrvalues)):
if hrvalues[i] < lower or hrvalues[i] > upper:
    output.append(med)
    replaced_indices.append(i)
else:
    output.append(hrvalues[i])
return output, replaced_indices
the value of `layer_5_directional_back_absoptance_matrix_name` or None if not set
"""
return self["Layer 5 Directional Back Absoptance Matrix Name"]
@layer_5_directional_back_absoptance_matrix_name.setter
def layer_5_directional_back_absoptance_matrix_name(self, value=None):
"""Corresponds to IDD field `Layer 5 Directional Back Absoptance Matrix
Name`"""
self["Layer 5 Directional Back Absoptance Matrix Name"] = value
class ConstructionWindowEquivalentLayer(DataObject):
""" Corresponds to IDD object `Construction:WindowEquivalentLayer`
Start with outside layer and work your way to the inside Layer
Up to 11 layers total. Up to six solid layers and up to five gaps.
Enter the material name for each layer
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'outside layer',
{'name': u'Outside Layer',
'pyname': u'outside_layer',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 2',
{'name': u'Layer 2',
'pyname': u'layer_2',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 3',
{'name': u'Layer 3',
'pyname': u'layer_3',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 4',
{'name': u'Layer 4',
'pyname': u'layer_4',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 5',
{'name': u'Layer 5',
'pyname': u'layer_5',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 6',
{'name': u'Layer 6',
'pyname': u'layer_6',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 7',
{'name': u'Layer 7',
'pyname': u'layer_7',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 8',
{'name': u'Layer 8',
'pyname': u'layer_8',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 9',
{'name': u'Layer 9',
'pyname': u'layer_9',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 10',
{'name': u'Layer 10',
'pyname': u'layer_10',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'layer 11',
{'name': u'Layer 11',
'pyname': u'layer_11',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Surface Construction Elements',
'min-fields': 2,
'name': u'Construction:WindowEquivalentLayer',
'pyname': u'ConstructionWindowEquivalentLayer',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def outside_layer(self):
"""field `Outside Layer`
Args:
value (str): value for IDD Field `Outside Layer`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outside_layer` or None if not set
"""
return self["Outside Layer"]
@outside_layer.setter
def outside_layer(self, value=None):
"""Corresponds to IDD field `Outside Layer`"""
self["Outside Layer"] = value
@property
def layer_2(self):
"""field `Layer 2`
Args:
value (str): value for IDD Field `Layer 2`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_2` or None if not set
"""
return self["Layer 2"]
@layer_2.setter
def layer_2(self, value=None):
"""Corresponds to IDD field `Layer 2`"""
self["Layer 2"] = value
@property
def layer_3(self):
"""field `Layer 3`
Args:
value (str): value for IDD Field `Layer 3`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_3` or None if not set
"""
return self["Layer 3"]
@layer_3.setter
def layer_3(self, value=None):
"""Corresponds to IDD field `Layer 3`"""
self["Layer 3"] = value
@property
def layer_4(self):
"""field `Layer 4`
Args:
value (str): value for IDD Field `Layer 4`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_4` or None if not set
"""
return self["Layer 4"]
@layer_4.setter
def layer_4(self, value=None):
"""Corresponds to IDD field `Layer 4`"""
self["Layer 4"] = value
@property
def layer_5(self):
"""field `Layer 5`
Args:
value (str): value for IDD Field `Layer 5`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_5` or None if not set
"""
return self["Layer 5"]
@layer_5.setter
def layer_5(self, value=None):
"""Corresponds to IDD field `Layer 5`"""
self["Layer 5"] = value
@property
def layer_6(self):
"""field `Layer 6`
Args:
value (str): value for IDD Field `Layer 6`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_6` or None if not set
"""
return self["Layer 6"]
@layer_6.setter
def layer_6(self, value=None):
"""Corresponds to IDD field `Layer 6`"""
self["Layer 6"] = value
@property
def layer_7(self):
"""field `Layer 7`
Args:
value (str): value for IDD Field `Layer 7`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_7` or None if not set
"""
return self["Layer 7"]
@layer_7.setter
def layer_7(self, value=None):
"""Corresponds to IDD field `Layer 7`"""
self["Layer 7"] = value
@property
def layer_8(self):
"""field `Layer 8`
Args:
value (str): value for IDD Field `Layer 8`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_8` or None if not set
"""
return self["Layer 8"]
@layer_8.setter
def layer_8(self, value=None):
"""Corresponds to IDD field `Layer 8`"""
self["Layer 8"] = value
@property
def layer_9(self):
"""field `Layer 9`
Args:
value (str): value for IDD Field `Layer 9`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_9` or None if not set
"""
return self["Layer 9"]
@layer_9.setter
def layer_9(self, value=None):
"""Corresponds to IDD field `Layer 9`"""
self["Layer 9"] = value
@property
def layer_10(self):
"""field `Layer 10`
Args:
value (str): value for IDD Field `Layer 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_10` or None if not set
"""
return self["Layer 10"]
@layer_10.setter
def layer_10(self, value=None):
"""Corresponds to IDD field `Layer 10`"""
self["Layer 10"] = value
@property
def layer_11(self):
"""field `Layer 11`
Args:
value (str): value for IDD Field `Layer 11`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `layer_11` or None if not set
"""
return self["Layer 11"]
@layer_11.setter
def layer_11(self, value=None):
"""Corresponds to IDD field `Layer 11`"""
self["Layer 11"] = value
class ConstructionWindowDataFile(DataObject):
""" Corresponds to IDD object `Construction:WindowDataFile`
Initiates search of the Window data file for a window called Name.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'file name',
{'name': u'File Name',
'pyname': u'file_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Surface Construction Elements',
'min-fields': 0,
'name': u'Construction:WindowDataFile',
'pyname': u'ConstructionWindowDataFile',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def file_name(self):
"""field `File Name`
| default file name is "Window5DataFile.dat"
| limit on this field is 100 characters.
Args:
value (str): value for IDD Field `File Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `file_name` or None if not set
"""
return self["File Name"]
@file_name.setter
def file_name(self, value=None):
"""Corresponds to IDD field `File Name`"""
self["File Name"] = value
class MaterialPropertyGlazingSpectralData(DataObject):
""" Corresponds to IDD object `MaterialProperty:GlazingSpectralData`
Name is followed by up to 800 sets of normal-incidence measured values of
[wavelength, transmittance, front reflectance, back reflectance] for wavelengths
covering the solar spectrum (from about 0.25 to 2.5 microns)
"""
_schema = {'extensible-fields': OrderedDict([(u'wavelength',
{'name': u'Wavelength',
'pyname': u'wavelength',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'micron'}),
(u'transmittance',
{'name': u'Transmittance',
'pyname': u'transmittance',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real'}),
(u'front reflectance',
{'name': u'Front Reflectance',
'pyname': u'front_reflectance',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real'}),
(u'back reflectance',
{'name': u'Back Reflectance',
'pyname': u'back_reflectance',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'})]),
'format': u'spectral',
'group': u'Surface Construction Elements',
'min-fields': 0,
'name': u'MaterialProperty:GlazingSpectralData',
'pyname': u'MaterialPropertyGlazingSpectralData',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
def add_extensible(self,
wavelength=None,
transmittance=None,
won't get drawn until plot is drawn.
#
if(fill_x):
plot.polygon.append(add_polygon(wks,plot, \
polygon_y[0:ppts2+3], \
polygon_x[0:ppts2+3],gsres))
else:
plot.polygon.append(add_polygon(wks,plot, \
polygon_x[0:ppts2+3], \
polygon_y[0:ppts2+3],gsres))
#
# Advance polygon counter.
#
npoly = npoly + 1
bpt = -1 # Reinitialize
ept = -1
return plot
#
# I copied this from Nio.py
#
def _get_integer_version(strversion):
d = strversion.split('.')
if len(d) > 2:
v = int(d[0]) * 10000 + int(d[1]) * 100 + int(d[2][0])
elif len(d) == 2:
v = int(d[0]) * 10000 + int(d[1]) * 100
else:
v = int(d[0]) * 10000
return v
IS_NEW_MA = _get_integer_version(numpy.__version__) > 10004
#
# Import other stuff we need.
#
from .hlu import *
import site, types, subprocess, sys, os, math, re
#
# Try to guess the package path for PyNGL. If it can't
# be found, then you can "help" it by setting the
# environment variable PYNGL_NCARG to the "ncarg"
# directory that's under the package directory.
#
pkgs_pth = get_python_lib(1)
# Try a different one.
if not (os.path.exists(pkgs_pth)):
pkgs_pth = os.path.join(sys.prefix, 'lib64', 'python'+sys.version[:3],
'site-packages')
if (not (os.path.exists(pkgs_pth)) and os.environ.get("PYNGL_NCARG") is None):
print('Cannot find the Python packages directory and PYNGL_NCARG is not set.')
print('There may be some difficulty finding needed PyNGL system files')
print('unless you set the PYNGL_NCARG environment variable.')
first_call_to_open_wks = 0
class Resources:
pass
class PlotIds:
pass
def _inputt(a,b):
#
# Promote a and b to numpy arrays that have at least a dimension of 1.
#
a2 = _promote_scalar(a)
b2 = _promote_scalar(b)
#
# Determine what kind of array to return.
#
if _is_numpy(a) or _is_numpy(b):
import numpy
return numpy.array(fplib._inputt(a2,b2))
else:
return fplib._inputt(a2,b2)
def _int_id(plot_id):
#
# Convert PlotIds objects to integer plot ids.
#
if (type(plot_id) == type(1)):
# Is an integer.
return plot_id
elif (type(plot_id) == type([1])):
# Is a list.
return plot_id[0]
elif (isinstance(plot_id,PlotIds)):
# Is a PlotIds class instance.
if (type(plot_id.base[0]) != type(1)):
print("plot id is not valid")
return None
return plot_id.base[0]
else:
print("plot id is not valid")
return None
def _is_list_or_tuple(arg):
if ( ((type(arg) == list) or (type(arg) == tuple)) ):
return True
else:
return False
def _is_numpy_array(arg):
if isinstance(arg,numpy.ndarray):
return True
else:
return False
def _is_numpy_ma(arg):
if HAS_MA and ma.isMaskedArray(arg):
return True
else:
return False
#
# This function returns True if it encounters a Python scalar.
#
def _is_python_scalar(arg):
import types
if (isinstance(arg, integer_types) or \
type(arg)==float):
return True
else:
return False
#
# This function returns True if we have a numpy scalar or array.
#
def _is_numpy(arg):
try:
import numpy
if isinstance(arg,numpy.ndarray) or isinstance(arg,numpy.generic):
return True
else:
return False
except:
return False
#
# This function returns True if it encounters a numpy scalar.
# A numpy scalar can either be a numpy array with 0 dimensions,
# or a numpy scalar, which is a new thing that didn't exist in
# numeric.
#
def _is_numpy_scalar(arg):
try:
import numpy
if (isinstance(arg,numpy.ndarray)) and (len(arg.shape) == 0):
return True
#
# Test for numpy scalar.
#
elif isinstance(arg,numpy.generic):
return True
else:
return False
except:
return False
#
# This function returns True if it's a Python scalar or a
# numpy scalar.
#
def _is_scalar(arg):
return _is_numpy_scalar(arg) or _is_python_scalar(arg)
def _arg_with_scalar(arg):
#
# This function is to accommodate scalar arguments for
# some functions that take lists, tuples, or NumPy arrays.
# The input argument is checked to see if it is a number and,
# if so, it is converted to a single-element list. Otherwise
# the original argument is returned.
#
if (_is_scalar(arg)):
return [arg]
else:
return arg
#
# This function returns a NumPy array and the fill value
# if arr is a masked array; otherwise it just returns arr and
# 'None' for the fill value.
#
# Later, I'd like to add recognition of NioVariables, and then
# I can look for the "_FillValue" attribute and use this.
#
def _get_arr_and_fv(arr):
if _is_numpy_ma(arr):
if IS_NEW_MA:
return arr.filled(),arr.fill_value
else:
return arr.filled(),arr.fill_value()
else:
return arr,None
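#
# Example (illustrative), assuming the ma module is available:
#   arr = ma.masked_values([1., -999., 3.], value=-999.)
#   _get_arr_and_fv(arr)                   # -> (array([ 1., -999., 3.]), -999.0)
#   _get_arr_and_fv(numpy.array([1., 2.])) # -> (array([1., 2.]), None)
#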
#
# This function returns a NumPy array and the fill value
# if arr is a masked array; otherwise it just returns arr and
# a default missing value.
#
# This is similar to _get_arr_and_fv, except a fill
# value is always returned.
#
def _get_arr_and_force_fv(arr,default_msg=1.e20):
if _is_numpy_ma(arr):
if IS_NEW_MA:
return arr.filled(),arr.fill_value
else:
return arr.filled(),arr.fill_value()
else:
return arr,default_msg
#
# Determine if a variable has an attribute. If so,
# return that attribute's value. Otherwise, return
# the default value given.
#
def _get_res_value_keep(res, attrname,default):
if hasattr(res,attrname):
return(getattr(res,attrname))
else:
return(default)
#
# Determine if a variable has a key. If so,
# return that key's value. Otherwise, return
# the default value given.
#
def _get_key_value_keep(res, keyname,default):
if keyname in res:
return(res[keyname])
else:
return(default)
#
# If a masked array, then convert to a numpy array.
# Otherwise just return the value.
#
# This is similar to "_get_arr_and_fv" except
# it doesn't return the fill value.
#
# Hopefully after the release of numpy 1.0.5, we can move
# this code into the C interface.
#
def _convert_from_ma(value):
if _is_numpy_ma(value):
return value.filled()
else:
return value
def _convert_to_ma(arr,fv):
if HAS_MA:
return ma.masked_values(arr,value=fv)
else:
return arr
#
# This function checks if a fill value exists, and if it does,
# sets the appropriate missing value PyNGL resource.
#
def _set_msg_val_res(rlist,fv,plot_type):
type_res_pairs = { "xy_x" : "caXMissingV",
"xy_y" : "caYMissingV",
"scalar" : "sfMissingValueV",
"vector_u" : "vfMissingUValueV",
"vector_v" : "vfMissingVValueV"}
if not plot_type in list(type_res_pairs.keys()):
return None
res_to_set = type_res_pairs[plot_type]
if not fv is None:
if res_to_set not in rlist:
rlist[res_to_set] = fv
else:
if rlist[res_to_set] != fv:
print("Warning:",res_to_set,"is not equal to actual missing value of data,",fv)
def _ck_for_rangs(dir):
#
# This function checks that the appropriate data files for
# the high-res database exist.
#
file_names = ( \
"gshhs(0).rim", "gshhs(1).rim", \
"gshhs(2).rim", "gshhs(3).rim", \
"gshhs(4).rim", \
"rangs(0).cat", "rangs(0).cel", \
"rangs(1).cat", "rangs(1).cel", \
"rangs(2).cat", "rangs(2).cel", \
"rangs(3).cat", "rangs(3).cel", \
"rangs(4).cat", "rangs(4).cel" \
)
for file in file_names:
fp_file = dir + "/" + file
if (not (os.path.exists(fp_file))):
print('\nInfo message: The environment variable PYNGL_RANGS has ')
print(' been set, but the required high-res database file')
print(' "' + fp_file + '"')
print(' is not in that directory.\n')
return None
def _ismissing(arg,mval):
#
# Returns an array of the same shape as "arg" that
# has True values in all places where "arg" has
# missing values.
#
arg2 = _convert_from_ma(arg)
if _is_numpy(arg2):
pass
else:
print("_ismissing: first argument must be a numpy array or scalar.")
return None
return(numpy.equal(arg2,mval))
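#
# Example (illustrative): with a fill value of 1.e20,
#   _ismissing(numpy.array([1., 1.e20, 3.]), 1.e20)
# returns array([False, True, False]).
#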
#
# _get_fill_value(arr)
# input:
# arr - any Python object
# output:
# if arr is a numpy masked array:
# fill_value is the fill value
# if arr is not a masked array
# fill_value returned as None.
#
def _get_fill_value(arr):
#
# If arr is a numpy masked array, return its fill value.
#
if _is_numpy_ma(arr):
if IS_NEW_MA:
return arr.fill_value
else:
return arr.fill_value()
#
# Not a NumPy masked array.
#
return None
def _get_values(obj,rlistc):
rlist = _crt_dict(rlistc)
values = NhlGetValues(_int_id(obj),rlist)
del rlist
return (values)
#
# Test procedure to see if we're masking a lambert conformal map.
#
def _test_for_mask_lc(rlist,rlist1):
masklc = False
maskoutline = 1
if "nglMaskLambertConformal" in rlist:
if rlist["nglMaskLambertConformal"]:
masklc = True
if "nglMaskLambertConformalOutlineOn" in rlist:
maskoutline = rlist["nglMaskLambertConformalOutlineOn"]
if masklc:
if "mpMinLatF" in rlist1 and "mpMaxLatF" in rlist1 and \
"mpMinLonF" in rlist1 and "mpMaxLonF" in rlist1:
if (rlist1["mpMinLatF"] < 0):
rlist1["mpLambertParallel1F"] = -0.001
rlist1["mpLambertParallel2F"] = -89.999
else:
print("map: Warning: one or more of the resources mpMinLatF, mpMaxLatF, mpMinLonF, and mpMaxLonF have not been set.")
print("No masking of the Lambert Conformal map will take place.")
masklc = False
#
# Don't draw or advance frame if we're masking a
# lambert conformal map. We'll do that later.
#
drawit = True
frameit = True
maxit = True
if masklc:
if "nglDraw" in rlist:
drawit = rlist["nglDraw"]
if "nglFrame" in rlist:
frameit = rlist["nglFrame"]
if "nglMaximize" in rlist:
maxit = rlist["nglMaximize"]
_set_spc_res("Draw",False)
_set_spc_res("Frame",False)
_set_spc_res("Maximize",False)
mask_list = {}
mask_list["MaskLC"] = masklc
mask_list["MaskLCDraw"] = drawit
mask_list["MaskLCFrame"] = frameit
mask_list["MaskLCMaximize"] = maxit
mask_list["MaskLCOutline"] = maskoutline
return mask_list
#***********************************************************************#
# Function : mask_lambert_conformal #
# wks: graphic #
# maplc: graphic #
# mask_list: dictionary #
# res: logical #
# #
# Given a lambert conformal projection, and min/max lat/lon coords, #
# this function will mask the map outside the boundaries defined by #
# the coords. mask_list has a set of resources that determines the #
# behavior of the masked plot. #
# "res" is an optional list of resources. #
# #
# Note, due to the nature of Lambert Conformal plots, lon labels #
# cannot be automatically drawn on this type of plot. #
# #
# Programming Note: The function expects longitude input data to #
# range from -360:180E.
# stockgeist/client.py
from typing import Tuple, Dict, List
import pandas as pd
import requests
from tqdm import tqdm
from stockgeist.responses import ArticleMetricsResponse, MessageMetricsResponse, PriceMetricsResponse, \
RankingMetricsResponse, TopicMetricsResponse, SymbolsResponse, FundamentalsResponse
class StockGeistClient:
"""
A Client class responsible for communication with StockGeist's API.
"""
def __init__(self, token):
self._token = token
self._session = requests.Session()
self._base_url = 'https://api.stockgeist.ai/'
def _gen(self):
while True:
yield
def _construct_query(self, endpoint_name: str, query_args: Dict[str, object]) -> str:
"""
Helper function for constructing API query.
:param endpoint_name: Name of the StockGeist's REST API endpoint.
:param query_args: Dict containing all arguments passed to REST API.
:return: REST API query string.
"""
# construct query
query = f'{self._base_url}{endpoint_name}?token={self._token}&'
for name, value in query_args.items():
if value is not None:
if isinstance(value, tuple):
query += f'{name}={",".join(value)}&'
else:
query += f'{name}={value}&'
query = query.strip('&')
return query
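    # Example (illustrative, added for clarity): with
    #   endpoint_name = 'time-series/message-metrics' and
    #   query_args = {'symbol': 'AAPL', 'timeframe': '1h',
    #                 'filter': ('total_count', 'pos_index'), 'start': None}
    # the method returns
    #   https://api.stockgeist.ai/time-series/message-metrics?token=<token>&symbol=AAPL&timeframe=1h&filter=total_count,pos_index
    # None-valued arguments are skipped, tuples are comma-joined, and the
    # trailing '&' is stripped.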
def _fetch_data_time_series(self, endpoint_name: str, query_args: Dict) -> List[Dict]:
"""
Fetch data from time series endpoints of REST API.
:param endpoint_name: Name of the StockGeist's REST API endpoint.
:param query_args: Dict containing all arguments passed to REST API.
:return: list of batches of data returned by REST API.
"""
res = []
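        # Clarifying note (added): the loop below pages backwards through the
        # requested interval. Each batch is fetched with the current 'end'
        # argument, 'end' is then moved to the earliest timestamp returned,
        # and fetching stops once 'start' is reached (or after a single batch
        # when 'start' is None).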
for _ in tqdm(self._gen()):
# construct query
query = self._construct_query(endpoint_name, query_args)
# query endpoint
res_batch = self._session.get(query).json()
res.append(res_batch)
# check response
if res_batch['metadata']['status_code'] != 200:
return res
if endpoint_name == 'time-series/price-metrics':
try:
# some data returned
first_timestamp = pd.Timestamp(res_batch['body'][0]['timestamp'])
except IndexError:
# data not returned - might have encountered market holiday, weekend or non-market hours
first_timestamp = pd.Timestamp(query_args['end']).replace(hour=23, minute=0,
second=0) - pd.Timedelta(
days=1)
if query_args['start'] is not None:
# check whether all data range is fetched
if first_timestamp.strftime('%Y-%m-%dT%H:%M:%S') <= query_args['start']:
break
else:
query_args['end'] = first_timestamp.strftime('%Y-%m-%dT%H:%M:%S')
else:
break
else:
first_timestamp = pd.Timestamp(res_batch['body'][0]['timestamp']).strftime('%Y-%m-%dT%H:%M:%S')
if query_args['start'] is not None:
# check whether all data range is fetched
if first_timestamp == query_args['start']:
break
else:
query_args['end'] = first_timestamp
else:
break
return res
def _fetch_data_snapshot(self, endpoint_name: str, query_args: Dict) -> List[Dict]:
"""
Fetch data from snapshot endpoints of REST API.
:param endpoint_name: Name of the StockGeist's REST API endpoint.
:param query_args: Dict containing all arguments passed to REST API.
:return: list of batches of data returned by REST API.
"""
# construct query
query = self._construct_query(endpoint_name, query_args)
# query endpoint
res = self._session.get(query).json()
return [res]
def get_credits(self):
"""
Queries StockGeist's API and gets the number of credits available for given token.
"""
# get data
res = self._fetch_data_snapshot('snapshot/credits', {})[0]
credits = res['metadata']['credits']
return credits
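    # Usage sketch (illustrative; the token is a placeholder, not a real key):
    #
    #   client = StockGeistClient(token="YOUR_TOKEN")
    #   print(client.get_credits())
    #   resp = client.get_message_metrics('AAPL', timeframe='1h',
    #                                     filter=('total_count', 'pos_index'))
    #
    # Each get_* method forwards its arguments to the corresponding REST
    # endpoint and wraps the returned batches in a *Response object.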
def get_message_metrics(self,
symbol: str,
timeframe: str = '5m',
filter: Tuple[str, ...] = ('total_count', ),
start: str = None,
end: str = None) -> MessageMetricsResponse:
"""
Queries StockGeist's API and gets message metrics data.
:param symbol: Stock ticker for which to retrieve data.
:param timeframe: Time resolution of returned data. Possible values are 5m, 1h, 1d.
:param filter: What metrics to return with the response. Possible values are: inf_positive_count,
inf_neutral_count, inf_negative_count, inf_total_count, em_positive_count, em_neutral_count,
em_negative_count, em_total_count, total_count, pos_index, msg_ratio, ma, ma_diff, std_dev,
ma_count_change. For more information check https://docs.stockgeist.ai.
:param start: Timestamp of the earliest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param end: Timestamp of the latest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:return: MessageMetricsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_time_series('time-series/message-metrics', query_args)
return MessageMetricsResponse(res, query_args)
def get_article_metrics(self,
symbol: str,
timeframe: str = '5m',
filter: Tuple[str, ...] = ('titles',),
start: str = None,
end: str = None) -> ArticleMetricsResponse:
"""
Queries StockGeist's API and gets article metrics data.
:param symbol: Stock ticker for which to retrieve data.
:param timeframe: Time resolution of returned data. Possible values are 5m, 1h, 1d.
:param filter: What metrics to return with the response. Possible values are: titles, title_sentiments,
mentions, summaries, sentiment_spans, urls. For more information check https://docs.stockgeist.ai.
:param start: Timestamp of the earliest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param end: Timestamp of the latest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:return: ArticleMetricsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_time_series('time-series/article-metrics', query_args)
return ArticleMetricsResponse(res, query_args)
def get_price_metrics(self,
symbol: str,
timeframe: str = '5m',
filter: Tuple[str, ...] = ('close',),
start: str = None,
end: str = None) -> PriceMetricsResponse:
"""
Queries StockGeist's API and gets price metrics data.
:param symbol: Stock ticker for which to retrieve data.
:param timeframe: Time resolution of returned data. Possible values are 5m, 1h, 1d.
:param filter: What metrics to return with the response. Possible values are: open, high, low, close, volume.
For more information check https://docs.stockgeist.ai.
:param start: Timestamp of the earliest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param end: Timestamp of the latest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:return: PriceMetricsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_time_series('time-series/price-metrics', query_args)
return PriceMetricsResponse(res, query_args)
def get_topic_metrics(self,
symbol: str,
timeframe: str = '5m',
filter: Tuple[str, ...] = ('words',),
start: str = None,
end: str = None) -> TopicMetricsResponse:
"""
Queries StockGeist's API and gets topic metrics data.
:param symbol: Stock ticker for which to retrieve data.
:param timeframe: Time resolution of returned data. Possible values are 5m, 1h, 1d.
:param filter: What metrics to return with the response. Possible values are: words, scores. For more
information check https://docs.stockgeist.ai.
:param start: Timestamp of the earliest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param end: Timestamp of the latest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:return: TopicMetricsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_time_series('time-series/topic-metrics', query_args)
return TopicMetricsResponse(res, query_args)
def get_ranking_metrics(self,
symbol: str = None,
timeframe: str = '5m',
filter: Tuple[str, ...] = ('symbols',),
start: str = None,
end: str = None,
by: str = 'total_count',
direction: str = 'descending',
top: int = 5) -> RankingMetricsResponse:
"""
Queries StockGeist's API and gets ranking metrics data.
:param symbol: Stock ticker for which to retrieve data.
:param timeframe: Time resolution of returned data. Possible values are 5m, 1h, 1d.
:param filter: What metrics to return with the response. Possible values are: symbols, scores, score_changes,
values. For more information check https://docs.stockgeist.ai.
:param start: Timestamp of the earliest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param end: Timestamp of the latest data point in returned time series. Time is assumed to be in
UTC time zone. Valid format: YYYY-mm-ddTHH:MM:SS.
:param by: Select message metric by which stock ranking is produced. Possible values are: inf_positive_count,
inf_neutral_count, inf_negative_count, inf_total_count, em_positive_count, em_neutral_count,
em_negative_count, em_total_count, total_count, pos_index, msg_ratio, ma, ma_diff, std_dev,
ma_count_change.
:param direction: Ranking direction: descending/ascending leaves stock with largest/smallest metric
value at the top.
:param top: Number of top stocks to return.
:return: RankingMetricsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_time_series('time-series/ranking-metrics', query_args)
return RankingMetricsResponse(res, query_args)
def get_symbols(self) -> SymbolsResponse:
"""
Queries StockGeist's API and gets all available symbols.
:return: SymbolsResponse object.
"""
# get query arguments
query_args = locals()
query_args.pop('self')
# get data
res = self._fetch_data_snapshot('snapshot/symbols', query_args)
return SymbolsResponse(res, query_args)
def get_fundamentals(self,
symbol: str = None,
filter: Tuple[str, ...] = ('market_cap',)) -> FundamentalsResponse:
"""
Queries StockGeist's API and gets fundamentals data.
        :param symbol: Stock ticker for which to retrieve data.
:param filter: What metrics to return with the response. Possible values are: ``book_to_sh``, ``rsi_14``,
``eps_next_y``,
52w_range, eps_ttm, roa, dividend_perc, beta, oper_margin, p_to_fcf, eps_this_y, inst_trans, p_to_b,
        rel_volume, perf_quarter, sales,
`__getitem__(kwargs)`,
- A list is returned, not a subspace.
        - This list contains keys (coords), not values.
- The coords refer to the original space, not the subspace.
The last point is especially useful for
`SparseSpace.label_xSection`.
"""
def embed(coord): return {**kwargs, **coord._asdict()}
return [self.Coord(**embed(x)) for x in self[kwargs]]
# Old implementation.
        # - I prefer the new version for its re-use of __getitem__'s
        #   nesting, evidencing their mutual relationship.
# - Note that unlike xpList.inds(): missingval shenanigans
# are here unnecessary coz each coordinate is complete.
# match = lambda x: all(getattr(x,k)==kwargs[k] for k in kwargs)
# return [x for x in self if match(x)]
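        # Example (illustrative; the attribute name is hypothetical):
        #   space.coords(da_method='EnKF')
        # returns the complete Coord keys, in the original coordinate system,
        # of every entry whose da_method attribute equals 'EnKF'.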
def __repr__(self):
# Note: print(xpList(self)) produces more human-readable key listing,
# but we don't want to implement it here, coz it requires split_attrs(),
# which we don't really want to call again.
L = 2
keys = [str(k) for k in self]
if 2*L < len(keys):
keys = keys[:L] + ["..."] + keys[-L:]
keys = "[\n " + ",\n ".join(keys) + "\n]"
txt = f"<{self.__class__.__name__}> with {len(self)} keys: {keys}"
# txt += " befitting the coord. sys. with axes "
txt += "\nplaced in a coord-sys with axes "
try:
txt += "(and ticks):" + str(struct_tools.AlignedDict(self.ticks))
except AttributeError:
txt += ":\n" + str(self.axes)
return txt
def nest(self, inner_axes=None, outer_axes=None):
"""Return a new xpSpace with axes `outer_axes`,
obtained by projecting along the `inner_axes`.
The entries of this `xpSpace` are themselves `xpSpace`s,
with axes `inner_axes`,
each one regrouping the entries with the same (projected) coordinate.
Note: is also called by `__getitem__(key)` if `key` is dict."""
# Default: a singleton outer space,
# with everything contained in the inner (projection) space.
if inner_axes is None and outer_axes is None:
outer_axes = ()
# Validate axes
if inner_axes is None:
assert outer_axes is not None
inner_axes = struct_tools.complement(self.axes, outer_axes)
else:
assert outer_axes is None
outer_axes = struct_tools.complement(self.axes, inner_axes)
# Fill spaces
outer_space = self.__class__(outer_axes)
for coord, entry in self.items():
outer_coord = outer_space.__getkey__(coord)
try:
inner_space = outer_space[outer_coord]
except KeyError:
inner_space = self.__class__(inner_axes)
outer_space[outer_coord] = inner_space
inner_space[inner_space.__getkey__(coord)] = entry
return outer_space
def add_axis(self, axis):
self.__init__(self.axes+(axis,))
for coord in list(self):
entry = self.pop(coord)
self[coord + (None,)] = entry
def intersect_axes(self, attrs):
"""Rm those a in attrs that are not in self.axes.
This allows errors in the axes allotment, for ease-of-use."""
absent = struct_tools.complement(attrs, self.axes)
if absent:
print(color_text("Warning:", colorama.Fore.RED),
"The requested attributes",
color_text(str(absent), colorama.Fore.RED),
("were not found among the"
" xpSpace axes (attrs. used as coordinates"
" for the set of experiments)."
" This may be no problem if the attr. is redundant"
" for the coord-sys."
" However, if it is caused by confusion or mis-spelling,"
" then it is likely to cause mis-interpretation"
" of the shown results."))
attrs = struct_tools.complement(attrs, absent)
return attrs
def label_xSection(self, label, *NoneAttrs, **sub_coord):
"""Insert duplicate entries for the cross section
whose `coord`s match `sub_coord`,
adding the attr `Const=label` to their `coord`,
reflecting the "constance/constraint/fixation" this represents.
This distinguishes the entries in this fixed-affine subspace,
preventing them from being gobbled up in `nest`.
If you wish, you can specify the `NoneAttrs`,
which are consequently set to None for the duplicated entries,
preventing them from getting plotted in tuning panels.
"""
if "Const" not in self.axes:
self.add_axis('Const')
for coord in self.coords(**self.intersect_axes(sub_coord)):
entry = copy.deepcopy(self[coord])
coord = coord._replace(Const=label)
coord = coord._replace(**{a: None for a in NoneAttrs})
self[coord] = entry
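    # Example (illustrative; attribute names and values are hypothetical):
    #   xp_dict.label_xSection('FixedInfl', 'infl', infl=1.01)
    # duplicates every entry whose coord has infl == 1.01, tags the copies
    # with Const='FixedInfl', and sets their 'infl' attribute to None so the
    # copies are not re-plotted in the tuning panels.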
AXES_ROLES = dict(outer=None, inner=None, mean=None, optim=None)
class xpSpace(SparseSpace):
"""Functionality to facilitate working with `xps` and their results.
`xpSpace.from_list` initializes a `SparseSpace` from a list
of objects, typically experiments referred to as `xp`s, by
(1) computing the relevant axes from the attributes, and
(2) filling the dict by `xp`s.
Using `xpSpace.from_list(xps)` creates a SparseSpace holding `xp`s.
However, the nested `xpSpace`s output by `xpSpace.table_tree` will hold
objects of type `UncertainQtty`,
    coz `xpSpace.table_tree` calls `mean`, which calls `field(statkey)`.
The main use of `xpSpace` is through `xpSpace.print` & `xpSpace.plot`,
both of which call `xpSpace.table_tree` to nest the axes of the `SparseSpace`.
"""
@classmethod
def from_list(cls, xps):
"""Init xpSpace from xpList."""
def make_ticks(axes, ordering=dict(
N = 'default',
seed = 'default',
infl = 'default',
loc_rad = 'default',
rot = 'as_found',
da_method = 'as_found',
)):
"""Unique & sort, for each axis (individually) in axes."""
for ax_name, arr in axes.items():
ticks = set(arr) # unique (jumbles order)
# Sort
                order = ordering.get(ax_name, 'default')
                if callable(order):  # eg. mylist.index
                    ticks = sorted(ticks, key=order)
                else:
                    order = order.lower()
                    if 'as_found' in order:
                        ticks = sorted(ticks, key=arr.index)
                    else:  # default sorting, with None placed at the end
                        ticks = sorted(ticks, key=lambda x: (x is None, x))
                    if any(x in order for x in ['rev', 'inv']):
                        ticks = ticks[::-1]
                axes[ax_name] = ticks
# Define axes
xp_list = xpList(xps)
axes = xp_list.split_attrs(nomerge=['Const'])[0]
make_ticks(axes)
self = cls(axes.keys())
# Note: this attr (ticks) will not be propagated through nest().
# That is fine. Otherwise we should have to prune the ticks
# (if they are to be useful), which we don't want to do.
self.ticks = axes
# Fill
self.update({self.__getkey__(xp): xp for xp in xps})
return self
def field(self, statkey="rmse.a"):
"""Extract `statkey` for each item in `self`."""
# Init a new xpDict to hold field
avrgs = self.__class__(self.axes)
found_anything = False
for coord, xp in self.items():
val = getattr(xp.avrgs, statkey, None)
avrgs[coord] = val
found_anything = found_anything or (val is not None)
if not found_anything:
raise AttributeError(
f"The stat. field '{statkey}' was not found"
" among any of the xp's.")
return avrgs
def mean(self, axes=None):
# Note: The case `axes=()` should work w/o special treatment.
if axes is None:
return self
nested = self.nest(axes)
for coord, space in nested.items():
def getval(uq): return uq.val if isinstance(uq, UncertainQtty) else uq
vals = [getval(uq) for uq in space.values()]
# Don't use nanmean! It would give false impressions.
mu = np.mean(vals)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
# Don't print warnings caused by N=1.
                # It already correctly yields NaNs.
var = np.var(vals, ddof=1)
N = len(vals)
uq = UncertainQtty(mu, np.sqrt(var/N))
uq.nTotal = N
uq.nFail = N - np.isfinite(vals).sum()
uq.nSuccess = N - uq.nFail
nested[coord] = uq
return nested
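    # Worked example (illustrative): averaging over axes=('seed',) for values
    # [0.20, 0.22, 0.24] gives mu = 0.22, var (ddof=1) = 0.0004 and a
    # confidence of sqrt(var/N) = sqrt(0.0004/3) ~ 0.0115, i.e. roughly
    # UncertainQtty(0.22, 0.0115) with nTotal=3, nFail=0, nSuccess=3.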
def tune(self, axes=None, costfun=None):
"""Get (compile/tabulate) a stat field optimised wrt. tuning params."""
# Define cost-function
costfun = (costfun or 'increasing').lower()
if 'increas' in costfun:
costfun = (lambda x: +x)
elif 'decreas' in costfun:
costfun = (lambda x: -x)
else:
assert hasattr(costfun, '__call__') # custom
# Note: The case `axes=()` should work w/o special treatment.
if axes is None:
return self
nested = self.nest(axes)
for coord, space in nested.items():
# Find optimal value and coord within space
MIN = np.inf
for i, (inner_coord, uq) in enumerate(space.items()):
cost = costfun(uq.val)
if cost <= MIN:
MIN = cost
uq_opt = uq
uq_opt.tuned_coord = inner_coord
nested[coord] = uq_opt
return nested
def validate_axes(self, axes):
"""Validate axes.
Note: This does not convert None to (),
allowing None to remain special.
Use `axis or ()` wherever tuples are required.
"""
roles = {} # "inv"
for role in set(axes) | set(AXES_ROLES):
assert role in AXES_ROLES, f"Invalid role {role!r}"
aa = axes.get(role, AXES_ROLES[role])
if aa is None:
pass # Purposely special
else:
# Ensure iterable
if isinstance(aa, str) or not hasattr(aa, "__iter__"):
aa = (aa,)
aa = self.intersect_axes(aa)
for axis in aa:
# Ensure unique
if axis in roles:
raise TypeError(
f"An axis (here {axis!r}) cannot be assigned to 2"
f" roles (here {role!r} and {roles[axis]!r}).")
else:
roles[axis] = role
axes[role] = aa
return axes
def table_tree(self, statkey, axes):
"""Hierarchical nest(): xp_dict>outer>inner>mean>optim.
as specified by `axes`. Returns this new xpSpace.
- print_1d / plot_1d (respectively) separate
tables / panel(row)s for `axes['outer']`, and
columns/ x-axis for `axes['inner']`.
- The `axes['mean']` and `axes['optim']` get eliminated
by the mean()/tune() operations.
Note: cannot support multiple statkeys
because it's not (obviously) meaningful
when optimizing over tuning_axes.
"""
axes = self.validate_axes(axes)
def mean_tune(xp_dict):
"""Take mean, then tune.
Note: the SparseDict implementation should be sufficiently
"uncluttered" that mean_tune() (or a | |
# PyGEP: Gene Expression Programming for Python
# Copyright (C) 2007 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
Provides standard GEP chromosomes as well as the symbol decorator for
defining functions for use in chromosomes. Note that the Chromosome
class is an abstract base class, providing methods for construction and
evaluation of genes. Chromosomes that inherit from it should provide
a set of terminal and function symbols.
'''
from pygep.functions.linkers import default_linker
from pygep.gene import KarvaGene
from pygep.util import cache
import random
def symbol(symb):
'''
Decorator that assigns a symbol to a function for chromosome
display. The symbol is stored in the function.symbol attribute.
@symbol('/')
def divide(x, y):
return x / y
@param symb: symbol to use, typically one character
'''
def decorator(func):
'''
Attaches a symbol to a function as its 'symbol' attribute
@param func: function to decorate
'''
func.symbol = symb
return func
return decorator
class MetaChromosome(type):
'''
Metaclass for computing various information about a chromosomal
type. Sets the following attributes on a chromosome class:
- arity: maximum functional arity
- symbols: symbols that can reside in the head
Also turns caching of fitness values on for all chromosomes.
'''
def __new__(mcs, name, bases, dct):
'''
Prepares a chromosome type for use in GEP, assigning to
cls.symbols, cls.arity, and caching the cls._fitness.
@param mcs: class to apply the metaclass to
@param name: name of the class
@param bases: base classes
@param dct: class dict
'''
typ = type.__new__(mcs, name, bases, dct)
typ.symbols = typ.functions + typ.terminals
# Find the max arity
try:
typ.arity = max([f.func_code.co_argcount for f in typ.functions])
except ValueError:
typ.arity = 0
# Cache fitness values
typ._fitness = cache(typ._fitness)
return typ
class Chromosome(object):
'''
A Chromosome must provide these attributes:
- functions: tuple of nonterminals
- terminals: tuple of terminal symbols
And override these functions:
- _fitness: fitness of a given individual
- _solved: True if the problem is optimally solved (optional)
An example Chromosome that evolves simple arithmetic expressions
on data objects providing attributes 'a' and 'b' and the constants
1 and 2:
from pygep.functions.arithmetic import *
from pygep import Chromosome
class Calculation(Chromosome):
functions = multiply, add, subtract, divide
terminals = 'a', 1, 2
def _fitness(self):
# Evaluate chromosome fitness here.
# This often involves calling self.evaluate(something)
def _solved(self):
# Not required, but useful if the problem can
# be optimally solved. Usually this just means
# checking self.fitness.
'''
__metaclass__ = MetaChromosome
__next_id = 1
gene_type = KarvaGene
functions = ()
terminals = ()
symbols = () # overridden by metaclass
head = tail = length = arity = 0
@classmethod
def generate(cls, head, genes=1, linker=default_linker):
'''
Returns a generator of random GEP chromosomes
@param head: head length (min=0)
@param genes: number of genes (min=1)
@param linker: linking function
'''
tail = head * (cls.arity - 1) + 1
while True:
new_genes = [None] * genes
for i in xrange(genes):
new_genes[i] = cls.gene_type(
[random.choice(cls.symbols) for _ in xrange(head)] + \
[random.choice(cls.terminals) for _ in xrange(tail)], head
)
yield cls(new_genes, head, linker)
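    # Worked example (illustrative): with binary functions (arity 2) and
    # head=6, the tail length is head*(arity-1) + 1 = 6*1 + 1 = 7, so each
    # generated gene holds 6 head alleles (functions or terminals) followed
    # by 7 tail alleles (terminals only), i.e. 13 alleles in total.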
def __init__(self, genes, head, linker=default_linker):
'''
        Instantiates a chromosome instance and analyzes it for evaluation.
Sets the self.coding tuple to the last genes in the coding regions
and various other internal data for the chromosome. Note that it
is generally unwise to instantiate chromosomes manually. It is
much more common to create them via calls to the static method
Chromosome.generate(...).
@param genes: genes in the chromosome
@param head: length (not index) of the gene heads (min=0)
@param linker: linker function for gene evaluation
'''
        # Must have at least one gene and a head length of at least 0
if head < 0:
raise ValueError('Head length must be at least 0')
if not genes:
raise ValueError('Must have at least 1 gene')
self.genes = genes
self.head = head
self.linker = linker
# Unique number of the organism
self.__id = type(self).__next_id
type(self).__next_id += 1
def __cmp__(self, other):
'''@return: cmp value of two chromosomes by fitness'''
if self is other:
return 0
return cmp(self.fitness, other.fitness)
def __len__(self):
'''@return: total number of alleles in the chromosome'''
return sum(len(g) for g in self.genes)
def __iter__(self):
'''@return: generator for alleles in chromosome'''
for gene in self.genes:
for allele in gene:
yield allele
def __getitem__(self, i):
'''
Returns a given allele by index
@param i: allele index
@return: allele
'''
i, j = divmod(i, len(self.genes))
return self.genes[j][i]
@cache
def __repr__(self):
'''@return: repr of chromosome alleles'''
return ''.join(repr(g) for g in self.genes)
def _child(self, genes):
'''
Creates a child chromosome
@param genes: ordered list of GEP genes
@return: a child chromosome of self
'''
if genes != self.genes:
return type(self)(genes, self.head, self.linker)
return self
# Unique ID of the organism
id = property(lambda self: self.__id, doc='Organism #')
def __call__(self, obj):
'''
Evaluates a given GEP chromosome against some instance. The
terminals in the chromosome are assumed to be attributes on
the object instance provided (unless they are numeric constants).
@param obj: an object instance with terminal attributes set
@return: result of evaluating the chromosome
'''
return self.linker(*[g(obj) for g in self.genes])
def _fitness(self):
'''@return: comparable fitness value'''
raise NotImplementedError('Must override Chromosome._fitness')
def _solved(self):
'''@return: boolean indicating optimal solution found'''
return False
fitness = property(lambda self: self._fitness(), doc='Fitness value')
solved = property(lambda self: self._solved(), doc='Problem solved')
def mutate(self, rate):
'''
Produces a new chromosome via potential point mutation on each
locus. If nothing changes, the original chromosome is returned.
@param rate: mutation rate per locus
@return: child chromosome (or self)
'''
genes = list(self.genes)
# Traverse the chromosome gene by gene
for gene_idx, gene in enumerate(self.genes):
# Then locus by locus
replacements = []
for i, allele in enumerate(gene):
# Do we mutate this locus?
if random.random() < rate:
# Mutation within the tail can only use terminals
if i >= self.head:
new_allele = random.choice(self.terminals)
else:
new_allele = random.choice(self.symbols)
# Only use this if the mutation actually did something
if new_allele != allele:
replacements.append((i, [new_allele]))
# If we have actual replacements to make, do them
if replacements:
genes[gene_idx] = gene.derive(replacements)
# Create a child of this chromosome
return self._child(genes)
def invert(self):
'''
Produces a new chromosome via head inversion
@return: child chromosome
'''
if self.head < 2: # Head inversion does nothing in this case
return self
genes = list(self.genes)
# Choose a random gene and two points within the head
i = random.choice(xrange(len(self.genes)))
start, stop = random.sample(xrange(self.head), 2)
# Order the indexes correctly
if start > stop:
start, stop = stop, start
# Create the new chromosome
replacement = list(reversed(genes[i][start:stop+1]))
genes[i] = genes[i].derive([(start, replacement)])
return self._child(genes)
def transpose_is(self, length):
'''
Produces a new chromosome via IS transposition
@param length: sequence length (typically 1, 2, or 3)
@return: child chromosome
'''
# Since IS does not transpose to the root, it has no purpose
# if the head length is less than 2.
if self.head < 2:
return self
# Pick source and target genes
genes = list(self.genes)
source = random.choice(genes)
target = random.choice(xrange(len(genes)))
# Extract a transposition sequence. Truncate if required.
start = random.choice(xrange(len(source)))
end = start + length
end = self.head if end > self.head else end
        # Offset into target gene: in the head
from django import forms
class EmptyForm(forms.Form):
pass
class LoginForm(forms.Form):
username = forms.CharField(
max_length=50,
label='Username'
)
password = forms.CharField(
max_length=32,
label='Password',
widget=forms.PasswordInput(),
required=True
)
class DeleteForm(forms.Form):
verify = forms.CharField(
initial='true',
widget=forms.HiddenInput()
)
class ConfirmForm(forms.Form):
verify = forms.CharField(
initial='true',
widget=forms.HiddenInput()
)
class ViewAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class ViewUpdateForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class ViewSearchForm(forms.Form):
searchstring = forms.CharField(
max_length=50,
required=True,
widget=forms.TextInput(attrs={'id': 'searchbox'})
)
class ViewAdvancedSearchForm(forms.Form):
searchAttribute = forms.CharField(
max_length=50,
required=True
)
searchValue = forms.CharField(
max_length=50,
required=False
)
attributesList = forms.CharField(
max_length=256,
required=False
)
OPTIONS = (
('devices', 'devices'),
('device categories', 'device categories'),
('passwords', 'passwords'),
('password categories', 'password categories'),
('networks', 'networks')
)
displayTypes = forms.MultipleChoiceField(
choices=OPTIONS,
required=False
)
class NetworkTreeAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
protocol = forms.ChoiceField(
label='Protocol',
choices=(('ipv4', 'ipv4'), ('ipv6', 'ipv6'))
)
class NetworkAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Address',
help_text='The network/address in CIDR form (x.x.x.x or x.x.x.x/xx)'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class NetworkRangeAddForm(forms.Form):
range = forms.CharField(
max_length=50,
label='Range'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class NetworkDeleteForm(forms.Form):
recursive = forms.BooleanField(
label='Recursive delete',
required=False
)
class PasswordKeyAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
key = forms.CharField(
max_length=32,
label='Key',
widget=forms.PasswordInput(),
required=False
)
validate = forms.CharField(
max_length=32,
label='Key (again)',
widget=forms.PasswordInput(),
required=False
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class CounterAddBasicForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class CounterAddLoopingForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
values = forms.CharField(
max_length=5000,
label='Values',
help_text='one value per row',
widget=forms.Textarea(attrs={'cols':'30', 'rows': '5'})
)
class CounterUpdateBasicForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
value = forms.DecimalField(
min_value=0,
decimal_places=0,
label='Value'
)
class CounterUpdateLoopingForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
value = forms.CharField(
max_length=50,
label='Value'
)
values = forms.CharField(
max_length=5000,
label='Values',
help_text='one value per row',
widget=forms.Textarea(attrs={'cols':'30', 'rows': '5'})
)
class CounterSetForm(forms.Form):
value = forms.DecimalField(
min_value=0,
decimal_places=0,
label='Value'
)
class PasswordAddForm(forms.Form):
pw_username = forms.CharField(
max_length=50,
label='Username',
required=False
)
pw_password = forms.CharField(
max_length=250,
label='Password',
widget=forms.PasswordInput(),
required=False,
help_text='Max length: 250, leave empty for generated password.'
)
validate = forms.CharField(
max_length=250,
label='Password (again)',
widget=forms.PasswordInput(),
required=False
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
def __init__(self, password_keys, *args, **kwargs):
super(PasswordAddForm, self).__init__(*args, **kwargs)
keylist = [('__no-password-key__', 'None')]
for key in password_keys:
value = (key.oid, key.attributes['name'])
if key.attributes.get('default', False) is True:
keylist.insert(0, value)
else:
keylist.append(value)
field = forms.ChoiceField(
label='Password key',
choices=keylist
)
self.fields['passwordkey'] = field
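# Usage sketch (illustrative; 'request' and the key objects are hypothetical):
#
#   keys = ...  # sequence of password-key objects with .oid and .attributes
#   form = PasswordAddForm(keys, request.POST or None)
#
# The extra positional argument populates the dynamic 'passwordkey'
# ChoiceField, with any key flagged as default listed first.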
class PasswordUpdateForm(forms.Form):
pw_username = forms.CharField(max_length = 50, label = 'Username',
required = False)
pw_password = forms.CharField(max_length = 250, label = 'Password',
widget = forms.PasswordInput(), required = False,
help_text = 'Max length: 250, leave empty for generated password.')
validate = forms.CharField(max_length = 250, label = 'Password (again)',
widget = forms.PasswordInput(), required = False)
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
def __init__(self, password_keys, *args, **kwargs):
super(PasswordUpdateForm, self).__init__(*args, **kwargs)
keylist = [('__no-password-key__', 'None')]
for key in password_keys:
value = (key.oid, key.attributes['name'])
if key.attributes.get('default', False) is True:
keylist.insert(0, value)
else:
keylist.append(value)
field = forms.ChoiceField(label = 'Password key', choices = keylist)
self.fields['passwordkey'] = field
class DeviceTemplateAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
device_creation = forms.BooleanField(label = 'Device creation',
required = False,
initial = False,
help_text = 'Template is used for device creation.')
def __init__(self, templates, *args, **kwargs):
super(DeviceTemplateAddForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class NetworkTemplateAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
def __init__(self, templates, *args, **kwargs):
super(NetworkTemplateAddForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class DeviceTemplateUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
device_creation = forms.BooleanField(label = 'Device creation',
required = False,
initial = False,
help_text = 'Template is used for device creation.')
def __init__(self, templates, *args, **kwargs):
super(DeviceTemplateUpdateForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class NetworkTemplateUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
def __init__(self, templates, *args, **kwargs):
super(NetworkTemplateUpdateForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class TemplateRuleTextAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
hidden = forms.BooleanField(label = 'Hide attribute',
required = False,
initial = False,
        help_text = 'If true, the attribute will be hidden by default if it is large/wikitext.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
large = forms.BooleanField(label = 'Large attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will have a separate display box.')
wikitext = forms.BooleanField(label = 'Wikitext attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed using wikitext parsing, implies "large".')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
class TemplateRuleFixedAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
string_value = forms.CharField(max_length = 100, label = 'String value',
help_text = 'The created attributes value.')
variable_expansion = forms.BooleanField(label = 'Expand variables',
required = False,
initial = False)
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
class TemplateRuleRegmatchAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
regexp = forms.CharField(max_length = 50, label = 'Regexp',
help_text = 'Regular expression that must match the input value.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
        help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
# joeedh/Fairing.blend
from . import util, shadow
from . import simplemesh
import bpy, bmesh, time, random
from bpy_extras import view3d_utils
from mathutils import *
from math import *
from bpy.props import *
import bgl
import blf
class AXES:
X = 1
Y = 2
Z = 4
colormap = [
Vector([0, 0, 1, 1]),
Vector([1, 0, 1, 1]),
Vector([0, 1, 0, 1]),
Vector([0, 1, 1, 1]),
Vector([1, 1, 0, 1]),
Vector([1, 0, 0, 1])
]
def fract(f):
return f - floor(f)
def colormap_get(f):
f = min(max(f, 0.0), 1.0)
t = fract(f*len(colormap)*0.9999999)
f = int(f*len(colormap)*0.9999999)
if f >= len(colormap)-1:
return colormap[f]
else:
a = colormap[f]
b = colormap[f+1]
return a + (b - a) * t
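# Worked example (illustrative): colormap_get(0.25) computes
# int(0.25*6*0.9999999) = 1 and t = fract(0.25*6*0.9999999) ~ 0.5, so the
# returned colour lies roughly halfway between colormap[1] and colormap[2].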
def handle_mirror_clipping(self, ob, bm, vcos):
axes = 0
limit = 0.0
for mod in ob.modifiers:
#print(mod.type)
if mod.type != "MIRROR" or not mod.use_clip: continue
if mod.use_axis[0]:
axes |= AXES.X
if mod.use_axis[1]:
axes |= AXES.Y
if mod.use_axis[2]:
axes |= AXES.Z
limit = max(limit, mod.merge_threshold)
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
for j in range(3):
if not (axes & (1<<j)):
continue
d = abs(vcos[i][j])
if d <= limit:
v.co[j] = 0
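# Clarifying note (added): handle_mirror_clipping mimics the Mirror
# modifier's "Clipping" option for selected, visible vertices: on every
# mirrored axis, a vertex whose stored coordinate (vcos) is within the
# modifier's merge_threshold of the mirror plane gets that coordinate
# snapped to exactly 0.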
def draw_callback_3dpx(self, context):
if not hasattr(self, "sm"):
print("no 3d draw data")
return
matrix = bpy.context.region_data.perspective_matrix
sm = self.sm
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_DEPTH_TEST)
#bgl.glPolygonOffset(100000, 100000);
#bgl.glDisable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_BLEND)
sm.draw({
"uColor" : [0.7, 0.8, 1, 0.3],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
#bgl.glEnable(bgl.GL_BLEND)
if self.sm2 is not None:
sm2 = self.sm2
sm2.draw({
"uColor" : [1, 1, 1, 0.7],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
if self.sm3 is not None:
self.sm3.draw({
"uColor" : [1, 1, 1, 0.5],
"viewProjectionMatrix" : matrix,
"uPolyOffset" : 0.5
})
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_DEPTH_TEST)
def draw_callback_px(self, context):
#print("mouse points", len(self.mouse_path))
font_id = 0 # XXX, need to find out how best to get this.
area = context.area
w = area.width
h = area.height
self.text = "yay"
# draw some text
if self.text != "":
blf.position(font_id, 15, 30, 0)
blf.size(font_id, 20, 72)
blf.draw(font_id, self.text)
sm = simplemesh.SimpleMesh()
d = 100
#sm.tri([0,0,0], [0, d, 0], [d, d, 0])
for l in self._lines:
v1 = [(l[0][0]-w*0.5)/w*2.0, (l[0][1]-h*0.5)/h*2.0, 0]
v2 = [(l[1][0]-w*0.5)/w*2.0, (l[1][1]-h*0.5)/h*2.0, 0]
v1[0] = v1[1] = 0
#print(v1, v2)
sm.line(v1, v2)
#sm.line([0, 0, 0], [d, d, 0])
sm.draw({
"uColor" : [1, 1, 1, 0.75]
})
#print(dir(bgl))
return
# 50% alpha, 2 pixel width line
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 0.5)
bgl.glLineWidth(2)
bgl.glBegin(bgl.GL_LINE_STRIP)
for x, y in self.mouse_path:
bgl.glVertex2i(x, y)
bgl.glEnd()
# restore opengl defaults
bgl.glLineWidth(1)
bgl.glDisable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
class ProjectToUnSubD(bpy.types.Operator):
"""Modal object selection with a ray cast"""
bl_idname = "mesh.project_to_unsubd"
bl_label = "UnSubD Project"
bl_options = {'REGISTER', 'UNDO'}
factor: bpy.props.FloatProperty(name="factor")
sm2 = None
sm3 = None
def main(self, context):
# get the context arguments
scene = context.scene
region = context.region
rv3d = context.region_data
LT = simplemesh.LayerTypes
lf = LT.VERTEX | LT.NORMALS | LT.UVS | LT.COLORS
self.sm3 = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D, layerflag=lf)
sm3 = self.sm3
dl = self.factor
def obj_ray_cast(obj, matrix_inv, ray_origin, ray_target):
"""Wrapper for ray casting that moves the ray into object space"""
#ray_origin_obj = matrix_inv @ ray_origin
#ray_target_obj = matrix_inv @ ray_target
ray_direction = ray_target - ray_origin
# cast the ray
success, location, normal, face_index = obj.ray_cast(ray_origin, ray_direction)
dist = (ray_origin - location).length
if success:
return location, dist, normal, face_index
else:
return None
ob = context.active_object
if ob is None or ob.type != "MESH" or ob.name.startswith("_") or ob.mode != "EDIT":
print("invalid object", ob)
return
ob2 = self._ob2
self.sm2 = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D)
sm2 = self.sm2
bm = bmesh.from_edit_mesh(ob.data)
cos = self.cos
nos = self.nos
for i, v in enumerate(bm.verts):
v.co = cos[i]
# get the ray relative to the object
matrix_inv = ob.matrix_world.inverted()
dav = 0
dtot = 0
matrix = ob.matrix_world
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
no = v.normal
target = v.co + no*1000
ret = obj_ray_cast(ob2, matrix_inv, v.co, target)
target = v.co - no*1000
ret2 = obj_ray_cast(ob2, matrix_inv, v.co, target)
if ret is None and ret2 is None: continue
elif ret is not None and ret2 is not None:
if ret2[1] < ret[1]:
ret = ret2
elif ret is None and ret2 is not None:
ret = ret2
no = Vector(v.normal)
no.normalize()
v.co = cos[i] + (ret[0] - cos[i]) * dl
dist = (v.co - cos[i]).length
dav += dist
dtot += 1
sm2.line(matrix @ v.co, matrix @ Vector(ret[0]))
sm2.line(matrix @ v.co, matrix @ Vector(ret[0]))
for e in bm.edges:
ok = not e.verts[0].hide and e.verts[0].select
ok = ok or (not e.verts[1].hide and e.verts[1].select)
if not ok: continue
v1, v2 = e.verts
#sm3.line(matrix @ v1.co, matrix @ v2.co)
if dtot > 1:
dav /= dtot
for i, v in enumerate(bm.verts):
if not v.select or v.hide: continue
no = Vector(nos[i])
no.normalize()
sl = -1 if dl < 0 else 1
v.co += no*sl*dav
handle_mirror_clipping(self, ob, bm, self.cos)
bmesh.update_edit_mesh(ob.data, destructive=False)
@classmethod
def poll(cls, context):
return (context.mode == 'EDIT_MESH')
def execute(self, context):
self._ob2 = shadow.getUnSubShadow(context.active_object, ctx=context)
self._lines = []
self.main(context)
self.stop()
return {'FINISHED'}
def makeDrawData(self, ob2):
me = shadow.ob_get_final_mesh(ob2)
LT = simplemesh.LayerTypes
lf = LT.VERTEX | LT.NORMALS | LT.UVS | LT.COLORS
drawbm = bmesh.new()
self.sm = simplemesh.SimpleMesh(shader=simplemesh.SimpleShader3D, layerflag=lf)
fset = set()
sm = self.sm
layer = me.polygon_layers_int["origindex"].data
for i, p in enumerate(me.polygons):
i2 = layer[i].value
if i2 == -1: #i2 in fset:
li = p.loop_start
vs = []
for j in range(p.loop_total):
vi = me.loops[li].vertex_index
v = me.vertices[vi]
vs.append(drawbm.verts.new(Vector(v.co)))
li += 1
drawbm.faces.new(vs)
matrix = ob2.matrix_world
for v in drawbm.verts:
v.co = matrix @ v.co
drawbm.normal_update()
c = [1, 1, 1, 1.0];
for f in drawbm.faces:
#c = colormap_get(0.9) #random.random()*0.15 + 0.15)
if len(f.verts) == 3:
v1, v2, v3 = f.verts
t = sm.tri(v1.co, v2.co, v3.co)
t.normals(v1.normal, v2.normal, v3.normal)
t.colors(c, c, c);
#t.uvs([0, 0], [0, 1], [1, 1])
elif len(f.verts) == 4:
v1, v2, v3, v4 = f.verts
q = sm.quad(v1.co, v2.co, v3.co, v4.co)
q.normals(v1.normal, v2.normal, v3.normal, v4.normal)
q.colors(c, c, c, c);
#q.uvs([0, 0], [0, 1], [1, 1], [1, 0])
else:
print("error; should have gotten subd surface with all quads");
ob2.to_mesh_clear()
def modal(self, context, event):
coord = event.mouse_region_x, event.mouse_region_y
dx = coord[0] - self.start_mpos[0]
dy = coord[1] - self.start_mpos[1]
#dl = sqrt(dx*dx + dy*dy) / 250
#print(dl)
self.factor = -dy / 250
self._lines = [
[self.start_mpos, coord]
]
#print(event.type, dir(event), event.value, event.oskey, event.tilt)
if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
context.area.tag_redraw()
# allow navigation
return {'PASS_THROUGH'}
elif event.type == 'MOUSEMOVE':
context.area.tag_redraw()
self.main(context)
return {'RUNNING_MODAL'}
elif event.type == "LEFTMOUSE" or (event.type == "RET" and event.value != "RELEASE"):
self.stop()
return {'FINISHED'}
elif event.type in {'RIGHTMOUSE', 'ESC'}:
self.stop()
context.area.tag_redraw()
ob = context.active_object
#can't rely on aborting earlier if this is false (in invoke) cause of dumb crash
if ob is not None and ob.type == "MESH" and not ob.name.startswith("_"):
bm = bmesh.from_edit_mesh(ob.data)
for i, v in enumerate(bm.verts):
v.co = self.cos[i]
bmesh.update_edit_mesh(ob.data)
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def stop(self):
if hasattr(self, "_handle") and self._handle is not None:
bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
self._handle = None
if hasattr(self, "_handle3d") and self._handle3d is not None:
bpy.types.SpaceView3D.draw_handler_remove(self._handle3d, 'WINDOW')
self._handle3d = None
self._ob2 = None
self._me = None
def invoke(self, context, event):
self._lines = []
if context.space_data.type == 'VIEW_3D':
self._ob2 = shadow.getUnSubShadow(context.active_object, ctx=context)
self.makeDrawData(self._ob2)
#print(event, dir(event))
self.start_mpos = event.mouse_region_x, event.mouse_region_y
self.mouse_path = []
args = (self, context)
self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL')
self._handle3d = bpy.types.SpaceView3D.draw_handler_add(draw_callback_3dpx, args, 'WINDOW', 'POST_VIEW')
self.text = ""
context.window_manager.modal_handler_add(self)
ob = context.active_object
if ob.mode == "EDIT":
ob.update_from_editmode()
if ob is not None and ob.type == "MESH" and not ob.name.startswith("_"):
#bpy.ops.ed.undo_push()
self.cos = [Vector(v.co) for v in ob.data.vertices]
            self.nos = [Vector(v.normal) for v in ob.data.vertices]
import os
import random
import sys
import time
from math import cos, sin, sqrt, pi

import numpy as np
import matplotlib
matplotlib.use("TkAgg")  # select the backend before pyplot is imported
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.image as image
from matplotlib import cm, colors
from numpy import arange, sin, pi  # note: shadows math.sin and math.pi, as in the original

import ruche
def getValues (folder):
TDM_ID = 0b1110
Hit_Amplitude_Id = 0b0011
Hit_Time_Id = 0b0010
Gtrig_Header_Id = 0b0001
Gtrig_trailer_1_Id = 0b0100
Gtrig_trailer_2_Id = 0b0101
Special_Word_id = 0b1111
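    # Word layout (clarifying note, inferred from the parsing code below):
    # every 32-bit word is read little-endian and reversed into a bit string
    # whose bits [0:4] hold one of the word IDs above. TDM words then carry a
    # header/trailer flag in bits [4:6] and the slot number in bits [6:11];
    # hit words carry the channel in [4:11], hit id in [11:14], tag in
    # [14:16] and the 12-bit amplitude/TOT measurement in bits [20:32].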
mean_rate = dict((i, []) for i in np.arange(0, 144))
std_rate = dict((i, []) for i in np.arange(0, 144))
mean_trigger_rate=[]
std_trigger_rate =[]
listeFichier=[]
    # build the list of data files to process
for folder_to_open in os.listdir(folder):
if os.path.isfile(folder+"/"+folder_to_open):
listeFichier.append(folder+"/"+folder_to_open)
else:
aTester=folder+"/"+folder_to_open
            if os.listdir(aTester):
for file in os.listdir(aTester):
if os.path.isfile(aTester+"/"+file):
listeFichier.append(aTester+"/"+file)
for formatFile in listeFichier:
with open(formatFile, "r+b") as file:
line = file.read(4)
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
event_data_amplitude_LG = {}
event_data_amplitude_HG = {}
event_data_tot = {}
data_LG = [[0]*144]
data_HG = [[0]*144]
data_time =[[0]*144]
dict_queue_edge = {}
negative_tot = 0
positive_tot = 0
pin_complete_slots = []
_break = 0
sumX1_rate = dict((i, 0) for i in np.arange(0, 144))
sumX2_rate = dict((i, 0) for i in np.arange(0, 144))
dict_for_calc_rate = dict((i, 0) for i in np.arange(0, 144))
nbre_ampli_and_tot = dict((i, 0) for i in np.arange(0, 144))
write_in_new_file = 0
X1_trigger_rate = 0
X2_trigger_rate = 0
nbre_trigger_rate = 0
gtrig_header = {}
global_trigger_header_amplitude = dict((i, []) for i in [0, 2])
global_trigger_header_time = dict((i, []) for i in [0, 2])
gtrig_ampli_or_tot_old = dict((i, 0) for i in [0, 2])
countss = 0
gtrig_header_used_for_rate = {}
calc_rate = dict((i, 0) for i in np.arange(0, 144))
start_time = time.time()
duration = 0
pqr = 0
aux_dict_to_test_coincidence={}
while line != b'':# and countss < 40000:
duration += time.time() - start_time
start_time = time.time()
countss += 1
if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 0:
slot = int(line_out_b[6:11], 2)
line = file.read(4)
if line != b'':
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
if slot not in pin_complete_slots:
pin_complete_slots.append(slot)
else:
pin_complete_slots = []
pin_complete_slots.append(slot)
while int(Word_Id, 2) != TDM_ID and line != b'':
if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 1:
break
else:
if int(Word_Id, 2) == Special_Word_id and int(line_out_b[11], 2) == 0 and int(
line_out_b[12:32], 2) == 3:
# print("Gtrig + Spill REset for slot {}".format(slot))
nmo = 1
else:
if int(Word_Id, 2) == Gtrig_Header_Id:
gtrig_header[slot] = int(line_out_b[4:32], 2)
while int(Word_Id, 2) != Gtrig_trailer_1_Id and line != b'':
if int(Word_Id, 2) == Hit_Amplitude_Id or int(Word_Id,
2) == Hit_Time_Id:
if slot == 0:
Channel_id = int(line_out_b[4:11], 2)
elif slot == 2:
Channel_id = int(line_out_b[4:11], 2) + 96
Hit_Id = int(line_out_b[11:14], 2)
Tag_Id = int(line_out_b[14:16], 2)
if int(Word_Id, 2) == Hit_Amplitude_Id:
Amplitude_Id = int(line_out_b[16:20], 2)
elif int(Word_Id, 2) == Hit_Time_Id:
Edge_time = int(line_out_b[16], 2)
Amplitude_or_tot_measurement = int(line_out_b[20:32], 2)
if len(pin_complete_slots) == 2: # if pin_complete_slots == [0, 2]:
write_in_new_file = 1
if (gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) != 0: # to verify
X1_trigger_rate += 1 / ((gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) * 10e-6)
X2_trigger_rate += (1 / ((gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) * 10e-6)) ** 2
nbre_trigger_rate += 1
gtrig_ampli_or_tot_old[slot] = gtrig_header[slot]
if (slot, Channel_id, Tag_Id,
Hit_Id) in dict_queue_edge.keys():
if int(Word_Id, 2) == Hit_Time_Id:
if (slot, Channel_id, Tag_Id,
Hit_Id) in dict_queue_edge.keys():
if Edge_time == 1:
dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)][1] = Amplitude_or_tot_measurement
gtrig_header_used_for_rate[
(slot, Channel_id, Tag_Id,
Hit_Id)] = gtrig_header[slot]
elif Edge_time == 0 and dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)][1] != 'a':
dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)][0] = Amplitude_or_tot_measurement
else:
del dict_queue_edge[
(slot, Channel_id, Tag_Id, Hit_Id)]
dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)] = 4 * ['a']
dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)][0] = Amplitude_or_tot_measurement
elif int(Word_Id, 2) == Hit_Amplitude_Id:
if Amplitude_Id == 3:
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
2] = Amplitude_or_tot_measurement
elif Amplitude_Id == 2 and dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
2] != 'a':
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
3] = Amplitude_or_tot_measurement
else:
del dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)]
'''dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)] = 4 * ['a']
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][2]=Amplitude_or_tot_measurement'''
try:
aux_diff_amplitude = dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
3] + \
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
2] + \
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
1] + \
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
0]
tot = dict_queue_edge[
(slot, Channel_id, Tag_Id, Hit_Id)][
1] - \
dict_queue_edge[
(slot, Channel_id, Tag_Id, Hit_Id)][
0]
                                                    if tot >= 0:  # tested: this branch is entered (the old per-pixel histogram code must stay commented out, otherwise it does not work)
global_trigger_header_amplitude[slot].append(gtrig_header[slot])
global_trigger_header_time[slot].append(gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id,Hit_Id)])
positive_tot += 1
val_LG=dict_queue_edge[(slot, Channel_id, Tag_Id,Hit_Id)][2]
val_HG= dict_queue_edge[(slot, Channel_id, Tag_Id,Hit_Id)][3]
if Channel_id not in aux_dict_to_test_coincidence.keys():
aux_dict_to_test_coincidence[Channel_id]=[val_LG]
else:
aux_dict_to_test_coincidence[Channel_id].append(val_LG)
data_LG[pqr][Channel_id]= val_LG
data_HG[pqr][Channel_id]= val_HG
data_time[pqr][Channel_id]=tot
#fill global histo
# self.old_dict_pixelid_values_LG_for_histo_global[keys].fill(val_LG, Channel_id)
# self.old_dict_pixelid_values_HG_for_histo_global[keys].fill(val_HG, Channel_id)
# self.old_dict_pixelid_values_tot_for_histo_global[keys].fill(tot, Channel_id)
#fill local histo
# self.old_dict_pixelid_values_LG_for_histo_local[keys].fill(val_LG, Channel_id)
# self.old_dict_pixelid_values_HG_for_histo_local[keys].fill(val_HG, Channel_id)
# self.old_dict_pixelid_values_tot_for_histo_local[keys].fill(tot, Channel_id)
event_data_amplitude_LG[nmo] = [(Channel_id,dict_queue_edge[(slot, Channel_id,Tag_Id,Hit_Id)][2])]
event_data_amplitude_HG[nmo] = [(Channel_id,dict_queue_edge[(slot, Channel_id,Tag_Id,Hit_Id)][3])]
event_data_tot[nmo] = (Channel_id,tot)
if nbre_ampli_and_tot[Channel_id] != 0:
rate_aux = 1 / ((gtrig_header_used_for_rate[
(slot, Channel_id, Tag_Id,
Hit_Id)] - dict_for_calc_rate[
Channel_id]) * 10e-6)
sumX1_rate[Channel_id] += rate_aux  # + rate_HG  # this rate is in MHz; we divide by 10 because 10us is the time between header and trailer
sumX2_rate[Channel_id] += rate_aux ** 2  # + rate_HG ** 2
nbre_ampli_and_tot[Channel_id] += 1
dict_for_calc_rate[
Channel_id] = gtrig_header_used_for_rate[
(slot, Channel_id, Tag_Id,
Hit_Id)]
nmo += 1
del gtrig_header_used_for_rate[
(slot, Channel_id, Tag_Id,
Hit_Id)]
else:
negative_tot += 1
del dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)]
except:
pass
else:
dict_queue_edge[(slot,
Channel_id, Tag_Id,
Hit_Id)] = 4 * ['a']
if int(Word_Id, 2) == Hit_Time_Id:
if Edge_time == 0:
dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)][0] = Amplitude_or_tot_measurement
elif Edge_time == 1:
del dict_queue_edge[
(slot, Channel_id, Tag_Id,
Hit_Id)]
elif int(Word_Id, 2) == Hit_Amplitude_Id:
if Amplitude_Id == 2:
dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)][
3] = Amplitude_or_tot_measurement
elif Amplitude_Id == 3:
del dict_queue_edge[(slot,
Channel_id,
Tag_Id,
Hit_Id)]
line = file.read(4)
if line != b'':
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 1:
_break = 1
break
if duration > time_allowed_to_display_events:
#add for ruche
data_electronics_LG = np.array([0 for r in range(144)])
data_electronics_HG = np.array([0 for r in range(144)])
data_electronics_tot = np.array([0 for r in range(144)])
data_LG.append([0]*144)
data_HG.append([0]*144)
data_time.append([0]*144)
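# Display selection (inferred from the code below): keep the event whose
# summed low-gain amplitude is largest and hand its LG/HG/tot arrays to the
# pixel display.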
index_max_sum = [np.sum(l) for l in data_LG].index(np.max([np.sum(l) for l in data_LG]))
data_electronics_LG = data_LG[index_max_sum]
data_electronics_HG = data_HG[index_max_sum]
data_electronics_tot = data_time[index_max_sum]
list_of_pixels_on_events = [data_LG.index(item) for item in data_LG if item != 0]
if list_of_pixels_on_events !=[] :
if choice == "HG":
sum_to_have_more_event_ligthed = np.sum(data_electronics_HG)
elif choice == "LG":
sum_to_have_more_event_ligthed = np.sum(data_electronics_LG)
elif choice == "TOT":
sum_to_have_more_event_ligthed = np.sum(data_electronics_tot)
if sum_to_have_more_event_ligthed >= dac:
ruche.Makeruche(choice,data_electronics_HG,data_electronics_LG,data_electronics_tot,folderResult) #draw pixels
pqr = 0
data_LG = [[0] * 144]
data_HG = [[0] * 144]
data_time = [[0] * 144]
duration = 0
start_time = time.time()
if _break:
_break = 0
break
line = file.read(4)
if line != b'':
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
line = file.read(4)
if line != b'':
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
line = file.read(4)
if line != b'':
out_hex = ['{:02X}'.format(b) for b in line]
out_hex.reverse()
line_out = ''.join(out_hex)
line_out_b = bin(int(line_out, 16))[2:].zfill(32)
Word_Id = line_out_b[0:4]
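# Per-channel statistics from the running sums: mean = sum(x) / (N - 1) and
# std = sqrt(sum(x^2) / (N - 1) - mean^2), i.e. the usual E[X^2] - E[X]^2
# identity; small negative values from round-off are clamped to zero below.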
for keys in sumX1_rate.keys():
if nbre_ampli_and_tot[keys] not in [0, 1]:
mean_rate[keys].append(sumX1_rate[keys] / (nbre_ampli_and_tot[keys] - 1))
a = (sumX2_rate[keys] / (nbre_ampli_and_tot[keys] - 1)) - mean_rate[keys][0] ** 2
if a > 0:
std_rate[keys].append(sqrt(a))
else:
std_rate[keys].append(0)
# std_rate[keys].append(1)
else:
mean_rate[keys].append(0)
std_rate[keys].append(0)
if nbre_trigger_rate != 0:
mean_trigger_rate.append(X1_trigger_rate / nbre_trigger_rate)
std_trigger_rate.append(sqrt((X2_trigger_rate / nbre_trigger_rate) - (X1_trigger_rate / nbre_trigger_rate) ** 2))
else:
mean_trigger_rate.append(0)
std_trigger_rate.append(0)
std_rate = dict((keys, np.mean(std_rate[keys])) for keys in std_rate.keys())
mean_rate = dict((keys,np.mean(mean_rate[keys])) for keys in mean_rate.keys())
mean_trigger_rate=np.mean(mean_trigger_rate)
std_trigger_rate=np.mean(std_trigger_rate)
list_rate_components = [mean_rate, std_rate, mean_trigger_rate, std_trigger_rate]
# print("[mean_rate ,std_rate,mean_trigger_rate,std_trigger_rate]===",[aux[74] for aux in list_rate_components[0:2]], mean_trigger_rate, std_trigger_rate)
list_mean_cosmicray_rate_HG.append(list_rate_components[0][0])
list_std_cosmicray_rate_HG.append(list_rate_components[1][0])
list_mean_cosmicray_rate_LG.append(list_rate_components[0][0])
list_std_cosmicray_rate_LG.append(list_rate_components[1][0])
list_mean_cosmicray_rate_tot.append(list_rate_components[0][0])
list_std_cosmicray_rate_tot.append(list_rate_components[1][0])
list_mean_trigger_rate_ampli.append(list_rate_components[2])
| |
3: {
'sid': 16063,
'sid_type': 'Prefix-SID',
'local_address': '10.169.196.241',
},
},
},
},
},
},
},
'attributes': {
'binding_sid': {
15000: {
'allocation_mode': 'explicit',
'state': 'programmed',
},
},
},
'forwarding_id': '65536',
'stats': {
'packets': 44,
'bytes': 1748,
},
'event_history': {
1: {
'timestamp': '08-29 14:51:29.074',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
2: {
'timestamp': '08-29 14:51:29.099',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
3: {
'timestamp': '08-29 14:51:29.114',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
4: {
'timestamp': '08-29 14:51:29.150',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
5: {
'timestamp': '08-29 14:51:29.199',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
6: {
'timestamp': '08-29 14:51:29.250',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
7: {
'timestamp': '08-29 14:51:29.592',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
8: {
'timestamp': '08-29 14:51:29.733',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
9: {
'timestamp': '08-29 14:51:29.873',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
10: {
'timestamp': '08-29 14:51:30.013',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
11: {
'timestamp': '08-29 14:51:30.162',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
12: {
'timestamp': '08-29 14:51:33.875',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
13: {
'timestamp': '08-29 14:51:33.879',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
14: {
'timestamp': '08-29 14:51:33.919',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
15: {
'timestamp': '08-29 14:51:33.978',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
16: {
'timestamp': '08-29 14:51:33.982',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
17: {
'timestamp': '08-29 14:51:34.724',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
18: {
'timestamp': '08-29 14:51:35.227',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
19: {
'timestamp': '09-03 13:06:47.604',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
20: {
'timestamp': '09-03 13:06:47.607',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
21: {
'timestamp': '09-09 20:15:36.537',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
22: {
'timestamp': '09-09 20:15:36.541',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
23: {
'timestamp': '09-09 20:15:36.545',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
24: {
'timestamp': '09-09 20:15:36.557',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
25: {
'timestamp': '09-09 20:15:36.571',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
26: {
'timestamp': '09-09 20:15:36.598',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
27: {
'timestamp': '09-09 20:15:36.614',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
28: {
'timestamp': '09-09 20:15:36.629',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
29: {
'timestamp': '09-09 20:15:36.641',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
30: {
'timestamp': '09-09 20:15:36.667',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
31: {
'timestamp': '09-09 20:15:36.698',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
32: {
'timestamp': '09-09 20:15:36.734',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
33: {
'timestamp': '09-09 20:15:36.764',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
34: {
'timestamp': '09-09 20:15:36.789',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
35: {
'timestamp': '09-09 20:15:36.800',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
36: {
'timestamp': '09-09 20:15:36.823',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
37: {
'timestamp': '09-09 20:16:23.743',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
38: {
'timestamp': '09-09 20:16:23.745',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
39: {
'timestamp': '09-09 20:19:30.199',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
40: {
'timestamp': '09-09 20:19:30.205',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
41: {
'timestamp': '09-09 20:50:52.378',
'client': 'CLI AGENT',
'event_type': 'Set colour',
'context': 'Colour: 200',
},
42: {
'timestamp': '09-09 20:52:04.236',
'client': 'CLI AGENT',
'event_type': 'Policy ADMIN DOWN',
'context': 'shutdown: test1',
},
43: {
'timestamp': '09-09 20:59:06.432',
'client': 'CLI AGENT',
'event_type': 'Policy state DOWN',
'context': 'no shutdown: test1',
},
44: {
'timestamp': '09-09 20:59:06.434',
'client': 'FH Resolution',
'event_type': 'Policy state UP',
'context': 'Status: PATH RESOLVED',
},
45: {
'timestamp': '09-09 20:59:06.442',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
46: {
'timestamp': '09-09 21:17:36.909',
'client': 'CLI AGENT',
'event_type': 'Set colour',
'context': 'Colour: 100',
},
47: {
'timestamp': '09-09 21:18:39.057',
'client': 'CLI AGENT',
'event_type': 'Policy ADMIN DOWN',
'context': 'shutdown: test1',
},
48: {
'timestamp': '09-09 21:25:58.931',
'client': 'CLI AGENT',
'event_type': 'Policy state DOWN',
'context': 'no shutdown: test1',
},
49: {
'timestamp': '09-09 21:25:58.933',
'client': 'FH Resolution',
'event_type': 'Policy state UP',
'context': 'Status: PATH RESOLVED',
},
50: {
'timestamp': '09-09 21:25:58.945',
'client': 'FH Resolution',
'event_type': 'REOPT triggered',
'context': 'Status: REOPTIMIZED',
},
},
},
}
golden_output = {'execute.return_value': '''
sr_ve-hkgasr01#show segment-routing traffic-eng policy all detail
Name: maxsid (Color: 100 End-point: 10.169.196.241)
Status:
Admin: up, Operational: up for 04:54:31 (since 09-09 20:19:30.195)
Candidate-paths:
Preference 200:
Explicit: segment-list maxsid (active)
Weight: 0, Metric Type: TE
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16071 [Prefix-SID, 10.189.5.252]
16072 [Prefix-SID, 10.189.5.253]
16063 [Prefix-SID, 10.169.196.241]
Preference 100:
Explicit: segment-list test1 (inactive)
Weight: 0, Metric Type: TE
16072 [Prefix-SID, 10.189.5.253]
16052 [Prefix-SID, 10.169.14.241]
16062 [Prefix-SID, 10.34.2.251]
16063 [Prefix-SID, 10.169.196.241]
Attributes:
Binding SID: 15001
Allocation mode: explicit
State: Programmed
Forwarding-ID: 65537 (0x1C)
Stats:
Packets: 1878 Bytes: 295606
Event history:
Timestamp Client Event type Context: Value
--------- ------ ---------- -------: -----
09-09 20:15:58.969 CLI AGENT Policy created Name: maxsid
09-09 20:16:09.573 CLI AGENT Set colour Colour: 100
09-09 20:16:09.573 CLI AGENT Set end point End-point: 10.169.196.241
09-09 20:16:23.728 CLI AGENT Set explicit path Path option: maxsid
09-09 20:19:30.195 FH Resolution Policy state UP Status: PATH RESOLVED
09-09 20:19:30.202 FH Resolution REOPT triggered Status: REOPTIMIZED
09-09 20:56:19.877 FH Resolution REOPT triggered Status: REOPTIMIZED
09-09 20:57:51.007 CLI AGENT Set binding SID BSID: Binding SID set
09-09 21:15:51.840 CLI AGENT Set explicit path Path option: test1
09-09 21:19:04.452 CLI AGENT Set explicit path Path option: test1
09-09 21:19:04.454 FH Resolution Policy state UP Status: PATH RESOLVED
09-09 21:19:04.458 FH Resolution REOPT triggered Status: REOPTIMIZED
09-09 21:20:20.811 CLI AGENT Remove path option Path option: 300
09-09 21:20:20.812 FH Resolution Policy state UP Status: PATH RESOLVED
Name: test1 (Color: 100 End-point: 10.169.196.241)
Status:
Admin: up, Operational: up for 03:48:03 (since 09-09 21:25:58.933)
Candidate-paths:
Preference 400:
Dynamic (pce) (inactive)
Weight: 0, Metric Type: TE
Preference 300:
Dynamic (active)
Weight: 0, Metric Type: TE
Metric Type: TE, Path Accumulated Metric: 2115
16052 [Prefix-SID, 10.169.14.241]
24 [Adjacency-SID, 10.169.14.33 - 10.169.14.34]
16063 [Prefix-SID, 10.169.196.241]
Preference 200:
Explicit: segment-list test1 (inactive)
Weight: 0, Metric Type: TE
16072 [Prefix-SID, 10.189.5.253]
16052 [Prefix-SID, 10.169.14.241]
16062 [Prefix-SID, 10.34.2.251]
16063 [Prefix-SID, 10.169.196.241]
Preference 100:
Dynamic (inactive)
Weight: 0, Metric Type: TE
Metric Type: TE, Path Accumulated Metric: 2115
16052 [Prefix-SID, 10.169.14.241]
24 [Adjacency-SID, 10.169.14.33 - 10.169.14.34]
16063 [Prefix-SID, 10.169.196.241]
Attributes:
Binding SID: 15000
Allocation mode: explicit
State: Programmed
Forwarding-ID: 65536 (0x18)
Stats:
Packets: 44 Bytes: 1748
Event history:
Timestamp Client Event type Context: Value
--------- ------ ---------- -------: -----
08-29 14:51:29.074 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.099 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.114 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.150 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.199 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.250 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.592 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.733 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:29.873 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:30.013 FH Resolution REOPT triggered Status: REOPTIMIZED
08-29 14:51:30.162 FH Resolution REOPT triggered Status: REOPTIMIZED
contract where each key is a
``string`` of the year, such as '2017' and each value is a
``dictionary`` with the ``string`` key-value pairs of the player's age,
team name, and salary.
"""
return self._contract
@_int_property_decorator
def games(self):
"""
Returns an ``int`` of the number of games the player participated in.
"""
return self._games
@_int_property_decorator
def games_started(self):
"""
Returns an ``int`` of the number of games the player started.
"""
return self._games_started
@_int_property_decorator
def plate_appearances(self):
"""
Returns an ``int`` of the number of plate appearances the player had.
"""
return self._plate_appearances
@_int_property_decorator
def at_bats(self):
"""
Returns an ``int`` of the number of at bats the player had.
"""
return self._at_bats
@_int_property_decorator
def runs(self):
"""
Returns an ``int`` of the number of runs the player scored.
"""
return self._runs
@_int_property_decorator
def hits(self):
"""
Returns an ``int`` of the number of hits the player had.
"""
return self._hits
@_int_property_decorator
def doubles(self):
"""
Returns an ``int`` of the number of doubles the player hit.
"""
return self._doubles
@_int_property_decorator
def triples(self):
"""
Returns an ``int`` of the number of triples the player hit.
"""
return self._triples
@_int_property_decorator
def home_runs(self):
"""
Returns an ``int`` of the number of home runs the player hit.
"""
return self._home_runs
@_int_property_decorator
def runs_batted_in(self):
"""
Returns an ``int`` of the number of runs batted in the player
registered.
"""
return self._runs_batted_in
@_int_property_decorator
def stolen_bases(self):
"""
Returns an ``int`` of the number of bases the player has stolen.
"""
return self._stolen_bases
@_int_property_decorator
def times_caught_stealing(self):
"""
Returns an ``int`` of the number of times the player was caught
stealing.
"""
return self._times_caught_stealing
@_int_property_decorator
def bases_on_balls(self):
"""
Returns an ``int`` of the number of bases the player registered as a
result of balls.
"""
return self._bases_on_balls
@_int_property_decorator
def times_struck_out(self):
"""
Returns an ``int`` of the number of times the player was struck out.
"""
return self._times_struck_out
@_float_property_decorator
def batting_average(self):
"""
Returns a ``float`` of the batting average for the player.
"""
return self._batting_average
@_float_property_decorator
def on_base_percentage(self):
"""
Returns a ``float`` of the percentage of at bats that result in the
batter getting on base.
"""
return self._on_base_percentage
@_float_property_decorator
def slugging_percentage(self):
"""
Returns a ``float`` of the slugging percentage for the player based
on the number of bases gained per at-bat with bigger plays getting more
weight.
"""
return self._slugging_percentage
@_float_property_decorator
def on_base_plus_slugging_percentage(self):
"""
Returns a ``float`` of the on base percentage plus the slugging
percentage. Percentage ranges from 0-1.
"""
return self._on_base_plus_slugging_percentage
@_int_property_decorator
def on_base_plus_slugging_percentage_plus(self):
"""
Returns an ``int`` of the on base percentage plus the slugging
percentage, adjusted to the player's ballpark.
"""
return self._on_base_plus_slugging_percentage_plus
@_int_property_decorator
def total_bases(self):
"""
Returns an ``int`` of the number of bases the player has gained.
"""
return self._total_bases
@_int_property_decorator
def grounded_into_double_plays(self):
"""
Returns an ``int`` of the number of double plays the player grounded
into.
"""
return self._grounded_into_double_plays
@_int_property_decorator
def times_hit_by_pitch(self):
"""
Returns an ``int`` of the number of times the player has been hit by a
pitch.
"""
return self._times_hit_by_pitch
@_int_property_decorator
def sacrifice_hits(self):
"""
        Returns an ``int`` of the number of sacrifice hits or sacrifice bunts
the player made.
"""
return self._sacrifice_hits
@_int_property_decorator
def sacrifice_flies(self):
"""
Returns an ``int`` of the number of sacrifice flies the player hit.
"""
return self._sacrifice_flies
@_int_property_decorator
def intentional_bases_on_balls(self):
"""
Returns an ``int`` of the number of times the player has been
intentionally walked by the opposition.
"""
return self._intentional_bases_on_balls
@_int_property_decorator
def complete_games(self):
"""
Returns an ``int`` of the number of complete games the player has
participated in.
"""
return self._complete_games
@_float_property_decorator
def innings_played(self):
"""
Returns a ``float`` of the total number of innings the player has
played in.
"""
return self._innings_played
@_int_property_decorator
def defensive_chances(self):
"""
Returns an ``int`` of the number of defensive chances (equal to the
number of putouts + assists + errors) the player had.
"""
return self._defensive_chances
@_int_property_decorator
def putouts(self):
"""
Returns an ``int`` of the number of putouts the player had.
"""
return self._putouts
@_int_property_decorator
def assists(self):
"""
Returns an ``int`` of the number of assists the player had.
"""
return self._assists
@_int_property_decorator
def errors(self):
"""
Returns an ``int`` of the number of errors the player made.
"""
return self._errors
@_int_property_decorator
def double_plays_turned(self):
"""
Returns an ``int`` of the number of double plays the player was
involved in.
"""
return self._double_plays_turned
@_float_property_decorator
def fielding_percentage(self):
"""
        Returns a ``float`` of the player's fielding percentage, equivalent to
(putouts + assists) / (putouts + assists + errors). Percentage ranges
from 0-1.
"""
return self._fielding_percentage
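    # Worked example of the formula above (illustrative numbers, not taken from
    # any real player): 100 putouts, 50 assists and 5 errors give a fielding
    # percentage of (100 + 50) / (100 + 50 + 5) = 150 / 155, roughly 0.968.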
@_int_property_decorator
def total_fielding_runs_above_average(self):
"""
Returns an ``int`` of the number of runs the player was worth compared
to an average player.
"""
return self._total_fielding_runs_above_average
@_int_property_decorator
def defensive_runs_saved_above_average(self):
"""
Returns an ``int`` of the number of defensive runs the player saved
compared to an average player.
"""
return self._defensive_runs_saved_above_average
@_int_property_decorator
def total_fielding_runs_above_average_per_innings(self):
"""
Returns an ``int`` of the number of runs the player was worth per 1,200
innings compared to an average player.
"""
return self._total_fielding_runs_above_average_per_innings
@_int_property_decorator
def defensive_runs_saved_above_average_per_innings(self):
"""
Returns an ``int`` of the number of defensive runs the player was worth
per 1,200 innings compared to an average player.
"""
return self._defensive_runs_saved_above_average_per_innings
@_float_property_decorator
def range_factor_per_nine_innings(self):
"""
        Returns a ``float`` of the player's range factor per nine innings, equal
to 9 * (putouts + assists) / innings_played.
"""
return self._range_factor_per_nine_innings
@_float_property_decorator
def range_factor_per_game(self):
"""
        Returns a ``float`` of the player's range factor per game, equal to
        (putouts + assists) / games_played.
"""
return self._range_factor_per_game
@_float_property_decorator
def league_fielding_percentage(self):
"""
Returns a ``float`` of the average fielding percentage for the league
at the player's position. Percentage ranges from 0-1.
"""
return self._league_fielding_percentage
@_float_property_decorator
def league_range_factor_per_nine_innings(self):
"""
Returns a ``float`` of the average range factor for the league per nine
innings, equal to 9 * (putouts + assists) / innings_played.
"""
return self._league_range_factor_per_nine_innings
@_float_property_decorator
def league_range_factor_per_game(self):
"""
Returns a ``float`` of the average range factor for the league per
game, equal to (putouts + assists) / games_played.
"""
return self._league_range_factor_per_game
@_int_property_decorator
def games_in_batting_order(self):
"""
Returns an ``int`` of the number of games the player was in the batting
lineup.
"""
return self._games_in_batting_order
@_int_property_decorator
def games_in_defensive_lineup(self):
"""
Returns an ``int`` of the number of games the player was in the
defensive lineup.
"""
return self._games_in_defensive_lineup
@_int_property_decorator
def games_pitcher(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a pitcher.
"""
return self._games_pitcher
@_int_property_decorator
def games_catcher(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a catcher.
"""
return self._games_catcher
@_int_property_decorator
def games_first_baseman(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a first baseman.
"""
return self._games_first_baseman
@_int_property_decorator
def games_second_baseman(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a second baseman.
"""
return self._games_second_baseman
@_int_property_decorator
def games_third_baseman(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a third baseman.
"""
return self._games_third_baseman
@_int_property_decorator
def games_shortstop(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a shortstop.
"""
return self._games_shortstop
@_int_property_decorator
def games_left_fielder(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a left fielder.
"""
return self._games_left_fielder
@_int_property_decorator
def games_center_fielder(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a center fielder.
"""
return self._games_center_fielder
@_int_property_decorator
def games_right_fielder(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a right fielder.
"""
return self._games_right_fielder
@_int_property_decorator
def games_outfielder(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as an outfielder.
"""
return self._games_outfielder
@_int_property_decorator
def games_designated_hitter(self):
"""
Returns an ``int`` of the number of games the player was in the lineup
as a designated hitter.
"""
return self._games_designated_hitter
| |
import sys
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as F
from skimage.util import random_noise
from dataloading.camera import Camera
class NvidiaResizeAndCrop(object):
def __call__(self, data):
xmin = 186
ymin = 600
scale = 6.0
width = 258
height = 66
scaled_width = int(width * scale)
scaled_height = int(height * scale)
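        # torchvision's resized_crop(img, top, left, height, width, size): crop a
        # scaled_height x scaled_width window whose top-left corner is (ymin, xmin),
        # then resize it down to (height, width).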
cropped = transforms.functional.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class NvidiaCropWide(object):
def __init__(self, x_delta=0):
self.x_delta = x_delta
def __call__(self, data):
xmin = 300
xmax = 1620
ymin = 520
ymax = 864
scale = 0.2
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin + self.x_delta, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class CropViT(object):
def __call__(self, data):
xmin = 540
xmax = 1260
ymin = 244
ymax = 964
scale = 0.312
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class NvidiaSideCameraZoom(object):
def __init__(self, zoom_ratio):
self.zoom_ratio = zoom_ratio
def __call__(self, data):
width = 1920
height = 1208
xmin = int(self.zoom_ratio * width)
ymin = int(self.zoom_ratio * height)
scaled_width = width - (2 * xmin)
scaled_height = height - (2 * ymin)
cropped = F.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class AugmentationConfig:
def __init__(self, color_prob=0.0, noise_prob=0.0, blur_prob=0.0):
self.color_prob = color_prob
self.noise_prob = noise_prob
self.blur_prob = blur_prob
class AugmentImage:
def __init__(self, augment_config):
print(f"augmentation: color_prob={augment_config.color_prob}, "
f"noise_prob={augment_config.noise_prob}, "
f"blur_prob={augment_config.blur_prob}")
self.augment_config = augment_config
def __call__(self, data):
if np.random.random() <= self.augment_config.color_prob:
jitter = transforms.ColorJitter(contrast=0.5, saturation=0.5, brightness=0.5)
data["image"] = jitter(data["image"])
if np.random.random() <= self.augment_config.noise_prob:
if np.random.random() > 0.5:
data["image"] = torch.tensor(random_noise(data["image"], mode='gaussian', mean=0, var=0.005, clip=True),
dtype=torch.float)
else:
data["image"] = torch.tensor(random_noise(data["image"], mode='salt', amount=0.005),
dtype=torch.float)
if np.random.random() <= self.augment_config.blur_prob:
blurrer = transforms.GaussianBlur(kernel_size=(3, 3), sigma=(0.3, 1))
data["image"] = blurrer(data['image'])
return data
class Normalize(object):
def __call__(self, data, transform=None):
# normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
image = data["image"]
image = image / 255
# data["image"] = normalize(image)
data["image"] = image
return data
class NvidiaDataset(Dataset):
#CAP_WAYPOINTS = 30
def __init__(self, dataset_paths, transform=None, camera="front_wide", name="Nvidia dataset",
filter_turns=False, output_modality="steering_angle", n_branches=1, n_waypoints=6,
metadata_file="nvidia_frames.csv", color_space="rgb", side_cameras_weight=0.33):
self.name = name
self.metadata_file = metadata_file
self.color_space = color_space
self.dataset_paths = dataset_paths
if transform:
self.transform = transform
else:
self.transform = transforms.Compose([Normalize()])
self.camera_name = camera
self.output_modality = output_modality
self.n_waypoints = n_waypoints
self.side_cameras_weight = side_cameras_weight
if self.output_modality == "waypoints":
self.target_size = 2 * self.n_waypoints
elif self.output_modality == "steering_angle":
self.target_size = 1
else:
print(f"Unknown output modality {self.output_modality}")
sys.exit()
self.n_branches = n_branches
if camera == 'all':
datasets = [self.read_dataset(dataset_path, "left") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "right") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "front_wide") for dataset_path in dataset_paths]
else:
datasets = [self.read_dataset(dataset_path, camera) for dataset_path in dataset_paths]
self.frames = pd.concat(datasets)
if filter_turns:
print("Filtering turns with blinker signal")
self.frames = self.frames[self.frames.turn_signal == 1]
def __getitem__(self, idx):
frame = self.frames.iloc[idx]
if self.color_space == "rgb":
image = torchvision.io.read_image(frame["image_path"])
elif self.color_space == "bgr":
image = cv2.imread(frame["image_path"])
image = torch.tensor(image, dtype=torch.uint8).permute(2, 0, 1)
else:
print(f"Unknown color space: ", self.color_space)
sys.exit()
# TODO replace if-else with map
if self.camera_name == Camera.LEFT.value:
steering_angle = np.array(frame["steering_angle_left"])
elif self.camera_name == Camera.RIGHT.value:
steering_angle = np.array(frame["steering_angle_right"])
else:
steering_angle = np.array(frame["steering_angle"])
data = {
'image': image,
'steering_angle': steering_angle,
'vehicle_speed': np.array(frame["vehicle_speed"]),
'autonomous': np.array(frame["autonomous"]),
'position_x': np.array(frame["position_x"]),
'position_y': np.array(frame["position_y"]),
'yaw': np.array(frame["yaw"]),
'turn_signal': np.array(frame["turn_signal"]),
'row_id': np.array(frame["row_id"]),
}
turn_signal = int(frame["turn_signal"])
if self.output_modality == "waypoints":
waypoints = []
for i in np.arange(1, self.n_waypoints + 1):
waypoints.append(frame[f"wp{i}_{self.camera_name}_x"])
waypoints.append(frame[f"wp{i}_{self.camera_name}_y"])
data['waypoints'] = np.array(waypoints)
target_values = waypoints
else:
target_values = frame["steering_angle"]
if self.transform:
data = self.transform(data)
if self.n_branches > 1:
target = np.zeros((self.n_branches, self.target_size))
target[turn_signal, :] = target_values
conditional_mask = np.zeros((self.n_branches, self.target_size))
conditional_mask[turn_signal, :] = 1
else:
target = np.zeros((self.n_branches, self.target_size))
target[0, :] = target_values
conditional_mask = np.ones((self.n_branches, self.target_size))
return data, target.reshape(-1), conditional_mask.reshape(-1)
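    # Shape sketch (inferred from __getitem__ above): with n_branches == 3 and a
    # scalar steering-angle target, a frame whose turn_signal is 0 yields
    # target == [angle, 0, 0] and conditional_mask == [1, 0, 0], both returned
    # flattened; with n_branches == 1 the mask is all ones.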
def __len__(self):
return len(self.frames.index)
def get_waypoints(self):
wp_x_cols = [f"wp{i}_{self.camera_name}_x" for i in np.arange(1, self.n_waypoints + 1)]
wp_y_cols = [f"wp{i}_{self.camera_name}_y" for i in np.arange(1, self.n_waypoints + 1)]
waypoint_cols = np.column_stack((wp_x_cols, wp_y_cols)).reshape(-1)
return self.frames[waypoint_cols].to_numpy()
def read_dataset(self, dataset_path, camera):
if type(dataset_path) is dict:
frames_df = pd.read_csv(dataset_path['path'] / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df = frames_df.iloc[dataset_path['start']:dataset_path['end']]
dataset_path = dataset_path['path']
else:
frames_df = pd.read_csv(dataset_path / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df["row_id"] = frames_df.index
# temp hack
if "autonomous" not in frames_df.columns:
frames_df["autonomous"] = False
# frames_df["autonomous"] = False
frames_df = frames_df[frames_df['steering_angle'].notna()] # TODO: one steering angle is NaN, why?
if camera != Camera.FRONT_WIDE.value:
frames_df = frames_df[frames_df['steering_angle_left'].notna()]
frames_df = frames_df[frames_df['steering_angle_right'].notna()]
frames_df = frames_df[frames_df['vehicle_speed'].notna()]
frames_df = frames_df[frames_df[f'{camera}_filename'].notna()]
frames_df["turn_signal"].fillna(1, inplace=True)
frames_df["turn_signal"] = frames_df["turn_signal"].astype(int)
        # Remove frames marked as skipped
frames_df = frames_df[frames_df["turn_signal"] != -1] # TODO: remove magic values.
if self.output_modality == "waypoints":
frames_df = frames_df[frames_df[f"position_x"].notna()]
frames_df = frames_df[frames_df[f"position_y"].notna()]
for i in np.arange(1, self.n_waypoints + 1):
frames_df = frames_df[frames_df[f"wp{i}_{camera}_x"].notna()]
frames_df = frames_df[frames_df[f"wp{i}_{camera}_y"].notna()]
frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# if self.calculate_waypoints:
#
# vehicle_x = frames_df["position_x"]
# vehicle_y = frames_df["position_y"]
#
# for i in np.arange(1, self.N_WAYPOINTS + 1):
# wp_global_x = frames_df["position_x"].shift(-i * self.CAP_WAYPOINTS)
# wp_global_y = frames_df["position_y"].shift(-i * self.CAP_WAYPOINTS)
# frames_df[f"x_{i}"] = wp_global_x
# frames_df[f"y_{i}"] = wp_global_y
# yaw = frames_df["yaw"]
# #frames_df["yaw"] = yaw
#
# wp_local_x = (wp_global_x - vehicle_x) * np.cos(yaw) + (wp_global_y - vehicle_y) * np.sin(yaw)
# wp_local_y = -(wp_global_x - vehicle_x) * np.sin(yaw) + (wp_global_y - vehicle_y) * np.cos(yaw)
# frames_df[f"x_{i}_offset"] = wp_local_x
# frames_df[f"y_{i}_offset"] = wp_local_y
#
# # Remove rows without trajectory offsets, should be last N_WAYPOINTS rows
# frames_df = frames_df[frames_df[f"x_{i}_offset"].notna()]
#
# # frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
# # frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# #
# # frames_df["x_1_delta"] = frames_df["x_1_offset"] - frames_df["x_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["x_1_delta"]) < 0.1]
# #
# # frames_df["y_1_delta"] = frames_df["y_1_offset"] - frames_df["y_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["y_1_delta"]) < 0.1]
#
# # frames_df = frames_df[np.abs(frames_df["steering_angle"]) < 2.0]
len_after_filtering = len(frames_df)
camera_images = frames_df[f"{camera}_filename"].to_numpy()
frames_df["image_path"] = [str(dataset_path / image_path) for image_path in camera_images]
if self.output_modality == "waypoints":
for i in np.arange(1, self.n_waypoints + 1):
frames_df[f"wp{i}_all_x"] = frames_df[f"wp{i}_{camera}_x"]
frames_df[f"wp{i}_all_y"] = frames_df[f"wp{i}_{camera}_y"]
frames_df["camera_type"] = camera
print(f"{dataset_path}: lenght={len(frames_df)}, filtered={len_before_filtering-len_after_filtering}")
frames_df.reset_index(inplace=True)
return frames_df
def steering_angles_degrees(self):
return self.frames.steering_angle.to_numpy() / np.pi * 180
class NvidiaTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6,
camera="front_wide", augment_conf=AugmentationConfig(), metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-20-12-36-10_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-43-17_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-51-29_e2e_sulaoja_20_30",
root_path / "2021-05-20-13-44-06_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-51-21_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-59-00_e2e_sulaoja_10_10",
root_path / "2021-05-28-15-07-56_e2e_sulaoja_20_30",
root_path / "2021-05-28-15-17-19_e2e_sulaoja_20_30",
{'path': root_path / "2021-06-09-13-14-51_e2e_rec_ss2", 'start': 125, 'end': 49725},
{'path': root_path / "2021-06-09-13-55-03_e2e_rec_ss2_backwards", 'start': 150, 'end': 53625},
{'path': root_path / "2021-06-09-14-58-11_e2e_rec_ss3", 'start': 175, 'end': 43775},
{'path': root_path / "2021-06-09-15-42-05_e2e_rec_ss3_backwards", 'start': 100, 'end': 40625},
root_path / "2021-06-09-16-24-59_e2e_rec_ss13",
root_path / "2021-06-09-16-50-22_e2e_rec_ss13_backwards",
root_path / "2021-06-10-12-59-59_e2e_ss4",
root_path / "2021-06-10-13-19-22_e2e_ss4_backwards",
root_path / "2021-06-10-13-51-34_e2e_ss12",
root_path / "2021-06-10-14-02-24_e2e_ss12_backwards",
root_path / "2021-06-10-14-44-24_e2e_ss3_backwards",
root_path / "2021-06-10-15-03-16_e2e_ss3_backwards",
root_path / "2021-06-14-11-08-19_e2e_rec_ss14",
root_path / "2021-06-14-11-22-05_e2e_rec_ss14",
root_path / "2021-06-14-11-43-48_e2e_rec_ss14_backwards",
{'path': root_path / "2021-09-24-11-19-25_e2e_rec_ss10", 'start': 400, 'end': 34550},
{'path': root_path / "2021-09-24-11-40-24_e2e_rec_ss10_2", 'start': 150, 'end': 16000},
{'path': root_path / "2021-09-24-12-02-32_e2e_rec_ss10_3", 'start': 350, 'end': 8050},
root_path / "2021-09-24-12-21-20_e2e_rec_ss10_backwards",
root_path / "2021-09-24-13-39-38_e2e_rec_ss11",
{'path': root_path / "2021-09-30-13-57-00_e2e_rec_ss14", 'start': 100, 'end': 3200},
root_path / "2021-09-30-15-03-37_e2e_ss14_from_half_way",
root_path / "2021-09-30-15-20-14_e2e_ss14_backwards",
{'path': root_path / "2021-09-30-15-56-59_e2e_ss14_attempt_2", 'start': 80, 'end': 54600},
root_path / "2021-10-07-11-05-13_e2e_rec_ss3",
root_path / "2021-10-07-11-44-52_e2e_rec_ss3_backwards",
root_path / "2021-10-07-12-54-17_e2e_rec_ss4",
root_path / "2021-10-07-13-22-35_e2e_rec_ss4_backwards",
root_path / "2021-10-11-16-06-44_e2e_rec_ss2",
root_path / "2021-10-11-17-10-23_e2e_rec_last_part",
root_path / "2021-10-11-17-14-40_e2e_rec_backwards",
root_path / "2021-10-11-17-20-12_e2e_rec_backwards",
root_path / "2021-10-20-14-55-47_e2e_rec_vastse_ss13_17",
root_path / "2021-10-20-13-57-51_e2e_rec_neeruti_ss19_22",
root_path / "2021-10-20-14-15-07_e2e_rec_neeruti_ss19_22_back",
root_path / "2021-10-25-17-31-48_e2e_rec_ss2_arula",
root_path / "2021-10-25-17-06-34_e2e_rec_ss2_arula_back"
# '2021-11-08-11-24-44_e2e_rec_ss12_raanitsa.bag' \
# '2021-11-08-12-08-40_e2e_rec_ss12_raanitsa_backward.bag' \
]
tr = transforms.Compose([AugmentImage(augment_config=augment_conf), Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaValidationDataset(NvidiaDataset):
# todo: remove default parameters
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6, camera="front_wide",
metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-28-15-19-48_e2e_sulaoja_20_30",
root_path / "2021-06-07-14-20-07_e2e_rec_ss6",
root_path / "2021-06-07-14-06-31_e2e_rec_ss6",
root_path / "2021-06-07-14-09-18_e2e_rec_ss6",
root_path / "2021-06-07-14-36-16_e2e_rec_ss6",
root_path / "2021-09-24-14-03-45_e2e_rec_ss11_backwards",
root_path / "2021-10-26-10-49-06_e2e_rec_ss20_elva",
root_path / "2021-10-26-11-08-59_e2e_rec_ss20_elva_back",
root_path / "2021-10-20-15-11-29_e2e_rec_vastse_ss13_17_back",
{'path': root_path / "2021-10-11-14-50-59_e2e_rec_vahi", 'start': 100, 'end': 15000},
{'path': root_path / "2021-10-14-13-08-51_e2e_rec_vahi_backwards", 'start': 80, 'end': 13420}
]
tr = transforms.Compose([Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaWinterTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle",
n_branches=3, n_waypoints=6, augment_conf=AugmentationConfig()):
train_paths = [
| |
pulumi.get(self, "names")
@pulumi.output_type
class AccessPolicyRequireSaml(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "attributeName":
suggest = "attribute_name"
elif key == "attributeValue":
suggest = "attribute_value"
elif key == "identityProviderId":
suggest = "identity_provider_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessPolicyRequireSaml. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessPolicyRequireSaml.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessPolicyRequireSaml.__key_warning(key)
return super().get(key, default)
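    # Note: the __key_warning/__getitem__/get trio above is generated boilerplate
    # shared by the output types in this module; camelCase keys coming from the
    # provider (e.g. "attributeName") are redirected to the snake_case property
    # getters defined below, with a warning when the raw key is accessed directly.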
def __init__(__self__, *,
attribute_name: Optional[str] = None,
attribute_value: Optional[str] = None,
identity_provider_id: Optional[str] = None):
if attribute_name is not None:
pulumi.set(__self__, "attribute_name", attribute_name)
if attribute_value is not None:
pulumi.set(__self__, "attribute_value", attribute_value)
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
@property
@pulumi.getter(name="attributeName")
def attribute_name(self) -> Optional[str]:
return pulumi.get(self, "attribute_name")
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> Optional[str]:
return pulumi.get(self, "attribute_value")
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[str]:
return pulumi.get(self, "identity_provider_id")
@pulumi.output_type
class AccessRuleConfiguration(dict):
def __init__(__self__, *,
target: str,
value: str):
"""
:param str target: The request property to target. Allowed values: "ip", "ip6", "ip_range", "asn", "country"
:param str value: The value to target. Depends on target's type.
"""
pulumi.set(__self__, "target", target)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def target(self) -> str:
"""
The request property to target. Allowed values: "ip", "ip6", "ip_range", "asn", "country"
"""
return pulumi.get(self, "target")
@property
@pulumi.getter
def value(self) -> str:
"""
The value to target. Depends on target's type.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ApiTokenCondition(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "requestIp":
suggest = "request_ip"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiTokenCondition. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiTokenCondition.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiTokenCondition.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
request_ip: Optional['outputs.ApiTokenConditionRequestIp'] = None):
"""
:param 'ApiTokenConditionRequestIpArgs' request_ip: Request IP related conditions. See the definition below.
"""
if request_ip is not None:
pulumi.set(__self__, "request_ip", request_ip)
@property
@pulumi.getter(name="requestIp")
def request_ip(self) -> Optional['outputs.ApiTokenConditionRequestIp']:
"""
Request IP related conditions. See the definition below.
"""
return pulumi.get(self, "request_ip")
@pulumi.output_type
class ApiTokenConditionRequestIp(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "notIns":
suggest = "not_ins"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiTokenConditionRequestIp. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiTokenConditionRequestIp.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiTokenConditionRequestIp.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ins: Optional[Sequence[str]] = None,
not_ins: Optional[Sequence[str]] = None):
"""
:param Sequence[str] ins: List of IPv4/IPv6 CIDR addresses where
the Token can be used from.
:param Sequence[str] not_ins: List of IPv4/IPv6 CIDR addresses where
the Token cannot be used from.
"""
if ins is not None:
pulumi.set(__self__, "ins", ins)
if not_ins is not None:
pulumi.set(__self__, "not_ins", not_ins)
@property
@pulumi.getter
def ins(self) -> Optional[Sequence[str]]:
"""
List of IPv4/IPv6 CIDR addresses where
the Token can be used from.
"""
return pulumi.get(self, "ins")
@property
@pulumi.getter(name="notIns")
def not_ins(self) -> Optional[Sequence[str]]:
"""
List of IPv4/IPv6 CIDR addresses where
the Token cannot be used from.
"""
return pulumi.get(self, "not_ins")
@pulumi.output_type
class ApiTokenPolicy(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "permissionGroups":
suggest = "permission_groups"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApiTokenPolicy. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApiTokenPolicy.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApiTokenPolicy.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
permission_groups: Sequence[str],
resources: Mapping[str, str],
effect: Optional[str] = None):
"""
:param Sequence[str] permission_groups: List of permissions groups
ids ([see official docs](https://developers.cloudflare.com/api/tokens/create/permissions)).
:param Mapping[str, str] resources: Map describes what operations against which resources
are allowed or denied.
:param str effect: Policy effect. Valid values are `allow` or `deny`. `allow`
is set as default.
"""
pulumi.set(__self__, "permission_groups", permission_groups)
pulumi.set(__self__, "resources", resources)
if effect is not None:
pulumi.set(__self__, "effect", effect)
@property
@pulumi.getter(name="permissionGroups")
def permission_groups(self) -> Sequence[str]:
"""
List of permissions groups
ids ([see official docs](https://developers.cloudflare.com/api/tokens/create/permissions)).
"""
return pulumi.get(self, "permission_groups")
@property
@pulumi.getter
def resources(self) -> Mapping[str, str]:
"""
Map describes what operations against which resources
are allowed or denied.
"""
return pulumi.get(self, "resources")
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Policy effect. Valid values are `allow` or `deny`. `allow`
is set as default.
"""
return pulumi.get(self, "effect")
@pulumi.output_type
class CustomHostnameSsl(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateAuthority":
suggest = "certificate_authority"
elif key == "cnameName":
suggest = "cname_name"
elif key == "cnameTarget":
suggest = "cname_target"
elif key == "customCertificate":
suggest = "custom_certificate"
elif key == "customKey":
suggest = "custom_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CustomHostnameSsl. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CustomHostnameSsl.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CustomHostnameSsl.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_authority: Optional[str] = None,
cname_name: Optional[str] = None,
cname_target: Optional[str] = None,
custom_certificate: Optional[str] = None,
custom_key: Optional[str] = None,
method: Optional[str] = None,
settings: Optional[Sequence['outputs.CustomHostnameSslSetting']] = None,
status: Optional[str] = None,
type: Optional[str] = None,
wildcard: Optional[bool] = None):
"""
:param str custom_certificate: If a custom uploaded certificate is used.
:param str custom_key: The key for a custom uploaded certificate.
:param str method: Domain control validation (DCV) method used for this
hostname. Valid values are `"txt"`, `"http"` and `"email"`.
:param Sequence['CustomHostnameSslSettingArgs'] settings: SSL/TLS settings for the certificate. See further notes below.
:param str type: Level of validation to be used for this hostname. Domain validation ("dv") must be used.
:param bool wildcard: Indicates whether the certificate covers a wildcard.
"""
if certificate_authority is not None:
pulumi.set(__self__, "certificate_authority", certificate_authority)
if cname_name is not None:
pulumi.set(__self__, "cname_name", cname_name)
if cname_target is not None:
pulumi.set(__self__, "cname_target", cname_target)
if custom_certificate is not None:
pulumi.set(__self__, "custom_certificate", custom_certificate)
if custom_key is not None:
pulumi.set(__self__, "custom_key", custom_key)
if method is not None:
pulumi.set(__self__, "method", method)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if status is not None:
pulumi.set(__self__, "status", status)
if type is not None:
pulumi.set(__self__, "type", type)
if wildcard is not None:
pulumi.set(__self__, "wildcard", wildcard)
@property
@pulumi.getter(name="certificateAuthority")
def certificate_authority(self) -> Optional[str]:
return pulumi.get(self, "certificate_authority")
@property
@pulumi.getter(name="cnameName")
def cname_name(self) -> Optional[str]:
return pulumi.get(self, "cname_name")
@property
@pulumi.getter(name="cnameTarget")
def cname_target(self) -> Optional[str]:
return pulumi.get(self, "cname_target")
@property
@pulumi.getter(name="customCertificate")
def custom_certificate(self) -> Optional[str]:
"""
If a custom uploaded certificate is used.
"""
return pulumi.get(self, "custom_certificate")
@property
@pulumi.getter(name="customKey")
def custom_key(self) -> Optional[str]:
"""
The key for a custom uploaded certificate.
"""
return pulumi.get(self, "custom_key")
@property
@pulumi.getter
def method(self) -> Optional[str]:
"""
Domain control validation (DCV) method used for this
hostname. Valid values are `"txt"`, `"http"` and `"email"`.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def settings(self) -> Optional[Sequence['outputs.CustomHostnameSslSetting']]:
"""
SSL/TLS settings for the certificate. See further notes below.
"""
return pulumi.get(self, "settings")
@property
@pulumi.getter
def status(self) -> Optional[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Level of validation to be used for this hostname. Domain validation ("dv") must be used.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def wildcard(self) -> Optional[bool]:
"""
Indicates whether the certificate covers a wildcard.
"""
return pulumi.get(self, "wildcard")
@pulumi.output_type
class CustomHostnameSslSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "minTlsVersion":
suggest = "min_tls_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CustomHostnameSslSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CustomHostnameSslSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CustomHostnameSslSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ciphers: Optional[Sequence[str]] = None,
http2: Optional[str] = None,
min_tls_version: Optional[str] = None,
tls13: Optional[str] = None):
"""
:param Sequence[str] ciphers: List of SSL/TLS ciphers to associate with this certificate.
:param str http2: Whether or not HTTP2 should be supported. Valid values are `"on"` or `"off"`.
:param str min_tls_version: Lowest version of TLS this certificate should
support. Valid values are `"1.0"`, `"1.1"`, `"1.2"` and `"1.3"`.
:param str tls13: Whether or not TLSv1.3 should be supported. Valid values are `"on"` or `"off"`.
"""
if ciphers is not None:
pulumi.set(__self__, "ciphers", ciphers)
if http2 is not None:
pulumi.set(__self__, "http2", http2)
if min_tls_version is not None:
pulumi.set(__self__, "min_tls_version", | |
"""
A series of tools with which tidal data can be extracted from FVCOM NetCDF
model results. Also provides a number of tools to interrogate the SQLite
database of tidal data collated from a range of sources across the north-west
European continental shelf.
"""
from __future__ import print_function
import sys
import jdcal
import inspect
import sqlite3
import numpy as np
from lxml import etree
from warnings import warn
from PyFVCOM.grid_tools import find_nearest_point
def julian_day(gregorianDateTime, mjd=False):
"""
For a given gregorian date format (YYYY,MM,DD,hh,mm,ss) get the
Julian Day.
Output array precision is the same as input precision, so if you
want sub-day precision, make sure your input data are floats.
Parameters
----------
gregorianDateTime : ndarray
Array of Gregorian dates formatted as [[YYYY, MM, DD, hh, mm,
ss],...,[YYYY, MM, DD, hh, mm, ss]]. If hh, mm, ss are missing
they are assumed to be zero (i.e. midnight).
mjd : boolean, optional
Set to True to convert output from Julian Day to Modified Julian
Day.
Returns
-------
jd : ndarray
Modified Julian Day or Julian Day (depending on the value of
mjd).
Notes
-----
Julian Day epoch: 12:00 January 1, 4713 BC, Monday
    Modified Julian Day epoch: 00:00 November 17, 1858, Wednesday
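
    Example
    -------
    Quick check against the date used in the ``gregorian_date`` example below
    (added here for illustration):

    >>> float(julian_day(np.array([2011, 11, 30, 23, 26, 15]), mjd=True))
    55895.9765625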
"""
try:
nr, nc = np.shape(gregorianDateTime)
    except ValueError:  # 1-D input, i.e. a single date
nc = np.shape(gregorianDateTime)[0]
nr = 1
if nc < 6:
# We're missing some aspect of the time. Let's assume it's the least
# significant value (i.e. seconds first, then minutes, then hours).
# Set missing values to zero.
numMissing = 6 - nc
if numMissing > 0:
extraCols = np.zeros([nr, numMissing])
if nr == 1:
gregorianDateTime = np.hstack([gregorianDateTime, extraCols[0]])
else:
gregorianDateTime = np.hstack([gregorianDateTime, extraCols])
if nr > 1:
year = gregorianDateTime[:, 0]
month = gregorianDateTime[:, 1]
day = gregorianDateTime[:, 2]
hour = gregorianDateTime[:, 3]
minute = gregorianDateTime[:, 4]
second = gregorianDateTime[:, 5]
else:
year = gregorianDateTime[0]
month = gregorianDateTime[1]
day = gregorianDateTime[2]
hour = gregorianDateTime[3]
minute = gregorianDateTime[4]
second = gregorianDateTime[5]
    julian, modified = np.empty((nr, 1)), np.empty((nr, 1))
    # jdcal.gcal2jd returns the Julian Day split into two parts (2400000.5, MJD):
    # their sum is the full Julian Day and the second part is the Modified
    # Julian Day.
    if nr == 1:
        jd1, jd2 = jdcal.gcal2jd(year, month, day)
        day_fraction = (hour + (minute / 60.0) + (second / 3600.0)) / 24.0
        modified = jd2 + day_fraction
        julian = jd1 + jd2 + day_fraction
    else:
        for ii, tt in enumerate(gregorianDateTime):
            jd1, jd2 = jdcal.gcal2jd(tt[0], tt[1], tt[2])
            day_fraction = (hour[ii] + (minute[ii] / 60.0) + (second[ii] / 3600.0)) / 24.0
            modified[ii] = jd2 + day_fraction
            julian[ii] = jd1 + jd2 + day_fraction
if mjd:
return modified
else:
return julian
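# Illustrative usage sketch (comment only, not part of the original module): converting
# a single Gregorian timestamp to Modified Julian Day; the value pairs up with the
# gregorian_date doctest below.
#
#   julian_day(np.array([2011, 11, 30, 23, 26, 15]), mjd=True)
#   # -> approximately 55895.9765625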
def gregorian_date(julianDay, mjd=False):
"""
For a given Julian Day convert to Gregorian date (YYYY, MM, DD, hh, mm,
    ss). Optionally convert from Modified Julian Day (with mjd=True).
This function is adapted to Python from the MATLAB julian2greg.m function
(http://www.mathworks.co.uk/matlabcentral/fileexchange/11410).
Parameters
----------
julianDay : ndarray
Array of Julian Days
mjd : boolean, optional
Set to True if the input is Modified Julian Days.
Returns
-------
greg : ndarray
Array of [YYYY, MM, DD, hh, mm, ss].
Example
-------
    >>> greg = gregorian_date(np.array([53583.00390625, 55895.9765625]), mjd=True)
    >>> greg.astype(int)
    array([[2005, 8, 1, 0, 5, 37],
           [2011, 11, 30, 23, 26, 15]])
"""
if not mjd:
# It's easier to use jdcal in Modified Julian Day
julianDay = julianDay + 2400000.5
try:
nt = len(julianDay)
except TypeError:
nt = 1
greg = np.empty((nt, 6))
if nt == 1:
ymdf = jdcal.jd2gcal(2400000.5, julianDay)
fractionalday = ymdf[-1]
hours = int(fractionalday * 24)
minutes = int(((fractionalday * 24) - hours) * 60)
seconds = ((((fractionalday * 24) - hours) * 60) - minutes) * 60
greg = np.asarray((ymdf[0], ymdf[1], ymdf[2], hours, minutes, seconds))
else:
for ii, jj in enumerate(julianDay):
ymdf = jdcal.jd2gcal(2400000.5, jj)
greg[ii, :3] = ymdf[:3]
fractionalday = ymdf[-1]
hours = int(fractionalday * 24)
minutes = int(((fractionalday * 24) - hours) * 60)
seconds = ((((fractionalday * 24) - hours) * 60) - minutes) * 60
greg[ii, 3:] = [hours, minutes, seconds]
return greg
def add_harmonic_results(db, stationName, constituentName, phase, amplitude, speed, inferred, ident=None, noisy=False):
"""
Add data to an SQLite database.
Parameters
----------
db : str
Full path to an SQLite database. If absent, it will be created.
stationName : str
Short name for the current station. This is the table name.
constituentName : str
Name of the current tidal constituent being added.
phase : float
Tidal constituent phase (in degrees).
amplitude : float
Tidal constituent amplitude (in metres).
speed : float
Tidal constituent speed (in degrees per hour).
inferred : str
'true' or 'false' indicating whether the values are inferred
(i.e. the time series is too short to perform a robust harmonic
analysis).
ident : str
Optional prefix for the table names in the SQLite database. Usage of
this option means you can store both u and v data in the same database.
noisy : bool
Set to True to enable verbose output.
"""
if not ident:
ident = ''
else:
ident = '_' + ident
conn = sqlite3.connect(db)
c = conn.cursor()
# Create the necessary tables if they don't exist already
c.execute('CREATE TABLE IF NOT EXISTS TidalConstituents ( \
shortName TEXT COLLATE nocase, \
amplitude FLOAT(10), \
phase FLOAT(10), \
speed FLOAT(10), \
constituentName TEXT COLLATE nocase, \
amplitudeUnits TEXT COLLATE nocase, \
phaseUnits TEXT COLLATE nocase, \
speedUnits TEXT COLLATE nocase, \
inferredConstituent TEXT COLLATE nocase)')
if noisy:
print('amplitude, phase and speed.', end=' ')
for item in range(len(inferred)):
c.execute('INSERT INTO TidalConstituents VALUES (?,?,?,?,?,?,?,?,?)',
(stationName + ident, amplitude[item], phase[item], speed[item], constituentName[item], 'metres', 'degrees', 'degrees per mean solar hour', inferred[item]))
conn.commit()
conn.close()
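# Illustrative call sketch (hypothetical database name, station and values; not part of
# the original module): storing two constituents for a station named 'AVO'.
#
#   add_harmonic_results('tides.db', 'AVO', ['M2', 'S2'],
#                        phase=[102.1, 145.3], amplitude=[1.82, 0.61],
#                        speed=[28.984, 30.0], inferred=['false', 'false'])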
def get_observed_data(db, table, startYear=False, endYear=False, noisy=False):
"""
Extract the tidal data from the SQLite database for a given station.
Specify the database (db), the table name (table) which needs to be the
short name version of the station of interest.
Optionally supply a start and end year (which if equal give all data from
that year) to limit the returned data. If no data exists for that station,
the output is returned as False.
Parameters
----------
db : str
Full path to the tide data SQLite database.
table : str
Name of the table to be extracted (e.g. 'AVO').
startYear : bool, optional
Year from which to start extracting data (inclusive).
endYear : bool, optional
Year at which to end data extraction (inclusive).
noisy : bool, optional
Set to True to enable verbose output.
See Also
--------
tide_tools.get_observed_metadata : extract metadata for a tide station.
Notes
-----
Search is not fuzzy, so "NorthShields" is not the same as "North Shields".
Search is case insensitive, however.
"""
if noisy:
print('Getting data for {} from the database...'.format(table), end=' ')
    con = None
    try:
        con = sqlite3.connect(db)
with con:
c = con.cursor()
if startYear and endYear:
# We've been given a range of data
if startYear == endYear:
# We have the same start and end dates, so just do a
# simpler version
c.execute('SELECT * FROM ' + table + ' WHERE ' +
table + '.year == ' + str(startYear) +
' ORDER BY year, month, day, hour, minute, second')
else:
# We have a date range
c.execute('SELECT * FROM ' + table + ' WHERE ' +
table + '.year > ' + str(startYear) +
' AND ' + table + '.year < ' + str(endYear) +
' ORDER BY year, month, day, hour, minute, second')
else:
# Return all data
c.execute('SELECT * FROM ' + table +
' ORDER BY year, month, day, hour, minute, second')
# Now get the data in a format we might actually want to use
data = c.fetchall()
con.close()
if noisy:
print('done.')
except sqlite3.Error as e:
if con:
con.close()
print('Error {}:'.format(e.args[0]))
data = [False]
return data
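# Illustrative call sketch (hypothetical database and table names; not part of the
# original module): fetch all records for station 'AVO' from 2005 only.
#
#   data = get_observed_data('tides.db', 'AVO', startYear=2005, endYear=2005)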
def get_observed_metadata(db, originator=False, obsdepth=None):
"""
Extracts the meta data from the supplied database. If the supplied
originator is False (default), then information from all stations is
returned.
Parameters
----------
db : str
Full path to the tide data SQLite database.
originator : str, optional
Specify an originator (e.g. 'NTSLF', 'NSTD', 'REFMAR') to
        extract only that data. Defaults to False, which returns all stations.
        clear_votes = player.clear_votes
if not await self.is_dj(ctx):
mbrs = my_vc.members
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
percent = settings.vote
reqvotes = (
(len([1 for m in mbrs if not m.bot and not m.voice.self_deaf and not m.voice.deaf])) / (100 / percent))
voter = ctx.message.author
if voter.id not in clear_votes:
clear_votes.add(voter.id)
total_votes = len(clear_votes)
if total_votes < math.ceil(reqvotes):
return await ctx.send(get_str(ctx, "music-clear-vote") + ' **[{}/{}]**'.format(total_votes, math.ceil(reqvotes)))
else:
return await ctx.send(get_str(ctx, "music-clear-already"))
player.queue.clear()
await ctx.send(get_str(ctx, "music-clear-cleared"))
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.guild)
@commands.command(name='np', aliases=['currentsong', 'nowplaying', 'nplaying', 'current', 'now'])
async def current_song(self, ctx):
"""
{command_prefix}np
{help}
"""
player = await self.get_player(ctx.guild)
        # store it in a variable to avoid changes between the beginning and the end of the command
current = player.current
if not current:
return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
if "listen.moe" in current.uri.lower() or ('monstercat' in current.uri.lower() and 'twitch' in current.uri.lower()):
self.radio_update(current)
color = int("FF015B", 16)
if 'kpop' in current.uri.lower():
color = int("3CA4E9", 16)
else:
color = self.get_color(ctx.guild)
pos = lavalink.utils.format_time(
player.position).lstrip('0').lstrip(':')
if current.stream:
dur = 'LIVE'
else:
dur = lavalink.utils.format_time(
current.duration).lstrip('0').lstrip(':')
thumb = await self.get_thumbnail(current, player)
requester = await self.bot.safe_fetch('member', current.requester, guild=ctx.guild)
prog_bar_str = sweet_bar(player.position, current.duration)
embed = discord.Embed(colour=color, title=f"**{current.title}**",
description=f"{'⏸' if player.paused else '🔁' if player.repeat else ''}`[{pos}/{dur}]` {prog_bar_str}")
embed.url = current.uri
if requester:
embed.set_author(
name=requester.name, icon_url=requester.avatar_url or requester.default_avatar_url, url=current.uri)
if thumb:
embed.set_image(url=thumb)
settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
if not settings.channel:
player.channel = ctx.channel.id
if player.node.is_perso:
name = await self.bot.safe_fetch('user', player.node.name) or player.node.name
# TODO: Translations
embed.set_footer(text=f"Hosted by {name}")
await self.send_new_np_msg(player, ctx.channel, new_embed=embed, message=ctx.message, force_send=True)
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.user)
@commands.command(aliases=['list', 'q', "songlist", "sl"])
async def queue(self, ctx, page: int = 1):
"""
{command_prefix}queue
{command_prefix}queue [page]
{help}
"""
player = await self.get_player(ctx.guild)
if not player.queue:
return await ctx.invoke(self.current_song)
        if not player.current:  # it can happen, but it is not common
return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
pos = lavalink.utils.format_time(
player.position).lstrip('0').lstrip(':')
requester = await self.bot.safe_fetch('member', player.current.requester, guild=ctx.guild)
if not player.current.stream:
# prog_bar_str = sweet_bar(player.position, player.current.duration)
duration = lavalink.utils.format_time(
player.current.duration).lstrip('0').lstrip(':')
else:
if "listen.moe" in player.current.uri.lower() or ('monstercat' in player.current.uri.lower() and 'twitch' in player.current.uri.lower()):
self.radio_update(player.current)
duration = 'LIVE'
msg = f"{'⏸' if player.paused else '🔁' if player.repeat else ''}`[{pos}/{duration}]` " + f" **[{player.current.title}]({player.current.uri})** " + get_str(
ctx, "music-queue-added-by") + f" **{requester.name}**.\n\n"
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
if page > pages:
page = pages
elif page < 1:
page = 1
start = (page - 1) * items_per_page
end = start + items_per_page
for i, track in enumerate(player.queue[start:end], start=start):
max_val = len(str(len(player.queue[start:end]) + start))
str_index = str(i + 1)
str_index = "".join(
[' ' for x in range(max_val - len(str_index))]) + str_index
# better than having an error
requester = await self.bot.safe_fetch('member', track.requester, guild=ctx.guild) or ctx.author
line = "`{}.` **[{}]({})** {} ".format(str_index, track.title.replace('[', '').replace(']', '').replace('*', '')[:40], track.uri,
get_str(ctx, "music-queue-added-by"))
msg += line
available_spaces = 67 - \
                len(line) + len(track.uri) + 8  # because of the **
if requester:
msg += f"**{requester.name[:available_spaces]}**.\n"
embed = discord.Embed(title=None, description=msg, color=self.get_color(
ctx.guild)) # Specify title to avoid issue when editing
bottom = ''
if pages > 1:
bottom = f'{page}/{pages}'
if (items_per_page * page) < len(player.queue):
rest = len(player.queue) - (items_per_page * page)
bottom += " ..." + \
get_str(ctx, "music-queue-not-displayed",
can_owo=False).format(rest) + '. - '
position, time_until = await self.get_position_in_queue(ctx, player)
bottom += f'{time_until}'
embed.set_footer(text=bottom)
await self.send_new_np_msg(player, ctx.channel, new_embed=embed, message=ctx.message)
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.guild)
@commands.command(name='pause', aliases=['resume'])
async def pause_song(self, ctx):
"""
{command_prefix}pause
{help}
"""
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
player = await self.get_player(ctx.guild)
if not ctx.guild.me.voice:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
my_vc = ctx.guild.me.voice.channel
if not sum(1 for m in my_vc.members if not (m.voice.deaf or m.bot or m.voice.self_deaf)):
if ctx.author not in my_vc.members or (ctx.author.voice.self_deaf or ctx.author.voice.deaf):
return await ctx.send(get_str(ctx, "music-not-my-channel").format(f"**{my_vc}**"), delete_after=30)
if not player.is_playing:
return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
if player.paused:
await player.set_pause(False)
await ctx.send(get_str(ctx, "music-resume-success").format(f"**{ctx.author}**"))
else:
await player.set_pause(True)
await ctx.send(get_str(ctx, "music-pause-success").format(f"**{ctx.author}**"))
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.guild)
@commands.command(name='volume', aliases=['vol', 'v'])
async def volume(self, ctx, new_volume=None):
"""
{command_prefix}volume (+/-)[volume]
{command_prefix}volume
{help}
"""
player = await self.get_player(ctx.guild)
original_content = new_volume
if not player.is_connected:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
channel = ctx.channel
claimed_server = await self.bot.server_is_claimed(ctx.guild.id)
if claimed_server:
max_volume = 1000
else:
# Keep the result: Smart way to only do 1 api call.
fetched_member = await is_voter(self.bot, ctx.author, fetch=True)
if fetched_member:
max_volume = 150
if await is_basicpatron(self.bot, ctx.author, resp=fetched_member):
max_volume = 200
if await is_patron(self.bot, ctx.author, resp=fetched_member):
max_volume = 1000
else:
max_volume = 100
if not new_volume:
prog_bar_str = sweet_bar(player.volume, max_volume)
ctx.command.reset_cooldown(ctx)
return await ctx.send(f'`🔈 {player.volume}%` {prog_bar_str}')
relative = False
try:
while new_volume[-1] in '%':
new_volume = new_volume[:-1]
except IndexError:
return await ctx.send(get_str(ctx, "music-volume-invalid-number").format(f"`{original_content}`"))
if new_volume[0] in '+-':
relative = True
try:
new_volume = int(new_volume)
except ValueError:
return await ctx.send(get_str(ctx, "music-volume-invalid-number").format(f"`{new_volume}`"))
if relative:
vol_change = new_volume
new_volume += player.volume
old_volume = int(player.volume)
if 0 <= new_volume <= max_volume:
prog_bar_str = sweet_bar(new_volume, max_volume)
if old_volume == new_volume:
ctx.command.reset_cooldown(ctx)
return await ctx.send('`🔈 %d%%` {progress_bar}'.format(progress_bar=prog_bar_str) % (player.volume))
await player.set_volume(new_volume)
return await channel.send(get_str(ctx, "music-volume-updated", can_owo=False).format(f"**{old_volume}%**", f"**{new_volume}%**") + '\n`🔈 {}%` {progress_bar}'.format(new_volume, progress_bar=prog_bar_str))
else:
if 9000 < new_volume:
return await ctx.send("OMG IT'S OVER NINE THOUSAND !!!")
if 100 <= new_volume <= 1000:
e = discord.Embed(description=get_str(ctx, "music-volume-higher-than-100") + "\n\n" +
"**[Patreon](https://www.patreon.com/watora)**\n**[Vote (volume up to 150)](https://discordbots.org/bot/220644154177355777)**")
try:
await ctx.send(embed=e)
except discord.Forbidden:
await ctx.send(content=get_str(ctx, "music-volume-higher-than-100") + "\n<https://www.patreon.com/watora>\nVolume up to 150 : <https://discordbots.org/bot/220644154177355777>")
elif relative:
await ctx.send(get_str(ctx, "music-volume-unreasonable-volume-relative").format(old_volume, vol_change, old_volume + vol_change, 0 - old_volume, max_volume - old_volume), delete_after=20)
else:
if await is_patron(self.bot, ctx.author):
await ctx.send(get_str(ctx, "music-volume-unreasonable-volume-patreon").format(new_volume, max_volume), delete_after=20)
else:
await ctx.send(get_str(ctx, "music-volume-unreasonable-volume").format(new_volume, max_volume), delete_after=20)
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.guild)
@commands.command()
async def shuffle(self, ctx):
"""
{command_prefix}shuffle
{help}
"""
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
player = await self.get_player(ctx.guild)
if not player.is_connected:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
if not player.queue:
return await ctx.send(get_str(ctx, "music-shuffle-empty"))
random.shuffle(player.queue)
        # purely cosmetic but fun part from here on
cards = [':spades:', ':clubs:', ':hearts:', ':diamonds:']
hand = await ctx.send(' '.join(cards))
await asyncio.sleep(0.6)
for x in range(4):
random.shuffle(cards)
await hand.edit(content=' '.join(cards))
await asyncio.sleep(0.6)
try:
await hand.delete()
except discord.HTTPException:
pass
await ctx.send(get_str(ctx, "music-shuffle-shuffled"))
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.guild)
@commands.command(name='repeat', aliases=['loopqueue', 'loop', 'queueloop'])
async def repeat(self, ctx):
"""
{command_prefix}repeat
{help}
"""
player = await self.get_player(ctx.guild)
if not player.is_connected:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
if not player.current:
return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
player.repeat = not player.repeat
if player.repeat:
await ctx.send(get_str(ctx, "music-repeat-enabled"))
else:
await ctx.send(get_str(ctx, "music-repeat-disabled"))
@commands.command(name='remove', aliases=["rem"])
async def remove_from_playlist(self, ctx, position: int = None, after: int = None):
"""
{command_prefix}remove [first_position_in_queue] [second_position_in_queue]
{command_prefix}remove [position_in_queue]
{command_prefix}remove
{help}
"""
player = await self.get_player(ctx.guild)
if not ctx.guild.me.voice:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
my_vc = ctx.guild.me.voice.channel
if not await self.is_dj(ctx):
if ctx.author not in my_vc.members or (ctx.author.voice.self_deaf or ctx.author.voice.deaf):
return await ctx.send(get_str(ctx, "music-not-my-channel").format(f"**{my_vc}**"), delete_after=30)
if not player.queue:
return await ctx.send(get_str(ctx, "music-remove-empty"))
removed = 0
nb = len(player.queue)
if after and after != position:
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
if after > 1:
if after > nb:
after = nb
after -= 1
if 1 <= position <= nb and 1 <= after <= nb:
position -= 1
for index in range(position, (after + 1)):
del player.queue[index - removed]
removed += 1
else:
return await ctx.send(get_str(ctx, "music-promote-error").format(nb))
if position is None: # can be 0
position = nb
if not removed:
if 1 <= position <= nb:
position -= 1
song = player.queue[position]
title = song.title
if not await self.is_dj(ctx):
if song.requester != ctx.author.id:
raise commands.errors.CheckFailure
del player.queue[position]
else:
return await ctx.send(get_str(ctx, "music-promote-error").format(nb))
return await ctx.send(get_str(ctx, "music-remove-removed").format(f"**{title}**"))
else:
await ctx.send(get_str(ctx, "music-remove-removed-multiples").format(f"**{removed}**"))
@commands.command(aliases=["up", "mov", "move"])
async def promote(self, ctx, position: int = None, after: int = None):
"""
{command_prefix}promote [song_position_in_queue] [position_in_queue_after]
{command_prefix}promote [song_position_in_queue]
{command_prefix}promote
{help}
"""
player = await self.get_player(ctx.guild)
nb = len(player.queue)
if not player.is_connected:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
if not player.queue:
return await ctx.send(get_str(ctx, "music-promote-empty"))
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
if position is None:
position = nb
if 1 <= position <= nb:
position -= 1
song = player.queue[position]
title = song.title
del player.queue[position]
else:
return await ctx.send(get_str(ctx, "music-promote-error").format(nb))
if after:
if after < 1:
after = 0
else:
                if after > (len(player.queue) +
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command Line Interface command to train the model.
"""
import argparse
import logging
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from neurallog.knowledge.program import NeuralLogProgram, \
print_neural_log_program, \
DEFAULT_PARAMETERS
from neurallog.network import trainer
from neurallog.network.callbacks import get_formatted_name
from neurallog.network.network import LossMaskWrapper
from neurallog.network.network_functions import get_loss_function, \
CRFLogLikelihood
from neurallog.network.trainer import Trainer
from neurallog.run.command import Command, command, create_log_file, \
TRAIN_SET_NAME, VALIDATION_SET_NAME, TEST_SET_NAME
from neurallog.util import print_args
from neurallog.util.file import read_logic_program_from_file
METRIC_FILE_PREFIX = "metric_"
LOGIC_PROGRAM_EXTENSION = ".pl"
TAB_SIZE = 4
COMMAND_NAME = "train"
logger = logging.getLogger(__name__)
def get_clauses(filepath):
"""
Gets the clauses from the file in `filepath`.
:param filepath: the filepath
:type filepath: str
:return: the clauses
:rtype: List[Clause]
"""
start_func = time.perf_counter()
clauses = read_logic_program_from_file(filepath)
end_func = time.perf_counter()
logger.info("File:\t%s", filepath)
logger.info("\t- Total reading time:\t%0.3fs",
end_func - start_func)
return clauses
def format_arguments(message, arguments):
"""
Formats the arguments for the help message.
:param message: the initial message
:type message: str
:param arguments: the arguments to be formatted
:type arguments: list[list[str]] or list[tuple[str]]
:return: the formatted message
:rtype: str
"""
formatted = message
formatted += "\n\n"
formatted += "The following parameters can be set in the logic file " \
"by using the special\npredicate set_parameter or " \
"set_predicate_parameter*.\n" \
"Syntax:\n\n" \
"set_parameter(<name>, <value>).\nor\n" \
"set_parameter(<name>, class_name, " \
"<class_name>).\n" \
"set_parameter(<name>, config, <config_1>, " \
"<value_1>).\n...\n" \
"set_parameter(<name>, config, <config_n>, " \
"<value_n>).\n\nor\n\n" \
"set_predicate_parameter(<predicate>, <name>, " \
"<value>).\nor\n" \
"set_predicate_parameter(<predicate>, <name>, class_name, " \
"<class_name>).\n" \
"set_predicate_parameter(<predicate>, <name>, config, " \
"<config_1>, " \
"<value_1>).\n...\n" \
"set_predicate_parameter(<predicate>, <name>, config, " \
"<config_n>, " \
"<value_n>).\n\n" \
"One can use $<predicate>[<index>] to access the size of " \
"the predicate term\nwhen setting parameters."
formatted += "\n\n"
max_key_size = max(map(lambda x: len(x[0]), arguments))
stride = max_key_size + TAB_SIZE
for argument in arguments:
key, value = argument[0], argument[1]
if len(argument) > 2 and argument[2]:
key += "*"
formatted += key + " " * (stride - len(key) - 1)
length = 0
for word in value.split(" "):
length += len(word) + 1
if length > 79 - stride:
length = len(word) + 1
formatted += "\n"
formatted += " " * stride
else:
formatted += " "
formatted += word
formatted += "\n\n"
formatted += "* this feature may be set individually for each " \
"predicate.\n" \
"If it is not defined for a specific predicate,\n" \
"the default globally defined value will be used."
formatted += "\n\n"
return formatted
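# Illustrative sketch (not part of the original module): format_arguments expects the
# initial help message plus (name, description[, per-predicate flag]) entries, e.g.
#
#   format_arguments("Trains the neural network.",
#                    [("batch_size", "the size of each mini-batch", True),
#                     ("epochs", "the number of training epochs")])
#
# The flag only controls whether the name is marked with "*" in the help text.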
def find_best_model(checkpoint, history):
"""
Finds the best model saved by the checkpoint.
:param checkpoint: the checkpoint
:type checkpoint: ModelCheckpoint
:param history: a dictionary with the metrics and their values for
each epoch.
:type history: dict[str, np.ndarray]
:return: the path of the best model
:rtype: str or None
"""
if checkpoint.save_best_only:
return checkpoint.filepath
period = checkpoint.period
monitor = checkpoint.monitor
best = checkpoint.best
monitor_op = checkpoint.monitor_op
values = history.get(monitor, None)
if values is None:
return None
best_epoch = 0
for i in range(len(values)):
if monitor_op(values[i], best):
best = values[i]
best_epoch = i
return checkpoint.filepath.format(epoch=(best_epoch + 1) * period)
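# Illustrative note (not from the original source): with save_best_only=False,
# checkpoint.filepath = 'model_{epoch}.h5', period = 2 and the monitored metric at its
# best value at index 3 of the history, find_best_model returns 'model_8.h5', i.e.
# filepath.format(epoch=(3 + 1) * 2).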
def deserialize_loss(loss_function):
"""
Deserializes the loss functions.
:param loss_function: the loss functions
:type loss_function: str or dict
:return: the deserialized loss functions
:rtype: function or dict[function]
"""
if isinstance(loss_function, dict):
result = dict()
for key, value in loss_function.items():
result[key] = get_loss_function(value)
else:
result = get_loss_function(loss_function)
return result
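# Illustrative note (not from the original source): deserialize_loss accepts either a
# single loss-function name or a per-output dict such as {"<predicate>": "<loss name>"};
# in the dict case every value is passed through get_loss_function and the keys are kept.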
@command(COMMAND_NAME)
class Train(Command):
"""
Trains the neural network.
"""
def __init__(self, program, args, direct=False):
super().__init__(program, args, direct)
self.neural_program = NeuralLogProgram()
self.train_set = None
self.validation_set = None
self.test_set = None
# noinspection PyMissingOrEmptyDocstring,DuplicatedCode
def build_parser(self):
program = self.program
if not self.direct:
program += " {}".format(COMMAND_NAME)
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
prog=program,
description=self.get_command_description(),
formatter_class=argparse.RawDescriptionHelpFormatter)
# Input
parser.add_argument('--program', '-p', metavar='program',
type=str, required=True, nargs="+",
help="The program file(s)")
parser.add_argument('--train', '-t', metavar='train',
type=str, required=False, nargs="+", default=[],
help="The train file(s)")
parser.add_argument('--validation', '-valid', metavar='validation',
type=str, required=False, nargs="+", default=[],
help="The validation file(s)")
parser.add_argument('--test', '-test', metavar='test',
type=str, required=False, nargs="+", default=[],
help="The test file(s)")
parser.add_argument('--loadModel', '-l', metavar='loadModel',
type=str, default=None, required=False,
help="If set, loads the model from the path and "
"continues from the loaded model")
# Output
parser.add_argument("--outputPath", "-o", metavar='outputPath',
type=str, default=None, required=False,
help="The path to save the outputs")
parser.add_argument("--lastModel", "-lm", metavar='lastModel',
type=str, default=None, required=False,
help="The path to save the last learned model. "
"If `outputPath` is given, "
"this path will be relative to it")
parser.add_argument("--lastProgram", "-lp", metavar='lastProgram',
type=str, default=None, required=False,
help="The name of the file to save the last "
"learned program. If `outputPath` is given, "
"this path will be relative to it")
parser.add_argument("--lastInference", "-li", metavar='lastInference',
type=str, default=None, required=False,
help="The prefix of the file to save the "
"inferences of the last learned program. "
"The name of the dataset and the `.pl` "
"extension will be appended to it. "
"If `outputPath` is given, this path will "
"be relative to it")
# Log
parser.add_argument("--logFile", "-log", metavar='file',
type=str, default=None,
help="The file path to save the log into")
parser.add_argument("--tensorBoard", "-tb", metavar='directory',
type=str, default=None,
help="Creates a log event for the TensorBoard "
"on the given path")
parser.add_argument("--verbose", "-v", dest="verbose",
action="store_true",
help="Activated a verbose log")
parser.set_defaults(verbose=False)
return parser
# noinspection PyMissingOrEmptyDocstring
def get_command_description(self):
message = super().get_command_description()
arguments = list(
map(lambda x: (x[0], x[2], x[3] if len(x) > 3 else True),
DEFAULT_PARAMETERS))
arguments += trainer.PARAMETERS
return format_arguments(message, arguments)
def _read_parameters(self):
"""
Reads the default parameters found in the program
"""
self.trainer.read_parameters()
print_args(self.trainer.parameters, logger)
# noinspection PyMissingOrEmptyDocstring,PyAttributeOutsideInit
def parse_args(self):
super().parse_args()
# Log
args = self.parser.parse_args(self.args)
log_file = args.logFile
create_log_file(log_file)
print_args(args, logger)
self.tensor_board = args.tensorBoard
# Input
self.program_files = args.program
self.train_files = args.train
self.validation_files = args.validation
self.test_files = args.test
self.load_model = args.loadModel
self.train = len(self.train_files) > 0
self.valid = len(self.validation_files) > 0
self.test = len(self.test_files) > 0
# Output
self.output_path = args.outputPath
self.last_model = args.lastModel
self.last_program = args.lastProgram
self.last_inference = args.lastInference
self.verbose = args.verbose
if self.verbose:
logger.setLevel(logging.DEBUG)
# neurallog.run.H1.setLevel(logging.DEBUG)
def build(self):
"""
Builds the neural network and prepares for training.
"""
self._read_clauses_from_file()
self._build_model()
def _read_clauses_from_file(self):
"""
Read the clauses from the files.
"""
logger.info("Reading input files...")
start_func = time.perf_counter()
self._read_input_file(self.program_files, "program")
end_program = time.perf_counter()
end_train = end_program
if self.train:
self._read_input_file(self.train_files, TRAIN_SET_NAME)
end_train = time.perf_counter()
end_validation = end_train
end_test = end_train
end_reading = end_train
        if self.valid:
self._read_input_file(self.validation_files, VALIDATION_SET_NAME)
end_validation = time.perf_counter()
end_reading = end_validation
        if self.test:
self._read_input_file(self.test_files, TEST_SET_NAME)
end_test = time.perf_counter()
end_reading = end_test
self.neural_program.build_program()
end_func = time.perf_counter()
# logger.info("Total number of predictable constants:\t%d",
# len(self.neural_program.iterable_constants))
logger.info("Program reading time: \t%0.3fs",
end_program - start_func)
if self.train:
logger.info("Train reading time: \t%0.3fs",
end_train - end_program)
if self.valid:
logger.info("Validation reading time:\t%0.3fs",
end_validation - end_train)
if self.test:
logger.info("Test reading time: \t%0.3fs",
end_test - end_validation)
logger.info("Building program time: \t%0.3fs",
end_func - end_reading)
logger.info("Total reading time: \t%0.3fs",
end_reading - start_func)
def _read_input_file(self, program_files, name):
logger.info("Reading %s...", name)
for file in program_files:
file_clauses = get_clauses(file)
self.neural_program.add_clauses(file_clauses, example_set=name)
def _build_model(self):
"""
Builds and compiles the model.
"""
start_func = time.perf_counter()
logger.info("Building model...")
self.trainer = Trainer(self.neural_program, self.output_path)
self.trainer.init_model()
self._read_parameters()
# self.trainer.read_parameters()
self.neural_dataset = self.trainer.build_dataset(override_targets=False)
self._build_examples_set()
self.model = self.trainer.model
self.model.build_layers(self.neural_dataset.get_target_predicates())
# self._read_parameters()
self.trainer.log_parameters(
["clip_labels", "loss_function", "optimizer",
"regularizer" "metrics", "inverse_relations"],
self.trainer.output_map.inverse
)
self.trainer.compile_module()
if self.load_model is not None:
self.model.load_weights(self.load_model)
end_func = time.perf_counter()
logger.info("\nModel building time:\t%0.3fs", end_func - start_func)
def fit(self):
"""
Trains the neural network.
"""
start_func = time.perf_counter()
logger.info("Training the model...")
self.trainer.log_parameters(["epochs", "validation_period"])
self.trainer.build_callbacks(
train_command=self, tensor_board=self.tensor_board)
self.trainer.log_parameters(["callback"])
history = self.trainer.fit(self.train_set, self.validation_set)
end_func = time.perf_counter()
logger.info("Total training time:\t%0.3fs", end_func - start_func)
return history
def _build_examples_set(self):
start_func = time.perf_counter()
logger.info("Creating training dataset...")
shuffle = self.trainer.parameters["shuffle"]
batch_size = self.trainer.parameters["batch_size"]
self.trainer.log_parameters(["dataset_class", "batch_size", "shuffle"])
end_func = time.perf_counter()
train_set_time = 0
validation_set_time = 0
test_set_time = 0
if self.train:
self.train_set = self.neural_dataset.get_dataset(
example_set=TRAIN_SET_NAME,
batch_size=batch_size,
shuffle=shuffle)
            end_train
+ 2*m.b303*
m.b729 + 2*m.b304*m.b305 - 2*m.b304*m.b878 - 2*m.b304*m.b1005 - 2*m.b305*m.b729 + 2*m.b305*
m.b1006 + 2*m.b306*m.b307 - 2*m.b307 + 2*m.b307*m.b384 + 2*m.b307*m.b522 - 4*m.b522 - 2*m.b307*
m.b1007 + 2*m.b308*m.b309 - 4*m.b309 + 2*m.b308*m.b778 + 2*m.b309*m.b386 + 2*m.b386 + 2*m.b309*
m.b522 + 2*m.b309*m.b980 - 2*m.b310*m.b311 + 2*m.b310 + 2*m.b310*m.b385 - 4*m.b385 - 2*m.b310*
m.b524 - 2*m.b310*m.b829 + 2*m.b311*m.b388 - 4*m.b388 + 2*m.b311*m.b980 + 2*m.b312*m.b313 + 2*
m.b312*m.b476 - 2*m.b476 + 2*m.b313*m.b388 + 2*m.b313*m.b529 - 4*m.b529 + 2*m.b314*m.b315 - 2*
m.b315 - 2*m.b314*m.b666 + 2*m.b315*m.b529 + 2*m.b316*m.b705 + 2*m.b316*m.b819 - 2*m.b316*m.b861
- 2*m.b317*m.b359 - 2*m.b317 + 2*m.b317*m.b438 - 2*m.b438 + 2*m.b317*m.b861 + 2*m.b317*m.b990 -
2*m.b318*m.b647 + 4*m.b318 - 2*m.b318*m.b757 - 2*m.b318*m.b982 - 2*m.b318*m.b984 + 2*m.b319*
m.b397 + 2*m.b319 - 4*m.b397 - 2*m.b319*m.b720 - 2*m.b319*m.b807 - 2*m.b319*m.b956 + 2*m.b320*
m.b492 + 2*m.b320*m.b594 - 4*m.b594 + 2*m.b321*m.b322 - 4*m.b322 + 2*m.b321*m.b947 + 2*m.b321*
m.b966 + 2*m.b322*m.b324 + 2*m.b322*m.b444 + 2*m.b322*m.b594 - 2*m.b323*m.b403 - 2*m.b403 + 2*
m.b323*m.b596 - 2*m.b596 - 2*m.b323*m.b893 + 2*m.b324*m.b403 + 2*m.b324*m.b707 + 2*m.b325*m.b327
- 2*m.b327 + 2*m.b326*m.b329 - 4*m.b329 - 2*m.b326*m.b848 + 2*m.b326*m.b849 + 2*m.b327*m.b329 +
2*m.b327*m.b403 - 2*m.b327*m.b918 + 2*m.b328*m.b331 - 2*m.b328*m.b889 - 2*m.b328*m.b931 + 2*
m.b329*m.b331 + 2*m.b329*m.b685 + 2*m.b330*m.b404 - 2*m.b330*m.b497 + 2*m.b330*m.b905 + 2*m.b331*
m.b497 + 2*m.b332*m.b448 + 2*m.b448 + 2*m.b332*m.b737 + 2*m.b332*m.b850 + 2*m.b333*m.b334 + 2*
m.b333*m.b825 - 2*m.b333*m.b921 - 2*m.b334*m.b1010 + 2*m.b334*m.b1011 + 2*m.b335*m.b336 - 2*
m.b336 - 2*m.b335*m.b700 + 2*m.b336*m.b799 - 2*m.b336*m.b909 + 2*m.b336*m.b1012 - 2*m.b337*m.b676
- 2*m.b337*m.b740 - 2*m.b337*m.b999 + 2*m.b338*m.b713 + 2*m.b338*m.b898 - 2*m.b338*m.b942 - 2*
m.b339*m.b378 + 2*m.b378 + 2*m.b339*m.b417 + 2*m.b339*m.b762 - 2*m.b340*m.b341 + 2*m.b340 + 2*
m.b341 - 2*m.b340*m.b703 + 2*m.b340*m.b717 - 2*m.b340*m.b891 + 2*m.b341*m.b342 - 2*m.b341*m.b886
- 2*m.b341*m.b1014 + 2*m.b342*m.b468 - 2*m.b468 - 2*m.b342*m.b717 + 2*m.b343*m.b345 - 4*m.b345
+ 2*m.b344*m.b345 - 4*m.b344 + 2*m.b344*m.b468 + 2*m.b344*m.b573 + 2*m.b344*m.b776 + 2*m.b345*
m.b428 + 2*m.b345*m.b575 - 4*m.b575 + 2*m.b346*m.b347 - 4*m.b347 + 2*m.b346*m.b766 + 2*m.b347*
m.b348 + 2*m.b348 + 2*m.b347*m.b575 + 2*m.b347*m.b974 - 2*m.b348*m.b349 - 2*m.b348*m.b475 - 2*
m.b348*m.b817 + 2*m.b349*m.b433 - 4*m.b433 + 2*m.b349*m.b974 + 2*m.b350*m.b525 - 2*m.b525 + 2*
m.b350*m.b638 - 2*m.b351*m.b658 + 2*m.b351*m.b1015 + 2*m.b352*m.b354 - 2*m.b353*m.b355 - 2*m.b355
- 2*m.b353*m.b660 - 2*m.b353*m.b858 + 2*m.b354*m.b355 + 2*m.b354*m.b584 - 2*m.b584 + 2*m.b355*
m.b767 + 2*m.b355*m.b859 + 2*m.b356*m.b358 - 2*m.b358 - 2*m.b356*m.b859 + 2*m.b356*m.b991 + 2*
m.b357*m.b696 + 2*m.b357*m.b806 - 2*m.b357*m.b847 - 2*m.b358*m.b395 + 2*m.b358*m.b488 - 2*m.b488
+ 2*m.b358*m.b847 - 2*m.b359*m.b768 - 2*m.b359*m.b976 + 2*m.b360*m.b441 + 2*m.b360 - 4*m.b441 -
2*m.b360*m.b705 - 2*m.b360*m.b820 - 2*m.b360*m.b947 + 2*m.b361*m.b363 - 2*m.b363 + 2*m.b361*
m.b541 + 2*m.b361*m.b992 + 2*m.b362*m.b364 - 4*m.b364 + 2*m.b362*m.b956 + 2*m.b362*m.b975 + 2*
m.b363*m.b364 + 2*m.b363*m.b441 - 2*m.b363*m.b733 + 2*m.b364*m.b366 + 2*m.b364*m.b401 + 2*m.b365*
m.b400 - 4*m.b400 - 2*m.b365*m.b445 - 2*m.b445 + 2*m.b366*m.b445 + 2*m.b366*m.b722 + 2*m.b367*
m.b906 + 2*m.b368*m.b758 - 2*m.b368*m.b894 - 2*m.b368*m.b919 + 2*m.b369*m.b407 + 2*m.b407 + 2*
m.b369*m.b750 + 2*m.b369*m.b863 + 2*m.b370*m.b371 - 2*m.b370*m.b449 + 2*m.b449 + 2*m.b370*m.b812
+ 2*m.b371*m.b507 - 2*m.b371*m.b1019 + 2*m.b372*m.b373 - 2*m.b372*m.b712 + 2*m.b372*m.b972 + 2*
m.b373*m.b786 + 2*m.b373*m.b1020 - 2*m.b374*m.b690 + 2*m.b374*m.b962 - 2*m.b374*m.b994 - 2*m.b375
*m.b377 - 2*m.b377 + 2*m.b375*m.b701 + 2*m.b375*m.b909 + 2*m.b376*m.b727 + 2*m.b376*m.b840 - 2*
m.b376*m.b841 + 2*m.b377*m.b841 + 2*m.b377*m.b933 + 2*m.b377*m.b994 - 2*m.b378*m.b515 + 2*m.b378*
m.b942 - 2*m.b378*m.b1014 - 2*m.b379*m.b380 + 4*m.b379 - 2*m.b380 - 2*m.b379*m.b715 - 2*m.b379*
m.b717 - 2*m.b379*m.b885 + 2*m.b380*m.b381 + 2*m.b380*m.b886 + 2*m.b380*m.b1014 + 2*m.b381*m.b519
- 2*m.b519 + 2*m.b381*m.b717 + 2*m.b382*m.b383 - 4*m.b383 + 2*m.b383*m.b472 + 2*m.b383*m.b633 -
4*m.b633 + 2*m.b383*m.b1007 + 2*m.b384*m.b385 + 2*m.b385*m.b633 + 2*m.b385*m.b965 - 2*m.b386*
m.b387 - 2*m.b386*m.b653 - 2*m.b386*m.b803 + 2*m.b387*m.b477 - 2*m.b477 + 2*m.b387*m.b965 + 2*
m.b388*m.b580 - 4*m.b580 + 2*m.b388*m.b582 - 2*m.b389*m.b478 + 2*m.b389*m.b1022 + 2*m.b390*
m.b1008 - 2*m.b391*m.b392 - 2*m.b392 - 2*m.b391*m.b669 - 2*m.b391*m.b845 + 2*m.b392*m.b756 + 2*
m.b392*m.b846 + 2*m.b392*m.b1008 + 2*m.b393*m.b394 - 2*m.b394 - 2*m.b393*m.b871 + 2*m.b393*m.b982
+ 2*m.b394*m.b535 - 2*m.b535 + 2*m.b394*m.b832 - 2*m.b394*m.b955 + 2*m.b395*m.b590 - 4*m.b590 -
2*m.b395*m.b967 + 2*m.b396*m.b491 + 2*m.b396 - 4*m.b491 - 2*m.b396*m.b696 - 2*m.b396*m.b832 - 2*
m.b396*m.b939 + 2*m.b397*m.b399 - 2*m.b399 + 2*m.b397*m.b594 + 2*m.b397*m.b984 + 2*m.b398*m.b400
+ 2*m.b398*m.b966 + 2*m.b398*m.b983 + 2*m.b399*m.b400 + 2*m.b399*m.b491 - 2*m.b399*m.b722 + 2*
m.b400*m.b402 - 2*m.b401*m.b494 - 2*m.b494 + 2*m.b402*m.b494 + 2*m.b402*m.b733 + 2*m.b403*m.b894
+ 2*m.b404*m.b406 - 4*m.b406 + 2*m.b404*m.b601 - 2*m.b601 + 2*m.b405*m.b406 + 2*m.b405*m.b749 +
2*m.b405*m.b931 + 2*m.b406*m.b606 - 2*m.b606 + 2*m.b406*m.b672 - 2*m.b407*m.b450 - 2*m.b407*
m.b784 - 2*m.b407*m.b1019 - 2*m.b408*m.b409 + 2*m.b408 - 2*m.b408*m.b771 + 2*m.b408*m.b1019 - 2*
m.b408*m.b1024 + 2*m.b409*m.b410 + 2*m.b410*m.b453 + 2*m.b410*m.b1019 - 2*m.b411*m.b413 + 2*
m.b413 - 2*m.b411*m.b785 + 2*m.b412*m.b414 - 4*m.b414 - 2*m.b412*m.b726 + 2*m.b412*m.b962 + 2*
m.b413*m.b414 - 2*m.b413*m.b714 - 2*m.b413*m.b1025 + 2*m.b414*m.b416 - 2*m.b416 + 2*m.b414*m.b909
- 2*m.b415*m.b702 + 2*m.b415 - 2*m.b415*m.b762 + 2*m.b415*m.b972 - 2*m.b415*m.b986 - 2*m.b416*
m.b689 + 2*m.b416*m.b773 + 2*m.b416*m.b986 - 2*m.b417*m.b419 - 2*m.b419 + 2*m.b417*m.b688 + 2*
m.b418*m.b715 - 2*m.b418 + 2*m.b418*m.b740 + 2*m.b418*m.b827 - 2*m.b418*m.b828 + 2*m.b419*m.b828
+ 2*m.b419*m.b922 + 2*m.b419*m.b986 - 2*m.b420*m.b464 - 2*m.b420*m.b1005 - 2*m.b421*m.b422 + 4*
m.b421 - 2*m.b422 - 2*m.b421*m.b463 - 2*m.b421*m.b727 - 2*m.b421*m.b729 + 2*m.b422*m.b424 + 2*
m.b422*m.b878 + 2*m.b422*m.b1005 - 2*m.b423*m.b570 - 2*m.b570 - 2*m.b423*m.b775 - 2*m.b423*m.b776
+ 2*m.b424*m.b570 + 2*m.b424*m.b729 + 2*m.b425*m.b427 - 4*m.b427 + 2*m.b426*m.b427 + 2*m.b426*
m.b570 + 2*m.b426*m.b1006 + 2*m.b427*m.b429 - 2*m.b429 + 2*m.b427*m.b522 + 2*m.b428*m.b430 + 2*
m.b429*m.b430 + 2*m.b429*m.b574 - 4*m.b574 - 2*m.b429*m.b645 + 2*m.b430*m.b953 - 2*m.b431*m.b432
- 2*m.b431*m.b657 - 2*m.b431*m.b791 + 2*m.b432*m.b526 - 2*m.b526 + 2*m.b432*m.b953 + 2*m.b433*
m.b527 + 2*m.b433*m.b636 - 4*m.b636 + 2*m.b433*m.b638 + 2*m.b434*m.b658 + 2*m.b434*m.b1026 + 2*
m.b435*m.b1001 - 2*m.b436*m.b437 - 2*m.b436*m.b682 - 2*m.b436*m.b830 + 2*m.b437*m.b744 + 2*m.b437
*m.b1001 + 2*m.b438*m.b439 - 2*m.b439 + 2*m.b438*m.b780 - 2*m.b438*m.b881 + 2*m.b439*m.b588 - 2*
m.b588 + 2*m.b439*m.b820 - 2*m.b439*m.b946 + 2*m.b440*m.b539 + 2*m.b440 - 4*m.b539 - 2*m.b440*
m.b683 - 2*m.b440*m.b847 - 2*m.b440*m.b929 + 2*m.b441*m.b443 - 2*m.b443 + 2*m.b441*m.b976 + 2*
m.b442*m.b596 + 2*m.b442*m.b975 + 2*m.b443*m.b539 + 2*m.b443*m.b596 - 2*m.b443*m.b707 - 2*m.b444*
m.b1028 + 2*m.b445*m.b889 + 2*m.b445*m.b906 + 2*m.b446*m.b736 + 2*m.b446*m.b940 + 2*m.b446*
m.b1018 - 2*m.b447*m.b449 + 2*m.b447 - 2*m.b447*m.b737 + 2*m.b447*m.b759 - 2*m.b447*m.b932 - 2*
m.b448*m.b504 - 2*m.b448*m.b797 - 2*m.b448*m.b1010 + 2*m.b449*m.b1010 - 2*m.b449*m.b1031 + 2*
m.b450*m.b452 + 2*m.b451*m.b642 - 2*m.b451*m.b985 + 2*m.b452*m.b985 + 2*m.b452*m.b1010 - 2*m.b453
*m.b853 - 2*m.b453*m.b1025 - 2*m.b454*m.b798 - 2*m.b454*m.b854 + 2*m.b455*m.b457 - 4*m.b457 - 2*
m.b455*m.b738 - 2*m.b456*m.b459 - 2*m.b459 - 2*m.b456*m.b699 - 2*m.b456*m.b701 + 2*m.b457*m.b459
+ 2*m.b457*m.b854 + 2*m.b457*m.b898 - 2*m.b458*m.b460 - 2*m.b460 - 2*m.b458*m.b714 - 2*m.b458*
m.b774 + 2*m.b459*m.b460 + 2*m.b459*m.b761 + 2*m.b460*m.b462 - 2*m.b462 + 2*m.b460*m.b978 + 2*
m.b461*m.b562 - 2*m.b461 + 2*m.b562 + 2*m.b461*m.b703 + 2*m.b461*m.b753 - 2*m.b461*m.b814 + 2*
m.b462*m.b814 - 2*m.b462*m.b840 + 2*m.b462*m.b910 + 2*m.b463*m.b963 - 2*m.b463*m.b1000 - 2*m.b464
*m.b465 - 2*m.b465 - 2*m.b464*m.b741 + 2*m.b465*m.b467 + 2*m.b465*m.b867 + 2*m.b465*m.b1000 - 2*
m.b466*m.b628 - 2*m.b628 - 2*m.b466*m.b789 + 2*m.b466*m.b951 + 2*m.b467*m.b628 + 2*m.b467*m.b741
+ 2*m.b468*m.b470 - 2*m.b468*m.b801 + 2*m.b469*m.b471 - 4*m.b471 + 2*m.b470*m.b471 + 2*m.b470*
m.b628 + 2*m.b471*m.b473 - 4*m.b473 + 2*m.b471*m.b575 + 2*m.b472*m.b474 + 2*m.b473*m.b474 + 2*
m.b473*m.b632 - 4*m.b632 + 2*m.b473*m.b645 + 2*m.b474*m.b944 - 2*m.b475*m.b581 - 2*m.b581 - 2*
m.b475*m.b649 + 2*m.b476*m.b581 - 2*m.b476*m.b777 + 2*m.b476*m.b944 + 2*m.b477*m.b479 + 2*m.b477*
m.b582 - 2*m.b477*m.b657 + 2*m.b478*m.b481 + 2*m.b478*m.b649 + 2*m.b479*m.b481 + 2*m.b479*m.b581
+ 2*m.b480*m.b482 - 2*m.b482 + 2*m.b480*m.b666 + 2*m.b481*m.b482 + 2*m.b483*m.b996 - 2*m.b484*
m.b486 - 2*m.b484*m.b695 - 2*m.b484*m.b818 - 2*m.b485*m.b487 - 2*m.b487 - 2*m.b485*m.b654 - 2*
m.b485*m.b730 + 2*m.b486*m.b487 + 2*m.b486*m.b996 + 2*m.b487*m.b489 - 2*m.b489 + 2*m.b487*m.b743
+ 2*m.b488*m.b490 - 4*m.b490 + 2*m.b488*m.b767 - 2*m.b488*m.b936 + 2*m.b489*m.b490 - 2*m.b489*
m.b768 + 2*m.b489*m.b914 + 2*m.b490*m.b807 + 2*m.b490*m.b946 + 2*m.b491*m.b493 + 2*m.b491*m.b967
+ 2*m.b492*m.b543 + 2*m.b492*m.b983 + 2*m.b493*m.b543 + 2*m.b493*m.b592 - 4*m.b592 + 2*m.b494*
m.b495 + 2*m.b494*m.b894 + 2*m.b495*m.b1030 + 2*m.b495*m.b1032 + 2*m.b496*m.b498 - 2*m.b496 - 2*
m.b498 - 2*m.b496*m.b723 + 2*m.b496*m.b930 + 2*m.b496*m.b1030 - 2*m.b497*m.b1009 - 2*m.b498*
m.b605 + 2*m.b498*m.b949 + 2*m.b498*m.b1009 + 2*m.b499*m.b500 - 4*m.b500 - 2*m.b499*m.b604 - 2*
m.b604 + 2*m.b500*m.b502 - 4*m.b502 + 2*m.b500*m.b783 + 2*m.b500*m.b1009 - 2*m.b501*m.b750 + 4*
m.b501 - 2*m.b501*m.b751 - 2*m.b501*m.b920 - 2*m.b501*m.b921 + 2*m.b502*m.b672 + 2*m.b502*m.b751
+ 2*m.b502*m.b921 - 2*m.b503*m.b555 - 2*m.b503*m.b810
n.replace(' ', ',').split(',') if x != ''])
# flatten result and remove dupes
return list(set([y for x in result for y in x]))
def filter_wip(n, d):
return d.get('mode', 'enabled') in ['wip', 'enabled']
def filter_enabled(n, d):
return d.get('mode', 'enabled') == 'enabled'
def filter_disabled(n, d):
return d.get('mode', 'enabled') in ['enabled', 'disabled']
exclude_keys = flatten_list(self.exclude)
image_ex = list(exclude_keys)
rpm_ex = list(exclude_keys)
image_keys = flatten_list(self.images)
rpm_keys = flatten_list(self.rpms)
filter_func = None
if self.load_wip and self.load_disabled:
pass # use no filter, load all
elif self.load_wip:
filter_func = filter_wip
elif self.load_disabled:
filter_func = filter_disabled
else:
filter_func = filter_enabled
replace_vars = {}
if self.group_config.vars:
replace_vars = self.group_config.vars.primitive()
if config_excludes:
excludes = self.group_config.get(config_excludes, {})
image_ex.extend(excludes.get('images', []))
rpm_ex.extend(excludes.get('rpms', []))
# pre-load the image data to get the names for all images
# eventually we can use this to allow loading images by
# name or distgit. For now this is used elsewhere
image_name_data = self.gitdata.load_data(path='images')
for img in image_name_data.values():
name = img.data.get('name')
short_name = name.split('/')[1]
self.image_name_map[name] = img.key
self.image_name_map[short_name] = img.key
image_data = self.gitdata.load_data(path='images', keys=image_keys,
exclude=image_ex,
replace_vars=replace_vars,
filter_funcs=None if len(image_keys) else filter_func)
try:
rpm_data = self.gitdata.load_data(path='rpms', keys=rpm_keys,
exclude=rpm_ex,
replace_vars=replace_vars,
filter_funcs=None if len(rpm_keys) else filter_func)
except gitdata.GitDataPathException:
# some older versions have no RPMs, that's ok.
rpm_data = {}
missed_include = set(image_keys + rpm_keys) - set(list(image_data.keys()) + list(rpm_data.keys()))
if len(missed_include) > 0:
raise DoozerFatalError('The following images or rpms were either missing or filtered out: {}'.format(', '.join(missed_include)))
if mode in ['images', 'both']:
for i in image_data.values():
metadata = ImageMetadata(self, i)
self.image_map[metadata.distgit_key] = metadata
if not self.image_map:
self.logger.warning("No image metadata directories found for given options within: {}".format(self.group_dir))
for image in self.image_map.values():
image.resolve_parent()
# now that ancestry is defined, make sure no cyclic dependencies
for image in self.image_map.values():
for child in image.children:
if image.is_ancestor(child):
raise DoozerFatalError('{} cannot be both a parent and dependent of {}'.format(child.distgit_key, image.distgit_key))
self.generate_image_tree()
if mode in ['rpms', 'both']:
for r in rpm_data.values():
metadata = RPMMetadata(self, r, clone_source=clone_source)
self.rpm_map[metadata.distgit_key] = metadata
if not self.rpm_map:
self.logger.warning("No rpm metadata directories found for given options within: {}".format(self.group_dir))
# Make sure that the metadata is not asking us to check out the same exact distgit & branch.
# This would almost always indicate someone has checked in duplicate metadata into a group.
no_collide_check = {}
for meta in list(self.rpm_map.values()) + list(self.image_map.values()):
key = '{}/{}/#{}'.format(meta.namespace, meta.name, meta.branch())
if key in no_collide_check:
raise IOError('Complete duplicate distgit & branch; something wrong with metadata: {} from {} and {}'.format(key, meta.config_filename, no_collide_check[key].config_filename))
no_collide_check[key] = meta
        # Read in the streams definition for this group if one exists
        streams = self.gitdata.load_data(key='streams')
        if streams:
            self.streams = Model(streams.data)
if clone_distgits:
self.clone_distgits()
self.initialized = True
def initialize_logging(self):
if self.initialized:
return
# Three flags control the output modes of the command:
# --verbose prints logs to CLI as well as to files
# --debug increases the log level to produce more detailed internal
# behavior logging
# --quiet opposes both verbose and debug
if self.debug:
log_level = logging.DEBUG
elif self.quiet:
log_level = logging.WARN
else:
log_level = logging.INFO
default_log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
root_logger = logging.getLogger()
root_logger.setLevel(logging.WARN)
root_stream_handler = logging.StreamHandler()
root_stream_handler.setFormatter(default_log_formatter)
root_logger.addHandler(root_stream_handler)
# If in debug mode, let all modules log
if not self.debug:
# Otherwise, only allow children of ocp to log
root_logger.addFilter(logging.Filter("ocp"))
# Get a reference to the logger for doozer
self.logger = logutil.getLogger()
self.logger.propagate = False
# levels will be set at the handler level. Make sure master level is low.
self.logger.setLevel(logging.DEBUG)
main_stream_handler = logging.StreamHandler()
main_stream_handler.setFormatter(default_log_formatter)
main_stream_handler.setLevel(log_level)
self.logger.addHandler(main_stream_handler)
self.debug_log_path = os.path.join(self.working_dir, "debug.log")
debug_log_handler = logging.FileHandler(self.debug_log_path)
# Add thread information for debug log
debug_log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s (%(thread)d) %(message)s'))
debug_log_handler.setLevel(logging.DEBUG)
self.logger.addHandler(debug_log_handler)
@staticmethod
def timestamp():
return datetime.datetime.utcnow().isoformat()
def assert_mutation_is_permitted(self):
"""
In group.yml, it is possible to instruct doozer to prevent all builds / mutation of distgits.
Call this method if you are about to mutate anything. If builds are disabled, an exception will
be thrown.
"""
if self.freeze_automation == FREEZE_AUTOMATION_YES:
raise DoozerFatalError('Automation (builds / mutations) for this group is currently frozen (freeze_automation set to {}). Coordinate with the group owner to change this if you believe it is incorrect.'.format(FREEZE_AUTOMATION_YES))
def image_metas(self):
return list(self.image_map.values())
def ordered_image_metas(self):
return [self.image_map[dg] for dg in self.image_order]
def filter_failed_image_trees(self, failed):
for i in self.ordered_image_metas():
if i.parent and i.parent.distgit_key in failed:
failed.append(i.distgit_key)
for f in failed:
if f in self.image_map:
del self.image_map[f]
# regen order and tree
self.generate_image_tree()
return failed
def generate_image_tree(self):
self.image_tree = {}
image_lists = {0: []}
def add_child_branch(child, branch, level=1):
if level not in image_lists:
image_lists[level] = []
for sub_child in child.children:
if sub_child.distgit_key not in self.image_map:
continue # don't add images that have been filtered out
branch[sub_child.distgit_key] = {}
image_lists[level].append(sub_child.distgit_key)
add_child_branch(sub_child, branch[sub_child.distgit_key], level + 1)
for image in self.image_map.values():
if not image.parent:
self.image_tree[image.distgit_key] = {}
image_lists[0].append(image.distgit_key)
add_child_branch(image, self.image_tree[image.distgit_key])
levels = list(image_lists.keys())
levels.sort()
self.image_order = []
for l in levels:
for i in image_lists[l]:
if i not in self.image_order:
self.image_order.append(i)
def image_distgit_by_name(self, name):
"""Returns image meta but full name, short name, or distgit"""
return self.image_name_map.get(name, None)
def rpm_metas(self):
return list(self.rpm_map.values())
def all_metas(self):
return self.image_metas() + self.rpm_metas()
def register_source_alias(self, alias, path):
self.logger.info("Registering source alias %s: %s" % (alias, path))
path = os.path.abspath(path)
assertion.isdir(path, "Error registering source alias %s" % alias)
self.source_paths[alias] = path
with Dir(path):
origin_url = "?"
rc1, out_origin, err_origin = exectools.cmd_gather(
["git", "config", "--get", "remote.origin.url"])
if rc1 == 0:
origin_url = out_origin.strip()
                # Usually something like "git@github.com:openshift/origin.git"
# But we want an https hyperlink like http://github.com/openshift/origin
if origin_url.startswith("git@"):
origin_url = origin_url[4:] # remove git@
origin_url = origin_url.replace(":", "/", 1) # replace first colon with /
if origin_url.endswith(".git"):
origin_url = origin_url[:-4] # remove .git
origin_url = "https://%s" % origin_url
else:
self.logger.error("Failed acquiring origin url for source alias %s: %s" % (alias, err_origin))
branch = "?"
rc2, out_branch, err_branch = exectools.cmd_gather(
["git", "rev-parse", "--abbrev-ref", "HEAD"])
if rc2 == 0:
branch = out_branch.strip()
else:
self.logger.error("Failed acquiring origin branch for source alias %s: %s" % (alias, err_branch))
if 'source_alias' not in self.state:
self.state['source_alias'] = {}
self.state['source_alias'][alias] = {
'url': origin_url,
'branch': branch,
'path': path
}
self.add_record("source_alias", alias=alias, origin_url=origin_url, branch=branch, path=path)
def register_stream_alias(self, alias, image):
self.logger.info("Registering image stream alias override %s: %s" % (alias, image))
self.stream_alias_overrides[alias] = image
@property
def remove_tmp_working_dir(self):
"""
Provides thread safe method of checking whether runtime should clean up the working directory.
:return: Returns True if the directory should be deleted
"""
with self.log_lock:
return self._remove_tmp_working_dir
@remove_tmp_working_dir.setter
def remove_tmp_working_dir(self, remove):
"""
Provides thread safe method of setting whether runtime should clean up the working directory.
:param remove: True if the directory should be removed. Only the last value set impacts the decision.
"""
with self.log_lock:
self._remove_tmp_working_dir = remove
def add_record(self, record_type, **kwargs):
"""
Records an action taken by oit that needs to be communicated to outside
systems. For example, the update a Dockerfile which needs to be
reviewed by an owner. Each record is encoded on a single line in the
record.log. Records cannot contain line feeds -- if you need to
communicate multi-line data, create a record with a path to a file in
the working directory.
:param record_type: The type of record to create.
:param kwargs: key/value pairs
A record line is designed to be easily parsed and formatted as:
record_type|key1=value1|key2=value2|...|
"""
# Multiple image build processes could be calling us with action simultaneously, so
# synchronize output to the file.
with self.log_lock:
record = "%s|" % record_type
for k, v in kwargs.items():
assert ("\n" not in str(k))
# Make sure the values have no linefeeds as this would interfere with simple parsing.
v = str(v).replace("\n", " ;;; ").replace("\r", "")
record += "%s=%s|" % (k, v)
# Add the record to the file
self.record_log.write("%s\n" % record)
self.record_log.flush()
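    # Illustrative record line (hypothetical values, not from the original source), as
    # written by register_source_alias via add_record("source_alias", ...):
    #
    #   source_alias|alias=ose|origin_url=https://github.com/openshift/origin|branch=master|path=/tmp/ose|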
def add_distgits_diff(self, distgit, diff):
"""
Records the diff of changes applied to a distgit repo.
"""
with io.open(os.path.join(self.distgits_diff_dir, distgit + '.patch'), 'w', encoding='utf-8') as f:
f.write(diff)
def resolve_image(self, distgit_name, required=True):
if distgit_name not in self.image_map:
if not required:
return None
raise DoozerFatalError("Unable to find image metadata in group / included images: | |
# network_library.py
"""This file implements a neural network library 'from scratch', i.e.
only using numpy to implement the matrix data structures used to construct
neural networks. Precisely, you can use this library to create
fully-connected neural networks; this library does not support the creation
of CNNs or RNNs.
Credit: Much of the code and functions in this library were inspired by
<NAME>'s own neural network library:
https://github.com/mnielsen/neural-networks-and-deep-learning, and his
online textbook: 'Neural networks and deep learning'. Without this
splendid resource, this project would not have been possible.
Indeed, much of the code in this library looks similar to Nielsen's
network.py and network2.py files. However, there is an aspect of this
library that distinguishes it from Nielsen's: The biases of the neural
network are initialized in Nielsen's library to be vertical numpy
vectors, whereas they are initialized to be horizontal numpy vectors in
this library. This minor difference turns out to change the specifics
of the matrix multiplication and arithmetic steps involved in the
gradient descent and backpropagation functions.
Another important distinction between Nielsen's library and this
library is that given the network shape: [2,3,2], that is, a network of
2 input neurons, a second layer of 3 hidden neurons, and finally an
output layer of 2 neurons, Nielsen's network.py program outputs a 2x3
matrix; on the other hand, a network initialized with the shape [2,3,2]
using this library outputs a 1x2 vector. To me, a 1x2 vector makes much
more sense, as what we are interested in are the activations of
the final layer of neurons and it is easy to interpret the two elements
of a 1x2 vector, [a,b], as the outputs of the final layer of neurons.
It is unclear to me how one is to interpret a 2x3 matrix as the output.
Nevertheless, Nielsen's library served as an invaluable resource when
writing this library and besides these distinguinshing factors, our
libraries remain very similar."""
import numpy as np
import random
import json
import sys
import matplotlib
import matplotlib.pyplot as plt
class Network:
"""The Network class holds the functions needed to initialize a
neural network and train it.
Networks are initialized with the shape given by 'netshape'. For
example, if netshape = [2,3,2], then the input layer of neurons
accepts numpy arrays (NPAs) of the form: [a,b], and outputs NPAs of
of the form: [c,d]. This example network would have one hidden
layer of 3 neurons. Each layer of biases in the network
are contained in an NPA of shape (n,) where n is the number of
neurons in that layer. In our example network, the
biases would look something like this:
[
array([ 1.24740072, -0.69648469, 2.04505759]),
array([ 0.39117851, -0.86469781])
]
This was a set of biases generated using this library for this
specific network architecture. Note that there are no biases for
the first layer of the network as is the standard convention for
neural networks. The first subarray represents the biases for the
3 neurons in the second layer. The final subarray represents the
biases for the two output neurons.
The weights are initialized in a similar way. Here, the first
subarray holds the weights connecting the first layer to the second
layer of the neural network. The first subarray has shape (3, 2)
which is determined by the 3 neurons in the second layer, and the
2 neurons in the first layer. Each row in the 3x2 matrix represents
the weights between both neurons in the first layer and one of the
neurons in the second layer:
[
array([[-0.8272883 , -1.74170864],
[ 0.22531047, 0.76300333],
[-0.14128084, -2.00334914]]),
array([[ 1.43465322, -0.69658175, -0.25336335],
[ 0.20888024, 0.00778669, 0.15188696]])
]
The first element of this subarray, [-0.8272883 , -1.74170864],
is an NPA that represents the
weights connecting the two neurons in the first layer to the first
(or 'top') neuron of the second layer. The remaining NPAs can be
similarly interpreted. The values for the weights and
biases are initialized with values taken from a normal distribution
with a mean of 0 and a standard deviation of 1.
Customizable parameters in this model include:
- netshape: The shape of the neural network.
- learning_rate: The rate at which the network learns. In other
words, this term controls how large of an impact the gradient
descent step has on the weights and biases of the network. If this
term is too large, the network becomes unstable as it constantly
overshoots 'good values' for its weights and biases. However, if
this term is too small, then it will take an extremely long time
for the network to learn.
- lmbda: Used in the gradient descent step. Lambda (written as
lmbda because 'lambda' is already a reserved word in Python)
determines the relative importance of minimizing the weights vs.
minimizing the cost function with respect to the weights. In other
words, this term controls how much of an impact L2 regularization
has on the network's weights.
- mini_batch_size: Determines how large the mini batch is. For
example, a mini batch size of 32 means each mini batch contains
32 training images.
- cost_function: This library contains two cost functions: the
quadratic cost function and the cross entropy cost function. To
initialize the network with the quadratic cost function set
cost_function=QuadraticCost, and for the cross entropy cost
function set cost_function=CrossEntropyCost."""
def __init__(self, netshape, learning_rate, mini_batch_size,
cost_function, small_weights=False):
# Record the number of layers the network has.
self.netlength = len(netshape)
# Record the number of neurons in each layer.
self.netshape = netshape
# Initialize the biases of the network. Each layer of biases is
# represented as a (1,n) array where n represents the number of
# neurons in that layer. Each of these numpy arrays (NPAs)
# is then stored in the list, biases.
self.biases = [np.random.randn(1, i)[0] for i in netshape[1:]]
# If the small_weights boolean is set to True, then the
# initialized weights have a standard deviation of 1/sqrt(n),
# where n is the number of neurons in the previous layer
# relative to the weight.
# Note that i and j specify the dimensions of each of the
# sublists in the network. So np.random.randn(2, 3) creates a
# numpy matrix of dimensions 2 x 3 with values taken from a
# normal distribution of mean 0 and standard deviation of 1.
if small_weights:
self.weights = [np.random.randn(j, i)/np.sqrt(i) for i, j in
zip(netshape[0:], netshape[1:])]
else:
self.weights = [np.random.randn(j, i) for i, j in
zip(netshape[0:], netshape[1:])]
self.learning_rate = learning_rate
# The weight decay factor is (eta * lmbda / n), where n is the
# size of the dataset (for MNIST, n=50K), so we don't want to
# make lambda too small. 5 seems like a reasonable number
# and is what Nielsen himself uses in the textbook. While this
# value is probably not optimal, it is a reasonable value to
# start with.
self.lmbda = 5
self.mini_batch_size = mini_batch_size
self.cost_function = cost_function
def feedforward(self, a):
"""Return the output of the network where 'a' is the input
signal. If the softmax boolean value is set to true, then the
final layer of neurons will be run through the softmax activation
function rather than the sigmoid activation function."""
for b, w in zip(self.biases, self.weights):
a = np.asarray(a)
z = np.dot(w, a) + b
a = sigmoid(z)
return a
def get_activations(self, a):
""" Calculates the activations and z values for each layer of
neurons in the network.
This function is similar to feedforward(), but
get_activations() was specifically made as a helper function
for backpropagation() and so it returns two lists: first
a list containing all the network's activations and a list
containing every layer's z values. """
activations = [np.asarray(a)]
zs = []
for b, w in zip(self.biases, self.weights):
a = np.asarray(a)
z = np.dot(w, a) + b
a = sigmoid(z)
zs.append(z)
activations.append(a)
return activations, zs
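# Descriptive note (not in the original source): for a [2, 3, 2] network,
# get_activations() returns len(netshape) activation arrays (the input plus
# one per weighted layer) and len(netshape) - 1 arrays of z values.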
def backpropagation(self, a, y, act_mask, zs_mask):
"""Calculate the cost gradient with respect to the weights and
biases of the network using the backpropagation algorithm. 'a' is
the input signal (i.e. in the case of MNIST this would be a list of
784 elements). 'y' is the label of the input signal; in the case
of MNIST, this would be a one-hot encoding of a digit between 0 and
9 (e.g. the number 5 would be encoded as [0,0,0,0,0,1,0,0,0,0])."""
# Initialize the gradients of the weights and | |
= 'disconnect'
logging.debug('condition_disconnect() returning %s' % disconnect_ok)
return disconnect_ok
def handle_getstate(self, body):
logging.debug('handle_getstate()')
return create_msg(self.state, body=self.cmstate_levels())
# returns last transition plus current state
def handle_getstatus(self, body):
logging.debug('handle_getstatus()')
return self.status_msg()
def handle_storejsonconfig(self, body):
logging.debug('handle_storejsonconfig()')
try:
with open(self.activedetfilename, 'w') as f:
print('%s' % body["json_data"], file=f)
except Exception as ex:
msg = 'handle_storejsonconfig(): %s' % ex
logging.error(msg)
return error_msg(msg)
else:
logging.info('active detectors file updated: %s' % self.activedetfilename)
return {}
def handle_getinstrument(self, body):
logging.debug('handle_getinstrument()')
body = {'instrument': self.instrument, 'station': self.station}
return create_msg('instrument', body=body)
def handle_selectplatform(self, body):
logging.debug('handle_selectplatform()')
if self.state != 'unallocated':
msg = 'selectPlatform only permitted in unallocated state'
self.report_error(msg)
return error_msg(msg)
try:
for level, val1 in body.items():
for key2, val2 in val1.items():
self.cmstate[level][int(key2)]['active'] = body[level][key2]['active']
if level == 'drp':
# drp readout group
if self.cmstate[level][int(key2)]['active'] == 1:
self.cmstate[level][int(key2)]['det_info']['readout'] = body[level][key2]['det_info']['readout']
else:
self.cmstate[level][int(key2)]['det_info']['readout'] = self.platform
except Exception as ex:
msg = 'handle_selectplatform(): %s' % ex
logging.error(msg)
return error_msg(msg)
return create_msg('ok')
def on_enter_reset(self):
self.cmstate.clear()
self.ids.clear()
return
def subtract_clients(self, missing_set):
if missing_set:
for level, item in self.cmstate_levels().items():
for xid in item.keys():
try:
alias = item[xid]['proc_info']['alias']
except KeyError as ex:
logging.error('KeyError: %s' % ex)
else:
missing_set -= set(['%s/%s' % (level, alias)])
return
def read_json_file(self, filename):
json_data = {}
try:
with open(filename) as fd:
json_data = oldjson.load(fd)
except FileNotFoundError as ex:
self.report_error('Error opening active detectors file: %s' % ex)
return {}
except Exception as ex:
self.report_error('Error reading active detectors file %s: %s' % (filename, ex))
return {}
return json_data
def get_required_set(self, d):
retval = set()
for level, item1 in d["activedet"].items():
for alias, item2 in item1.items():
retval.add(level + "/" + alias)
return retval
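# Illustrative sketch of the active detectors file layout that read_json_file()
# and condition_rollcall() expect (aliases and values below are made up):
# {
#   "activedet": {
#     "drp": {
#       "drp_cam0": {"active": 1, "det_info": {"readout": 0}},
#       "drp_cam1": {"active": 0, "det_info": {"readout": 0}}
#     },
#     "teb": {
#       "teb0": {"active": 1}
#     }
#   }
# }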
def progressReport(self, begin_time, end_time, *, progress_txt):
elapsed = (datetime.now(timezone.utc) - begin_time).total_seconds()
if elapsed >= 1.0:
total = (end_time - begin_time).total_seconds()
self.front_pub.send_json(progress_msg(progress_txt, elapsed, total))
return
def condition_rollcall(self):
global report_keys
retval = False
required_set = set()
if not self.bypass_activedet and not os.path.isfile(self.activedetfilename):
self.report_error('Missing active detectors file %s' % self.activedetfilename)
logging.warning("active detectors file disabled. Default settings will be used.")
# active detectors file bypassed
self.bypass_activedet = True
if not self.bypass_activedet:
# determine which clients are required by reading the active detectors file
json_data = self.read_json_file(self.activedetfilename)
if len(json_data) > 0:
if "activedet" in json_data.keys():
required_set = self.get_required_set(json_data)
else:
self.report_error('Missing "activedet" key in active detectors file %s' % self.activedetfilename)
if not required_set:
self.report_error('Failed to read configuration from active detectors file %s' % self.activedetfilename)
logging.debug('rollcall: bypass_activedet = %s' % self.bypass_activedet)
missing_set = required_set.copy()
newfound_set = set()
self.cmstate.clear()
self.ids.clear()
msg = create_msg('rollcall')
begin_time = datetime.now(timezone.utc)
end_time = begin_time + timedelta(seconds=self.rollcall_timeout)
while datetime.now(timezone.utc) < end_time:
self.back_pub.send_multipart([b'all', json.dumps(msg)])
for answer in wait_for_answers(self.back_pull, 1000, msg['header']['msg_id']):
if answer['header']['key'] in report_keys:
self.process_reports([answer])
continue
for level, item in answer['body'].items():
alias = item['proc_info']['alias']
responder = level + '/' + alias
if not self.bypass_activedet:
if responder not in required_set:
if responder not in newfound_set:
logging.info('Received response from %s, it does not appear in active detectors file' % responder)
newfound_set.add(responder)
elif responder not in missing_set:
# ignore duplicate response
continue
if level not in self.cmstate:
self.cmstate[level] = {}
id = answer['header']['sender_id']
self.cmstate[level][id] = item
if self.bypass_activedet:
# active detectors file disabled: default to active=1
self.cmstate[level][id]['active'] = 1
if level == 'drp':
self.cmstate[level][id]['det_info'] = {}
self.cmstate[level][id]['det_info']['readout'] = self.platform
elif responder in newfound_set:
# new detector + active detectors file enabled: default to active=0
self.cmstate[level][id]['active'] = 0
if level == 'drp':
self.cmstate[level][id]['det_info'] = {}
self.cmstate[level][id]['det_info']['readout'] = self.platform
else:
# copy values from active detectors file
self.cmstate[level][id]['active'] = json_data['activedet'][level][alias]['active']
if level == 'drp':
self.cmstate[level][id]['det_info'] = json_data['activedet'][level][alias]['det_info'].copy()
self.ids.add(id)
self.subtract_clients(missing_set)
if not missing_set:
break
self.progressReport(begin_time, end_time, progress_txt='rollcall')
for dup in self.check_for_dups():
self.report_error('duplicate alias responded to rollcall: %s' % dup)
if missing_set:
for client in missing_set:
self.report_error(client + ' did not respond to rollcall')
else:
retval = True
self.lastTransition = 'rollcall'
# add control info
if 'control' not in self.cmstate:
self.cmstate['control'] = {}
self.cmstate['control'][0] = {}
self.cmstate['control'][0]['active'] = 1
self.cmstate['control'][0]['control_info'] = {}
self.cmstate['control'][0]['proc_info'] = {}
self.cmstate['control'][0]['control_info']['xpm_master'] = self.xpm_master
self.cmstate['control'][0]['control_info']['pv_base'] = self.pv_base
self.cmstate['control'][0]['control_info']['cfg_dbase'] = self.cfg_dbase
self.cmstate['control'][0]['control_info']['instrument'] = self.instrument
self.cmstate['control'][0]['proc_info']['alias'] = self.alias
self.cmstate['control'][0]['proc_info']['host'] = socket.gethostname()
self.cmstate['control'][0]['proc_info']['pid'] = os.getpid()
logging.debug('cmstate after rollcall:\n%s' % self.cmstate)
logging.debug('condition_rollcall() returning %s' % retval)
return retval
# check_for_dups - check for duplicate aliases
def check_for_dups(self):
aliases = set()
dups = set()
for level, item in self.cmstate_levels().items():
for xid in item:
alias = self.cmstate[level][xid]['proc_info']['alias']
if alias in aliases:
dups.add(level + '/' + alias)
else:
aliases.add(alias)
if len(dups) > 0:
logging.debug('duplicate aliases: %s' % dups)
return dups
# filter_active_set - return subset of ids which have 'active' flag set
def filter_active_set(self, ids):
matches = set()
for level, item in self.cmstate_levels().items():
for xid in item:
if item[xid]['active'] == 1:
matches.add(xid)
return matches.intersection(ids)
# filter_active_dict - return subset of dict that has 'active' flag set
def filter_active_dict(self, oldstate):
newstate = dict()
for level, item in oldstate.items():
for xid in item:
if item[xid]['active'] == 1:
if level not in newstate:
newstate[level] = dict()
newstate[level][xid] = copy.copy(oldstate[level][xid])
return newstate
# filter_level - return subset of ids for which 'level' starts with prefix
def filter_level(self, prefix, ids):
matches = set()
for level, item in self.cmstate_levels().items():
if level.startswith(prefix):
matches.update(set(item.keys()))
return matches.intersection(ids)
def get_aliases(self, id_list):
alias_list = []
for level, item in self.cmstate_levels().items():
for xid in item.keys():
if xid in id_list and 'proc_info' in item[xid]:
try:
alias_list.append(item[xid]['proc_info']['alias'])
except KeyError:
alias_list.append('%s/%s/%s' %
(level,
item[xid]['proc_info']['pid'],
item[xid]['proc_info']['host']))
return alias_list
def report_error(self, msg):
logging.error(msg)
self.front_pub.send_json(error_msg(msg))
return
def start_run(self, experiment_name):
run_num = 0
ok = False
error_msg = "start_run error"
serverURLPrefix = "{0}run_control/{1}/ws/".format(self.url + "/" if not self.url.endswith("/") else self.url, experiment_name)
logging.debug('serverURLPrefix = %s' % serverURLPrefix)
try:
resp = requests.post(serverURLPrefix + "start_run", auth=HTTPBasicAuth(self.user, self.password))
except Exception as ex:
logging.error("start_run (user=%s) exception: %s" % (self.user, ex))
else:
logging.debug("start_run response: %s" % resp.text)
if resp.status_code == requests.codes.ok:
if resp.json().get("success", None):
logging.debug("start_run success")
run_num = resp.json().get("value", {}).get("num", None)
ok = True
else:
self.report_error("start_run (user=%s) error: status code %d" % (self.user, resp.status_code))
if not ok:
raise Exception(err_msg)
logging.debug("start_run: run number = %s" % run_num)
return run_num
def end_run(self, experiment_name):
run_num = 0
ok = False
err_msg = "end_run error"
serverURLPrefix = "{0}run_control/{1}/ws/".format(self.url + "/" if not self.url.endswith("/") else self.url, experiment_name)
logging.debug('serverURLPrefix = %s' % serverURLPrefix)
try:
resp = requests.post(serverURLPrefix + "end_run", auth=HTTPBasicAuth(self.user, self.password))
except Exception as ex:
err_msg = "end_run error (user=%s): %s" % (self.user, ex)
else:
logging.debug("Response: %s" % resp.text)
if resp.status_code == requests.codes.ok:
if resp.json().get("success", None):
logging.debug("end_run success")
ok = True
else:
err_msg = "end_run error (user=%s): status code %d" % (self.user, resp.status_code)
if not ok:
self.report_error(err_msg)
return
def get_experiment(self):
logging.debug('get_experiment()')
experiment_name = None
instrument = self.instrument
# authentication is not required, adjust url accordingly
uurl = self.url.replace('ws-auth', 'ws').replace('ws-kerb', 'ws')
try:
resp = requests.get((uurl + "/" if not uurl.endswith("/") else uurl) + "lgbk/ws/activeexperiment_for_instrument_station",
params={"instrument_name": instrument, "station": self.station}, timeout=10)
except requests.exceptions.RequestException as ex:
logging.error("get_experiment(): request exception: %s" % ex)
else:
logging.debug("request response: %s" % resp.text)
if resp.status_code == requests.codes.ok:
logging.debug("headers: %s" % resp.headers)
if 'application/json' in resp.headers['Content-Type']:
try:
experiment_name = resp.json().get("value", {}).get("name", None)
except json.decoder.JSONDecodeError:
logging.error("Error: failed to decode JSON")
else:
logging.error("Error: failed to receive JSON")
else:
logging.error("Error: status code %d" % resp.status_code)
# result of request, or None
return experiment_name
def condition_common(self, transition, timeout, body=None):
if body is None:
body = {}
retval = True
# select procs with active flag set
ids = self.filter_active_set(self.ids)
# include phase1 info in the msg, if it exists
if transition in self.phase1Info.keys():
body['phase1Info'] = self.phase1Info[transition]
logging.debug('condition_common(%s): body = %s' % (transition, body))
msg = create_msg(transition, body=body)
self.back_pub.send_multipart([b'partition', json.dumps(msg)])
# now that the message has been sent, delete the phase1
# info so we don't send stale information next time.
self.phase1Info.pop(transition,None)
# only drp/teb/meb groups (aka levels) respond to configure and above
ids = self.filter_level('drp', ids) | self.filter_level('teb', ids) | self.filter_level('meb',ids)
if len(ids) == 0:
logging.debug('condition_common() empty set of ids')
return True
# make sure all the clients respond to transition before timeout
retlist, answers, reports = self.confirm_response(self.back_pull, timeout, msg['header']['msg_id'], ids, progress_txt=transition)
self.process_reports(reports)
answers_ok = (self.check_answers(answers) == 0)
ret = len(retlist)
if ret:
# Error
retval = False
for alias in self.get_aliases(retlist):
self.report_error('%s did not respond | |
# Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import warnings
import pandas as pd
from tqdm import tqdm
from probatus.binning import SimpleBucketer, AgglomerativeBucketer, QuantileBucketer
from probatus.stat_tests import es, ks, psi, ad, sw
from probatus.utils.arrayfuncs import check_numeric_dtypes
class DistributionStatistics(object):
"""
Wrapper that applies a statistical method to compare two distributions.
Depending on the test, one can also apply
binning of the data.
Example:
```python
import numpy as np
import pandas as pd
from probatus.stat_tests import DistributionStatistics
d1 = np.histogram(np.random.normal(size=1000), 10)[0]
d2 = np.histogram(np.random.normal(size=1000), 10)[0]
myTest = DistributionStatistics('KS', bin_count=10)
test_statistic, p_value = myTest.compute(d1, d2, verbose=True)
```
"""
binning_strategy_dict = {
"simplebucketer": SimpleBucketer,
"agglomerativebucketer": AgglomerativeBucketer,
"quantilebucketer": QuantileBucketer,
None: None,
}
statistical_test_dict = {
"ES": {
"func": es,
"name": "Epps-Singleton",
"default_binning": None,
},
"KS": {
"func": ks,
"name": "Kolmogorov-Smirnov",
"default_binning": None,
},
"AD": {
"func": ad,
"name": "<NAME>",
"default_binning": None,
},
"SW": {
"func": sw,
"name": "Shapiro-Wilk based difference",
"default_binning": None,
},
"PSI": {
"func": psi,
"name": "Population Stability Index",
"default_binning": "quantilebucketer",
},
}
def __init__(self, statistical_test, binning_strategy="default", bin_count=10):
"""
Initializes the class.
Args:
statistical_test (string): Statistical
method to apply, statistical methods implemented:
- `'ES'`: Epps-Singleton,
- `'KS'`: Kolmogorov-Smirnov statistic,
- `'PSI'`: Population Stability Index,
- `'SW'`: Shapiro-Wilk based difference statistic,
- `'AD'`: Anderson-Darling TS.
binning_strategy (string, optional):
Binning strategy to apply, binning strategies implemented:
- `'simplebucketer'`: equally spaced bins,
- `'agglomerativebucketer'`: binning by applying the Scikit-learn implementation of Agglomerative
Clustering,
- `'quantilebucketer'`: bins with equal number of elements,
- `'default'`: applies a default binning for a given stats_test. For all tests apart from PSI, no
binning (None) is used. For PSI, the quantilebucketer is used by default,
- `None`: no binning is applied. The test is computed based on original distribution.
bin_count (int, optional): In case binning_strategy is not None, specify the number of bins to be used by
the binning strategy. By default 10 bins are used.
"""
self.statistical_test = statistical_test.upper()
self.binning_strategy = binning_strategy
self.bin_count = bin_count
self.fitted = False
# Initialize the statistical test
if self.statistical_test not in self.statistical_test_dict:
raise NotImplementedError(
"The statistical test should be one of {}".format(self.statistical_test_dict.keys())
)
else:
self.statistical_test_name = self.statistical_test_dict[self.statistical_test]["name"]
self._statistical_test_function = self.statistical_test_dict[self.statistical_test]["func"]
# Initialize the binning strategy
if self.binning_strategy:
self.binning_strategy = self.binning_strategy.lower()
if self.binning_strategy == "default":
self.binning_strategy = self.statistical_test_dict[self.statistical_test]["default_binning"]
if self.binning_strategy not in self.binning_strategy_dict:
raise NotImplementedError(
"The binning strategy should be one of {}".format(list(self.binning_strategy_dict.keys()))
)
else:
binner = self.binning_strategy_dict[self.binning_strategy]
if binner is not None:
self.binner = binner(bin_count=self.bin_count)
def __repr__(self):
"""
String representation.
"""
repr_ = "DistributionStatistics object\n\tstatistical_test: {}".format(self.statistical_test)
if self.binning_strategy:
repr_ += "\n\tbinning_strategy: {}\n\tbin_count: {}".format(self.binning_strategy, self.bin_count)
else:
repr_ += "\n\tNo binning applied"
if self.fitted:
repr_ += "\nResults\n\tvalue {}-statistic: {}".format(self.statistical_test, self.statistic)
if hasattr(self, "p_value"):
repr_ += "\n\tp-value: {}".format(self.p_value)
return repr_
def compute(self, d1, d2, verbose=False):
"""
Apply the statistical test and compute statistic value and p-value.
Args:
d1: (np.array or pd.DataFrame):
distribution 1.
d2: (np.array or pd.DataFrame):
distribution 2.
verbose: (bool, optional):
Flag indicating whether prints should be shown.
Returns:
(Tuple of floats):
statistic value and p_value. For the PSI test, only the statistic is returned.
"""
check_numeric_dtypes(d1)
check_numeric_dtypes(d2)
# Bin the data
if self.binning_strategy:
self.binner.fit(d1)
d1_preprocessed = self.binner.compute(d1)
d2_preprocessed = self.binner.compute(d2)
else:
d1_preprocessed, d2_preprocessed = d1, d2
# Perform the statistical test
res = self._statistical_test_function(d1_preprocessed, d2_preprocessed, verbose=verbose)
self.fitted = True
# Check form of results and return
if type(res) == tuple:
self.statistic, self.p_value = res
return self.statistic, self.p_value
else:
self.statistic = res
return self.statistic
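# Usage sketch for the PSI test with its default quantile binning (illustrative
# only; the input arrays and bin_count are arbitrary):
#   import numpy as np
#   psi_test = DistributionStatistics("PSI", binning_strategy="default", bin_count=10)
#   psi_value = psi_test.compute(np.random.normal(size=1000),
#                                np.random.normal(loc=0.3, size=1000))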
class AutoDist(object):
"""
Class to automatically apply all implemented statistical distribution tests and binning strategies
to (a selection of) features in two dataframes.
Example:
```python
import numpy as np
import pandas as pd
from probatus.stat_tests import AutoDist
df1 = pd.DataFrame(np.random.normal(size=(1000, 2)), columns=['feat_0', 'feat_1'])
df2 = pd.DataFrame(np.random.normal(size=(1000, 2)), columns=['feat_0', 'feat_1'])
myAutoDist = AutoDist(statistical_tests=["KS", "PSI"], binning_strategies='simplebucketer', bin_count=10)
myAutoDist.compute(df1, df2, column_names=df1.columns)
```
<img src="../img/autodist.png" width="700" />
"""
def __init__(self, statistical_tests="all", binning_strategies="default", bin_count=10):
"""
Initializes the class.
Args:
statistical_tests (str, optional): Statistical tests to apply, either list of tests names, or 'all'.
Statistical methods implemented:
- `'ES'`: Epps-Singleton,
- `'KS'`: Kolmogorov-Smirnov statistic,
- `'PSI'`: Population Stability Index,
- `'AD'`: Anderson-Darling TS.
binning_strategies (str, optional): Binning strategies to apply for each test, either list of tests names,
'all' or 'default'. Binning strategies that can be chosen:
- `'SimpleBucketer'`: equally spaced bins,
- `'AgglomerativeBucketer'`: binning by applying the Scikit-learn implementation of Agglomerative
Clustering,
- `'QuantileBucketer'`: bins with equal number of elements,
- `None`: no binning is applied. Note that not all statistical tests will be performed since some of
them require binning strategies.
- `'default'`: applies a default binning for a given stats_test. For all tests apart from PSI, no
binning (None) is used. For PSI, the quantilebucketer is used by default.
- `'all'`: each binning strategy is used for each statistical test
bin_count (integer, None or list of integers, optional):
bin_count value(s) to be used, note that None can only be used when no bucketing strategy is applied.
"""
self.fitted = False
# Initialize statistical tests to be performed
if statistical_tests == "all":
self.statistical_tests = list(DistributionStatistics.statistical_test_dict.keys())
elif isinstance(statistical_tests, str):
self.statistical_tests = [statistical_tests]
else:
self.statistical_tests = statistical_tests
# Initialize binning strategies to be used
if binning_strategies == "all":
self.binning_strategies = list(DistributionStatistics.binning_strategy_dict.keys())
elif isinstance(binning_strategies, str):
self.binning_strategies = [binning_strategies]
elif binning_strategies is None:
self.binning_strategies = [None]
else:
self.binning_strategies = binning_strategies
if not isinstance(bin_count, list):
self.bin_count = [bin_count]
else:
self.bin_count = bin_count
def __repr__(self):
"""
String representation.
"""
repr_ = "AutoDist object"
if not self.fitted:
repr_ += "\n\tAutoDist not fitted"
if self.fitted:
repr_ += "\n\tAutoDist fitted"
repr_ += "\n\tstatistical_tests: {}".format(self.statistical_tests)
repr_ += "\n\tbinning_strategies: {}".format(self.binning_strategies)
repr_ += "\n\tbin_count: {}".format(self.bin_count)
return repr_
def compute(
self,
df1,
df2,
column_names=None,
return_failed_tests=True,
suppress_warnings=False,
):
"""
Fit the AutoDist object to data; i.e. apply the statistical tests and binning strategies.
Args:
df1 (pd.DataFrame):
dataframe 1 for distribution comparison with dataframe 2.
df2 (pd.DataFrame):
dataframe 2 for distribution comparison with dataframe 1.
column_names (list of str, optional):
list of columns in df1 and df2 that should be compared. If None, all column names will be compared
return_failed_tests (bool, optional):
remove tests in result that did not succeed.
suppress_warnings (bool, optional):
whether to suppress warnings during the fit process.
Returns:
(pd.DataFrame):
dataframe with results of the performed statistical tests and binning strategies.
"""
if column_names is None:
column_names = df1.columns.to_list()
if len(set(column_names) - set(df2.columns)):
raise Exception("column_names was set to None but columns in provided dataframes are different")
# Check if all columns in column_names are in df1 and df2
elif len(set(column_names) - set(df1.columns)) or len(set(column_names) - set(df2.columns)):
raise Exception("Not all columns in `column_names` are in the provided dataframes")
# Calculate statistics and p-values for all combinations
result_all = pd.DataFrame()
for col, stat_test, bin_strat, bins in tqdm(
list(
itertools.product(
column_names,
self.statistical_tests,
self.binning_strategies,
self.bin_count,
)
)
):
if self.binning_strategies == ["default"]:
bin_strat = DistributionStatistics.statistical_test_dict[stat_test]["default_binning"]
dist = DistributionStatistics(statistical_test=stat_test, binning_strategy=bin_strat, bin_count=bins)
try:
if suppress_warnings:
warnings.filterwarnings("ignore")
_ = dist.compute(df1[col], df2[col])
if suppress_warnings:
warnings.filterwarnings("default")
statistic = dist.statistic
p_value = dist.p_value
except Exception:
statistic, p_value = "an error occurred", None
# Append result to | |
will be removed from the Inventory. This action CANNOT be undone!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Do you want to send these Committed items?': 'Do you want to send these Committed items?',
'Do you want to send this shipment?': 'Do you want to send this shipment?',
'Document Details': 'Document Details',
'Document Scan': 'Document Scan',
'Document added': 'Document added',
'Document deleted': 'Document deleted',
'Document removed': 'Document removed',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Doing nothing (no structured activity)',
'Domain': 'Domain',
'Domestic chores': 'Domestic chores',
'Donated': 'Donated',
'Donation Certificate': 'Donation Certificate',
'Donation Phone #': 'Donation Phone #',
'Donations': 'Donations',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Donor added',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Donor updated',
'Donors': 'Donors',
'Donors Report': 'Donors Report',
'Door frame': 'Door frame',
'Download OCR-able PDF Form': 'Download OCR-able PDF Form',
'Download Template': 'Download Template',
'Download last build': 'Download last build',
'Draft': 'Draft',
'Draft Features': 'Draft Features',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Dummy': 'Dummy',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwelling': 'Dwelling',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'EMS Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'EXERCISE': 'EXERCISE',
'Early Recovery': 'Early Recovery',
'Earth Enabled?': 'Earth Enabled?',
'Earthquake': 'Earthquake',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Alternative Item': 'Edit Alternative Item',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Edit Assessment',
'Edit Assessment Answer': 'Edit Assessment Answer',
'Edit Assessment Question': 'Edit Assessment Question',
'Edit Assessment Series': 'Edit Assessment Series',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Assessment Template': 'Edit Assessment Template',
'Edit Asset': 'Edit Asset',
'Edit Asset Log Entry': 'Edit Asset Log Entry',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Brand': 'Edit Brand',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Camp': 'Edit Camp',
'Edit Camp Service': 'Edit Camp Service',
'Edit Camp Type': 'Edit Camp Type',
'Edit Catalog': 'Edit Catalogue',
'Edit Catalog Item': 'Edit Catalogue Item',
'Edit Certificate': 'Edit Certificate',
'Edit Certification': 'Edit Certification',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Commitment': 'Edit Commitment',
'Edit Commitment Item': 'Edit Commitment Item',
'Edit Committed Person': 'Edit Committed Person',
'Edit Competency Rating': 'Edit Competency Rating',
'Edit Completed Assessment': 'Edit Completed Assessment',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Edit Contact Information',
'Edit Contents': 'Edit Contents',
'Edit Course': 'Edit Course',
'Edit Course Certificate': 'Edit Course Certificate',
'Edit Credential': 'Edit Credential',
'Edit Dead Body Details': 'Edit Dead Body Details',
'Edit Description': 'Edit Description',
'Edit Details': 'Edit Details',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Document': 'Edit Document',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Entry': 'Edit Entry',
'Edit Event': 'Edit Event',
'Edit Facility': 'Edit Facility',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit GPS data': 'Edit GPS data',
'Edit Group': 'Edit Group',
'Edit Home': 'Edit Home',
'Edit Home Address': 'Edit Home Address',
'Edit Hospital': 'Edit Hospital',
'Edit Human Resource': 'Edit Human Resource',
'Edit Identification Report': 'Edit Identification Report',
'Edit Identity': 'Edit Identity',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Import File': 'Edit Import File',
'Edit Incident': 'Edit Incident',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Item': 'Edit Item',
'Edit Item Category': 'Edit Item Category',
'Edit Item Pack': 'Edit Item Pack',
'Edit Job': 'Edit Job',
'Edit Job Role': 'Edit Job Role',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Level %d Locations?': 'Edit Level %d Locations?',
'Edit Level 1 Assessment': 'Edit Level 1 Assessment',
'Edit Level 2 Assessment': 'Edit Level 2 Assessment',
'Edit Location': 'Edit Location',
'Edit Location Details': 'Edit Location Details',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Configuration': 'Edit Map Configuration',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Mission': 'Edit Mission',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Order': 'Edit Order',
'Edit Order Item': 'Edit Order Item',
'Edit Organization': 'Edit Organisation',
'Edit Organization Domain': 'Edit Organisation Domain',
'Edit Parameters': 'Edit Parameters',
'Edit Patient': 'Edit Patient',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Population Statistic': 'Edit Population Statistic',
'Edit Position': 'Edit Position',
'Edit Problem': 'Edit Problem',
'Edit Project': 'Edit Project',
'Edit Project Organization': 'Edit Project Organization',
'Edit Projection': 'Edit Projection',
'Edit Question Meta-Data': 'Edit Question Meta-Data',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Registration': 'Edit Registration',
'Edit Relative': 'Edit Relative',
'Edit Repository Configuration': 'Edit Repository Configuration',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Edit Request Item',
'Edit Request for Donations': 'Edit Request for Donations',
'Edit Request for Volunteers': 'Edit Request for Volunteers',
'Edit Requested Skill': 'Edit Requested Skill',
'Edit Resource': 'Edit Resource',
'Edit Resource Configuration': 'Edit Resource Configuration',
'Edit River': 'Edit River',
'Edit Role': 'Edit Role',
'Edit Room': 'Edit Room',
'Edit SMS Settings': 'Edit SMS Settings',
'Edit SMTP to SMS Settings': 'Edit SMTP to SMS Settings',
'Edit Saved Search': 'Edit Saved Search',
'Edit Scenario': 'Edit Scenario',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Setting': 'Edit Setting',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Edit Shelter',
'Edit Shelter Service': 'Edit Shelter Service',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Skill': 'Edit Skill',
'Edit Skill Equivalence': 'Edit Skill Equivalence',
'Edit Skill Provision': 'Edit Skill Provision',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff Type': 'Edit Staff Type',
'Edit Subscription': 'Edit Subscription',
'Edit Subsector': 'Edit Subsector',
'Edit Synchronization Settings': 'Edit Synchronisation Settings',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Template Section': 'Edit Template Section',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Training': 'Edit Training',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit User': 'Edit User',
'Edit Vehicle': 'Edit Vehicle',
'Edit Vehicle Details': 'Edit Vehicle Details',
'Edit Volunteer Availability': 'Edit Volunteer Availability',
'Edit Warehouse': 'Edit Warehouse',
'Edit Web API Settings': 'Edit Web API Settings',
'Edit current record': 'Edit current record',
'Edit message': 'Edit message',
'Edit the OpenStreetMap data for this area': 'Edit the OpenStreetMap data for this area',
'Editable?': 'Editable?',
'Education': 'Education',
'Education materials received': 'Education materials received',
'Education materials, source': 'Education materials, source',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'Either file upload or image URL required.',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Electrical, gas, sewerage, water, hazmats': 'Electrical, gas, sewerage, water, hazmats',
'Elevated': 'Elevated',
'Elevators': 'Elevators',
'Email': 'Email',
'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address',
'Email Settings': 'Email Settings',
'Email and SMS': 'Email and SMS',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Emergency Department',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable': 'Enable',
'Enable/Disable Layers': 'Enable/Disable Layers',
'Enabled': 'Enabled',
'Enabled?': 'Enabled?',
'Enabling MapMaker layers disables the StreetView functionality': 'Enabling MapMaker layers disables the StreetView functionality',
'End Date': 'End Date',
'End date': 'End date',
'End date should be after start date': 'End date should be after start date',
'English': 'English',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a name for the spreadsheet you are uploading.': 'Enter a name for the spreadsheet you are uploading.',
'Enter a new support request.': 'Enter a new support request.',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid date before': 'Enter a valid date before',
'Enter a valid email': 'Enter a valid email',
'Enter a valid future date': 'Enter a valid future date',
'Enter a valid past date': 'Enter a valid past date',
'Enter some characters to bring up a list of possible matches': 'Enter some characters to bring up a list of possible matches',
'Enter some characters to bring up a list of possible matches.': 'Enter some characters to bring up a list of possible matches.',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the data for an assessment': 'Enter the data for an assessment',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Enter your firstname',
'Enter your organization': 'Enter | |
+ 2 * Ad[3, 1] + Ad[3, 2]) * (t0 - 1) + (
Ad[3, 0] + Ad[3, 1] + Ad[3, 2] + Ad[3, 3]
)
else:
Phi0_0 = (
Ad[0, 0] * tp0_0
+ Ad[0, 1] * tp0_1
+ Ad[0, 2] * tp0_2
+ Ad[0, 3] * tp0_3
)
Phi0_1 = (
Ad[1, 0] * tp0_0
+ Ad[1, 1] * tp0_1
+ Ad[1, 2] * tp0_2
+ Ad[1, 3] * tp0_3
)
Phi0_2 = (
Ad[2, 0] * tp0_0
+ Ad[2, 1] * tp0_1
+ Ad[2, 2] * tp0_2
+ Ad[2, 3] * tp0_3
)
Phi0_3 = (
Ad[3, 0] * tp0_0
+ Ad[3, 1] * tp0_1
+ Ad[3, 2] * tp0_2
+ Ad[3, 3] * tp0_3
)
Phi1_0 = 0
Phi1_1 = 0
Phi1_2 = 0
Phi1_3 = 0
if t1 < 0:
Phi1_0 = dAd[0, 3] * t1 + Ad[0, 3]
Phi1_1 = dAd[1, 3] * t1 + Ad[1, 3]
Phi1_2 = dAd[2, 3] * t1 + Ad[2, 3]
Phi1_3 = dAd[3, 3] * t1 + Ad[3, 3]
elif t1 > 1:
Phi1_0 = (3 * Ad[0, 0] + 2 * Ad[0, 1] + Ad[0, 2]) * (t1 - 1) + (
Ad[0, 0] + Ad[0, 1] + Ad[0, 2] + Ad[0, 3]
)
Phi1_1 = (3 * Ad[1, 0] + 2 * Ad[1, 1] + Ad[1, 2]) * (t1 - 1) + (
Ad[1, 0] + Ad[1, 1] + Ad[1, 2] + Ad[1, 3]
)
Phi1_2 = (3 * Ad[2, 0] + 2 * Ad[2, 1] + Ad[2, 2]) * (t1 - 1) + (
Ad[2, 0] + Ad[2, 1] + Ad[2, 2] + Ad[2, 3]
)
Phi1_3 = (3 * Ad[3, 0] + 2 * Ad[3, 1] + Ad[3, 2]) * (t1 - 1) + (
Ad[3, 0] + Ad[3, 1] + Ad[3, 2] + Ad[3, 3]
)
else:
Phi1_0 = (
Ad[0, 0] * tp1_0
+ Ad[0, 1] * tp1_1
+ Ad[0, 2] * tp1_2
+ Ad[0, 3] * tp1_3
)
Phi1_1 = (
Ad[1, 0] * tp1_0
+ Ad[1, 1] * tp1_1
+ Ad[1, 2] * tp1_2
+ Ad[1, 3] * tp1_3
)
Phi1_2 = (
Ad[2, 0] * tp1_0
+ Ad[2, 1] * tp1_1
+ Ad[2, 2] * tp1_2
+ Ad[2, 3] * tp1_3
)
Phi1_3 = (
Ad[3, 0] * tp1_0
+ Ad[3, 1] * tp1_1
+ Ad[3, 2] * tp1_2
+ Ad[3, 3] * tp1_3
)
Phi2_0 = 0
Phi2_1 = 0
Phi2_2 = 0
Phi2_3 = 0
if t2 < 0:
Phi2_0 = dAd[0, 3] * t2 + Ad[0, 3]
Phi2_1 = dAd[1, 3] * t2 + Ad[1, 3]
Phi2_2 = dAd[2, 3] * t2 + Ad[2, 3]
Phi2_3 = dAd[3, 3] * t2 + Ad[3, 3]
elif t2 > 1:
Phi2_0 = (3 * Ad[0, 0] + 2 * Ad[0, 1] + Ad[0, 2]) * (t2 - 1) + (
Ad[0, 0] + Ad[0, 1] + Ad[0, 2] + Ad[0, 3]
)
Phi2_1 = (3 * Ad[1, 0] + 2 * Ad[1, 1] + Ad[1, 2]) * (t2 - 1) + (
Ad[1, 0] + Ad[1, 1] + Ad[1, 2] + Ad[1, 3]
)
Phi2_2 = (3 * Ad[2, 0] + 2 * Ad[2, 1] + Ad[2, 2]) * (t2 - 1) + (
Ad[2, 0] + Ad[2, 1] + Ad[2, 2] + Ad[2, 3]
)
Phi2_3 = (3 * Ad[3, 0] + 2 * Ad[3, 1] + Ad[3, 2]) * (t2 - 1) + (
Ad[3, 0] + Ad[3, 1] + Ad[3, 2] + Ad[3, 3]
)
else:
Phi2_0 = (
Ad[0, 0] * tp2_0
+ Ad[0, 1] * tp2_1
+ Ad[0, 2] * tp2_2
+ Ad[0, 3] * tp2_3
)
Phi2_1 = (
Ad[1, 0] * tp2_0
+ Ad[1, 1] * tp2_1
+ Ad[1, 2] * tp2_2
+ Ad[1, 3] * tp2_3
)
Phi2_2 = (
Ad[2, 0] * tp2_0
+ Ad[2, 1] * tp2_1
+ Ad[2, 2] * tp2_2
+ Ad[2, 3] * tp2_3
)
Phi2_3 = (
Ad[3, 0] * tp2_0
+ Ad[3, 1] * tp2_1
+ Ad[3, 2] * tp2_2
+ Ad[3, 3] * tp2_3
)
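# Descriptive note (not in the original source): the expression below is the
# tensor-product evaluation of the cubic spline -- the basis weights Phi0_*,
# Phi1_* and Phi2_* computed above for each of the three dimensions are combined
# with the 4x4x4 block of spline coefficients anchored at (i0, i1, i2).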
values[n] = (
Phi0_0
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 0, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 0, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 0, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 0, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 3, i2 + 3])
)
)
+ Phi0_1
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 1, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 1, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 1, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 1, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 3, i2 + 3])
)
)
+ Phi0_2
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 2, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 2, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 2, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 2, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 | |
--*
An object containing a set of identities and associated mappings.
- *(dict) --*
A description of the identity.
- **IdentityId** *(string) --*
A unique identifier in the format REGION:GUID.
- **Logins** *(list) --*
The provider names.
- *(string) --*
- **CreationDate** *(datetime) --*
Date on which the identity was created.
- **LastModifiedDate** *(datetime) --*
Date on which the identity was last modified.
- **NextToken** *(string) --*
A pagination token.
:type IdentityPoolId: string
:param IdentityPoolId: **[REQUIRED]**
An identity pool ID in the format REGION:GUID.
:type MaxResults: integer
:param MaxResults: **[REQUIRED]**
The maximum number of identities to return.
:type NextToken: string
:param NextToken:
A pagination token.
:type HideDisabled: boolean
:param HideDisabled:
An optional boolean parameter that allows you to hide disabled identities. If omitted, the ListIdentities API will include disabled identities in the response.
:rtype: dict
:returns:
"""
pass
def list_identity_pools(self, MaxResults: int, NextToken: str = None) -> Dict:
"""
Lists all of the Cognito identity pools registered for your account.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentityPools>`_
**Request Syntax**
::
response = client.list_identity_pools(
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'IdentityPools': [
{
'IdentityPoolId': 'string',
'IdentityPoolName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
The result of a successful ListIdentityPools action.
- **IdentityPools** *(list) --*
The identity pools returned by the ListIdentityPools action.
- *(dict) --*
A description of the identity pool.
- **IdentityPoolId** *(string) --*
An identity pool ID in the format REGION:GUID.
- **IdentityPoolName** *(string) --*
A string that you provide.
- **NextToken** *(string) --*
A pagination token.
:type MaxResults: integer
:param MaxResults: **[REQUIRED]**
The maximum number of identities to return.
:type NextToken: string
:param NextToken:
A pagination token.
:rtype: dict
:returns:
"""
pass
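# Pagination sketch (illustrative only; 'client' is a boto3 cognito-identity
# client and MaxResults is an arbitrary page size):
#   pools = []
#   resp = client.list_identity_pools(MaxResults=10)
#   pools.extend(resp['IdentityPools'])
#   while resp.get('NextToken'):
#       resp = client.list_identity_pools(MaxResults=10, NextToken=resp['NextToken'])
#       pools.extend(resp['IdentityPools'])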
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
"""
Lists the tags that are assigned to an Amazon Cognito identity pool.
A tag is a label that you can apply to identity pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria.
You can use this action up to 10 times per second, per account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListTagsForResource>`_
**Request Syntax**
::
response = client.list_tags_for_resource(
ResourceArn='string'
)
**Response Syntax**
::
{
'Tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Tags** *(dict) --*
The tags that are assigned to the identity pool.
- *(string) --*
- *(string) --*
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the identity pool that the tags are assigned to.
:rtype: dict
:returns:
"""
pass
def lookup_developer_identity(self, IdentityPoolId: str, IdentityId: str = None, DeveloperUserIdentifier: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Retrieves the ``IdentityID`` associated with a ``DeveloperUserIdentifier`` or the list of ``DeveloperUserIdentifier`` values associated with an ``IdentityId`` for an existing identity. Either ``IdentityID`` or ``DeveloperUserIdentifier`` must not be null. If you supply only one of these values, the other value will be searched in the database and returned as a part of the response. If you supply both, ``DeveloperUserIdentifier`` will be matched against ``IdentityID`` . If the values are verified against the database, the response returns both values and is the same as the request. Otherwise a ``ResourceConflictException`` is thrown.
``LookupDeveloperIdentity`` is intended for low-throughput control plane operations: for example, to enable customer service to locate an identity ID by username. If you are using it for higher-volume operations such as user authentication, your requests are likely to be throttled. GetOpenIdTokenForDeveloperIdentity is a better option for higher-volume operations for user authentication.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/LookupDeveloperIdentity>`_
**Request Syntax**
::
response = client.lookup_developer_identity(
IdentityPoolId='string',
IdentityId='string',
DeveloperUserIdentifier='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'IdentityId': 'string',
'DeveloperUserIdentifierList': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Returned in response to a successful ``LookupDeveloperIdentity`` action.
- **IdentityId** *(string) --*
A unique identifier in the format REGION:GUID.
- **DeveloperUserIdentifierList** *(list) --*
This is the list of developer user identifiers associated with an identity ID. Cognito supports the association of multiple developer user identifiers with an identity ID.
- *(string) --*
- **NextToken** *(string) --*
A pagination token. The first call you make will have ``NextToken`` set to null. After that the service will return ``NextToken`` values as needed. For example, let's say you make a request with ``MaxResults`` set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.
:type IdentityPoolId: string
:param IdentityPoolId: **[REQUIRED]**
An identity pool ID in the format REGION:GUID.
:type IdentityId: string
:param IdentityId:
A unique identifier in the format REGION:GUID.
:type DeveloperUserIdentifier: string
:param DeveloperUserIdentifier:
A unique ID used by your backend authentication process to identify a user. Typically, a developer identity provider would issue many developer user identifiers, in keeping with the number of users.
:type MaxResults: integer
:param MaxResults:
The maximum number of identities to return.
:type NextToken: string
:param NextToken:
A pagination token. The first call you make will have ``NextToken`` set to null. After that the service will return ``NextToken`` values as needed. For example, let\'s say you make a request with ``MaxResults`` set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.
:rtype: dict
:returns:
"""
pass
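# Lookup sketch (illustrative only; the pool ID and user identifier below are
# placeholders, not real values):
#   resp = client.lookup_developer_identity(
#       IdentityPoolId='us-east-1:EXAMPLE-POOL-ID',
#       DeveloperUserIdentifier='user-123',
#       MaxResults=10)
#   identity_id = resp.get('IdentityId')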
def merge_developer_identities(self, SourceUserIdentifier: str, DestinationUserIdentifier: str, DeveloperProviderName: str, IdentityPoolId: str) -> Dict:
"""
Merges two users having different ``IdentityId`` s, existing in the same identity pool, and identified by the same developer provider. You can use this action to request that discrete users be merged and identified as a single user in the Cognito environment. Cognito associates the given source user (``SourceUserIdentifier`` ) with the ``IdentityId`` of the ``DestinationUserIdentifier`` . Only developer-authenticated users can be merged. If the users to be merged are associated with the same public provider, but as two different users, an exception will be thrown.
The number of linked logins is limited to 20. So, the number of linked logins for the source user, ``SourceUserIdentifier`` , and the destination user, ``DestinationUserIdentifier`` , together should not be larger than 20. Otherwise, an exception will be thrown.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/MergeDeveloperIdentities>`_
**Request Syntax**
::
response = client.merge_developer_identities(
SourceUserIdentifier='string',
DestinationUserIdentifier='string',
DeveloperProviderName='string',
IdentityPoolId='string'
)
**Response Syntax**
::
{
'IdentityId': 'string'
}
**Response Structure**
- *(dict) --*
Returned in response to a successful ``MergeDeveloperIdentities`` action.
- **IdentityId** *(string) --*
A unique identifier in the format REGION:GUID.
:type SourceUserIdentifier: string
:param SourceUserIdentifier: **[REQUIRED]**
User identifier for the source user. The value should be a ``DeveloperUserIdentifier`` .
:type DestinationUserIdentifier: string
:param DestinationUserIdentifier: **[REQUIRED]**
User identifier for the destination user. The value should be a ``DeveloperUserIdentifier`` .
:type DeveloperProviderName: string
:param DeveloperProviderName: **[REQUIRED]**
The \"domain\" by which Cognito will refer to your users. This is a (pseudo) domain name that you provide while creating an identity pool. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the ``DeveloperProviderName`` , you can use letters as well as period (.), underscore (_), and dash (-).
:type IdentityPoolId: string
:param IdentityPoolId: **[REQUIRED]**
An identity pool ID in the format REGION:GUID.
:rtype: | |
Y).
double_lb = find_lb(DX, DY)
double_ub = find_ub(
DX, DY, mapping_sample_size_order=mapping_sample_size_order, double_lb=double_lb)
return 0.5 * double_lb, 0.5 * double_ub
def find_lb(DX, DY):
"""
For X, Y metric spaces induced by simple unweighted graphs, find
lower bound of mGH(X, Y).
Parameters
----------
DX: np.array (|X|×|X|)
(Integer) distance matrix of X.
DY: np.array (|Y|×|Y|)
(Integer) distance matrix of Y.
Returns
--------
double_lb: float
Lower bound of 2*mGH(X, Y).
"""
diam_X = np.max(DX)
diam_Y = np.max(DY)
max_diam = max(diam_X, diam_Y)
# Obtain trivial lower bound of 2*mGH(X, Y) from
# 1) mGH(X, Y) ≥ 0.5*|diam X - diam Y|;
# 2) if X and Y are not isometric, mGH(X, Y) ≥ 0.5.
trivial_double_lb = max(abs(diam_X - diam_Y), int(len(DX) != len(DY)))
# Initialize lower bound of 2*mGH(X, Y).
double_lb = trivial_double_lb
# Try tightening the lower bound.
d = max_diam
while d > double_lb:
# Try proving 2*mGH(X, Y) ≥ d using d-bounded curvatures of
# X and Y of size 3×3 or larger. 2×2 curvatures are already
# accounted for in trivial lower bound.
if d <= diam_X:
K = find_largest_size_bounded_curvature(DX, diam_X, d)
if len(K) > 2 and confirm_lb_using_bounded_curvature(d, K, DY, max_diam):
double_lb = d
if d > double_lb and d <= diam_Y:
L = find_largest_size_bounded_curvature(DY, diam_Y, d)
if len(L) > 2 and confirm_lb_using_bounded_curvature(d, L, DX, max_diam):
double_lb = d
d -= 1
return double_lb
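# Usage sketch (illustrative only): DX and DY are integer shortest-path distance
# matrices of simple unweighted graphs, e.g. a 3-vertex path and a triangle:
#   DX = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]])   # path graph P3
#   DY = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])   # triangle K3
#   double_lb = find_lb(DX, DY)   # lower bound of 2*mGH(X, Y)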
def find_largest_size_bounded_curvature(DX, diam_X, d):
"""
Find a largest-size d-bounded curvature of metric space X induced
by simple unweighted graph.
Parameters
----------
DX: np.array (|X|×|X|)
(Integer) distance matrix of X.
diam_X: int
Largest distance in X.
d: int
Lower bound candidate for 2*mGH(X, Y).
Returns
--------
K: np.array (n×n)
d-bounded curvature of X of largest size; n ≤ |X|.
"""
# Initialize curvature K with the distance matrix of X.
K = DX
while np.any(K[np.triu_indices_from(K, 1)] < d):
# Pick a row (and column) with highest number of off-diagonal
# distances < d, then with smallest sum of off-diagonal
# distances ≥ d.
K_rows_sortkeys = -np.sum(K < d, axis=0) * (len(K) * diam_X) + \
np.sum(np.ma.masked_less(K, d), axis=0).data
row_to_remove = np.argmin(K_rows_sortkeys)
# Remove the row and column from K.
K = np.delete(K, row_to_remove, axis=0)
K = np.delete(K, row_to_remove, axis=1)
return K
def confirm_lb_using_bounded_curvature(d, K, DY, max_diam):
"""
    For X, Y metric spaces induced by simple unweighted graphs, try to
confirm 2*mGH(X, Y) ≥ d using K, a d-bounded curvature of X.
Parameters
----------
d: int
Lower bound candidate for 2*mGH(X, Y).
K: np.array (n×n)
d-bounded curvature of X; n ≥ 3.
DY: np.array (|Y|×|Y|)
Integer distance matrix of Y.
max_diam: int
Largest distance in X and Y.
Returns
--------
lb_is_confirmed: bool
Whether confirmed that 2*mGH(X, Y) ≥ d.
"""
# If K exceeds DY in size, the Hausdorff distance between the n-th
# curvature sets of X and Y is ≥ d, entailing 2*mGH(X, Y) ≥ d (from
# Theorem A).
lb_is_confirmed = len(K) > len(DY) or \
confirm_lb_using_bounded_curvature_row(d, K, DY, max_diam)
return lb_is_confirmed
def confirm_lb_using_bounded_curvature_row(d, K, DY, max_diam):
"""
    For X, Y metric spaces induced by simple unweighted graphs, and K,
a d-bounded curvature of X, try to confirm 2*mGH(X, Y) ≥ d using
some row of K.
Parameters
----------
d: int
Lower bound candidate for 2*mGH(X, Y).
K: np.array (n×n)
d-bounded curvature of X; n ≥ 3.
DY: np.array (|Y|×|Y|)
Integer distance matrix of Y; n ≤ |Y|.
max_diam: int
Largest distance in X and Y.
Returns
--------
lb_is_confirmed: bool
Whether confirmed that 2*mGH(X, Y) ≥ d.
"""
lb_is_confirmed = False
    # Represent rows of K as distance distributions, and retain those
# that are maximal by the entry-wise partial order.
K_max_rows_distance_distributions = find_unique_max_distributions(
represent_distance_matrix_rows_as_distributions(K, max_diam))
# Represent rows of DY as distance distributions.
DY_rows_distance_distributions = represent_distance_matrix_rows_as_distributions(DY, max_diam)
# For each i ∈ 1,...,n, check if ||row_i(K) - row_i(L)||_∞ ≥ d
# ∀L ∈ PSPS^n(DY), which entails that the Hausdorff distance
# between the n-th curvature sets of X and Y is ≥ d, and therefore
# 2*mGH(X, Y) ≥ d (from Theorem B).
i = 0
while not lb_is_confirmed and i < len(K_max_rows_distance_distributions):
lb_is_confirmed = True
# For fixed i, check if ||row_i(K) - row_i(L)||_∞ ≥ d
# ∀L ∈ PSPS^n_{i←j}(DY) ∀j ∈ 1,...,|Y|, which is equivalent to
# ||row_i(K) - row_i(L)||_∞ ≥ d ∀L ∈ PSPS^n(DY).
j = 0
while lb_is_confirmed and j < len(DY_rows_distance_distributions):
# For fixed i and j, checking ||row_i(K) - row_i(L)||_∞ ≥ d
# ∀L ∈ PSPS^n_{i←j}(DY) is equivalent to solving a linear
# (bottleneck) assignment feasibility problem between the
# entries of row_i(K) and row_j(DY).
lb_is_confirmed = not check_assignment_feasibility(
K_max_rows_distance_distributions[i], DY_rows_distance_distributions[j], d)
j += 1
i += 1
return lb_is_confirmed
def represent_distance_matrix_rows_as_distributions(DX, max_d):
"""
    Given a metric space X induced by a simple unweighted graph,
represent each row of its distance matrix as the frequency
distribution of its entries. Entry 0 in each row is omitted.
Parameters
----------
DX: np.array (n×n)
(Integer) distance matrix of X.
max_d: int
Upper bound of the entries in DX.
Returns
--------
    DX_rows_distributions: np.array (n×max_d)
Each row holds frequencies of each distance from 1 to
max_d in the corresponding row of DX. Namely, the (i, j)-th
entry holds the frequency of distance (max_d - j) in row_i(DX).
"""
# Add imaginary part to distinguish identical distances from
# different rows of D.
unique_distances, distance_frequencies = np.unique(
DX + 1j * np.arange(len(DX))[:, None], return_counts=True)
# Type is signed integer to allow subtractions.
optimal_int_type = determine_optimal_int_type(len(DX))
    DX_rows_distributions = np.zeros((len(DX), max_d + 1), dtype=optimal_int_type)
# Construct index pairs for distance frequencies, so that the
# frequencies of larger distances appear on the left.
distance_frequencies_index_pairs = \
(np.imag(unique_distances).astype(optimal_int_type),
max_d - np.real(unique_distances).astype(max_d.dtype))
# Fill frequency distributions of the rows of DX.
    DX_rows_distributions[distance_frequencies_index_pairs] = distance_frequencies
# Remove (unit) frequency of distance 0 from each row.
    DX_rows_distributions = DX_rows_distributions[:, :-1]
    return DX_rows_distributions
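# Worked example (illustrative, not from the original module): for the path graph P3
# with distance matrix [[0, 1, 2], [1, 0, 1], [2, 1, 0]] and max_d=2, column j holds
# the frequency of distance (max_d - j), so the rows are encoded as
# [[1, 1], [0, 2], [1, 1]].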
def find_unique_max_distributions(distributions):
"""
Given frequency distributions of entries in M positive integer
vectors of size p, find unique maximal vectors under the following
    (entry-wise) partial order: for v, u vectors, v < u if and only if
there exists a bijection f: {1,...,p} → {1,...,p} such that
v_k < u_{f(k)} ∀k ∈ {1,...,p}.
Parameters
----------
distributions: np.array (M×max_d)
        Frequency distributions of entries in the M vectors; the
entries are bounded from above by max_d.
Returns
--------
unique_max_distributions: np.array (m×max_d)
Unique frequency distributions of the maximal vectors; m ≤ M.
"""
pairwise_distribution_differences = \
np.cumsum(distributions - distributions[:, None, :], axis=2)
pairwise_distribution_less_thans = np.logical_and(
np.all(pairwise_distribution_differences >= 0, axis=2),
np.any(pairwise_distribution_differences > 0, axis=2))
distributions_are_max = ~np.any(pairwise_distribution_less_thans, axis=1)
try:
unique_max_distributions = np.unique(distributions[distributions_are_max], axis=0)
except AttributeError:
# `np.unique` is not implemented in NumPy 1.12 (Python 3.4).
unique_max_distributions = np.vstack(
{tuple(distribution) for distribution in distributions[distributions_are_max]})
return unique_max_distributions
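# Worked example (illustrative, not from the original module): for v = (1, 1) and
# u = (2, 2) with max_d = 2, the input distributions are [[0, 2], [2, 0]]; since
# v < u under the entry-wise partial order, only u's distribution [[2, 0]] is kept.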
def check_assignment_feasibility(v_distribution, u_distribution, d):
"""
    For positive integer vectors v of size p and u of size q ≥ p, check
    if there exists injective f: {1,...,p} → {1,...,q}, such that
    |v_k - u_{f(k)}| < d ∀k ∈ {1,...,p}.
Parameters
----------
v_distribution: np.array (max_d)
Frequency distribution of entries in v; the entries are bounded
from above by max_d.
u_distribution: np.array (max_d)
Frequency distribution of entries in u; the entries are bounded
from above by max_d.
d: int
d > 0.
Returns
--------
is_assignment_feasible: bool
Whether such injective f: {1,...,p} → {1,...,q} exists.
"""
def next_i_and_j(min_i, min_j):
# Find reversed v distribution index of smallest v entries yet
# to be assigned. Then find index in reversed u distribution of
        # smallest u entries to which the v entries can be assigned.
try:
i = next(i for i in range(min_i, len(reversed_v_distribution))
if reversed_v_distribution[i] > 0)
except StopIteration:
# All v entries are assigned.
i = None
j = min_j
else:
j = next_j(i, max(i - (d - 1), min_j))
return i, j
def next_j(i, min_j):
# Find reversed u distribution index of smallest u entries to
# which v entries, corresponding to a given reversed v
# distribution index, can be assigned to.
try:
| |
# -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
"""ADB客户端,用于与ADB守护进程通信
"""
from __future__ import unicode_literals
import six
import os
import time
import socket, select
import struct
import threading
from io import BytesIO
from qt4a.androiddriver.util import logger, utf8_encode, TimeoutError
SYNC_DATA_MAX = 64 * 1024
class AdbError(RuntimeError):
pass
class Pipe(object):
"""模拟实现内存管道
"""
def __init__(self):
self._buffer = BytesIO()
self._max_buffer_size = 4096 * 16
self._lock = threading.Lock()
        self._pos = 0  # current read position
        self._write_buffer = b""  # ensure only complete lines are written to the buffer
def write(self, s):
self._write_buffer += s
pos = self._write_buffer.rfind(b"\n")
if pos <= 0:
return
s = self._write_buffer[:pos]
self._write_buffer = self._write_buffer[pos:]
with self._lock:
            self._buffer.seek(0, 2)  # move the pointer to the end
self._buffer.write(s)
def readline(self):
wait = False
while True:
if wait:
time.sleep(0.1)
with self._lock:
self._buffer.seek(0, 2)
buffer_size = self._buffer.tell()
if buffer_size <= self._pos:
wait = True
continue
with self._lock:
self._buffer.seek(self._pos)
ret = self._buffer.readline()
if len(ret) == 0:
wait = True
continue
else:
self._pos = self._buffer.tell()
self._buffer.seek(0, 2)
buffer_size = self._buffer.tell()
if buffer_size >= self._max_buffer_size:
                        # create a new buffer
self._buffer.seek(self._pos)
buffer = self._buffer.read()
self._buffer.close()
self._buffer = BytesIO()
self._buffer.write(buffer)
self._pos = 0
return ret
def read(self):
"""读取管道中的所有数据
"""
with self._lock:
self._buffer.seek(self._pos)
result = self._buffer.read()
if self._write_buffer:
result += self._write_buffer
self._write_buffer = ""
return result
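# Illustrative usage sketch (not part of the original module); the sample bytes below
# are made up:
#
#   pipe = Pipe()
#   pipe.write(b"hello world\n")   # data is buffered until a complete line arrives
#   line = pipe.readline()         # returns the buffered line as bytes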
class ADBPopen(object):
"""与Popen兼容
"""
class StdinPipe(object):
"""
"""
def __init__(self, sock):
self._sock = sock
def write(self, s):
self._sock.send(s)
def flush(self):
pass
def __init__(self, sock, timeout=None):
self._sock = sock
self._stdin = self.StdinPipe(sock)
self._stdout = Pipe()
self._stderr = Pipe()
self._running = True
self._timeout = timeout
if self._timeout == None:
self._timeout = 0xFFFFFFFF
        self._event = threading.Event()  # event signaled when data reception completes
self._thread = threading.Thread(
target=self._work_thread, args=(), name=self.__class__.__name__
)
self._thread.setDaemon(True)
self._thread.start()
@property
def stdin(self):
return self._stdin
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def pid(self):
return self._thread.ident
def _work_thread(self):
time0 = time.time()
while self._running and time.time() - time0 < self._timeout:
infds, outfds, errfds = select.select([self._sock,], [], [], 1)
if len(infds) > 0:
try:
buff = self._sock.recv(4096)
if len(buff) == 0:
self._sock.close()
self._sock = None
self._running = False
self._event.set()
return
self._stdout.write(buff)
except socket.error as e:
logger.info("Recv response error: %s" % (e))
self._stdout.write(b" ") # 通知接收方退出
self._sock.close()
self._sock = None
self._running = False
self._event.set()
return
self._sock.close()
self._sock = None
def poll(self):
"""是否存在
"""
if self._thread.is_alive():
return None
else:
return 0
def terminate(self):
"""结束
"""
self._running = False
        time.sleep(1)  # wait for the thread to exit
def communicate(self):
"""
"""
while True:
if self._event.wait(0.001) == True or self.poll() == 0:
if self._running:
raise TimeoutError("Execute timeout")
return self.stdout.read(), self.stderr.read()
# time.sleep(0.001)
class ADBClient(object):
"""
"""
instance_dict = {}
def __init__(self, server_addr="127.0.0.1", server_port=5037):
self._server_addr = server_addr
self._server_port = server_port
self._sock = None
self._lock = threading.Lock()
@staticmethod
def get_client(host, port=5037):
"""根据主机名获取ADBClient实例
"""
return ADBClient(host, port)
def call(self, cmd, *args, **kwds):
"""调用命令字
"""
cmd = cmd.replace("-", "_")
if cmd == "forward" and args[1] == "--remove":
method = getattr(self, "remove_forward")
args = list(args)
args.pop(1) # remove --remove args
else:
method = getattr(self, cmd)
# print (args)
sync = True
if "sync" in kwds:
sync = kwds.pop("sync")
if "timeout" in kwds and not cmd in (
"shell",
"install",
"uninstall",
"wait_for_device",
"reboot",
):
kwds.pop("timeout")
if sync:
ret = None
retry_count = kwds.pop("retry_count")
i = 0
socket_error_count = 0
while i < retry_count:
try:
self._lock.acquire()
ret = method(*args, **kwds)
break
except socket.error as e:
logger.exception("执行%s %s error" % (cmd, " ".join(args)))
socket_error_count += 1
if socket_error_count <= 10:
i -= 1
time.sleep(1)
except AdbError as e:
err_msg = str(e)
if "device not found" in err_msg:
return "", "error: device not found"
elif "cannot bind to socket" in err_msg:
return "", err_msg
elif "cannot remove listener" in err_msg:
return "", err_msg
elif "device offline" in err_msg:
return "", "error: device offline"
elif "Permission denied" in err_msg:
return "", "error: %s" % err_msg
elif (
"Bad response" in err_msg
or "Device or resource busy" in err_msg
or "closed" in err_msg
                    ):  # wetest devices sometimes return a "closed" error
                        # a retry is needed
logger.exception("Run %s%s %r" % (cmd, " ".join(args), e))
else:
raise RuntimeError("执行%s %s 命令失败:%s" % (cmd, " ".join(args), e))
time.sleep(1)
if i >= retry_count - 1:
raise e
except RuntimeError as e:
logger.exception("执行%s%s %r" % (cmd, " ".join(args), e))
if "device not found" in str(e):
self.wait_for_device(args[0], retry_count=1, timeout=300)
self._sock = None
return self.call(cmd, *args, **kwds)
finally:
i += 1
if self._sock != None:
self._sock.close()
self._sock = None
self._lock.release()
if ret == None:
raise TimeoutError("Run cmd %s %s failed" % (cmd, " ".join(args)))
if isinstance(ret, (six.string_types, six.binary_type)):
return ret, ""
else:
return ret
else:
            self._transport(args[0])  # asynchronous operations must send the device serial first
if cmd == "shell":
self._lock.acquire()
self._send_command("shell:" + " ".join(args[1:]))
pipe = ADBPopen(self._sock)
self._sock = None
self._lock.release()
return pipe
def _connect(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for i in range(3):
try:
self._sock.connect((self._server_addr, self._server_port))
return True
except socket.error:
pass
return False
def _check_status(self):
"""检查返回状态
"""
stat = self._sock.recv(4)
if stat == b"OKAY":
return True
elif stat == b"FAIL":
size = int(self._sock.recv(4), 16)
val = self._sock.recv(size)
self._sock.close()
self._sock = None
raise AdbError(val.decode("utf8"))
else:
raise AdbError("Bad response: %r" % (stat,))
def _send_command(self, cmd):
if not isinstance(cmd, bytes):
cmd = cmd.encode("utf8")
data = b"%04x%s" % (len(cmd), cmd)
if not self._sock:
self._connect()
self._sock.send(data)
return self._check_status()
def _recv(self, size=None):
"""从socket读取数据
"""
result = b""
if size != None:
while len(result) < size:
result += self._sock.recv(size - len(result))
else:
data = self._sock.recv(4096)
while data:
result += data
data = self._sock.recv(4096)
return result
def send_command(self, cmd):
self._send_command(cmd)
size = int(self._sock.recv(4), 16)
resp = self._sock.recv(size)
# logger.debug('recv: %r' % resp[:200])
self._sock.close()
self._sock = None
return resp.decode("utf8")
def _transport(self, device_id):
self._send_command("host:transport:%s" % device_id)
def devices(self):
"""adb devices
"""
result = self.send_command("host:devices")
return result
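    # Illustrative usage sketch (not part of the original module); the host and the
    # device serial below are placeholders:
    #
    #   client = ADBClient.get_client("127.0.0.1")
    #   print(client.devices())  # raw response of the "host:devices" request
    #   out, err = client.shell("<serial>", "getprop ro.build.version.release", timeout=10)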
def shell(self, device_id, cmd, **kwds):
"""adb shell
"""
cmd_line = "shell:%s" % cmd
self._transport(device_id)
self._send_command(cmd_line)
result = ADBPopen(self._sock, timeout=kwds["timeout"]).communicate()
self._sock = None
return result
def _sync_read_mode(self, remote_path):
"""
"""
remote_path = utf8_encode(remote_path)
data = b"STAT" + struct.pack(b"I", len(remote_path)) + remote_path
self._sock.send(data)
result = self._sock.recv(16)
if result[:4] != b"STAT":
raise AdbError("sync_read_mode error")
        mode, size, mtime = struct.unpack(b"III", result[4:])  # 'mtime' avoids shadowing the time module
        return mode, size, mtime
def pull(self, device_id, src_file, dst_file):
"""adb pull
"""
time0 = time.time()
self._transport(device_id)
self._send_command("sync:")
mode, fsize, ftime = self._sync_read_mode(src_file)
if fsize > 0:
if mode == 0:
self._sock.close()
self._sock = None
raise AdbError("remote object %r does not exist" % src_file)
src_file = utf8_encode(src_file)
data = b"RECV" + struct.pack(b"I", len(src_file)) + src_file
self._sock.send(data)
f = open(dst_file, "wb")
data_size = 0
last_data = b""
while True:
result = self._sock.recv(8)
if len(result) != 8:
                    logger.warning("Unexpected response data: %r" % result)
last_data += result
if len(last_data) < 8:
continue
else:
result = last_data[:8]
last_data = last_data[8:]
                psize = struct.unpack(b"I", result[4:])[0]  # size of each data chunk
if result[:4] == b"DONE":
break
elif result[:4] == b"FAIL":
raise AdbError(self._sock.recv(psize))
elif result[:4] != b"DATA":
raise AdbError("pull_file error")
result = self._recv(psize - len(last_data))
result = last_data + result
if len(result) >= psize:
last_data = result[psize:]
result = result[:psize]
else:
raise ValueError(
"Invalid data size, expect %d, actual is %d"
% (psize, len(result))
)
f.write(result)
data_size += len(result)
f.close()
self._sock.send(b"QUIT" + struct.pack(b"I", 0))
time_cost = time.time() - time0
self._sock.close()
self._sock = None
if data_size > 0:
return "%d KB/s (%d bytes in %fs)" % (
int(data_size / 1000 / time_cost) if time_cost > 0 else 65535,
data_size,
time_cost,
)
else:
return ""
else:
return "0 KB/s (0 bytes in 0 s)"
def push(self, device_id, src_file, dst_file):
"""adb push
"""
time0 = time.time()
try:
st = os.stat(src_file)
except OSError as e:
if e.errno == 2:
raise AdbError("cannot stat '%s': No such file or directory" % src_file)
else:
raise e
self._transport(device_id)
self._send_command("sync:")
dst_file = utf8_encode(dst_file)
mode, fsize, ftime = self._sync_read_mode(dst_file)
s = b"%s,%d" % (dst_file, st.st_mode)
data = b"SEND" | |
password
if account_type:
self.account_type = account_type
if service:
self.service = service
if source:
self.source = source
self.ProgrammaticLogin(captcha_token, captcha_response)
def GenerateAuthSubURL(self, next, scope, secure=False, session=True):
"""Generate a URL at which the user will login and be redirected back.
Users enter their credentials on a Google login page and a token is sent
to the URL specified in next. See documentation for AuthSub login at:
http://code.google.com/apis/accounts/AuthForWebApps.html
Args:
next: string The URL user will be sent to after logging in.
scope: string The URL of the service to be accessed.
secure: boolean (optional) Determines whether or not the issued token
is a secure token.
session: boolean (optional) Determines whether or not the issued token
can be upgraded to a session token.
"""
    # Translate True/False values for parameters into numeric values accepted
# by the AuthSub service.
if secure:
secure = 1
else:
secure = 0
if session:
session = 1
else:
session = 0
request_params = urllib.urlencode({'next': next, 'scope': scope,
'secure': secure, 'session': session})
return '%s/accounts/AuthSubRequest?%s' % (AUTH_SERVER_HOST, request_params)
def UpgradeToSessionToken(self):
"""Upgrades a single use AuthSub token to a session token.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
if not self.__auth_token.startswith(AUTHSUB_AUTH_LABEL):
raise NonAuthSubToken
response = self.handler.HttpRequest(self, 'GET', None,
AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken',
extra_headers={'Authorization':self.__auth_token},
content_type='application/x-www-form-urlencoded')
response_body = response.read()
if response.status == 200:
for response_line in response_body.splitlines():
if response_line.startswith('Token='):
          self.SetAuthSubToken(response_line[len('Token='):])  # lstrip would strip characters, not the prefix
def RevokeAuthSubToken(self):
"""Revokes an existing AuthSub token.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
if not self.__auth_token.startswith(AUTHSUB_AUTH_LABEL):
raise NonAuthSubToken
response = self.handler.HttpRequest(self, 'GET', None,
AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken',
extra_headers={'Authorization':self.__auth_token},
content_type='application/x-www-form-urlencoded')
if response.status == 200:
self.__auth_token = None
# CRUD operations
def Get(self, uri, extra_headers=None, redirects_remaining=4,
encoding='UTF-8', converter=None):
"""Query the GData API with the given URI
The uri is the portion of the URI after the server value
(ex: www.google.com).
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
redirects_remaining: int (optional) Tracks the number of additional
redirects this method will allow. If the service object receives
a redirect and remaining is 0, it will not follow the redirect.
This was added to avoid infinite redirect loops.
encoding: string (optional) The character encoding for the server's
response. Default is UTF-8
converter: func (optional) A function which will transform
the server's results before it is returned. Example: use
GDataFeedFromString to parse the server response as if it
were a GDataFeed.
Returns:
If there is no ResultsTransformer specified in the call, a GDataFeed
or GDataEntry depending on which is sent from the server. If the
      response is neither a feed nor an entry and there is no ResultsTransformer,
return a string. If there is a ResultsTransformer, the returned value
will be that of the ResultsTransformer function.
"""
if extra_headers is None:
extra_headers = {}
# Add the authentication header to the Get request
if self.__auth_token:
extra_headers['Authorization'] = self.__auth_token
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if uri.find('?') > -1:
uri += '&gsessionid=%s' % (self.__gsessionid,)
else:
uri += '?gsessionid=%s' % (self.__gsessionid,)
server_response = self.handler.HttpRequest(self, 'GET', None, uri,
extra_headers=extra_headers)
result_body = server_response.read()
if server_response.status == 200:
if converter:
return converter(result_body)
# There was no ResultsTransformer specified, so try to convert the
# server's response into a GDataFeed.
feed = gdata.GDataFeedFromString(result_body)
if not feed:
# If conversion to a GDataFeed failed, try to convert the server's
# response to a GDataEntry.
entry = gdata.GDataEntryFromString(result_body)
if not entry:
# The server's response wasn't a feed, or an entry, so return the
# response body as a string.
return result_body
return entry
return feed
elif server_response.status == 302:
if redirects_remaining > 0:
location = server_response.getheader('Location')
if location is not None:
          m = re.compile(r'[\?\&]gsessionid=(\w*)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return self.Get(location, extra_headers, redirects_remaining - 1,
encoding=encoding, converter=converter)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
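  # Illustrative usage sketch (not part of the original library): the service class
  # name and credentials below are placeholders for a concrete gdata.service subclass.
  #
  #   client = gdata.service.GDataService(email='user@example.com', password='...')
  #   client.ProgrammaticLogin()
  #   feed = client.Get('/base/feeds/snippets?bq=digital+camera')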
def GetMedia(self, uri, extra_headers=None):
"""Returns a MediaSource containing media and its metadata from the given
URI string.
"""
response_handle = self.handler.HttpRequest(self, 'GET', None, uri,
extra_headers=extra_headers)
return gdata.MediaSource(response_handle, response_handle.getheader('Content-Type'),
response_handle.getheader('Content-Length'))
def GetEntry(self, uri, extra_headers=None):
"""Query the GData API with the given URI and receive an Entry.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataEntry built from the XML in the server's response.
"""
result = self.Get(uri, extra_headers, converter=atom.EntryFromString)
if isinstance(result, atom.Entry):
return result
else:
raise UnexpectedReturnType, 'Server did not send an entry'
def GetFeed(self, uri, extra_headers=None,
converter=gdata.GDataFeedFromString):
"""Query the GData API with the given URI and receive a Feed.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataFeed built from the XML in the server's response.
"""
result = self.Get(uri, extra_headers, converter=converter)
if isinstance(result, atom.Feed):
return result
else:
raise UnexpectedReturnType, 'Server did not send a feed'
def GetNext(self, feed):
"""Requests the next 'page' of results in the feed.
This method uses the feed's next link to request an additional feed
and uses the class of the feed to convert the results of the GET request.
Args:
feed: atom.Feed or a subclass. The feed should contain a next link and
the type of the feed will be applied to the results from the
server. The new feed which is returned will be of the same class
as this feed which was passed in.
Returns:
A new feed representing the next set of results in the server's feed.
The type of this feed will match that of the feed argument.
"""
next_link = feed.GetNextLink()
# Create a closure which will convert an XML string to the class of
# the feed object passed in.
def ConvertToFeedClass(xml_string):
return atom.CreateClassFromXMLString(feed.__class__, xml_string)
# Make a GET request on the next link and use the above closure for the
# converted which processes the XML string from the server.
if next_link and next_link.href:
return self.Get(next_link.href, converter=ConvertToFeedClass)
else:
return None
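  # Illustrative pagination sketch (not part of the original library); `client` is a
  # placeholder for an authenticated service instance.
  #
  #   feed = client.GetFeed('/base/feeds/snippets?bq=digital+camera')
  #   while feed is not None:
  #     for entry in feed.entry:
  #       pass  # process each entry
  #     feed = client.GetNext(feed)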
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert or update data into a GData service at the given URI.
Args:
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Note: Requires Python 3.6 or higher (f-strings are used below)
desc = "formatting template for all files"
# Color values from http://pathofexile.gamepedia.com/Item_filter_guide
color = {
"normal": "200 200 200",
"magic": "136 136 255",
"rare": "255 255 119",
"unique": "175 96 37",
"gem": "27 162 155",
"currency": "170 158 130",
"quest": "74 230 58",
"divinationold": "170 230 230",
"default": "127 127 127",
"value": "255 255 255",
"augmented": "136 136 255",
"fire": "150 0 0",
"cold": "54 100 146",
"lightning": "255 215 0",
"chaos": "208 32 144",
"crafted": "184 218 242",
"corrupted": "210 0 0",
"supportpacknew": "180 96 0",
"supporterpack": "163 141 109",
"nemesis": "255 200 0",
"nemesisoutline": "255 40 0",
"bloodline": "210 0 220",
"bloodlineoutline": "74 0 160",
"torment": "50 230 100",
"tormentoutline": "0 100 150",
"title": "231 180 120",
"favour": "170 158 120",
"lpink": "255 192 203",
"divinationnew": "30 144 255",
"premiumbrown": "124 81 50",
"premiumorange": "191 91 0",
"premiumtan": "254 191 128",
"premiumdpurple": "38 0 86",
"premiumpurple": "88 0 179",
"premiumlpurple": "192 128 254",
"premiumdlime": "98 128 0",
"premiumlime": "191 244 0",
"premiumllime": "239 254 128",
"premiumdred": "86 0 0",
"premiumred": "191 0 0",
"premiumlred": "254 128 128",
"premiumdblue": "0 0 128",
"premiumblue": "0 0 254",
"premiumlblue": "128 179 254",
"premiumdyellow": "254 170 0",
"premiumyellow": "254 213 0",
"premiumlyellow": "254 254 153",
"premiumdlavender": "114 0 83",
"premiumlavender": "204 0 154",
"premiumllavender": "254 128 222",
"premiumdgreen": "0 73 0",
"premiumgreen": "0 191 0",
"premiumlgreen": "128 254 128",
"premiumdgrey": "42 42 42",
"premiumgrey": "135 135 135",
"premiumlgrey": "221 221 221",
"black": "0 0 0",
"grey": "80 80 80",
"prophecy": "128 0 200",
"highlight": "51 58 75"
}
size = {
"huge": "45",
"vlarge": "44",
"large": "39",
"normal": "32",
"small": "25",
"minimum": "18",
}
# Volumes are controlled in wav_mixer
# mirror
# max
# high
# normal
# medium
# low
# initialize settings with all of the text color options
settings = {f"{k} text": [f"SetTextColor {color[k]}", "Continue"] for k in color}
settings.update({f"{k} background": [f"SetBackgroundColor {color[k]} 200", "Continue"] for k in color})
# Text settings for various categories
# This is where you would define general settings for a category, such as PlayAlertSoundPositional.
# Each config should be its own array element. Parsing will handle tabs/etc.
# ignore and hide have special meaning (see comment)
settings.update({
# Special sound for anything worth at least 1/20th of a mirror
"challenge mirror": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_challenge"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"challenge extremely high": ["SetBorderColor {}".format(color['premiumdlavender']),
'CustomAlertSound "{}_challenge"'.format('max'),
'MinimapIcon 0 Green Kite',
'PlayEffect Green',
"SetFontSize {}".format(size['huge']),
"SetBackgroundColor {} 220".format(color['highlight'])],
"challenge very high": ["SetBorderColor {}".format(color['premiumdlavender']),
'CustomAlertSound "{}_challenge"'.format('high'),
'MinimapIcon 1 Blue Kite',
'PlayEffect Green',
"SetFontSize {}".format(size['vlarge'])],
"challenge high": ["SetBorderColor {}".format(color['premiumdlavender']),
'CustomAlertSound "{}_challenge"'.format('normal'),
'MinimapIcon 1 Blue Kite',
"SetFontSize {}".format(size['large'])],
"challenge normal": ["SetBorderColor {}".format(color['premiumdlavender']),
'CustomAlertSound "{}_challenge"'.format('low'),
'MinimapIcon 2 Blue Kite',
"SetFontSize {}".format(size['normal'])],
"challenge show": ["SetBorderColor {} 150".format(color['premiumdlavender']),
'MinimapIcon 2 Blue Kite',
"SetFontSize {}".format(size['normal'])],
"challenge low": ['MinimapIcon 2 Blue Kite',
"SetFontSize {}".format(size['normal'])],
"animate melee b": ["SetFontSize {}".format(size['minimum']),
"SetTextColor {}".format(color['premiumlred']),
"SetBorderColor {} 150".format(color['premiumlred']),
"SetBackgroundColor {} 0".format(color['black'])],
"animate melee": ["SetFontSize {}".format(size['minimum']),
"SetTextColor {} 150".format(color['premiumlred']),
"SetBackgroundColor {} 0".format(color['black'])],
"animate range b": ["SetFontSize {}".format(size['small']),
"SetTextColor {}".format(color['premiumlred']),
"SetBorderColor {} 200".format(color['premiumtan']),
"SetBackgroundColor {} 150".format(color['premiumbrown'])],
"animate range": ["SetFontSize {}".format(size['small']),
"SetTextColor {}".format(color['black']),
"SetBackgroundColor {} 150".format(color['premiumbrown'])],
"quest": ["SetFontSize {}".format(size['normal']),
'MinimapIcon 2 Green Moon',
'PlayEffect Green',
"SetBorderColor {} 200".format(color['quest'])],
"chance": ["Rarity Normal",
'Corrupted False',
'Mirrored False',
"SetFontSize {}".format(size['large']),
"SetBorderColor {} 150".format(color['premiumorange']),
"SetBackgroundColor {} 220".format(color['premiumdpurple'])],
"chance any": ['Corrupted False',
'Mirrored False',
"SetFontSize {}".format(size['large']),
"SetBorderColor {} 150".format(color['premiumorange']),
"SetBackgroundColor {} 220".format(color['premiumdpurple'])],
# Special sound for anything worth at least 1/20th of a mirror
"currency mirror": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_currency"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"currency extremely high": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 0 Green Pentagon',
'PlayEffect Green',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_currency"'.format('max'),
"SetBackgroundColor {} 220".format(color['highlight'])],
"currency very high": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 1 Blue Pentagon',
'PlayEffect Blue',
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_currency"'.format('high')],
"currency high": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 1 Blue Pentagon',
"SetFontSize {}".format(size['large']),
'CustomAlertSound "{}_currency"'.format('normal')],
"currency normal": ["SetBorderColor {}".format(color['currency']),
'MinimapIcon 2 Brown Pentagon',
"SetFontSize {}".format(size['normal']),
'CustomAlertSound "{}_currency"'.format('low')],
"currency low": ["SetFontSize {}".format(size['small'])],
"currency show": ["SetBorderColor {} 150".format(color['currency']),
'MinimapIcon 2 Grey Pentagon',
"SetFontSize {}".format(size['normal'])],
# Special sound for anything worth at least 1/20th of a mirror
"divination mirror": ["SetBorderColor {}".format(color['divinationnew']),
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_divination"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"divination extremely high": ["SetBorderColor {}".format(color['divinationnew']),
'MinimapIcon 0 Green Triangle',
'PlayEffect Green',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_divination"'.format('max'),
"SetBackgroundColor {} 220".format(color['highlight'])],
"divination very high": ["SetBorderColor {}".format(color['divinationnew']),
'MinimapIcon 1 Blue Triangle',
'PlayEffect Blue',
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_divination"'.format('high')],
"divination high": ["SetBorderColor {}".format(color['divinationnew']),
'MinimapIcon 1 Blue Triangle',
'CustomAlertSound "{}_divination"'.format('normal'),
"SetFontSize {}".format(size['normal'])],
"divination normal": ['MinimapIcon 2 Red Triangle',
'CustomAlertSound "{}_divination"'.format('low'),
"SetFontSize {}".format(size['normal'])],
"divination show": ['MinimapIcon 2 Grey Triangle',
"SetFontSize {}".format(size['normal'])],
"divination low": ["SetFontSize {}".format(size['small'])],
# Special sound for anything worth at least 1/20th of a mirror
"gem mirror": ["SetBorderColor {}".format(color['gem']),
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_gem"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"gem extremely high": ["SetBorderColor {}".format(color['gem']),
'MinimapIcon 1 Green Hexagon',
'PlayEffect Green',
'CustomAlertSound "{}_gem"'.format('max'),
"SetFontSize {}".format(size['huge']),
"SetBackgroundColor {} 220".format(color['highlight'])],
"gem very high": ["SetBorderColor {}".format(color['gem']),
'PlayEffect Blue',
'MinimapIcon 1 Blue Hexagon',
'CustomAlertSound "{}_gem"'.format('high'),
"SetFontSize {}".format(size['vlarge'])],
"gem high": ["SetBorderColor {}".format(color['gem']),
'MinimapIcon 2 Blue Hexagon',
'CustomAlertSound "{}_gem"'.format('normal'),
"SetFontSize {}".format(size['normal'])],
"gem normal": ["SetBorderColor {} 220".format(color['gem']),
'MinimapIcon 2 Brown Hexagon',
"SetFontSize {}".format(size['normal'])],
"gem low": ["SetFontSize {}".format(size['small'])],
# Special sound for anything worth at least 1/20th of a mirror
"fragment mirror": ["SetBorderColor {}".format(color['premiumllavender']),
"SetTextColor {}".format(color['premiumllavender']),
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_currency"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"fragment extremely high": ['MinimapIcon 0 Green Raindrop',
"SetTextColor {}".format(color['premiumllavender']),
'PlayEffect Green',
"SetBorderColor {}".format(color['premiumllavender']),
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_currency"'.format('max'),
"SetBackgroundColor {} 220".format(color['highlight'])],
"fragment very high": ['MinimapIcon 1 Blue Raindrop',
"SetTextColor {}".format(color['premiumllavender']),
'PlayEffect Blue',
"SetBorderColor {}".format(color['premiumllavender']),
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_map_good"'.format('high')],
"fragment high": ['MinimapIcon 1 Blue Raindrop',
"SetTextColor {}".format(color['premiumllavender']),
"SetFontSize {}".format(size['normal']),
"SetBorderColor {}".format(color['premiumllavender']),
'CustomAlertSound "{}_map_good"'.format('normal')],
"fragment normal": ["SetFontSize {}".format(size['normal']),
"SetTextColor {}".format(color['premiumllavender']),
"SetBorderColor {} 150".format(color['premiumllavender']),
'MinimapIcon 2 Brown Raindrop'],
"fragment low": ["SetFontSize {}".format(size['small']),
"SetTextColor {}".format(color['premiumllavender']),
"SetBorderColor {} 100".format(color['premiumllavender'])],
"map red": ["SetBorderColor {} 150".format(color['fire']),
'MinimapIcon 1 Red Diamond',
"SetFontSize {}".format(size['large']),
'CustomAlertSound "{}_map_okay"'.format('normal')],
"map yellow": ["SetBorderColor {} 150".format(color['lightning']),
'MinimapIcon 2 Yellow Diamond',
"SetFontSize {}".format(size['normal'])],
"map white": ["SetBorderColor {} 150".format(color['normal']),
'MinimapIcon 2 White Diamond',
"SetFontSize {}".format(size['normal'])],
"map highlight": ["SetBorderColor {}".format(color['gem']),
'MinimapIcon 0 Blue Diamond',
'PlayEffect Blue',
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_map_good"'.format('high'),
"SetBackgroundColor {} 220".format(color['supporterpack'])],
"map very good": ["SetBorderColor {}".format(color['fire']),
'MinimapIcon 0 Red Diamond',
'PlayEffect Red',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_map_good"'.format('max')],
"map red good": ["SetBorderColor {}".format(color['fire']),
'MinimapIcon 0 Red Diamond',
'PlayEffect Red',
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_map_good"'.format('high')],
"map yellow good": ["SetBorderColor {}".format(color['lightning']),
'MinimapIcon 1 Yellow Diamond',
'PlayEffect Yellow',
'CustomAlertSound "{}_map_good"'.format('medium'),
"SetFontSize {}".format(size['large'])],
"map white good": ["SetBorderColor {}".format(color['normal']),
'MinimapIcon 1 White Diamond',
'PlayEffect White',
"SetFontSize {}".format(size['normal']),
'CustomAlertSound "{}_map_good"'.format('low')],
"influenced map red": ["SetBorderColor {} 150".format(color['fire']),
'MinimapIcon 1 Red Cross',
"SetFontSize {}".format(size['large']),
'CustomAlertSound "{}_map_okay"'.format('normal')],
"influenced map yellow": ["SetBorderColor {} 150".format(color['lightning']),
'MinimapIcon 2 Yellow Cross',
"SetFontSize {}".format(size['normal'])],
"influenced map white": ["SetBorderColor {} 150".format(color['normal']),
'MinimapIcon 2 White Cross',
"SetFontSize {}".format(size['normal'])],
"influenced map very good": ["SetBorderColor {}".format(color['fire']),
'MinimapIcon 0 Red Cross',
'PlayEffect Red',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_map_good"'.format('max')],
"influenced map red good": ["SetBorderColor {}".format(color['fire']),
'MinimapIcon 0 Red Cross',
'PlayEffect Red',
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_map_good"'.format('high')],
"influenced map yellow good": ["SetBorderColor {}".format(color['lightning']),
'MinimapIcon 1 Yellow Cross',
'PlayEffect Yellow',
'CustomAlertSound "{}_map_good"'.format('medium'),
"SetFontSize {}".format(size['large'])],
"influenced map white good": ["SetBorderColor {}".format(color['normal']),
'MinimapIcon 1 White Cross',
'PlayEffect White',
"SetFontSize {}".format(size['normal']),
'CustomAlertSound "{}_map_good"'.format('low')],
"leveling high": ["SetFontSize {}".format(size['normal']),
'MinimapIcon 2 Orange Moon',
"SetBorderColor {}".format(color['nemesisoutline'])],
"leveling normal": ["SetFontSize {}".format(size['small']),
'MinimapIcon 2 Orange Moon',
"SetBorderColor {}".format(color['tormentoutline'])],
"leveling low": ["SetFontSize {}".format(size['minimum']),
"SetBorderColor {}".format(color['normal'])],
"levelling rare high": ["Rarity Rare",
"SetBorderColor {} 150".format(color['rare']),
"SetFontSize {}".format(size['large'])],
"rare highlight": ["Rarity Rare",
'MinimapIcon 2 Yellow Circle',
"SetFontSize {}".format(size['large']),
"SetBorderColor {} 150".format(color['premiumorange'])],
"rare high": ["Rarity Rare",
"SetBorderColor {}".format(color['rare']),
"SetFontSize {}".format(size['normal'])],
"levelling rare normal": ["Rarity Rare",
"SetBorderColor {} 150".format(color['rare']),
"SetFontSize {}".format(size['normal'])],
"rare normal": ["Rarity Rare",
"SetFontSize {}".format(size['normal'])],
"rare low": ["Rarity Rare",
"SetFontSize {}".format(size['small'])],
"rare corrupted": ["Rarity Rare",
"SetFontSize {}".format(size['small']),
"SetBorderColor {} 100".format(color['premiumlred'])],
"chromatic": ["SocketGroup RGB",
"SetBorderColor {}".format(color['premiumgreen']),
"SetFontSize {}".format(size['normal'])],
"recipe item normal": ["SetBorderColor {}".format(color['premiumlavender']),
'MinimapIcon 2 Grey Square',
"SetFontSize {}".format(size['minimum'])],
"recipe item rare": ["SetBorderColor {}".format(color['premiumlavender']),
'MinimapIcon 2 Grey Square',
"SetFontSize {}".format(size['normal']),
"SetBackgroundColor {} 220".format(color['magic'])],
"recipe item rare small": ["SetBorderColor {}".format(color['premiumlavender']),
'MinimapIcon 2 Grey Square',
"SetFontSize {}".format(size['small']),
"SetBackgroundColor {} 220".format(color['cold'])],
"item mod": ["SetBorderColor {} 200".format(color['premiumlpurple']),
'MinimapIcon 2 Grey UpsideDownHouse',
"SetFontSize {}".format(size['normal'])],
# Special sound for anything worth at least 1/20th of a mirror
"base mirror": ["SetBorderColor {}".format(color['premiumgreen']),
"Rarity < Unique",
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_base"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"base extremely high": ["Rarity < Unique",
'MinimapIcon 0 Green Circle',
'PlayEffect Green',
"SetBorderColor {}".format(color['premiumgreen']),
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_base"'.format('max'),
"SetBackgroundColor {} 220".format(color['highlight'])],
"base very high": ["Rarity < Unique",
'MinimapIcon 1 Blue Circle',
'PlayEffect Blue',
"SetBorderColor {}".format(color['premiumgreen']),
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_base"'.format('high')],
# Special sound for anything worth at least 1/20th of a mirror
"unique mirror": ["SetBorderColor {}".format(color['currency']),
"Rarity Unique",
'MinimapIcon 0 Yellow UpsideDownHouse',
'PlayEffect Yellow',
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_currency"'.format("mirror"),
"SetBackgroundColor {} 220".format(color['highlight'])],
"unique extremely high": ["Rarity Unique",
'MinimapIcon 0 Green Star',
'PlayEffect Green',
"SetBorderColor {}".format(color['unique']),
"SetFontSize {}".format(size['huge']),
'CustomAlertSound "{}_unique"'.format('max'),
"SetBackgroundColor {} 220".format(color['highlight'])],
"unique very high": ["Rarity Unique",
'MinimapIcon 1 Blue Star',
'PlayEffect Blue',
"SetBorderColor {}".format(color['unique']),
"SetFontSize {}".format(size['vlarge']),
'CustomAlertSound "{}_unique"'.format('high')],
"unique high": ["Rarity Unique",
'MinimapIcon 1 Blue Star',
"SetFontSize {}".format(size['normal']),
"SetBorderColor {}".format(color['unique']),
'CustomAlertSound "{}_unique"'.format('normal')],
# Special class of unique that has a low average value but has some items that are quite valuable
"unique special": ["Rarity Unique",
"SetFontSize {}".format(size['normal']),
'MinimapIcon 2 White Star',
"SetBorderColor {}".format(color['chaos']),
'CustomAlertSound "{}_unique"'.format('medium')],
# Special class of unique where only a restricted drop is valuable
"unique limited": ["Rarity Unique",
"SetFontSize {}".format(size['normal']),
"SetBorderColor {}".format(color['premiumblue']),
'MinimapIcon 2 | |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the adversarial patch attack `AdversarialPatch`. This attack generates an adversarial patch that
can be printed into the physical world with a common printer. The patch can be used to fool image and video classifiers.
| Paper link: https://arxiv.org/abs/1712.09665
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
from tqdm.auto import trange
from art.attacks.attack import EvasionAttack
from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin
from art.estimators.classification.classifier import ClassifierMixin
from art.utils import check_and_transform_label_format, is_probability, to_categorical
if TYPE_CHECKING:
# pylint: disable=C0412
import torch
from art.utils import CLASSIFIER_NEURALNETWORK_TYPE
logger = logging.getLogger(__name__)
class AdversarialPatchPyTorch(EvasionAttack):
"""
Implementation of the adversarial patch attack for square and rectangular images and videos in PyTorch.
| Paper link: https://arxiv.org/abs/1712.09665
"""
attack_params = EvasionAttack.attack_params + [
"rotation_max",
"scale_min",
"scale_max",
"distortion_scale_max",
"learning_rate",
"max_iter",
"batch_size",
"patch_shape",
"verbose",
]
_estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin)
def __init__(
self,
classifier: "CLASSIFIER_NEURALNETWORK_TYPE",
rotation_max: float = 22.5,
scale_min: float = 0.1,
scale_max: float = 1.0,
distortion_scale_max: float = 0.0,
learning_rate: float = 5.0,
max_iter: int = 500,
batch_size: int = 16,
patch_shape: Optional[Tuple[int, int, int]] = None,
patch_type: str = "circle",
verbose: bool = True,
):
"""
Create an instance of the :class:`.AdversarialPatchPyTorch`.
:param classifier: A trained classifier.
:param rotation_max: The maximum rotation applied to random patches. The value is expected to be in the
range `[0, 180]`.
:param scale_min: The minimum scaling applied to random patches. The value should be in the range `[0, 1]`,
but less than `scale_max`.
:param scale_max: The maximum scaling applied to random patches. The value should be in the range `[0, 1]`, but
larger than `scale_min`.
:param distortion_scale_max: The maximum distortion scale for perspective transformation in range `[0, 1]`. If
distortion_scale_max=0.0 the perspective transformation sampling will be disabled.
:param learning_rate: The learning rate of the optimization.
:param max_iter: The number of optimization steps.
:param batch_size: The size of the training batch.
        :param patch_shape: The shape of the adversarial patch as a tuple of shape CHW (nb_channels, height, width).
:param patch_type: The patch type, either circle or square.
:param verbose: Show progress bars.
"""
import torch # lgtm [py/repeated-import]
import torchvision
        # Compare only the numeric major/minor components so that local version
        # suffixes such as "+cu117" do not break parsing.
        torch_version = tuple(int(v) for v in torch.__version__.lower().split("+")[0].split(".")[:2])
        torchvision_version = tuple(int(v) for v in torchvision.__version__.lower().split("+")[0].split(".")[:2])
        assert torch_version >= (1, 7), "AdversarialPatchPyTorch requires torch>=1.7.0"
        assert torchvision_version >= (0, 8), "AdversarialPatchPyTorch requires torchvision>=0.8.0"
super().__init__(estimator=classifier)
self.rotation_max = rotation_max
self.scale_min = scale_min
self.scale_max = scale_max
self.distortion_scale_max = distortion_scale_max
self.learning_rate = learning_rate
self.max_iter = max_iter
self.batch_size = batch_size
if patch_shape is None:
self.patch_shape = self.estimator.input_shape
else:
self.patch_shape = patch_shape
self.patch_type = patch_type
self.image_shape = classifier.input_shape
self.verbose = verbose
self._check_params()
if not self.estimator.channels_first:
raise ValueError("Input shape has to be wither NCHW or NFCHW.")
self.i_h_patch = 1
self.i_w_patch = 2
self.input_shape = self.estimator.input_shape
self.nb_dims = len(self.image_shape)
if self.nb_dims == 3:
self.i_h = 1
self.i_w = 2
elif self.nb_dims == 4:
self.i_h = 2
self.i_w = 3
if self.patch_shape[1] != self.patch_shape[2]:
raise ValueError("Patch height and width need to be the same.")
if not (self.estimator.postprocessing_defences is None or self.estimator.postprocessing_defences == []):
raise ValueError(
"Framework-specific implementation of Adversarial Patch attack does not yet support "
+ "postprocessing defences."
)
mean_value = (self.estimator.clip_values[1] - self.estimator.clip_values[0]) / 2.0 + self.estimator.clip_values[
0
]
self._initial_value = np.ones(self.patch_shape) * mean_value
self._patch = torch.tensor(self._initial_value, requires_grad=True, device=self.estimator.device)
self._optimizer = torch.optim.Adam([self._patch], lr=self.learning_rate)
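    # Illustrative usage sketch (not part of the original module): `classifier`, the
    # input batch `x`, and labels `y` are placeholders for a trained channels-first
    # ART PyTorch classifier and its data; the `generate` call assumes the standard
    # ART adversarial-patch interface, which is not shown in this excerpt.
    #
    #   attack = AdversarialPatchPyTorch(classifier, rotation_max=22.5, scale_min=0.4,
    #                                    scale_max=1.0, learning_rate=5.0, max_iter=500,
    #                                    batch_size=16, patch_shape=(3, 224, 224))
    #   patch, patch_mask = attack.generate(x=x, y=y)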
def _train_step(
self, images: "torch.Tensor", target: "torch.Tensor", mask: Optional["torch.Tensor"] = None
) -> "torch.Tensor":
import torch # lgtm [py/repeated-import]
self._optimizer.zero_grad()
loss = self._loss(images, target, mask)
loss.backward(retain_graph=True)
self._optimizer.step()
with torch.no_grad():
self._patch[:] = torch.clamp(
self._patch, min=self.estimator.clip_values[0], max=self.estimator.clip_values[1]
)
return loss
def _predictions(self, images: "torch.Tensor", mask: Optional["torch.Tensor"]) -> "torch.Tensor":
import torch # lgtm [py/repeated-import]
patched_input = self._random_overlay(images, self._patch, mask=mask)
patched_input = torch.clamp(
patched_input,
min=self.estimator.clip_values[0],
max=self.estimator.clip_values[1],
)
predictions = self.estimator._predict_framework(patched_input) # pylint: disable=W0212
return predictions
def _loss(self, images: "torch.Tensor", target: "torch.Tensor", mask: Optional["torch.Tensor"]) -> "torch.Tensor":
import torch # lgtm [py/repeated-import]
predictions = self._predictions(images, mask)
if self.use_logits:
loss = torch.nn.functional.cross_entropy(
input=predictions, target=torch.argmax(target, dim=1), reduction="mean"
)
else:
loss = torch.nn.functional.nll_loss(input=predictions, target=torch.argmax(target, dim=1), reduction="mean")
if not self.targeted:
loss = -loss
return loss
def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "torch.Tensor":
"""
Return a circular patch mask.
"""
import torch # lgtm [py/repeated-import]
diameter = np.minimum(self.patch_shape[self.i_h_patch], self.patch_shape[self.i_w_patch])
if self.patch_type == "circle":
x = np.linspace(-1, 1, diameter)
y = np.linspace(-1, 1, diameter)
x_grid, y_grid = np.meshgrid(x, y, sparse=True)
z_grid = (x_grid ** 2 + y_grid ** 2) ** sharpness
image_mask = 1 - np.clip(z_grid, -1, 1)
elif self.patch_type == "square":
image_mask = np.ones((diameter, diameter))
image_mask = np.expand_dims(image_mask, axis=0)
image_mask = np.broadcast_to(image_mask, self.patch_shape)
image_mask = torch.Tensor(np.array(image_mask))
image_mask = torch.stack([image_mask] * nb_samples, dim=0)
return image_mask
def _random_overlay(
self,
images: "torch.Tensor",
patch: "torch.Tensor",
scale: Optional[float] = None,
mask: Optional["torch.Tensor"] = None,
) -> "torch.Tensor":
import torch # lgtm [py/repeated-import]
import torchvision
nb_samples = images.shape[0]
image_mask = self._get_circular_patch_mask(nb_samples=nb_samples)
image_mask = image_mask.float()
smallest_image_edge = np.minimum(self.image_shape[self.i_h], self.image_shape[self.i_w])
image_mask = torchvision.transforms.functional.resize(
img=image_mask,
size=(smallest_image_edge, smallest_image_edge),
interpolation=2,
)
pad_h_before = int((self.image_shape[self.i_h] - image_mask.shape[self.i_h_patch + 1]) / 2)
pad_h_after = int(self.image_shape[self.i_h] - pad_h_before - image_mask.shape[self.i_h_patch + 1])
pad_w_before = int((self.image_shape[self.i_w] - image_mask.shape[self.i_w_patch + 1]) / 2)
pad_w_after = int(self.image_shape[self.i_w] - pad_w_before - image_mask.shape[self.i_w_patch + 1])
image_mask = torchvision.transforms.functional.pad(
img=image_mask,
padding=[pad_h_before, pad_w_before, pad_h_after, pad_w_after],
fill=0,
padding_mode="constant",
)
if self.nb_dims == 4:
image_mask = torch.unsqueeze(image_mask, dim=1)
image_mask = torch.repeat_interleave(image_mask, dim=1, repeats=self.input_shape[0])
image_mask = image_mask.float()
patch = patch.float()
padded_patch = torch.stack([patch] * nb_samples)
padded_patch = torchvision.transforms.functional.resize(
img=padded_patch,
size=(smallest_image_edge, smallest_image_edge),
interpolation=2,
)
padded_patch = torchvision.transforms.functional.pad(
img=padded_patch,
padding=[pad_h_before, pad_w_before, pad_h_after, pad_w_after],
fill=0,
padding_mode="constant",
)
if self.nb_dims == 4:
padded_patch = torch.unsqueeze(padded_patch, dim=1)
padded_patch = torch.repeat_interleave(padded_patch, dim=1, repeats=self.input_shape[0])
padded_patch = padded_patch.float()
image_mask_list = list()
padded_patch_list = list()
for i_sample in range(nb_samples):
if scale is None:
im_scale = np.random.uniform(low=self.scale_min, high=self.scale_max)
else:
im_scale = scale
if mask is None:
padding_after_scaling_h = (
self.image_shape[self.i_h] - im_scale * padded_patch.shape[self.i_h + 1]
) / 2.0
padding_after_scaling_w = (
self.image_shape[self.i_w] - im_scale * padded_patch.shape[self.i_w + 1]
) / 2.0
x_shift = np.random.uniform(-padding_after_scaling_w, padding_after_scaling_w)
y_shift = np.random.uniform(-padding_after_scaling_h, padding_after_scaling_h)
else:
mask_2d = mask[i_sample, :, :]
edge_x_0 = int(im_scale * padded_patch.shape[self.i_w + 1]) // 2
edge_x_1 = int(im_scale * padded_patch.shape[self.i_w + 1]) - edge_x_0
edge_y_0 = int(im_scale * padded_patch.shape[self.i_h + 1]) // 2
edge_y_1 = int(im_scale * padded_patch.shape[self.i_h + 1]) - edge_y_0
mask_2d[0:edge_x_0, :] = False
if edge_x_1 > 0:
mask_2d[-edge_x_1:, :] = False
mask_2d[:, 0:edge_y_0] = False
if edge_y_1 > 0:
mask_2d[:, -edge_y_1:] = False
num_pos = np.argwhere(mask_2d).shape[0]
pos_id = np.random.choice(num_pos, size=1)
pos = np.argwhere(mask_2d)[pos_id[0]]
x_shift = pos[1] - self.image_shape[self.i_w] // 2
y_shift = pos[0] - self.image_shape[self.i_h] // 2
phi_rotate = float(np.random.uniform(-self.rotation_max, self.rotation_max))
image_mask_i = image_mask[i_sample]
height = padded_patch.shape[self.i_h + 1]
width = padded_patch.shape[self.i_w + 1]
half_height = height // 2
half_width = width // 2
topleft = [
int(torch.randint(0, int(self.distortion_scale_max * half_width) + 1, size=(1,)).item()),
int(torch.randint(0, int(self.distortion_scale_max * half_height) + 1, size=(1,)).item()),
]
topright = [
int(torch.randint(width - int(self.distortion_scale_max * half_width) - 1, width, size=(1,)).item()),
int(torch.randint(0, int(self.distortion_scale_max * half_height) + 1, size=(1,)).item()),
]
botright = [
int(torch.randint(width - int(self.distortion_scale_max * half_width) - 1, width, size=(1,)).item()),
int(torch.randint(height - | |
Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine1},
subroutine2: {subroutine1},
subroutine3: {subroutine3},
}
localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
],
subroutine1: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.dig, 1),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
],
subroutine2: [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
subroutine3: [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
],
}
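# The tests below (like the one above) exercise spillLocalSlotsDuringRecursion: before a
# recursive callsub, the caller must push the callee's local scratch slots onto the stack
# and restore them afterwards. The expected op lists show the TEAL v4 spill pattern
# (load/dig/swap/pop) versus the more compact v5 pattern (cover/uncover).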
def test_spillLocalSlotsDuringRecursion_multiple_subroutines_recursion_v5():
def sub1Impl(a1):
return None
def sub2Impl(a1, a2):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine3 = SubroutineDefinition(sub1Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine1},
subroutine2: {subroutine1},
subroutine3: {subroutine3},
}
localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
],
subroutine1: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.swap),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
],
subroutine2: [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
subroutine3: [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_no_return_v4():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.none)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.pop),
TealOp(None, Op.pop),
TealOp(None, Op.pop),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_no_return_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.none)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_return_v4():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 0),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_return_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.cover, 3),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_more_args_than_slots_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
| |
#############################################################################
#
#
# BF-Assist c7-branch
#
#
#############################################################################
""" This is the top/root module of bf-assist. A software that's supposed to assist with the administration and
maintenance of Battlefield 1942 and its game servers. As of the last check it contains the following package structure:
c7
bfassist -----> api
|-> certificates
|-> colours
|-> master ---> league
|-> network \-> client
| -> master
|-> references ---> binaries -> league
| \-> eventlogs -> import
| -> maps -> league
|-> sql
|-> standalone ---> admin
| |-> api
| |-> monitoring
| |-> refractor
| \-> server
| -> webclient ----> bfaconsole
| |-> offline
| |-> setup
| \-> update
| -> webclient
|-> usersystem
|-> webgen ---> framework ----> css
| \-> html
| -> js
\-> webservice ---> requesthandler
-> bfa_logging
setup
Short description of the packages and their dependencies if any.
api The api module can be used independently from bfa to help weave in light-weight api capabilities
to any python project supporting the typing features used. Bfa is supposed to run on python 3.7 and
therefore can't utilise some features from the newer python typing packages that could be really
useful for extending functionality. The webservice and webgen packages offer some handy support
functions for the api package. For instance making the api available from HTTPS and generating
HTML, JS to interact with it.
No dependencies.
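As a purely hypothetical illustration (not the actual bfassist.api interface), the
idea of registering type-annotated functions as light-weight api endpoints could
look roughly like this:

    registered_endpoints = {}

    def api_endpoint(func):
        # the type hints double as the endpoint's parameter description
        registered_endpoints[func.__name__] = func.__annotations__
        return func

    @api_endpoint
    def getServer(name: str) -> dict:
        return {'name': name}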
certificates Currently this package really just offers a bog-standard way for generating an SSL-certificate.
More functionality for administering certificates, for instance auto-replacing expired certs
etc. was/is intended but probably won't be added anytime soon. Could potentially be replaced by some
already existing package that we were too lazy to search for.
It requires crypto from OpenSSL (pyOpenSSL) as a third-party dependency.
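A minimal sketch of the kind of self-signed certificate generation described here,
using pyOpenSSL directly (illustration only, not the bfassist.certificates code):

    from OpenSSL import crypto

    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)
    cert = crypto.X509()
    cert.get_subject().CN = 'localhost'
    cert.set_serial_number(1)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(key)
    cert.sign(key, 'sha256')
    with open('bfa.pem', 'wb') as pem:
        pem.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        pem.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))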
colours Similar type of package as 'certificates'. Barely offers any functionality, but is useful in its own
regard and supports the webgen package well.
No dependencies.
master One of the main/big packages of bf-assist. Contains the main code required to run a bfa master
server.
For the future of bfa the following release-strategy is intended.
There should be 3 master servers representing the 3 version-stages of bfa:
The development, experimental and stable stage.
Each server will gain the ability to redirect connecting clients to the correct stage depending on
version information the client sends alongside requests. Version information consists of a
branch code (a single letter and a digit) together with the number of the last revision of
the files in the SVN repository.
By the time of the first public bfa release only the development and experimental stage will run.
Since bfa is being developed closely linked with the purposes of the bf-league its content and
capabilities are currently mostly catering towards needs of the league. However, the contents are
not meant to be limited to that and league extensions for the client side of bfa are optional as
will be further explained for the standalone package.
The dependencies of the master package are as follows:
bfassist -----> certificates
|-> network -> master
|-> references
\-> sql
-> bfa_logging
However, while the master can be loaded and does run like this, it needs to be embedded in the
full c7-branch bfassist structure to be able to relay the client files and support the update and
auto-update features of bfa.
network The network package is still a bit clunky after the rework for the public release but it does its
job and can be easily refitted to also be useful for different applications. The package handles
the communication 'standalone-client -> master'.
The client always communicates with the master via request. It sends its network config file at the
start and then leaves the procession of its request to the master. A request may require some back
and forth between the client and the master but at the end of the procession the connection will be
closed. All traffic is SSL encrypted with the certificate of the master.
The dependencies of this package are a bit complicated. If the package is configured as client it
will always require:
bfassist -----> references
\-> (sql - dependency of bfa_logging)
-> bfa_logging
If it's furthermore also configured to use the league-extensions it will also require.
bfassist -----> standalone
If the package is configured as master it's generally equivalent to the master package itself and
therefore has the same dependencies as the master package:
bfassist -----> certificates
|-> master
|-> references
\-> sql
-> bfa_logging
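A hedged stdlib sketch of the request flow described above (host, port and payload
are made up; the real bfassist.network protocol differs in detail):

    import json, socket, ssl

    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE    # the master presents a self-signed cert

    with socket.create_connection(('bfa-master.example.org', 1942)) as raw:
        with context.wrap_socket(raw, server_hostname='bfa-master.example.org') as tls:
            tls.sendall(json.dumps({'request': 'update', 'branch': 'c7'}).encode())
            reply = tls.recv(4096)         # connection closes once the request is done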
references This package is mainly used for interacting with reference files for bf servers. For instance it
assists in managing binary executable files and maps. Also it offers some utility for dealing with
bf event logs. It's an exemplary use of the sql package for managing files.
Only dependency is:
bfassist -----> sql
sql Useful light-weight sql integration for python. Natively utilises sqlite3 but can be easily scaled
to mysql or postgresql. Main functionality: SQL database as python dictionary and size-management
for setting a database-size threshold. The management will automatically delete data in the order of
priority rules specified to remain below the size-threshold.
No dependencies.
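The 'SQL database as python dictionary' idea can be pictured with plain sqlite3
(illustration only, not the bfassist.sql interface):

    import sqlite3

    class SqlDict:
        def __init__(self, path, table='kv'):
            self.con = sqlite3.connect(path)
            self.table = table
            self.con.execute('CREATE TABLE IF NOT EXISTS %s (k TEXT PRIMARY KEY, v TEXT)' % table)

        def __setitem__(self, key, value):
            self.con.execute('REPLACE INTO %s VALUES (?, ?)' % self.table, (key, value))
            self.con.commit()

        def __getitem__(self, key):
            row = self.con.execute('SELECT v FROM %s WHERE k = ?' % self.table, (key,)).fetchone()
            if row is None:
                raise KeyError(key)
            return row[0]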
standalone Oldest and core functionality of bfa. A package to assist with the administration of bf servers run
on the same machine. Contains an exemplary implementation of the api and the web service. Otherwise,
very bf-specific. The standalone can be installed using the 'setup.py'.
The dependencies are as follows:
bfassist -----> api
|-> certificates
|-> colours
|-> network --> client
|-> references -\-> binaries
| -> maps
|-> sql
|-> standalone
|-> usersystem
|-> webgen
\-> webservice
-> bfa_logging
The league extensions will fit in the following places if enabled:
bfassist -----> network -> client ---> leagueclient
|-> references -\-> binaries ----> league
\ -> maps --------> league
-> standalone --> admin ----> administrationleague
usersystem A very simplistic user system, tailored to the needs of bf. Used as access-restriction for the
webservice/api and in-game admin capabilities. Can be easily customised.
Dependencies:
bfassist ---\-> sql
-> bfa_logging
webgen Ambitious web-generator/framework to programmatically generate "Views"(HTML, JS, CSS) that can then
be served via a webserver, in the case of bfa, the included webservice. Sort of follows concepts of
the model-view-presenter scheme but only implements what's necessary for bfa to function
the way it is supposed to. Flask or Django might have been a more appropriate choice but seemed like
overkill at the start of the project. Bfa will probably continue to go with webgen and grow it
according to its needs.
The webgen package is not inherently dependent on any other packages apart from colours. However,
certain functions in the package are so tailored to the needs of bfa that they are dependent on
the standalone. So the dependencies are like this:
bfassist ---\-> colours
-> (standalone)
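As a rough illustration of programmatic view generation (hypothetical helper, not
the webgen api):

    def tag(name, *children, **attrs):
        attributes = ''.join(' %s="%s"' % (k, v) for k, v in attrs.items())
        return '<%s%s>%s</%s>' % (name, attributes, ''.join(children), name)

    view = tag('html', tag('body', tag('h1', 'bfa console'),
                           tag('a', 'servers', href='/servers')))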
webservice A light-weight web server based on the python base HTTP server. It's ported to support only HTTPS
encrypted with an SSL certificate that can be automatically generated if no other certificate is
supplied.
It implements handling of GET/POST/PUT requests for the purpose of enabling a web-api in conjunction
with the api package. However, it's very easy to exclude the API functionality and simply run it as
a light-weight HTTPS web server or even remove the encryption if that's desired.
For serving web-sites it requires 'View' objects instantiated through the webgen module.
Furthermore it implements a light-weight session-management protocol that lets users authenticate with
(keyhash)/username/password from the usersystem package. By default api and web-service are hidden
behind a single 'offline View' and only become visible to clients that have logged in previously.
Session-integrity is maintained with a cookie that expires after 15 minutes by default.
Dependencies:
bfassist -----> api
|-> certificates
|-> (colours - | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0501205,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.242055,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.233396,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.491029,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.850285,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.487663,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.82898,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.449578,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.209,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0440935,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0178002,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.148997,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.131643,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.193091,
'Execution Unit/Register Files/Runtime Dynamic': 0.149444,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.37377,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.944148,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.57,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0036616,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0036616,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00320604,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0012503,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00189107,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0124203,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.034507,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.126552,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.437521,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.429828,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.04083,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0708431,
'L2/Runtime Dynamic': 0.0160794,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.16822,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.90363,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.127181,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.12718,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.77124,
'Load Store Unit/Runtime Dynamic': 2.65802,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.313605,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.627211,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.1113,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.112135,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0724011,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.750909,
'Memory Management Unit/Runtime Dynamic': 0.184536,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.3324,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.153832,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0269596,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.25469,
'Renaming Unit/Int Front End RAT/Subthreshold | |
#-------------------------------------
# Project: Transductive Propagation Network for Few-shot Learning
# Date: 2019.1.11
# Author: <NAME>
# All Rights Reserved
#-------------------------------------
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
import numpy as np
from complex_module import Complex,\
C_MaxPooling, C_conv2d, C_BatchNorm2d,\
C_ReLU, complex_weight_init, C_Linear, C_BatchNorm, C_AvePooling
class CNNEncoder(nn.Module):
"""Encoder for feature embedding"""
def __init__(self, args):
super(CNNEncoder, self).__init__()
self.args = args
h_dim, z_dim = args['h_dim'], args['z_dim']
if not self.args['complex']:
if self.args['Relation_layer'] == 1:
self.layer1 = nn.Sequential(
nn.Conv2d(3, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(128,128,kernel_size=3,padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer3 = nn.Sequential(
nn.Conv2d(128,128,kernel_size=3,padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer4 = nn.Sequential(
nn.Conv2d(128,64,kernel_size=3,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
else:
# layer 1
self.layer11 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer12 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer13 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer14 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
# layer 2
self.layer21 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer22 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer23 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=5, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer24 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=5, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
# layer 3
self.layer31 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer32 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=7, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer33 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=7, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer34 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=7, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2))
else:
self.layer1 = nn.Sequential(
C_conv2d(3, 64, kernel_size=3, padding=1),
C_BatchNorm2d(64),
C_ReLU(),
#C_MaxPooling(2),
C_AvePooling(2)
)
self.layer2 = nn.Sequential(
C_conv2d(64, 64, kernel_size=3, padding=1),
C_BatchNorm2d(64),
C_ReLU(),
#C_MaxPooling(2),
C_AvePooling(2)
)
self.layer3 = nn.Sequential(
C_conv2d(64, 64, kernel_size=3, padding=1),
C_BatchNorm2d(64),
C_ReLU(),
#C_MaxPooling(2),
C_AvePooling(2)
)
self.layer4 = nn.Sequential(
C_conv2d(64, 64, kernel_size=3, padding=1),
C_BatchNorm2d(64),
C_ReLU(),
#C_MaxPooling(2),
C_AvePooling(2)
)
def forward(self,x):
"""x: bs*3*84*84 """
if self.args['Relation_layer'] == 1:
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
return out
else:
out1 = self.layer11(x)
out1 = self.layer12(out1)
out1 = self.layer13(out1)
out1 = self.layer14(out1)
out2 = self.layer21(x)
out2 = self.layer22(out2)
out2 = self.layer23(out2)
out2 = self.layer24(out2)
out3 = self.layer31(x)
out3 = self.layer32(out3)
out3 = self.layer33(out3)
out3 = self.layer34(out3)
return [out1, out2, out3]
class RelationNetwork(nn.Module):
"""Graph Construction Module"""
def __init__(self, args):
super(RelationNetwork, self).__init__()
self.args = args
if self.args['complex']:
self.layer1 = nn.Sequential(
C_conv2d(64, 64, kernel_size=3, padding=1),
C_BatchNorm2d(64),
C_ReLU(),
#C_MaxPooling(kernel_size=2, padding=1),
C_AvePooling(kernel_size=2, padding=1)
)
self.layer2 = nn.Sequential(
C_conv2d(64, 1, kernel_size=3, padding=1),
C_BatchNorm2d(1),
C_ReLU(),
#C_MaxPooling(kernel_size=2, padding=1),
C_AvePooling(kernel_size=2, padding=1),
)
self.fc3 = C_Linear(2 * 2, 8)
self.fc4 = C_Linear(8, 1)
self.relu = C_ReLU()
else:
self.layer1 = nn.Sequential(
nn.Conv2d(64,64,kernel_size=3,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, padding=1)
)
self.layer2 = nn.Sequential(
nn.Conv2d(64,1,kernel_size=3,padding=1),
nn.BatchNorm2d(1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, padding=1))
self.fc3 = nn.Linear(2*2, 8)
self.fc4 = nn.Linear(8, 1)
self.relu = nn.ReLU()
self.m0 = nn.MaxPool2d(2) # max-pool without padding
self.m1 = nn.MaxPool2d(2, padding=1) # max-pool with padding
def forward(self, x, rn):
x = x.view(-1, 64, 5, 5)
out = self.layer1(x)
out = self.layer2(out)
# flatten
out = out.view(out.size(0), -1)
#out = out.view(out.size(0),-1)
out = self.relu(self.fc3(out))
out = self.fc4(out) # no relu
out = out.view(out.size(0), -1)
# bs*1
return out
class Prototypical(nn.Module):
"""Main Module for prototypical networlks"""
def __init__(self, args):
super(Prototypical, self).__init__()
self.im_width, self.im_height, self.channels = list(map(int, args['x_dim'].split(',')))
self.h_dim, self.z_dim = args['h_dim'], args['z_dim']
self.args = args
self.encoder = CNNEncoder(args)
def forward(self, inputs):
"""
inputs are preprocessed
support: (N_way*N_shot)x3x84x84
query: (N_way*N_query)x3x84x84
s_labels: (N_way*N_shot)xN_way, one-hot
q_labels: (N_way*N_query)xN_way, one-hot
"""
[support, s_labels, query, q_labels] = inputs
num_classes = s_labels.shape[1]
num_support = int(s_labels.shape[0] / num_classes)
num_queries = int(query.shape[0] / num_classes)
inp = torch.cat((support,query), 0)
if self.args['complex']:
inp = Complex(inp)
emb = self.encoder(inp) # 80x64x5x5
emb_s, emb_q = torch.split(emb, [num_classes*num_support, num_classes*num_queries], 0)
emb_s = emb_s.view(num_classes, num_support, 1600).mean(1)
emb_q = emb_q.view(-1, 1600)
emb_s = torch.unsqueeze(emb_s,0) # 1xNxD
emb_q = torch.unsqueeze(emb_q,1) # Nx1xD
dist = ((emb_q-emb_s)**2).mean(2) # NxNxD -> NxN
ce = nn.CrossEntropyLoss().cuda(0)
loss = ce(-dist, torch.argmax(q_labels,1))
## acc
pred = torch.argmax(-dist,1)
gt = torch.argmax(q_labels,1)
correct = (pred==gt).sum()
total = num_queries*num_classes
acc = 1.0 * correct.float() / float(total)
return loss, acc
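# A minimal sketch (not part of the original model) of the prototype/logit computation
# performed in Prototypical.forward above; the helper name and toy shapes are hypothetical.
def _prototype_logits_sketch(emb_s, emb_q, num_classes, num_support):
    # emb_s: (num_classes * num_support) x D support embeddings
    # emb_q: M x D query embeddings
    protos = emb_s.view(num_classes, num_support, -1).mean(1)           # num_classes x D
    dist = ((emb_q.unsqueeze(1) - protos.unsqueeze(0)) ** 2).mean(2)    # M x num_classes
    return -dist  # negative squared distance acts as the logits for CrossEntropyLoss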
class LabelPropagation(nn.Module):
"""Label Propagation"""
def __init__(self, args):
super(LabelPropagation, self).__init__()
self.im_width, self.im_height, self.channels = list(map(int, args['x_dim'].split(',')))
self.h_dim, self.z_dim = args['h_dim'], args['z_dim']
self.args = args
self.encoder = CNNEncoder(args)
self.relation = RelationNetwork(args)
if args['rn'] == 300: # learned sigma, fixed alpha
self.alpha = torch.tensor([args['alpha']], requires_grad=False).cuda(0)
elif args['rn'] == 30: # learned sigma, learned alpha
self.alpha = nn.Parameter(torch.tensor([args['alpha']]).cuda(0), requires_grad=True)
if args['phrase']:
if args['beta'] ==30:
self.beta = nn.Parameter(torch.tensor([args['phrase']]).cuda(0), requires_grad=True)
else:
self.beta = torch.tensor([args['phrase']], requires_grad=False).cuda(0)  # fixed beta, mirroring the alpha handling above
#elif args['Beta'] ==300:
# self.BetaNet =
self.bceloss = nn.BCELoss().cuda(0)
self.CELoss = nn.CrossEntropyLoss().cuda(0)
def forward(self, inputs):
"""
inputs are preprocessed
support: (N_way*N_shot)x3x84x84
query: (N_way*N_query)x3x84x84
s_labels: (N_way*N_shot)xN_way, one-hot
q_labels: (N_way*N_query)xN_way, one-hot
"""
# init
eps = np.finfo(float).eps
[support, s_labels, query, q_labels] = inputs
num_classes = s_labels.shape[1]
num_support = int(s_labels.shape[0] / num_classes)
num_queries = int(query.shape[0] / num_classes)
# Step1: Embedding
inp = torch.cat((support,query), 0)
if self.args['complex']:
inp = Complex(inp)
emb_all = self.encoder(inp)
emb_all = emb_all.view(-1, 1600) if self.args['Relation_layer'] == 1 else [emb.view(-1, 1600) for emb in emb_all]
(N, d) = (emb_all.shape[0], emb_all.shape[1]) if self.args['Relation_layer'] == 1 else (emb_all[0].shape[0], emb_all[0].shape[1])
# Step2: Graph Construction
## sigma
if self.args['rn'] in [30, 300]:
if not self.args['Relation_layer'] == 1:
emb_all = torch.cat(emb_all, 0)
self.sigma = self.relation(emb_all , self.args)
#self.sigma = 0.25
## W
if self.args['complex']:
emb_all.real = (emb_all.real*self.sigma.real + emb_all.imag*self.sigma.imag)/self.sigma.mag()
emb_all.imag = (emb_all.imag*self.sigma.real - emb_all.real*self.sigma.imag)/self.sigma.mag()
else:
emb_all = emb_all / (self.sigma+eps) # N*d
if self.args['center'] != 0:
if self.args['complex']:
emb_support_real = emb_all.real[:len(support)].view(num_classes, num_support, -1)
emb_support_imag = emb_all.imag[:len(support)].view(num_classes, num_support, -1)
emb_query_real = emb_all.real[len(support):].view(num_classes, num_queries, -1)
emb_query_imag = emb_all.imag[len(support):].view(num_classes, num_queries, -1)
Center_emb_real = torch.cat([emb_support_real, emb_query_real], 1)
Center_emb_imag = torch.cat([emb_support_imag, emb_query_imag], 1)
even_emb_real = Center_emb_real.mean(1).unsqueeze(1)
even_emb_imag = Center_emb_imag.mean(1).unsqueeze(1)
Center_emb_real = Center_emb_real - even_emb_real
Center_emb_imag = Center_emb_imag - even_emb_imag
Center_emb = (Center_emb_real**2 + Center_emb_imag**2).mean(-1)
Center_emb = torch.exp(-Center_emb / 2)
else:
emb_support = emb_all[:len(support)].view(num_classes, num_support, -1)
emb_query = emb_all[len(support):].view(num_classes, num_queries, -1)
Center_emb = torch.cat([emb_support, emb_query], 1)
even_emb = Center_emb.mean(1).unsqueeze(1)
Center_emb = ((Center_emb - even_emb)**2).mean(2)
Center_emb = torch.exp(-Center_emb/2)
center_loss = self.bceloss(Center_emb, torch.ones(Center_emb.shape).cuda(0))
'''if self.args['complex']:
emb1 = emb_all.real.unsqueeze(0) - emb_all.real.unsqueeze(1)
emb2 = emb_all.imag.unsqueeze(0) - emb_all.imag.unsqueeze(1)
W = (emb1 ** 2 + emb2 ** 2).mean(-1)
else:
emb1 = torch.unsqueeze(emb_all.mag() if self.args['complex'] else emb_all, 1) # N*1*d
emb2 = torch.unsqueeze(emb_all.mag() if self.args['complex'] else emb_all, 0) # 1*N*d
W = ((emb1 - emb2) ** 2).mean(2) # N*N*d -> N*N'''
emb1 = torch.unsqueeze(emb_all.mag() if self.args['complex'] else emb_all, 1) # N*1*d
emb2 = torch.unsqueeze(emb_all.mag() if self.args['complex'] else emb_all, 0) # 1*N*d
W = ((emb1 - emb2) ** 2).mean(2) # N*N*d -> N*N
W = torch.softmax(W, -1)
if self.args['phrase'] and self.args['complex']:
#emb_all_real = emb_all.real.clone()
#emb_all_imag = emb_all.imag.clone()
sign = emb_all.real / (emb_all.imag +eps)
sign = (sign>0).int()
sign = sign + (emb_all.real<0).int()*2 + 1
phrase_emb1 = sign.unsqueeze(0)
phrase_emb2 = sign.unsqueeze(1)
phrase_W = (phrase_emb1-phrase_emb2).abs()
phrase_W[phrase_W==3]=1
phrase_W = (phrase_W.float()**2).mean(-1)
phrase_W = torch.softmax(phrase_W, -1)
'''phrase_emb = emb_all.imag/(emb_all.mag()+eps)
phrase_emb1 = phrase_emb.unsqueeze(0)
phrase_emb2 = phrase_emb.unsqueeze(1)'''
#phrase_W = ((phrase_emb1 - phrase_emb2)**2).mean(2)
if self.args['phrase']:
W = self.beta*W + (1-self.beta)*phrase_W
if not self.args['Relation_layer'] == 1:
W = W.view(N, self.args['Relation_layer'], N, self.args['Relation_layer'])
W = W.transpose(1, 2)
W = W.contiguous()
W = W.view(N, N, -1)
W = W.min(-1)[0]
W = torch.exp(-W/2)
## keep top-k values
if self.args['k']>0:
topk, indices = torch.topk(W, self.args['k'])
mask = torch.zeros_like(W)
mask = mask.scatter(1, indices, 1)
mask = ((mask + torch.t(mask)) > 0).type(torch.float32) # union, kNN graph
#mask = ((mask>0)&(torch.t(mask)>0)).type(torch.float32) # intersection, kNN graph
W = W*mask
## normalize
D = W.sum(0)
D_sqrt_inv = torch.sqrt(1.0/(D+eps))
D1 = torch.unsqueeze(D_sqrt_inv,1).repeat(1,N)
D2 = torch.unsqueeze(D_sqrt_inv,0).repeat(N,1)
S = D1*W*D2
# Step3: Label Propagation, F = (I-\alpha S)^{-1}Y
#ys = s_labels
#yu = torch.zeros(num_classes*num_queries, num_classes).cuda(0)
#yu = (torch.ones(num_classes*num_queries, num_classes)/num_classes).cuda(0)
s_index = torch.argmax(s_labels, 1)
q_index = torch.argmax(q_labels, 1).long().cuda(0)
index = torch.cat([s_index, q_index], 0)
y = self.label2edge(index)
#get phrase-loss
'''if self.args['phrase']:
phrase_label = y.clone()
phrase_loss = self.bceloss(phrase_W, phrase_label)'''
y[num_classes * num_support:, :] = 0
y[:, num_classes * num_support:] = 0
y[num_classes * num_support:, num_classes * num_support:] = 1 / num_classes
#############
#y = torch.cat((ys,yu),0)
F = torch.matmul(torch.inverse(torch.eye(N).cuda(0)-self.alpha*S+eps), y)
'''except:
tmp = torch.eye(N).cuda(0)-self.alpha*S+eps
tmp = torch.from_numpy(np.linalg.pinv(tmp.cpu().detach().numpy())).cuda(0)
F = torch.matmul(tmp, y)'''
F_q2s = F[num_classes * num_support:, :num_classes * num_support]
F_q2s = F_q2s.view(F_q2s.shape[0], num_classes, num_support)
F_q2s = F_q2s.sum(-1) / num_support
Fq = F[num_classes*num_support:, :num_classes*num_support] # query predictions
Fq = | |
dims=outdimnames, attrs=outattrs)
else:
outdata = outdata[:]
outarr = outdata
return outarr
def _get_numfiles(wrfseq):
"""Return the number of files in the sequence.
This function will first try to call the builtin :meth:`len` function, but
if that fails, the entire sequence will be iterated over and counted.
Args:
wrfseq (iterable): An iterable type, which includes lists, tuples,
dictionaries, generators, and user-defined classes.
Returns:
:obj:`int`: The number of files in the sequence.
"""
try:
return len(wrfseq)
except TypeError:
wrf_iter = iter(wrfseq)
return sum(1 for _ in wrf_iter)
def _join_files(wrfseq, varname, timeidx, is_moving, meta, _key):
"""Return an array object from a sequence of files using the join
method.
The join method creates a new leftmost dimension for the file/sequence
index. In situations where there are multiple files with multiple times,
and the last file contains fewer times than the previous files, the
remaining arrays will be arrays filled with missing values. There are
checks in place within the wrf-python algorithms to look for these missing
arrays, but be careful when calling compiled routines outside of
wrf-python.
In general, join is rarely used, so the concatenate method should be used
for most cases.
Args:
wrfseq (iterable): An iterable type, which includes lists, tuples,
dictionaries, generators, and user-defined classes.
varname (:obj:`str`) : The variable name.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
is_moving (:obj:`bool`): A boolean type that indicates if the
sequence is a moving nest.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): Cache key for the coordinate variables.
This is used for internal purposes only. Default is None.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: If xarray is
enabled and the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
if is_moving is None:
is_moving = is_moving_domain(wrfseq, varname, _key=_key)
multitime = is_multi_time_req(timeidx)
numfiles = _get_numfiles(wrfseq)
maxtimes = _find_max_time_size(wrfseq)
time_idx_or_slice = timeidx if not multitime else slice(None)
file_times_less_than_max = False
file_idx = 0
# wrfseq might be a generator
wrf_iter = iter(wrfseq)
wrfnc = next(wrf_iter)
numtimes = extract_dim(wrfnc, "Time")
if xarray_enabled() and meta:
first_var = _build_data_array(wrfnc, varname, ALL_TIMES, is_moving,
True, _key)
time_coord = np.full((numfiles, maxtimes), np.datetime64("NaT"),
"datetime64[ns]")
time_coord[file_idx, 0:numtimes] = first_var.coords["Time"][:]
else:
first_var = wrfnc.variables[varname][:]
if numtimes < maxtimes:
file_times_less_than_max = True
# Out dimensions will be the number of files, maxtimes, then the
# non-time shapes from the first variable
outdims = [numfiles]
outdims += [maxtimes]
outdims += first_var.shape[1:]
# For join, always need to start with full masked values
outdata = np.full(outdims, default_fill(first_var.dtype), first_var.dtype)
if first_var.ndim > 1:
outdata[file_idx, 0:numtimes, :] = first_var[:]
else:
outdata[file_idx, 0:numtimes] = first_var[:]
# Create the secondary coordinate arrays
if xarray_enabled() and meta:
latname, lonname, timename = _find_coord_names(first_var.coords)
outcoorddims = outdims[0:2] + outdims[-2:]
timecached = False
latcached = False
loncached = False
outxtimes = None
outlats = None
outlons = None
timekey = timename+"_join" if timename is not None else None
latkey = latname + "_join" if latname is not None else None
lonkey = lonname + "_join" if lonname is not None else None
if timename is not None:
outxtimes = get_cached_item(_key, timekey)
if outxtimes is None:
outxtimes = np.full(outdims[0:2],
default_fill(first_var.dtype),
first_var.dtype)
outxtimes[file_idx, 0:numtimes] = first_var.coords[timename][:]
else:
timecached = True
if is_moving:
if latname is not None:
outlats = get_cached_item(_key, latkey)
if outlats is None:
outlats = np.full(outcoorddims,
default_fill(first_var.dtype),
first_var.dtype)
outlats[file_idx, 0:numtimes, :] = (
first_var.coords[latname][:])
else:
latcached = True
if lonname is not None:
outlons = get_cached_item(_key, lonkey)
if outlons is None:
outlons = np.full(outcoorddims,
default_fill(first_var.dtype),
first_var.dtype)
outlons[file_idx, 0:numtimes, :] = (
first_var.coords[lonname][:])
else:
loncached = True
file_idx = 1
while True:
try:
wrfnc = next(wrf_iter)
except StopIteration:
break
else:
numtimes = extract_dim(wrfnc, "Time")
if numtimes < maxtimes:
file_times_less_than_max = True
outvar = wrfnc.variables[varname][:]
if not multitime:
outvar = outvar[np.newaxis, :]
if outvar.ndim > 1:
outdata[file_idx, 0:numtimes, :] = outvar[:]
else:
outdata[file_idx, 0:numtimes] = outvar[:]
if xarray_enabled() and meta:
# For join, the times are a function of fileidx
file_times = extract_times(wrfnc, ALL_TIMES, meta=False,
do_xtime=False)
time_coord[file_idx, 0:numtimes] = np.asarray(file_times,
"datetime64[ns]")[:]
if timename is not None and not timecached:
xtimedata = wrfnc.variables[timename][:]
outxtimes[file_idx, 0:numtimes] = xtimedata[:]
if is_moving:
if latname is not None and not latcached:
latdata = wrfnc.variables[latname][:]
outlats[file_idx, 0:numtimes, :] = latdata[:]
if lonname is not None and not loncached:
londata = wrfnc.variables[lonname][:]
outlons[file_idx, 0:numtimes, :] = londata[:]
# Need to update coords here
file_idx += 1
# If any of the output files contain less than the max number of times,
# then a mask array is needed to flag all the missing arrays with
# missing values
if file_times_less_than_max:
outdata = np.ma.masked_values(outdata, default_fill(outdata.dtype))
if xarray_enabled() and meta:
# Cache the coords if applicable
if not latcached and outlats is not None:
cache_item(_key, latkey, outlats)
if not loncached and outlons is not None:
cache_item(_key, lonkey, outlons)
if not timecached and outxtimes is not None:
cache_item(_key, timekey, outxtimes)
outname = first_var.name
outcoords = OrderedDict(first_var.coords)
outattrs = OrderedDict(first_var.attrs)
# New dimensions
outdimnames = ["file"] + list(first_var.dims)
outcoords["file"] = [i for i in py3range(numfiles)]
# Time needs to be multi dimensional, so use the default dimension
del outcoords["Time"]
time_coord = time_coord[:, time_idx_or_slice]
if not multitime:
time_coord = time_coord[:, np.newaxis]
outcoords["datetime"] = outdimnames[0:2], time_coord
if isinstance(outdata, np.ma.MaskedArray):
outattrs["_FillValue"] = default_fill(outdata.dtype)
outattrs["missing_value"] = default_fill(outdata.dtype)
if timename is not None:
outxtimes = outxtimes[:, time_idx_or_slice]
if not multitime:
outxtimes = outxtimes[:, np.newaxis]
outcoords[timename] = outdimnames[0:2], outxtimes[:]
# If the domain is moving, need to create the lat/lon coords
# since they can't be copied
if is_moving:
outlatdims = outdimnames[0:2] + outdimnames[-2:]
if latname is not None:
outlats = outlats[:, time_idx_or_slice, :]
if not multitime:
outlats = outlats[:, np.newaxis, :]
outcoords[latname] = outlatdims, outlats
if lonname is not None:
outlons = outlons[:, time_idx_or_slice, :]
if not multitime:
outlons = outlons[:, np.newaxis, :]
outcoords[lonname] = outlatdims, outlons
if not multitime:
outdata = outdata[:, timeidx, :]
outdata = outdata[:, np.newaxis, :]
outarr = DataArray(outdata, name=outname, coords=outcoords,
dims=outdimnames, attrs=outattrs)
else:
if not multitime:
outdata = outdata[:, timeidx, :]
outdata = outdata[:, np.newaxis, :]
outarr = outdata
return outarr
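# Rough usage sketch (illustrative only; variable names and file paths are hypothetical).
# With a sequence of wrfout files, the two aggregation methods differ in the leftmost
# dimensions of the result:
#
#     from netCDF4 import Dataset
#     from wrf import getvar, ALL_TIMES
#
#     wrflist = [Dataset(path) for path in wrf_paths]
#     p_cat = getvar(wrflist, "P", timeidx=ALL_TIMES, method="cat")    # (Time, ...)
#     p_join = getvar(wrflist, "P", timeidx=ALL_TIMES, method="join")  # (file, Time, ...)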
def combine_files(wrfin, varname, timeidx, is_moving=None,
method="cat", squeeze=True, meta=True,
_key=None):
"""Combine and return an array object for the sequence of WRF output
files.
Two aggregation methodologies are available to combine the sequence:
- 'cat': Concatenate the files along the 'Time' dimension. The Time
dimension will be the leftmost dimension. No sorting is performed,
so files must be properly ordered in the sequence prior to calling
this function.
- 'join': Join the files by creating a new leftmost dimension for the
file index. In situations where there are multiple files with
multiple times, and the last file contains fewer times than the
previous files, the remaining arrays will be filled with
missing values. There are checks in place within the wrf-python
algorithms to look for these missing arrays, | |
edge whose key we are getting
Returns:
The canonical edge key
"""
return self.canonical_edge_keys[edge]
def _add_edge_key(self, u_agent: Agent, v_agent: Agent):
"""Creates a canonical edge key for the pair of nodes.
Args:
u_agent: the u node agent
v_agent: the v node agent
"""
edge_key = (u_agent, v_agent)
self.canonical_edge_keys[edge_key] = edge_key
self.canonical_edge_keys[(v_agent, u_agent)] = edge_key
return edge_key
def _remove_edge_key(self, u_agent: Agent, v_agent: Agent):
"""Removes the canonical edge key for the specified edge.
Args:
u_agent: the u node agent
v_agent: the v node agent
"""
edge_key = (u_agent, v_agent)
self.canonical_edge_keys.pop(edge_key)
self.canonical_edge_keys.pop((v_agent, u_agent))
def add_edge(self, u_agent: Agent, v_agent: Agent, **kwattr):
"""Adds an edge between u_agent and v_agent.
If the u and v agents are not existing nodes in the network, they
will be added. Edge attributes can be added using keyword
arguments.
Args:
u_agent: The u agent
v_agent: The v agent
kwattr: optional keyword arguments for assigning edge data.
Examples:
Add an edge with a weight attribute
>>> g.add_edge(agent1, agent2, weight=3.1)
"""
if u_agent.local_rank == self.rank and v_agent.local_rank != self.rank:
if not self.graph.has_node(v_agent):
self.ghosts_to_ref.append(v_agent)
self.graph.add_edge(u_agent, v_agent, **kwattr)
edge_key = self._add_edge_key(u_agent, v_agent)
ge = GhostedEdge(u_agent, v_agent, self.graph.edges[edge_key])
self.new_edges[edge_key] = ge
self.edges_to_remove.pop(edge_key, None)
elif u_agent.local_rank != self.rank and v_agent.local_rank == self.rank:
# assume v_agent is local
if not self.graph.has_node(u_agent):
self.ghosts_to_ref.append(u_agent)
self.graph.add_edge(u_agent, v_agent, **kwattr)
edge_key = self._add_edge_key(u_agent, v_agent)
ge = GhostedEdge(v_agent, u_agent, self.graph.edges[edge_key])
self.new_edges[edge_key] = ge
self.edges_to_remove.pop(edge_key, None)
else:
self._add_edge_key(u_agent, v_agent)
self.graph.add_edge(u_agent, v_agent, **kwattr)
def _edges(self, agent: Agent, data: bool=False):
"""Gets an iterator over the incoming and outgoing edges for the specifed agent.
Args:
agent: agent whose edges will be returned
data: if true, the edge data dictionary will be returned, otherwise
not
Returns:
An iterator over the incoming and outgoing edges for the specified agent
"""
return self.graph.edges(agent, data=data)
def num_edges(self, agent: Agent) -> int:
"""Gets the number of edges that contain the specified agent.
Args:
agent: agent whose edges will be counted
Returns:
The number of edges that contain the specified agent
"""
return len(self.graph.edges(agent))
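# Hedged illustration of the ghost bookkeeping performed by add_edge above
# (the network instance and the two agents are hypothetical):
#
#     net.add_edge(local_agent, remote_agent, weight=2.0)
#     # remote_agent is appended to ghosts_to_ref if it is not yet a node,
#     # the edge is recorded in new_edges as a GhostedEdge so it can be
#     # synchronized with remote_agent's owning rank, and any pending
#     # removal of the same edge is cancelled via edges_to_remove.pop().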
class DirectedSharedNetwork(SharedNetwork):
"""Encapsulates a directed network shared over multiple process ranks.
This wraps a `networkx <https://networkx.org>`_ DiGraph object and delegates all the network
related operations to it. That DiGraph is exposed as the `graph`
attribute. See the `networkx <https://networkx.org>`_ DiGraph documentation for more information
on the network functionality that it provides. **The network structure
must NOT be changed using the networkx functions and methods
(adding and removing nodes, for example)**. Use this class'
methods for manipulating the network structure.
Attributes:
graph (networkx.DiGraph): a `networkx <https://networkx.org>`_ graph object wrapped by this class.
comm (MPI.Comm): the communicator over which the network is shared.
name (str): the name of this network.
Args:
name: the name of the SharedNetwork
comm: the communicator over which this DirectedSharedNetwork is distributed
"""
def __init__(self, name: str, comm: MPI.Comm):
super().__init__(name, comm, nx.DiGraph())
@property
def is_directed(self) -> bool:
"""Gets whether or not this network is directed.
Returns:
True
"""
return True
def _has_edge(self, agent: Agent) -> bool:
"""Gets whether or not the specified agent participates in
an edge in this network.
Args:
agent: the agent to check
Returns:
True if the agent is part of an edge, otherwise false
"""
return len(self.graph.in_edges(agent)) > 0 or len(self.graph.out_edges(agent)) > 0
def _get_edge_key(self, edge: Tuple):
"""Gets the canonical edge key used to manage dictionaries of edges.
Returns the passed in edge unchanged on a DirectedSharedNetwork.
Args:
edge: the edge whose key we are getting
Returns:
The canonical edge key
"""
return edge
def _remove_edge_key(self, u_agent: Agent, v_agent: Agent):
"""Removes the canonical edge key for the specified edge.
This is a no-op on a DirectedSharedNetwork.
Args:
u_agent: the u node agent
v_agent: the v node agent
"""
pass
def _add_edge_key(self, u_agent: Agent, v_agent: Agent):
"""Creates a canonical edge key for the pair of nodes.
This is a no-op on a DirectedSharedNetwork.
Args:
u_agent: the u node agent
v_agent: the v node agent
"""
pass
def add_edge(self, u_agent: Agent, v_agent: Agent, **kwattr):
"""Adds an edge beteen u_agent and v_agent.
If the u and v agents are not existing nodes in the network, they
will be added. Edge attributes can be added using keyword
arguments.
Args:
u_agent: The u agent
v_agent: The v agent
kwattr: optional keyword arguments for assigning edge data.
Examples:
Add an edge with a weight attribute
>>> g.add_edge(agent1, agent2, weight=3.1)
"""
if u_agent.local_rank == self.rank and v_agent.local_rank != self.rank:
if not self.graph.has_node(v_agent):
self.ghosts_to_ref.append(v_agent)
self.graph.add_edge(u_agent, v_agent, **kwattr)
edge_key = (u_agent, v_agent)
ge = GhostedEdge(u_agent, v_agent, self.graph.edges[edge_key])
self.new_edges[edge_key] = ge
self.edges_to_remove.pop(edge_key, None)
elif u_agent.local_rank != self.rank and v_agent.local_rank == self.rank:
# assume v_agent is local
if not self.graph.has_node(u_agent):
self.ghosts_to_ref.append(u_agent)
self.graph.add_edge(u_agent, v_agent, **kwattr)
edge_key = (u_agent, v_agent)
ge = GhostedEdge(v_agent, u_agent, self.graph.edges[edge_key])
self.new_edges[edge_key] = ge
self.edges_to_remove.pop(edge_key, None)
else:
self.graph.add_edge(u_agent, v_agent, **kwattr)
def _edges(self, agent: Agent, data: bool=False):
"""Gets an iterator over the incoming and outgoing edges for the specifed agent.
Args:
agent: agent whose edges will be returned
data: if true, the edge data dictionary will be returned, otherwise
not
Returns:
An iterator over the incoming and outgoing edges for the specified agent
"""
return chain(self.graph.out_edges(agent, data=data), self.graph.in_edges(agent, data=data))
def num_edges(self, agent: Agent) -> int:
"""Gets the number of edges that contain the specified agent.
Returns:
The number of edges that contain the specified agent
"""
return len(self.graph.out_edges(agent)) + len(self.graph.in_edges(agent))
def _parse_graph_description(line: str):
vals = line.split(' ')
if len(vals) != 2:
raise ValueError('Error reading graph description file. Invalid format on first line.')
try:
val = int(vals[1])
except Exception:
raise ValueError('Error reading graph description file. Invalid format on first line. Second value must be an integer')
return (vals[0], val != 0)
@dataclass
class GraphData:
rank: int
agents: Dict
remote_agents: Dict
requested_agents: List
edges: List
def _parse_graph_desc(line: str):
try:
vals = line.split(' ')
if len(vals) != 2:
raise ValueError
id = vals[0]
is_directed = int(vals[1]) != 0
return (id, is_directed)
except Exception:
raise ValueError('Error reading graph description file on line 1. Expected graph description with '
'format: "id, [0|1]", where 0 indicates an undirected graph and 1 directed')
_LINE_P = re.compile(r'\{[^}]+\}|\S+')
_EMPTY_DICT = {}
def _parse_node(line: str, line_num: int, graph_data: GraphData, ctx, create_agent: Callable):
try:
vals = _LINE_P.findall(line)
n_vals = len(vals)
if n_vals < 3 or n_vals > 4:
raise ValueError
n_id = int(vals[0])
agent_type = int(vals[1])
rank = int(vals[2])
if rank == graph_data.rank or graph_data.rank == -1:
attribs = _EMPTY_DICT
if n_vals == 4:
attribs = json.loads(vals[3])
agent = create_agent(n_id, agent_type, rank, **attribs)
ctx.add(agent)
graph_data.agents[n_id] = agent
else:
graph_data.remote_agents[n_id] = (n_id, agent_type, rank)
except Exception:
raise ValueError(f'Error reading graph description file on line {line_num}: "{line}". Expected node description with format: '
'"node_id agent_type rank" following by an optional json agent attribute dictionary')
def _request_remote_agents(graph_data: GraphData, ctx, create_agent: Callable):
requested_agents = ctx.request_agents(graph_data.requested_agents, create_agent)
for agent in requested_agents:
graph_data.agents[agent.uid[0]] = agent
def _parse_edge(line: str, line_num: int, graph, graph_data: GraphData):
requested = set()
try:
vals = _LINE_P.findall(line)
n_vals = len(vals)
if n_vals < 2 or n_vals > 3:
raise ValueError
u_id = int(vals[0])
v_id = int(vals[1])
u_in = u_id in graph_data.agents
v_in = v_id in graph_data.agents
if u_in and v_in:
u = graph_data.agents[u_id]
v = graph_data.agents[v_id]
if n_vals == 3:
graph.add_edge(u, v, **(json.loads(vals[2])))
else:
graph.add_edge(u, v)
elif u_in and not v_in:
v_uid = graph_data.remote_agents[v_id]
if v_id not in requested:
graph_data.requested_agents.append((v_uid, v_uid[2]))
requested.add(v_id)
u_uid = graph_data.agents[u_id].uid
if n_vals == 3:
graph_data.edges.append((u_uid, v_uid, vals[2]))
else:
graph_data.edges.append((u_uid, v_uid))
elif not u_in and v_in:
u_uid = graph_data.remote_agents[u_id]
if u_id not in requested:
graph_data.requested_agents.append((u_uid, u_uid[2]))
requested.add(u_id)
v_uid = graph_data.agents[v_id].uid
if n_vals == 3:
graph_data.edges.append((u_uid, v_uid, vals[2]))
else:
graph_data.edges.append((u_uid, v_uid))
except KeyError:
raise ValueError(f'Error reading graph description file on line {line_num}. Agent with {u_id} or {v_id} cannot be found')
except Exception:
raise ValueError(f'Error reading graph description file on line {line_num}. Expected edge description with format: '
'"u_id v_id" following by an optional json edge attribute dictionary')
def _create_edges(graph, graph_data: GraphData):
for edge in graph_data.edges:
u = graph_data.agents[edge[0][0]]
v = graph_data.agents[edge[1][0]]
if len(edge) == 3:
graph.add_edge(u, v, **(json.loads(edge[2])))
else:
graph.add_edge(u, v)
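# Hedged sketch of the per-line formats consumed by the parsers above
# (_parse_graph_desc, _parse_node and _parse_edge); the ids, types and
# attributes are made up, and how read_network separates node lines from
# edge lines is not fully shown in this excerpt:
#
#     friends 0                  <- line 1: graph id and 0 (undirected) / 1 (directed)
#     1 0 0 {"age": 30}          <- node line: node_id agent_type rank [json attrs]
#     2 0 1
#     1 2 {"weight": 0.5}        <- edge line: u_id v_id [json attrs]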
def read_network(fpath: str, ctx, create_agent: Callable, restore_agent: Callable):
"""Creates and initializes | |
'id': not_id
})
not_check = Notifications_Model.query.filter_by(
title='{} liked your post'.format(user.name)).filter_by(body=post.title).first()
if not_check is not None:
not_check.checked = False
else:
db.session.add(notify)
user.liked_posts = like
db.session.commit()
return make_response(response, 200)
@api.route('/follow-user/<int:id>')
def follow_user(id):
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
not_id = str(db.session.execute(Sequence('notifications_id_seq')))
follow = list(user.follow)
followed = UserModel.query.filter_by(id=id).first()
user_followed = list(followed.followed)
if follow is not None:
if id in follow:
follow.remove(id)
user_followed.remove(user.id)
response = jsonify({'operation': 'unfollowed'})
notify = Notifications_Model(
int(not_id),
user.id,
'{} unfollowed you'.format(user.name),
user.name,
'/user/' + str(user.name) + '?notification_id=' + not_id,
id,
False,
None,
'follow'
)
send_notification(id, {
'text': '@{} unfollowed you'.format(user.name),
'link': '/user/' + str(user.name),
'icon': user.avatar,
'id': not_id
})
else:
follow.append(id)
user_followed.append(user.id)
response = jsonify({'operation': 'followed'})
notify = Notifications_Model(
int(not_id),
user.id,
'{} started following you'.format(user.name),
user.name,
'/user/' + str(user.name),
id,
False,
None,
'follow'
)
send_notification(id, {
'text': '@{} started following you'.format(user.name),
'link': '/user/' + str(user.name),
'icon': user.avatar,
'id': not_id
})
else:
follow.append(id)
user_followed.append(user.id)
response = jsonify({'operation': 'followed'})
notify = Notifications_Model(
int(not_id),
user.id,
'{} started following you'.format(user.name),
user.name,
'/user/' + str(user.name),
id,
False,
None,
'follow'
)
send_notification(id, {
'text': '@{} started following you'.format(user.name),
'link': '/user/' + str(user.name),
'icon': user.avatar,
'id': not_id
})
db.session.add(notify)
user.follow = follow
followed.followed = user_followed
db.session.commit()
return make_response(response, 200)
@api.route('/save-post/<int:id>')
def save_post(id):
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
posts = list(user.saved_posts)
if posts is not None:
if id in posts:
posts.remove(id)
response = jsonify({'operation': 'deleted'})
else:
response = jsonify({'operation': 'saved'})
posts.append(id)
else:
response = jsonify({'operation': 'saved'})
posts.append(id)
user.saved_posts = posts
db.session.commit()
return make_response(response, 200)
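# Hedged usage sketch for the toggle endpoints above (the host and the 'api'
# blueprint prefix are assumptions; 't' is the JWT issued at login):
#
#     GET https://example.com/api/save-post/42?t=<jwt>
#         -> {"operation": "saved"}    on the first call
#         -> {"operation": "deleted"}  when called again for the same post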
@api.route('/newreply', methods=['POST'])
def newreply():
if request.method != 'POST':
return make_response(jsonify({'operation': 'error', 'error': 'Invalid method'}), 401)
data = request.json
if not data['token'] or not data['post_id'] or not data['content']:
return make_response(jsonify({'operation': 'error', 'error': 'Missing data'}), 401)
try:
decoded = jwt.decode(str(data['token']).encode(), key_c)
except:
return make_response(jsonify({'operation': 'error', 'error': 'Invalid token'}), 401)
new_reply = ReplyModel(None, data['content'], data['post_id'], decoded['id'], None)
not_id = str(db.session.execute(Sequence('notifications_id_seq')))
index = db.session.execute(Sequence('replyes_id_seq'))
post = PostModel.query.filter_by(id=data['post_id']).first()
notify = Notifications_Model(
int(not_id),
decoded['id'],
'{} replied to your post'.format(decoded['name']),
post.title,
'/post/' + (str(post.title).replace(' ', '-')).replace('?', '') + '-' + str(
post.id) + '?notification_id=' + str(not_id),
post.user_in.id,
False,
None,
'reply'
)
send_notification(post.user_in.id, {
'text': '@{} replied to your post'.format(decoded['name']),
'link': '/post/' + (str(post.title).replace(' ', '-')).replace('?', '') + '-' + str(post.id) + '#reply_' + str(
index),
'icon': decoded['avatar'],
'id': not_id
})
db.session.add(new_reply)
db.session.commit()
db.session.add(notify)
db.session.commit()
return make_response(jsonify({'operation': 'success', 'reply_id': index}), 200)
@api.route('/newpost', methods=['POST'])
def newpost():
if request.method != 'POST':
return make_response(jsonify({'operation': 'error', 'error': 'Invalid method'}), 401)
data = json.loads(request.form['data'].encode().decode('utf-8'))
if not data['token'] or not data['title'] or not data['content'] or not data['tags']:
return make_response(jsonify({'operation': 'error', 'error': 'Missing data'}), 401)
try:
decoded = jwt.decode(str(data['token']).encode(), key_c)
user_ = UserModel.query.filter_by(id=decoded['id']).first()
except:
return make_response(jsonify({'operation': 'error', 'error': 'Invalid token'}), 401)
index = db.session.execute(Sequence('posts_id_seq'))
not_id = str(db.session.execute(Sequence('notifications_id_seq')))
thumbnail_link = None
if data['image']:
thumbnail = save_img(index)
thumbnail_link = 'https://newapp.nl' + url_for('static', filename='thumbail_post/{}'.format(thumbnail))
lang = translate.getLanguageForText(str(cleanhtml(data['content'])).encode('utf-8-sig'))
new_post = PostModel(
index,
data['title'],
data['content'],
None,
None,
decoded['id'],
None,
True,
False,
None,
None,
str(lang.iso_tag).lower(),
thumbnail_link,
None,
str(readtime.of_html(data['content']))
)
tags = []
tag_p = str(data['tags']).lower()
tag = tag_p.replace(" ", "")
tags = tag.split(",")
for t in tags:
temp = TagModel.query.filter_by(name=str(t).lower()).first()
if temp is not None:
d = []
d = list(temp.post)
d.append(index)
temp.post = d
else:
tag = TagModel(
None,
str(t).lower(),
[index]
)
db.session.add(tag)
for user in user_.followed:
notification = Notifications_Model(
int(not_id),
decoded['id'],
'{} shared a new post'.format(decoded['name']),
str(data['title']),
'/post/' + (str(data['title']).replace(' ', '-')).replace('?', '') + '-' + str(index),
user,
None,
None,
'post'
)
send_notification(user, {
'text': '@{} shared a new post'.format(decoded['name']),
'link': '/post/' + (str(data['title']).replace(' ', '-')).replace('?', '') + '-' + str(index),
'icon': decoded['avatar'],
'id': not_id
})
db.session.add(notification)
db.session.add(new_post)
db.session.commit()
return make_response(jsonify({'operation': 'success',
'link': '/post/' + (str(data['title']).replace(' ', '-')).replace('?',
'') + '-' + str(
index)}), 200)
@api.route('/post/delete/<int:id>')
def delete_post(id):
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
post = PostModel.query.filter_by(id=id).first()
if user.id != post.user_in.id and user.roleinfo.delete_post_permission == False:
return make_response(jsonify({'operation': 'failed'}), 401)
if post.thumbnail:
try:
picture_fn = 'post_' + str(id) + '.webp'
os.remove(os.path.join(
config['UPLOAD_FOLDER_POST'], picture_fn))
except:
pass
PostModel.query.filter_by(id=id).delete()
ReplyModel.query.filter_by(post_id=id).delete()
tags = TagModel.query.filter(
TagModel.post.contains([id])).all()
for t in tags:
x = list(t.post)
x.remove(id)
t.post = x
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route('/post/close/<int:id>')
def close_post(id):
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
post = PostModel.query.filter_by(id=id).first()
if not user.roleinfo.close_post_permission:
return make_response(jsonify({'operation': 'failed'}), 401)
post.closed = True
post.closed_on = datetime.now()
post.closed_by = user.id
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route("/post/edit/<int:id>", methods=['GET', 'POST'])
def edit_post(id):
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
post = PostModel.query.filter_by(id=id).first()
if request.method == 'POST':
data = request.json
post.text = data['text']
post.title = data['title']
post_link = (str(post.title).replace(' ', '-')).replace('?', '') + '-' + str(post.id)
db.session.commit()
return make_response(jsonify({'operation': 'success', 'link': post_link}), 200)
post_json = {}
post_json['title'] = post.title
post_json['text'] = post.text
post_json['id'] = post.id
return make_response(jsonify(post_json), 200)
@api.route("/reply/delete")
def delete_reply():
token = request.args.get('t')
reply_id = request.args.get('id')
if not token or not reply_id:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
reply = ReplyModel.query.filter_by(id=reply_id).first()
if user.roleinfo.delete_reply_permission == False and user.id != reply.user:
return make_response(jsonify({'operation': 'no permission'}), 401)
ReplyModel.query.filter_by(id=reply_id).delete()
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route("/reply/edit", methods=['POST'])
def edit_reply():
if request.method != 'POST':
return make_response(jsonify({'operation': 'error', 'error': 'Invalid method'}), 401)
data = request.json
if not data['token'] or not data['r_id'] or not data['content']:
return make_response(jsonify({'operation': 'error', 'error': 'Missing data'}), 401)
try:
decoded = jwt.decode(str(data['token']).encode(), key_c)
except:
return make_response(jsonify({'operation': 'error', 'error': 'Invalid token'}), 401)
reply = ReplyModel.query.filter_by(id=data['r_id']).first()
reply.text = data['content']
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route('/notifications')
def notifications():
token = request.args.get('t')
extended = request.args.get('ex')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
if extended == 'true':
notifications = {'notify': {'new': [], 'posts': [], 'comments': [], 'likes': [], 'follows': []},
'count_new': user.get_not_count(user.id)}
temp = {}
for val, n in enumerate(user.n_receiver):
if val == 50:
break
temp['body'] = n.body
temp['checked'] = n.checked
temp['id'] = n.id
temp['title'] = n.title
temp['link'] = n.link
temp['category'] = n.category
temp['author'] = {
'avatar': n.author.avatar,
'name': n.author.name
}
temp['time_ago'] = n.time_ago()
if n.checked == False:
notifications['notify']['new'].append(temp.copy())
if n.category == 'post':
notifications['notify']['posts'].append(temp.copy())
elif n.category == 'reply':
notifications['notify']['comments'].append(temp.copy())
elif n.category == 'like':
notifications['notify']['likes'].append(temp.copy())
elif n.category == 'follow':
notifications['notify']['follows'].append(temp.copy())
notifications['notify']['new'].sort(key=getItemForKeyN, reverse=True)
notifications['notify']['posts'].sort(key=getItemForKeyN, reverse=True)
notifications['notify']['comments'].sort(key=getItemForKeyN, reverse=True)
notifications['notify']['likes'].sort(key=getItemForKeyN, reverse=True)
notifications['notify']['follows'].sort(key=getItemForKeyN, reverse=True)
else:
limit = user.get_not_count(user.id) if user.get_not_count(user.id) < 10 else 10
notifications = {'notify': [], 'count_new': user.get_not_count(user.id), 'count': limit}
temp = {}
for n in user.n_receiver:
if n.checked == False:
temp['body'] = n.body
temp['checked'] = n.checked
temp['id'] = n.id
temp['title'] = n.title
temp['link'] = n.link
temp['category'] = n.category
temp['author'] = {
'avatar': n.author.avatar,
'name': n.author.name
}
temp['time_ago'] = n.time_ago()
notifications['notify'].append(temp.copy())
notifications['notify'].sort(key=getItemForKeyN, reverse=True)
notifications['notify'] = notifications['notify'][:limit]
return make_response(jsonify(notifications), 200)
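# Hedged sketch of the JSON shape returned above when ex=true (values are
# illustrative only):
#
#     {
#       "count_new": 2,
#       "notify": {
#         "new": [...], "posts": [...], "comments": [...],
#         "likes": [...], "follows": [...]
#       }
#     }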
@api.route("/notifications/check")
def check_not():
token = request.args.get('t')
notification_id = request.args.get('not_id')
if not token or not notification_id:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
notification = Notifications_Model.query.filter_by(id=notification_id).first()
if notification is None:
return make_response(jsonify({'operation': 'failed'}), 401)
if notification.for_user != user.id:
return make_response(jsonify({'operation': 'failed'}), 401)
notification.checked = True
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route("/save-subscription", methods=['POST'])
def sub():
if request.method != 'POST':
return make_response(jsonify({'operation': 'failed'}), 401)
data = request.json
user_t = jwt.decode(data['user'], key_c)
sub = Subscriber(None, user_t['id'], None, None, str(data['sub_info']), True)
db.session.add(sub)
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route("/send-notification")
def notif():
check = Subscriber.query.filter_by(user=2).filter_by(is_active=True).all()
for c in check:
try:
sub = (str(c.subscription_info).encode().decode('utf-8')).replace("'", '"')
sub = sub.replace("None", "null")
send_web_push(json.loads(sub), "hello")
except:
pass
db.session.commit()
return make_response(jsonify({'operation': 'success'}), 200)
@api.route("/admin/dashboard")
def dashboard():
token = request.args.get('t')
if not token:
return make_response(jsonify({'operation': 'failed'}), 401)
try:
user_t = jwt.decode(token, key_c)
except:
return make_response(jsonify({'operation': 'failed'}), 401)
user = UserModel.query.filter_by(id=user_t['id']).first()
if not user.roleinfo.admin_panel_permission:
return make_response(jsonify({'operation': 'no permission'}), 401)
sessions = db.session.query(Analyze_Session).order_by(Analyze_Session.id).all()
now = dt.datetime.now()
sess = {}
sess_old = {}
label_days = []
referer = CustomDict()
country = CustomDict()
countries = CustomDict()
replies = | |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
__all__ = (
"ExportHelper",
"ImportHelper",
"orientation_helper",
"axis_conversion",
"axis_conversion_ensure",
"create_derived_objects",
"free_derived_objects",
"unpack_list",
"unpack_face_list",
"path_reference",
"path_reference_copy",
"path_reference_mode",
"unique_name"
)
import bpy
from bpy.props import (
BoolProperty,
EnumProperty,
StringProperty,
)
def _check_axis_conversion(op):
if hasattr(op, "axis_forward") and hasattr(op, "axis_up"):
return axis_conversion_ensure(op,
"axis_forward",
"axis_up",
)
return False
class ExportHelper:
filepath: StringProperty(
name="File Path",
description="Filepath used for exporting the file",
maxlen=1024,
subtype='FILE_PATH',
)
check_existing: BoolProperty(
name="Check Existing",
description="Check and warn on overwriting existing files",
default=True,
options={'HIDDEN'},
)
# subclasses can override with decorator
# True == use ext, False == no ext, None == do nothing.
check_extension = True
def invoke(self, context, _event):
import os
if not self.filepath:
blend_filepath = context.blend_data.filepath
if not blend_filepath:
blend_filepath = "untitled"
else:
blend_filepath = os.path.splitext(blend_filepath)[0]
self.filepath = blend_filepath + self.filename_ext
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def check(self, _context):
import os
change_ext = False
change_axis = _check_axis_conversion(self)
check_extension = self.check_extension
if check_extension is not None:
filepath = self.filepath
if os.path.basename(filepath):
filepath = bpy.path.ensure_ext(filepath,
self.filename_ext
if check_extension
else "")
if filepath != self.filepath:
self.filepath = filepath
change_ext = True
return (change_ext or change_axis)
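# A minimal sketch of how ExportHelper is typically mixed into an export
# operator (the operator name, bl_idname and file extension are assumptions):
#
#     class ExportSomeData(bpy.types.Operator, ExportHelper):
#         """Export the scene to a hypothetical .some format"""
#         bl_idname = "export_scene.some_data"
#         bl_label = "Export Some Data"
#         filename_ext = ".some"
#
#         def execute(self, context):
#             # self.filepath is filled in by the file selector
#             return {'FINISHED'}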
class ImportHelper:
filepath: StringProperty(
name="File Path",
description="Filepath used for importing the file",
maxlen=1024,
subtype='FILE_PATH',
)
def invoke(self, context, _event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def check(self, _context):
return _check_axis_conversion(self)
def orientation_helper(axis_forward='Y', axis_up='Z'):
"""
A decorator for import/export classes, generating properties needed by the axis conversion system and IO helpers,
with specified default values (axes).
"""
def wrapper(cls):
# Without that, we may end up adding those fields to some **parent** class' __annotations__ property
# (like the ImportHelper or ExportHelper ones)! See T58772.
if "__annotations__" not in cls.__dict__:
setattr(cls, "__annotations__", {})
def _update_axis_forward(self, _context):
if self.axis_forward[-1] == self.axis_up[-1]:
self.axis_up = (self.axis_up[0:-1] +
'XYZ'[('XYZ'.index(self.axis_up[-1]) + 1) % 3])
cls.__annotations__['axis_forward'] = EnumProperty(
name="Forward",
items=(
('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default=axis_forward,
update=_update_axis_forward,
)
def _update_axis_up(self, _context):
if self.axis_up[-1] == self.axis_forward[-1]:
self.axis_forward = (self.axis_forward[0:-1] +
'XYZ'[('XYZ'.index(self.axis_forward[-1]) + 1) % 3])
cls.__annotations__['axis_up'] = EnumProperty(
name="Up",
items=(
('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default=axis_up,
update=_update_axis_up,
)
return cls
return wrapper
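# Hedged usage sketch for the decorator above (the default axes shown are
# only an example):
#
#     @orientation_helper(axis_forward='-Z', axis_up='Y')
#     class ExportSomeData(bpy.types.Operator, ExportHelper):
#         ...
#
# The decorator injects 'axis_forward' and 'axis_up' EnumProperty annotations
# into the class, which can then be fed to axis_conversion().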
# Axis conversion functions. Not pretty, but a lookup table (LUT)
# lets us convert between any pair of axes.
_axis_convert_matrix = (
((-1.0, 0.0, 0.0), (0.0, -1.0, 0.0), (0.0, 0.0, 1.0)),
((-1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (0.0, -1.0, 0.0)),
((-1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, 1.0, 0.0)),
((-1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, -1.0)),
((0.0, -1.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 0.0, -1.0)),
((0.0, 0.0, 1.0), (-1.0, 0.0, 0.0), (0.0, -1.0, 0.0)),
((0.0, 0.0, -1.0), (-1.0, 0.0, 0.0), (0.0, 1.0, 0.0)),
((0.0, 1.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 0.0, 1.0)),
((0.0, -1.0, 0.0), (0.0, 0.0, 1.0), (-1.0, 0.0, 0.0)),
((0.0, 0.0, -1.0), (0.0, -1.0, 0.0), (-1.0, 0.0, 0.0)),
((0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (-1.0, 0.0, 0.0)),
((0.0, 1.0, 0.0), (0.0, 0.0, -1.0), (-1.0, 0.0, 0.0)),
((0.0, -1.0, 0.0), (0.0, 0.0, -1.0), (1.0, 0.0, 0.0)),
((0.0, 0.0, 1.0), (0.0, -1.0, 0.0), (1.0, 0.0, 0.0)),
((0.0, 0.0, -1.0), (0.0, 1.0, 0.0), (1.0, 0.0, 0.0)),
((0.0, 1.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0)),
((0.0, -1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)),
((0.0, 0.0, -1.0), (1.0, 0.0, 0.0), (0.0, -1.0, 0.0)),
((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)),
((0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, -1.0)),
((1.0, 0.0, 0.0), (0.0, -1.0, 0.0), (0.0, 0.0, -1.0)),
((1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, -1.0, 0.0)),
((1.0, 0.0, 0.0), (0.0, 0.0, -1.0), (0.0, 1.0, 0.0)),
)
# store args as a single int
# (X Y Z -X -Y -Z) --> (0, 1, 2, 3, 4, 5)
# each value is ((src_forward, src_up), (dst_forward, dst_up))
# where all 4 values are or'd into a single value...
# (i1<<0 | i1<<3 | i1<<6 | i1<<9)
_axis_convert_lut = (
{0x8C8, 0x4D0, 0x2E0, 0xAE8, 0x701, 0x511, 0x119, 0xB29, 0x682, 0x88A,
0x09A, 0x2A2, 0x80B, 0x413, 0x223, 0xA2B, 0x644, 0x454, 0x05C, 0xA6C,
0x745, 0x94D, 0x15D, 0x365},
{0xAC8, 0x8D0, 0x4E0, 0x2E8, 0x741, 0x951, 0x159, 0x369, 0x702, 0xB0A,
0x11A, 0x522, 0xA0B, 0x813, 0x423, 0x22B, 0x684, 0x894, 0x09C, 0x2AC,
0x645, 0xA4D, 0x05D, 0x465},
{0x4C8, 0x2D0, 0xAE0, 0x8E8, 0x681, 0x291, 0x099, 0x8A9, 0x642, 0x44A,
0x05A, 0xA62, 0x40B, 0x213, 0xA23, 0x82B, 0x744, 0x354, 0x15C, 0x96C,
0x705, 0x50D, 0x11D, 0xB25},
{0x2C8, 0xAD0, 0x8E0, 0x4E8, 0x641, 0xA51, 0x059, 0x469, 0x742, 0x34A,
0x15A, 0x962, 0x20B, 0xA13, 0x823, 0x42B, 0x704, 0xB14, 0x11C, 0x52C,
0x685, 0x28D, 0x09D, 0x8A5},
{0x708, 0xB10, 0x120, 0x528, 0x8C1, 0xAD1, 0x2D9, 0x4E9, 0x942, 0x74A,
0x35A, 0x162, 0x64B, 0xA53, 0x063, 0x46B, 0x804, 0xA14, 0x21C, 0x42C,
0x885, 0x68D, 0x29D, 0x0A5},
{0xB08, 0x110, 0x520, 0x728, 0x941, 0x151, 0x359, 0x769, 0x802, 0xA0A,
0x21A, 0x422, 0xA4B, 0x053, 0x463, 0x66B, 0x884, 0x094, 0x29C, 0x6AC,
0x8C5, 0xACD, 0x2DD, 0x4E5},
{0x508, 0x710, 0xB20, 0x128, 0x881, 0x691, 0x299, 0x0A9, 0x8C2, 0x4CA,
0x2DA, 0xAE2, 0x44B, 0x653, 0xA63, 0x06B, 0x944, 0x754, 0x35C, 0x16C,
0x805, 0x40D, 0x21D, 0xA25},
{0x108, 0x510, 0x720, 0xB28, 0x801, 0x411, 0x219, 0xA29, 0x882, 0x08A,
0x29A, 0x6A2, 0x04B, 0x453, 0x663, 0xA6B, 0x8C4, 0x4D4, 0x2DC, 0xAEC,
0x945, 0x14D, 0x35D, 0x765},
{0x748, 0x350, 0x160, 0x968, 0xAC1, 0x2D1, 0x4D9, 0x8E9, 0xA42, 0x64A,
0x45A, 0x062, 0x68B, 0x293, 0x0A3, 0x8AB, 0xA04, 0x214, 0x41C, 0x82C,
0xB05, 0x70D, 0x51D, 0x125},
{0x948, 0x750, 0x360, 0x168, 0xB01, 0x711, 0x519, 0x129, 0xAC2, 0x8CA,
0x4DA, 0x2E2, 0x88B, 0x693, 0x2A3, 0x0AB, 0xA44, 0x654, 0x45C, 0x06C,
0xA05, 0x80D, 0x41D, 0x225},
{0x348, 0x150, 0x960, 0x768, 0xA41, 0x051, 0x459, 0x669, 0xA02, 0x20A,
0x41A, 0x822, 0x28B, 0x093, 0x8A3, 0x6AB, 0xB04, 0x114, 0x51C, 0x72C,
0xAC5, 0x2CD, 0x4DD, 0x8E5},
{0x148, 0x950, 0x760, 0x368, 0xA01, 0x811, 0x419, 0x229, 0xB02, 0x10A,
0x51A, 0x722, 0x08B, 0x893, 0x6A3, 0x2AB, 0xAC4, 0x8D4, 0x4DC, 0x2EC,
0xA45, 0x04D, 0x45D, 0x665},
{0x688, 0x890, 0x0A0, 0x2A8, 0x4C1, 0x8D1, 0xAD9, 0x2E9, 0x502, 0x70A,
0xB1A, 0x122, 0x74B, 0x953, 0x163, 0x36B, 0x404, 0x814, 0xA1C, 0x22C,
0x445, 0x64D, 0xA5D, 0x065},
{0x888, 0x090, 0x2A0, 0x6A8, 0x501, 0x111, 0xB19, 0x729, 0x402, 0x80A,
0xA1A, 0x222, 0x94B, 0x153, 0x363, 0x76B, 0x444, 0x054, 0xA5C, 0x66C,
0x4C5, 0x8CD, 0xADD, 0x2E5},
{0x288, 0x690, 0x8A0, 0x0A8, 0x441, 0x651, 0xA59, 0x069, 0x4C2, 0x2CA,
0xADA, 0x8E2, 0x34B, 0x753, 0x963, 0x16B, 0x504, 0x714, 0xB1C, 0x12C,
0x405, 0x20D, 0xA1D, 0x825},
{0x088, 0x290, 0x6A0, 0x8A8, 0x401, 0x211, 0xA19, 0x829, 0x442, 0x04A,
0xA5A, 0x662, 0x14B, 0x353, 0x763, 0x96B, 0x4C4, 0x2D4, 0xADC, 0x8EC,
0x505, 0x10D, 0xB1D, 0x725},
{0x648, 0x450, 0x060, 0xA68, 0x2C1, 0x4D1, 0x8D9, 0xAE9, 0x282, 0x68A,
0x89A, 0x0A2, 0x70B, 0x513, 0x123, 0xB2B, 0x204, 0x414, 0x81C, 0xA2C,
0x345, 0x74D, 0x95D, 0x165},
{0xA48, 0x650, 0x460, 0x068, 0x341, 0x751, 0x959, 0x169, 0x2C2, 0xACA,
0x8DA, 0x4E2, 0xB0B, 0x713, 0x523, 0x12B, 0x284, 0x694, 0x89C, 0x0AC,
0x205, 0xA0D, 0x81D, 0x425},
{0x448, 0x050, 0xA60, 0x668, 0x281, 0x091, 0x899, 0x6A9, 0x202, 0x40A,
0x81A, 0xA22, 0x50B, 0x113, 0xB23, 0x72B, 0x344, 0x154, 0x95C, 0x76C,
0x2C5, 0x4CD, 0x8DD, 0xAE5},
{0x048, 0xA50, 0x660, 0x468, 0x201, 0xA11, 0x819, 0x429, 0x342, 0x14A,
0x95A, 0x762, 0x10B, 0xB13, 0x723, 0x52B, 0x2C4, 0xAD4, 0x8DC, 0x4EC,
0x285, 0x08D, 0x89D, 0x6A5},
{0x808, 0xA10, 0x220, 0x428, 0x101, 0xB11, 0x719, 0x529, 0x142, 0x94A,
0x75A, 0x362, 0x8CB, 0xAD3, 0x2E3, 0x4EB, 0x044, 0xA54, 0x65C, 0x46C,
0x085, 0x88D, 0x69D, 0x2A5},
{0xA08, 0x210, 0x420, 0x828, 0x141, 0x351, 0x759, 0x969, 0x042, 0xA4A,
0x65A, 0x462, 0xACB, 0x2D3, 0x4E3, 0x8EB, 0x084, 0x294, 0x69C, 0x8AC,
0x105, 0xB0D, 0x71D, 0x525},
{0x408, 0x810, 0xA20, 0x228, 0x081, 0x891, | |
from your character's IC point of view. Descriptions
should be written in first person. Old relationships are never
erased - when they are changed, the old relationship notes are
stored with a timestamp to show how the relationship changed over
time. Dates of relationship changes will be noted in a character's
timeline to identify significant events for character development.
Every relationship that you add should also have a short
relationship added to it via @relationship/short, with 'secret'
being the type for secret relationships. Those are not publicly
viewable by other players.
To list the relationships of other players, use the /list switch.
To list your own, simply use @relationship with no arguments.
For @relationship/short, this builds the {w@sheet/social{n tree
on a character's sheet, such as friends, family, acquaintances,
and enemies. For example:
@relationship/short friend=percy,war buddy
To create a new relationship or update an existing one, use
@relationship/change.
"""
key = "relationship"
aliases = ["relationships"]
help_category = "Social"
locks = "cmd:all()"
typelist = ['parent', 'sibling', 'friend', 'enemy', 'frenemy', 'family', 'client', 'patron', 'protege',
'acquaintance', 'secret', 'rival', 'ally', 'spouse', 'The Crown', 'Crownlands', 'Oathlands',
'Lyceum', 'Mourning Isles', 'Northlands', 'deceased']
# noinspection PyUnusedLocal
def get_help(self, caller, cmdset):
"""Returns docstr plus the types of shortrels"""
msg = self.__doc__ + "\n\nShort relationship types: %s" % ", ".join(self.typelist)
return msg
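# Hedged usage sketch of the syntaxes handled in func() below (names and
# descriptions are made up):
#
#     @relationship percy                         - view relationship notes
#     @relationship/change percy=<description>    - add a white journal note
#     @relationship/short friend=percy,war buddy  - add to the @sheet/social tree
#     @relationship/changeshort friend,enemy=percy,<new description>
#     @relationship/delshort percy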
def func(self):
"""Executes relationship command"""
caller = self.caller
args = self.args
switches = self.switches
charob = caller.char_ob
# builders can see /list info
show_hidden = caller.check_permstring("builders")
# check if it's a guest modifying their character
if not charob:
charob = caller.ndb.char
if not charob:
caller.msg("No character found.")
return
white = True
if "newprivate" in self.switches:
self.switches.append("changeprivate")
if "changeprivate" in self.switches:
white = False
if "new" in self.switches:
self.switches.append("change")
jname = "White Journal" if white else "Black Journal"
if not args or 'list' in self.switches:
if 'list' in self.switches:
old = charob
charob = caller.search(self.lhs)
if not charob:
return
charob = charob.char_ob
if not charob:
caller.msg("No character.")
return
# check to see if this is caller's own character, if so, we show hidden stuff
if old == charob:
show_hidden = True
if show_hidden:
rels = dict(list(charob.messages.white_relationships.items()
) + list(charob.messages.black_relationships.items()))
else:
rels = dict(charob.messages.white_relationships.items())
# display list of relationships
if not rels:
caller.msg("No relationships found.")
else:
caller.msg("{w%s has relationships with the following characters:{n" % charob.key)
caller.msg("{w--------------------------------------------{n")
disp = ", ".join(key for key in sorted(rels.keys()))
caller.msg(disp)
caller.msg("To see the individual relationships, use {w@relationship %s=<name>{n" % charob.key)
caller.msg("\nSocial information for %s:" % charob.key)
caller.execute_cmd("@sheet/social %s" % charob.key)
return
if not switches:
if not self.lhs and self.rhs:
char = charob
name = self.rhs.lower()
elif self.lhs and not self.rhs:
char = charob
name = self.lhs.lower()
else:
char = caller.search(self.lhs)
if not char:
return
char = char.char_ob
if not char:
caller.msg("No character.")
return
name = self.rhs.lower()
white = char.messages.white_relationships
black = char.messages.black_relationships
rels = {k: white.get(k, []) + black.get(k, []) for k in set(list(white.keys()) + list(black.keys()))}
if not rels:
caller.msg("No relationships found.")
return
entries = rels.get(name, [])
entries = [msg for msg in entries if msg.access(caller, 'read') or 'white_journal' in msg.tags.all()]
if not entries:
caller.msg("No relationship found.")
return
if self.rhs:
caller.msg("{wRelationship of %s to %s:{n" % (self.lhs.capitalize(), self.rhs.capitalize()))
else:
caller.msg("{wRelationship with %s:{n" % args.capitalize())
sep = "{w-------------------------------------------------------------------{n"
caller.msg(sep)
for msg in entries:
jname = "{wJournal:{n %s\n" % ("White Journal" if msg in white.get(self.rhs.lower() if self.rhs
else self.args.lower(), [])
else "Black Reflection")
caller.msg("\n" + jname + charob.messages.disp_entry(msg), options={'box': True})
msg.receivers = caller
return
lhs = self.lhs
rhs = self.rhs
if (not lhs or not rhs) and 'delshort' not in switches:
caller.msg("Usage: @relationship/switches <name>=<description>")
return
# lhs will be used for keys, so need to make sure always lower case
lhs = lhs.lower()
desc = rhs
if 'change' in switches or 'changeprivate' in switches:
targ = caller.search(lhs)
if not targ:
return
targ = targ.char_ob
if not targ:
caller.msg("No character found.")
return
msg = charob.messages.add_relationship(desc, targ, white)
caller.msg("Entry added to %s:\n%s" % (jname, msg))
caller.msg("Relationship note added. If the 'type' of relationship has changed, "
"such as a friend becoming an enemy, please adjust it with /changeshort.")
if white:
charob.msg_watchlist("A character you are watching, {c%s{n, has updated their white journal." % caller)
return
if 'short' in switches:
rhslist = self.rhslist
rel_types = [ob.lower() for ob in self.typelist]
if lhs not in rel_types:
caller.msg("The type of relationship must be in: %s." % ", ".join(self.typelist))
return
if len(rhslist) < 2:
caller.msg("Usage: @relationship/short <type>=<name>,<desc>")
return
name = rhslist[0].title()
desc = ", ".join(rhslist[1:])
name = name.rstrip()
desc = desc.lstrip()
# if there's no relationship tree yet, initialize it as an empty dict
if not charob.db.relationship_short:
charob.db.relationship_short = {}
# if that type of relationship doesn't exist, add it
if not charob.db.relationship_short.get(lhs):
charob.db.relationship_short[lhs] = [(name, desc)]
caller.msg("Short relationship added to tree.")
return
# it exists, so add our name/desc tuple to the list
charob.db.relationship_short[lhs].append((name, desc))
caller.msg("Short relationship added to tree.")
return
if 'changeshort' in switches:
lhslist = lhs.split(",")
if not lhslist or len(lhslist) != 2:
caller.msg("Must have both old type and new type of relationship specified before '='.")
return
rhslist = self.rhslist
if len(rhslist) < 2:
caller.msg("Must have both name and description specified after '='.")
return
rels = charob.db.relationship_short
if not rels:
caller.msg("No relationships in tree to change - use /short to add instead.")
return
oldtype, newtype = lhslist[0].lower(), lhslist[1]
if newtype not in self.typelist:
caller.msg("Relationship must be one of the following: %s" % ", ".join(self.typelist))
return
name = rhslist[0].lower()
desc = ", ".join(rhslist[1:])
typelist = rels.get(oldtype)
if not typelist:
caller.msg("No relationships match the old type given.")
return
# now we go through the tuples in the list of that relationship type.
# if one matches the name, we'll remove it before we add the new one.
# Names are unique, so we stop with first instance we encounter
for tups in typelist:
# each tups == (name, desc)
if tups[0].lower() == name:
# we got a match
typelist.remove(tups)
break
if newtype not in rels:
rels[newtype] = []
name = name.title()
name = name.rstrip()
desc = desc.lstrip()
rels[newtype].append((name, desc))
caller.msg("Relationship tree changed.")
return
if 'delshort' in switches:
args = self.args.lower()
rels = charob.db.relationship_short
if not rels:
caller.msg("No relationships to delete.")
return
# Go through every list, remove first match
for sh_list in rels.values():
for tup in sh_list:
if tup[0].lower() == args:
sh_list.remove(tup)
caller.msg("Entry for %s deleted." % args.capitalize())
return
caller.msg("No match found to delete.")
return
caller.msg("Usage: @relationship/switches <arguments>")
return
# currently removed until we find a better way to do this
class CmdComment(ArxPlayerCommand):
"""
@comment - Leave a public comment on another character's sheet.
Usage:
@comment
@comment <name>
@comment <name>=<comment>
Using @comment without a right-hand-side argument will look up
comments upon yourself or the given character.
The @comment command represents an entry into a character's White
Journal where they give their thoughts on another character. Like
all white journal entries, they may be read by absolutely anyone.
Therefore, all comments should be treated as IC and completely
public knowledge.
Remember, since all comments are treated as public knowledge,
in-character retribution is very appropriate and may range from
a mean-spirited retaliatory statement to a team of highly-paid
assassins with surprisingly detailed instructions on how long it
should take their target to die.
As always, comments which are inappropriate (information that a
character does not know, for example), may be removed or changed
by GMs, and may incur penalties.
"""
key = "@comment"
aliases = ["+comment"]
help_category = "Social"
locks = "cmd:all()"
def func(self):
"""Executes comment command"""
caller = self.caller
lhs = self.lhs
comment_txt = self.rhs
caller_char = caller.char_ob
if not caller_char:
caller.msg("Can only leave IC @comments when you have a character.")
return
if not comment_txt:
if not lhs:
char = caller_char
else:
char = caller.search(lhs)
try:
char = char.char_ob
except AttributeError:
caller.msg("No character found by that name.")
return
if char == caller_char:
caller.attributes.remove("new_comments")
caller.msg("{wFive most recent comments on {c%s{n:" % char)
if not char.messages.comments:
caller.msg("No | |
* phe[k][i] / 2.
xqi1[k] = xqi0[k] * rpower[k][la]
if rpower[k][lap] != 0.:
xqi2[k] = xqi0[k] / rpower[k][lap]
else:
xqi2[k] = 0.
xouti += xqi2[k]
xqj0[k] = dr[k] * phe[k][j] * phe[k][j] / 2.
xqj1[k] = xqj0[k] * rpower[k][la]
if rpower[k][lap] != 0.:
xqj2[k] = xqj0[k] / rpower[k][lap]
else:
xqj2[k] = 0.
xoutj += xqj2[k]
xinti = xqi1[1]
xintj = xqj1[1]
xouti = 2. * xouti - xqi2[1]
xoutj = 2. * xoutj - xqj2[1]
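# Trapezoidal accumulation on the radial grid: xint* builds the inward
# integral from r(1) to r(k) by adding the half-weights of points k-1 and k,
# while xout* starts from the full integral and removes those same
# half-weights, leaving the outward integral from r(k) to r(nr).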
for k in range(2, nr + 1):
xinti += xqi1[k] + xqi1[k - 1]
xouti -= xqi2[k] + xqi2[k - 1]
vali = xouti * rpower[k][la]
if rpower[k][lap] != 0.:
vali += xinti / rpower[k][lap]
orb[k][j] += ri * vali
xintj += xqj1[k] + xqj1[k - 1]
xoutj -= xqj2[k] + xqj2[k - 1]
valj = xoutj * rpower[k][la]
if rpower[k][lap] != 0.:
valj += xintj / rpower[k][lap]
orb[k][i] += rj * valj
etot = etot + rc * (xqi0[k] * valj + xqj0[k] * vali)
if (iss[i] != iss[j] and occ[i] <= 1. and
occ[j] <= 1. and xnj[i] >= 0. and xnj[j] >= 0.):
continue # goto 2990
if abs(alfa) >= 0.001:
continue # goto 2990
# exchange interaction
lmx = li + lj
lmin = abs(mi - mj)
if (occ[i] > 1. or occ[j] > 1. or
xnj[i] < 0. or xnj[j] < 0.):
lmin = 0
for la in range(lmx, lmin - 1, - 2):
lap = la + 1
coeff = (float((li + li + 1) * (lj + lj + 1)
) / float(pow(la + la + 1, 2.) *
pow(cg[li][lj][la][-mi][mj]
* cg[li][lj][la][0][0], 2.)))
if occ[i] > 1. or occ[j] > 1. or xnj[i] < 0. or xnj[j] < 0.:
coeff = pin[li][lj][la] / 4.
if i == j:
coeff /= 2.
coeffi = occ[i] * coeff
coeffj = occ[j] * coeff
ri = ratio * coeffi
rj = ratio * coeffj
rc = coeff * occ[i] * occ[j]
xnum2 = xnum * xnum
xout = 0.
for k in range(1, nr + 1):
xq0[k] = dr[k] * phe[k][i] * phe[k][j] / 2.
xq1[k] = xq0[k] * rpower[k][la]
if rpower[k][lap] != 0.:
xq2[k] = xq0[k] / rpower[k][lap]
else:
xq2[k] = 0.
xout += xq2[k]
xint = xq1[1]
xout = 2. * xout - xq2[1]
for k in range(2, nr + 1):
xint += xq1[k] + xq1[k - 1]
xout -= xq2[k] + xq2[k - 1]
if xq0[k] != 0.:
val = xout * rpower[k][la]
if rpower[k][lap] != 0.:
val += xint / rpower[k][lap]
etot -= 2. * xq0[k] * rc * val
xx = phe[k][j] / phe[k][i]
if abs(xx) / xnum > 1.:
orb[k][i] -= rj * xnum2 / xx * val
else:
orb[k][i] -= rj * xx * val
xx = phe[k][i] / phe[k][j]
if (abs(xx) / xnum > 1.):
orb[k][j] -= ri * xnum2 / xx * val
else:
orb[k][j] -= ri * xx * val
# here we compute the charge density, if needed, for treating
# exchange/correlation in a local fashion...
if abs(alfa) >= 0.001:
if (alfa > 0.):
fx = 1.0
fc = 1.0
else:
fx = 1.5 * abs(alfa)
fc = 0.0
# note: we don't deal with spin-polarization in local exchange
# picture, since local exchange is totally wrong for such
# effects, anyway. local exchange pretends charge density
# is paramagnetic. also, local exchange treats everything
# as spherical.
ex = ec = ux1 = ux2 = uc1 = uc2 = 0. # initialise
for i in range(1, nr + 1):
xn = 0.
for j in range(1, nel + 1):
xn += phe[i][j] * phe[i][j] * occ[j]
xn1 = xn / 2.
xn2 = xn / 2.
nst = 2
(nst, rel, r2[i], xn1, xn2, ex, ec, ux1, ux2, uc1, uc2) = exchcorr(
nst, rel, r2[i], xn1, xn2, ex, ec, ux1, ux2, uc1, uc2)
exc = fx * ex + fc * ec
uxc = fx * ux1 + fc * uc1
etot = etot + dr[i] * xn * exc
for j in range(1, nel + 1):
orb[i][j] += uxc * ratio
for i in range(1, nr + 1):
if iuflag:
jj = 1
ii = jj # 8960
icond = True
while icond: # 8965
if ii != nel:
# goto 8970
icond = False
if (no[jj] == no[ii + 1] and nl[jj] == nl[ii + 1]
and iuflag == 2):
icond = True
if (no[jj] == no[ii + 1] and nl[jj] == nl[ii + 1]
and iss[jj] == iss[ii + 1] and iuflag == 1):
icond = True
if icond:
ii += 1
orba = 0. # 8970
div = 0.
for k in range(jj, ii + 1):
div += occ[k]
orba += orb[i][k] * occ[k]
if div != 0.:
orba /= div
for k in range(jj, ii + 1):
orb[i][k] = orba
if ii != nel:
jj = ii + 1
continue # goto 8960
return (etot, nst, rel, alfa, dl, nr, dr, r, r2,
xntot, phe, ratio, orb, occ, iss,
nel, nl, nm, no, xnj, rpower, xnum, etot2, iuflag)
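# elsolve() below finds the orbital eigenvalue by bisection on the energy:
# integ() integrates the radial equation outward and reports the node count
# nn and a flag ief; the bracket [el, eh] is narrowed until it is smaller
# than etol, after which the wavefunction is normalized.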
def elsolve(i, occ, n, l, xkappa, xj, zorig, zeff, e, phi, v,
q0, xm1, xm2, nr, r, dr, r2, dl, rel):
'''elsolve subroutine'''
el = - zorig * zorig / float(n * n)
eh = 0.
etol = 0.0000000001
ief = 0  # placeholders; overwritten by the values returned from integ()
x0 = 0.
nn = 0
while True:
e = (el + eh) / 2. # label 155
istop = 0
(e, l, xkappa, n, nn, istop, ief, x0, phi, zeff, v, q0, xm1,
xm2, nr, r, dr, r2, dl, rel) = integ(e, l, xkappa, n, nn, istop,
ief, x0, phi, zeff, v, q0, xm1, xm2, nr, r, dr, r2, dl, rel)
if nn < n - l - 1:
ief = -1
if ief != 1: # label 200
el = e
if el > -0.001:
print('Mixing too strong for level : %i' % i)
return
if ief != -1:
eh = e
if eh - el > etol:
continue # goto 155
if abs(abs(xj) - abs(float(l))) > 0.25:
augment(e, l, xj, phi, v, nr, r, dl) # adjust phi array only
aa = 0.
for j in range(1, nr + 1):
aa += phi[j] * phi[j] * dr[j]
xnorm = sqrt(aa)
for j in range(1, nr + 1):
phi[j] /= xnorm
break
return (i, occ, n, l, xkappa, xj, zorig, zeff, e, phi, v,
q0, xm1, xm2, nr, r, dr, r2, dl, rel)
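# augment() below restores the magnitude of the full relativistic
# wavefunction from the large component phi: it estimates the small
# component f ~ c * (dphi/dr + kappa*phi/r) / (E - V + 2c^2) using a
# finite-difference derivative on the logarithmic grid and replaces phi
# by sqrt(phi^2 + f^2), keeping the sign of phi.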
def augment(e, l, xj, phi, v, nr, r, dl):
'''augment subroutine'''
phi2 = [None] * len(phi)
c = 137.038
cc = c * c
c2 = cc + cc
xkappa = -1
if abs(xj) > l + 0.25:
xkappa = -l - 1
if abs(xj) < l - 0.25:
xkappa = l
for j in range(4, nr - 3 + 1):
if phi[j] != 0.:
g0 = phi[j]
ga = phi[j + 1] - phi[j - 1]
gb = (phi[j + 2] - phi[j - 2]) / 2.
gc = (phi[j + 3] - phi[j - 3]) / 3.
gg = ((1.5 * ga - 0.6 * gb + 0.1 * gc)
/ (2. * dl) + xkappa * g0) / r[j]
f0 = c * gg / (e - v[j] + c2)
phi2[j] = sqrt(g0 * g0 + f0 * f0)
if g0 < 0.:
phi2[j] = -phi2[j]
else:
phi2[j] = phi[j]
for j in range(1, 3 + 1):
phi2[j] *= phi[4] / phi2[4]
phi[:] = phi2  # copy back in place so the caller sees the augmented wavefunction
return
def setqmm(i, orb, l, ns, idoflag, v, | |
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
markov_chains.py
This will house the machinery used to make random walks using Markov Chains.
For this purpose, it will export a MarkovChain Class which can be used
to create random walks.
"""
import datetime
from collections import OrderedDict
import numpy as np
from .markov_chains import states
from .. import sources
class MarkovError(Exception):
pass
class MarkovChain:
"""
This class represents a Markov Chain from which random walks can be
generated. The user passes in a list of states, a transition mapping
and a start state.
This class implements the __iter__ and __next__ methods, so it can be
iterated over like any other Python iterable.
Attributes:
current_state: The current state of the process
Args:
states (List[State]): A list of states of the process, can be State
objects, but just needs to be a hashable object
with an equality operator defined.
transition_func: One of three things:
1. A function from the state space to itself
2. A dictionary mapping states to another
dictionary mapping states to transition
probabilities.
{State -> {State -> probability}}
In this way if A is this dictionary,
A[S1][S2] is the probability of transitioning
from state S1 to state S2.
3. A numpy matrix of probabilities, the order
of rows must match order of states passed in
start_state (State): Optional, This specifies the start state,
It can be a function which when called returns
the start state. If not passed in, the first state
in the list is assumed to be the start state
end_state (State): Optional, this specifies what state to end at,
can be a list of states, or a function to test
if at an end state.
"""
def __init__(self, states, transition_func, start_state=None,
end_state=None):
self.states = states
if callable(transition_func):
self.transition_function = transition_func
elif isinstance(transition_func, dict):
self.transition_function = self._func_from_dict(transition_func)
elif isinstance(transition_func, np.matrix):
self.transition_function = self._func_from_matrix(transition_func)
# current_state being None means has not gotten to start state
self.current_state = None
self.start_state = start_state
self.end_state = end_state
def _func_from_matrix(self, matrix):
"""
Generates a transition function from a transition matrix.
This is necessarily probabilistic. It generates a number uniformly on
[0,1] and then iterates through the states accumulating probabilities.
Once the accumulated probability is greater than the random number,
iteration stops and the current state is returned.
"""
# To find the next state,
# We generate a random number between 0 and 1
self.n, _ = matrix.shape
self.matrix = matrix
def f(state):
random_number = np.random.rand()
cum_prob = 0
curr_index = self.states.index(self.current_state)
# Then we accumulate the probabilities over the current state row
for i in range(self.n):
state_prob = self.matrix[curr_index,i]
cum_prob += state_prob
if random_number < cum_prob:
next_index = i
return self.states[next_index]
else:
raise MarkovError("There is no transition from state {}"
.format(state))
return f
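# Illustrative example for the matrix form (not part of the module): with
# states ['A', 'B'] and
#     matrix = np.matrix([[0.9, 0.1],
#                         [0.4, 0.6]])
# a current state of 'A' moves to 'B' whenever the uniform draw falls in
# the top 10% of [0, 1).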
def _func_from_dict(self, mapping):
"""
Generates a transition function from a mapping of the form
{State -> {State -> probability}}
This is necessarily probabilistic. It generates a number uniformly on
[0,1] and then iterates through the states accumulating probabilities.
Once the accumulated probability is greater than the random number,
iteration stops and the current state is returned.
"""
self.mapping = mapping
def f(state):
# To find the next state,
# We generate a random number between 0 and 1
random_number = np.random.rand()
cum_prob = 0
# Then we accumulate the probabilities over the current state row
for next_state in self.mapping[self.current_state]:
state_probability = self.mapping[self.current_state][next_state]
cum_prob += state_probability
if random_number < cum_prob:
return next_state
else:
raise MarkovError("There is no transition from state {}"
.format(state))
return f
def _pick_start_state(self):
"""
Sets the current state attribute to the start state
"""
start_state = self.start_state
if start_state is None:
# If no start_state passed in, we assume first state is start
self.current_state = self.states[0]
elif callable(start_state):
# If start_state is a function, we call it to get first state
self.current_state = start_state()
else:
# Otherwise, what is passed in is a state
self.current_state = start_state
def step(self):
"""
Moves the current state attribute to the next state according to
the function. Picks a start state if current state is None.
"""
if self.current_state is None:
self._pick_start_state()
else:
if self.is_end_state(self.current_state):
raise StopIteration
self.current_state = self.transition_function(self.current_state)
def is_end_state(self, state):
"""
This function checks if the passed in state is an end state.
"""
if self.end_state is None:
return False
elif isinstance(self.end_state, states.State):
return state == self.end_state
elif isinstance(self.end_state, list):
return state in self.end_state
elif callable(self.end_state):
return self.end_state(state)
else:
return state == self.end_state
def reset(self):
"""
Sets the current state attribute to None, so the Markov Chain
can be run again.
"""
self.current_state = None
def random_walk(self, n=None):
"""
This generates states and yields them (so it does not create a list
in memory). This should not be called in conjunction with other methods
which change the current state as then the walk generated may not
correspond to an actual walk.
This resets the current state whenever it is called.
Args:
n (int): The number of steps taken in the random walk,
not passed in if want to generate states indefinitely
"""
self.reset()
if n is None:
while True:
self.step()
yield self.current_state
else:
for _ in range(n):
self.step()
yield self.current_state
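# Illustrative usage (assuming `chain` is a configured instance of this
# class): iterate the walk lazily instead of materialising a list.
#   for visited in chain.random_walk(n=100):
#       print(visited)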
def __iter__(self):
return self
def __next__(self):
self.step()
return self.current_state
def increment(mapping, state, next_state, count=1):
"""
Increments mapping[state][next_state], creating entries if need be.
Pass count to increment by an amount other than 1.
Args:
    mapping (dict[State,dict[State,int]]): The transition counts
    state (State): The state to transition from
    next_state (State): The state to transition to
count (int): The amount to increment by
"""
if state in mapping:
if next_state in mapping[state]:
mapping[state][next_state] += count
else:
mapping[state][next_state] = count
else:
mapping[state] = {next_state:count}
def mapping_from_walk(walk, memory=1):
"""
From a sequence of states, generates a map counting how often one
state transitions to another. This is stored as a dictionary of the form
{State -> {State -> count}}.
With memory = n, transitions are counted between tuples of n consecutive
states rather than single states. For example, with states [A, B, A, A]
and memory = 2, the mapping would be
{(A, B): {(B, A): 1}, (B, A): {(A, A): 1}}.
If memory > 1, the keys in the dictionary will be tuples of states.
Args:
walk (List[State]): A sequence of states
memory (int): A number representing how many states to use for memory
"""
count_map = {}
if memory == 1:
for (state, next_state) in zip(walk, walk[1:]):
increment(count_map, state, next_state)
else:
offsets = [walk[i:] for i in range(memory)]
state_tuples = list(zip(*offsets))
for (state, next_state) in zip(state_tuples, state_tuples[1:]):
increment(count_map, state, next_state)
return count_map
def merge_maps(maps):
"""
Merges counts of transitions from multiple different mappings
into a single mapping.
Args:
maps (List[dict]): A list of mappings, these should be dictionaries
of States mapped to dictionaries of States mapped to numbers
{State -> {State -> int}}
Returns:
dict: A dictionary of the merged counts
"""
merged = {}
for mapping in maps:
for state in mapping:
for next_state in mapping[state]:
count = mapping[state][next_state]
increment(merged, state, next_state, count)
return merged
def normalize_map(mapping):
"""
Creates a new dictionary with the frequency of each transition.
Each state transition count is normalized by the total number of
transitions out of a given state.
Args:
    mapping (dict): A mapping of States to dictionaries of States mapped
        to numbers, i.e. {State -> {State -> int}}
Returns:
dict: A dictionary of the normalized counts
"""
normalized_dict = {}
for word in mapping:
normalized_dict[word] = {}
count = sum(mapping[word].values())
for other in mapping[word]:
normalized_dict[word][other] = mapping[word][other] / count
return normalized_dict
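# Illustrative sketch (not part of the original module): chaining the helpers
# above to estimate transition probabilities from observed walks. The state
# names below are made up for demonstration.
if __name__ == "__main__":
    _walks = [["sunny", "sunny", "rainy", "sunny"],
              ["rainy", "rainy", "sunny"]]
    _counts = merge_maps([mapping_from_walk(w) for w in _walks])
    # _counts == {'sunny': {'sunny': 1, 'rainy': 1}, 'rainy': {'rainy': 1, 'sunny': 2}}
    _probs = normalize_map(_counts)
    # _probs['rainy']['sunny'] == 2 / 3
    print(_probs)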
def matrix_from_mapping(mapping):
"""
From a mapping of the form {State -> {State -> probability}}, it creates
an equivalent transition matrix. Note if the mapping is just a dictionary,
to reduce redundant features first
before comparing different clusters, or interpret the important features
further after obtaining them.
For details about xgboost parameters, check the following links:
[1] https://www.analyticsvidhya.com/blog/2016/03/\
complete-guide-parameter-tuning-xgboost-with-codes-python/
[2] http://xgboost.readthedocs.io/en/latest/python/python_intro.html
[3] http://xgboost.readthedocs.io/en/latest/parameter.html
[4] https://xgboost.readthedocs.io/en/latest/how_to/param_tuning.html
"""
num_boost_round = int(num_boost_round)
if num_boost_round <= 0:
raise ValueError("num_boost_round must >= 1")
# This is for implementing caching in the future.
selected_uniq_labs = np.unique(selected_labs).tolist()
# subset SLCS
lab_selected_slcs = self.lab_x(selected_uniq_labs)
# unique labels in SLCS after subsetting
# Since lab_x checks whether selected labels are all existing,
# the unique labels of the subset is equivalent to input selected
# labels.
uniq_labs = lab_selected_slcs._uniq_labs.tolist()
# convert labels into indices from 0 to n_classes
n_uniq_labs = len(uniq_labs)
if n_uniq_labs <= 1:
raise ValueError("The number of unique labels should > 1. "
"Provided uniq labs:"
" {}".format(uniq_labs))
lab_ind_lut = dict(zip(uniq_labs, range(n_uniq_labs)))
lab_inds = [lab_ind_lut[lab] for lab in lab_selected_slcs._labs]
np.random.seed(random_state)
# shuffle features if necessary
fids = lab_selected_slcs.fids
if shuffle_features:
feature_inds = np.arange(lab_selected_slcs._x.shape[1])
feature_inds, fids = sklearn.utils.shuffle(
feature_inds, fids)
else:
feature_inds = slice(None, None)
# perform bootstrapping if necessary
num_bootstrap_round = int(num_bootstrap_round)
if num_bootstrap_round <= 0:
# no bootstrapping
# _xgb_train_runner returns (fscores, bst, eval_stats)
fscores, bst, eval_stats = self._xgb_train_runner(
lab_selected_slcs._x[:, feature_inds],
lab_inds, fids, test_size=test_size,
num_boost_round=num_boost_round,
xgb_params=xgb_params, random_state=random_state,
nprocs=nprocs, silent=silent)
# make sorted_fs_list consistent with the bootstrapped one
fs_list = [(t[0], t[1], 0, 1, [t[1]])
for t in fscores]
sorted_fs_list = sorted(fs_list, key=lambda t: (t[3], t[1]),
reverse=True)
print(eval_stats)
bst_list = [bst]
else:
# do bootstrapping
# ([dict of scores], [list of bsts], dict of eval stats)
fs_dict = defaultdict(list)
bst_list = []
eval_stats_dict = defaultdict(list)
if bootstrap_size is None:
bootstrap_size = lab_selected_slcs._x.shape[0]
sample_inds = np.arange(lab_selected_slcs._x.shape[0])
# bootstrapping rounds
for i in range(num_bootstrap_round):
# random state determined by numpy
# ensure all labels present
# initialize resample sample_indices and labels
bs_s_inds, bs_lab_inds = sklearn.utils.resample(
sample_inds, lab_inds, replace=True,
n_samples=bootstrap_size)
while len(np.unique(bs_lab_inds)) != n_uniq_labs:
bs_s_inds, bs_lab_inds = sklearn.utils.resample(
sample_inds, lab_inds, replace=True,
n_samples=bootstrap_size)
fscores, bst, eval_stats = self._xgb_train_runner(
lab_selected_slcs._x[bs_s_inds, :][:, feature_inds],
bs_lab_inds, fids, test_size=test_size,
num_boost_round=num_boost_round,
xgb_params=xgb_params, random_state=random_state,
nprocs=nprocs, silent=silent)
# Sum fscores
for fid, fs in fscores:
fs_dict[fid] += [fs]
bst_list.append(bst)
# est: eval stats tuple
# [ [('train...', float), ...],
# [('test...', float), ...] ]
for elist in eval_stats:
for ename, evalue in elist:
eval_stats_dict[ename].append(evalue)
if shuffle_features:
feature_inds, fids = sklearn.utils.shuffle(
feature_inds, fids)
# score summary: average, std, times showed up
fid_s_list = []
for fid, fs in fs_dict.items():
fid_s_list.append((fid, np.mean(fs), np.std(fs, ddof=0),
len(fs), fs))
sorted_fs_list = sorted(fid_s_list, key=lambda t: (t[3], t[1]),
reverse=True)
# calculate mean +/- std of eval stats
for ename, evalue_list in eval_stats_dict.items():
print("{}: mean {}, std {}".format(
ename, np.mean(evalue_list), np.std(evalue_list, ddof=1)))
# return same things for two branches
return sorted_fs_list, bst_list
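# Illustrative usage (hypothetical instance and labels, not taken from the
# original documentation):
#   sorted_fs_list, bst_list = slcs.feature_importance_across_labs(
#       [0, 1], num_boost_round=20, num_bootstrap_round=50)
# Each entry of sorted_fs_list is a tuple of the form
#   (feature_id, mean_score, std_score, n_rounds_present, per_round_scores),
# matching the construction above in both the bootstrapped and the
# single-run branches.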
def feature_importance_distintuishing_labs(self, selected_labs,
test_size=0.3,
num_boost_round=10, nprocs=1,
random_state=None, silent=1,
xgb_params=None,
num_bootstrap_round=0,
bootstrap_size=None,
shuffle_features=False):
"""
Use xgboost to compare selected labels and others.
"""
selected_s_bool_inds = self.lab_x_bool_inds(selected_labs)
# binary labs distinguishing selected and non-selected
io_bin_lab_arr = ["selected" if s else "non-selected"
for s in selected_s_bool_inds]
# create a new SLCS instance with new labels
nl_slcs = self.relabel(io_bin_lab_arr)
fi_res = nl_slcs.feature_importance_across_labs(
["selected", "non-selected"], test_size=test_size,
num_boost_round=num_boost_round, nprocs=nprocs,
random_state=random_state, silent=silent,
xgb_params=xgb_params, num_bootstrap_round=num_bootstrap_round,
bootstrap_size=bootstrap_size, shuffle_features=shuffle_features)
return fi_res
def feature_importance_each_lab(self, test_size=0.3, num_boost_round=10,
nprocs=1, random_state=None, silent=1,
xgb_params=None, num_bootstrap_round=0,
bootstrap_size=None,
shuffle_features=False):
"""
Use xgboost to compare each label with others. Experimental.
"""
# Construct important feature lut
# {ulab0: [if1, if2, ...], ...}
ulab_fi_lut = defaultdict(list)
for ulab in self._uniq_labs:
# get bool indices of current label
ulab_s_bool_inds = self.lab_x_bool_inds(ulab)
# compare current label with other samples
fi_res = self.feature_importance_distintuishing_labs(
ulab, test_size=test_size,
num_boost_round=num_boost_round, nprocs=nprocs,
random_state=random_state, silent=silent,
xgb_params=xgb_params,
num_bootstrap_round=num_bootstrap_round,
bootstrap_size=bootstrap_size,
shuffle_features=shuffle_features)
for fid in [t[0] for t in fi_res[0]]:
fx = self.f_id_x_vec(fid)
# current label values
ulab_x = fx[ulab_s_bool_inds]
# other values
other_x = fx[np.logical_not(ulab_s_bool_inds)]
# current lab mean
ulab_x_mean = np.mean(ulab_x)
# other mean
other_x_mean = np.mean(other_x)
# mean fold change
ulab_mfc = (ulab_x_mean - other_x_mean) / ulab_x_mean
# ks test result
ks_res = ks_2samp(ulab_x, other_x)
ulab_fi_lut[ulab].append((fid, ulab_mfc, ks_res.pvalue))
ulab_fi_lut[ulab].sort(key=lambda t: t[1], reverse=True)
ulab_fi_lut[ulab] = [t for t in ulab_fi_lut[ulab]]
return ulab_fi_lut
def tsne_plot(self, gradient=None, labels=None,
selected_labels=None,
shuffle_label_colors=False,
title=None, xlab=None, ylab=None,
figsize=(20, 20), add_legend=True,
n_txt_per_cluster=3, alpha=1, s=0.5,
random_state=None, **kwargs):
"""
Plot the last t-SNE projection with the provided gradient as color.
"""
if labels is None:
labels = self.labs
return super(SingleLabelClassifiedSamples,
self).tsne_plot(
gradient=gradient, labels=labels,
selected_labels=selected_labels,
shuffle_label_colors=shuffle_label_colors,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize,
add_legend=add_legend,
n_txt_per_cluster=n_txt_per_cluster,
alpha=alpha, s=s,
random_state=random_state,
**kwargs)
def tsne_feature_gradient_plot(self, fid, transform=None, labels=None,
selected_labels=None,
shuffle_label_colors=False,
title=None, xlab=None, ylab=None,
figsize=(20, 20), add_legend=True,
n_txt_per_cluster=3, alpha=1, s=0.5,
random_state=None, **kwargs):
"""
Plot the last t-SNE projection, colored by the values of the selected feature.
Parameters
----------
fid: feature id scalar
ID of the feature to be used for gradient plot.
transform: callable
Map transform on feature before plotting.
"""
if labels is None:
labels = self.labs
return super(SingleLabelClassifiedSamples,
self).tsne_feature_gradient_plot(
fid=fid, transform=transform, labels=labels,
selected_labels=selected_labels,
shuffle_label_colors=shuffle_label_colors,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize,
add_legend=add_legend,
n_txt_per_cluster=n_txt_per_cluster,
alpha=alpha, s=s,
random_state=random_state,
**kwargs)
def feature_swarm_plot(self, fid, transform=None, labels=None,
selected_labels=None,
title=None, xlab=None, ylab=None,
figsize=(10, 10)):
f_ind = self.f_id_to_ind([fid])[0]
fx = self.f_ind_x_vec(f_ind)
if transform is not None:
if callable(transform):
fx = np.array(list(map(transform, fx)))
else:
raise ValueError("transform must be a callable")
if labels is None:
    labels = self.labs
elif len(labels) != fx.shape[0]:
    raise ValueError("labels ({}) must have the same length as "
                     "n_samples.".format(labels))
return swarm(fx, labels=labels, selected_labels=selected_labels,
title=title, xlab=xlab, ylab=ylab, figsize=figsize)
def dmat_heatmap(self, selected_labels=None, col_labels=None,
transform=None,
title=None, xlab=None, ylab=None, figsize=(10, 10),
**kwargs):
"""
Plot distance matrix with rows colored by current labels.
"""
selected_s_bool_inds = self.lab_x_bool_inds(selected_labels)
selected_labels = self._labs[selected_s_bool_inds].tolist()
selected_d = self._d[selected_s_bool_inds, :][:, selected_s_bool_inds]
return heatmap(selected_d, row_labels=selected_labels,
col_labels=col_labels, transform=transform,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize, **kwargs)
def xmat_heatmap(self, selected_labels=None, selected_fids=None,
col_labels=None, transform=None,
title=None, xlab=None, ylab=None, figsize=(10, 10),
**kwargs):
"""
Plot x as heatmap.
"""
selected_s_bool_inds = self.lab_x_bool_inds(selected_labels)
selected_s_ids = self._sids[selected_s_bool_inds]
selected_slcs = self.id_x(selected_s_ids, selected_fids)
return heatmap(selected_slcs._x, row_labels=selected_slcs.labs,
col_labels=col_labels, transform=transform,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize, **kwargs)
@property
def labs(self):
return self._labs.tolist()
# Sort the clustered sample ids using the reference order of another id sequence.
#
# Sort sids according to labs
# If ref_sid_order is not None:
# sort sids further according to ref_sid_order
def lab_sorted_sids(self, ref_sid_order=None):
sep_lab_sid_list = []
sep_lab_list = []
for iter_lab in sorted(self._sid_lut.keys()):
iter_sid_arr = self._sid_lut[iter_lab]
sep_lab_sid_list.append(iter_sid_arr)
sep_lab_list.append(np.repeat(iter_lab, len(iter_sid_arr)))
if ref_sid_order is not None:
mtype.check_is_valid_sfids(ref_sid_order)
ref_sid_order = np.array(ref_sid_order)
# sort r according to q
# assumes:
# - r contains all elements in q
# - r is 1d np array
def sort_flat_sids(query_sids, ref_sids):
return ref_sids[np.in1d(ref_sids, query_sids)]
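# e.g. with ref_sids = np.array(["s0", "s1", "s2", "s3"]) and
# query_sids = ["s3", "s1"], np.in1d keeps the reference entries that also
# appear in the query, so the result is array(["s1", "s3"]) in reference order.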
# sort inner sid list but maintains the order as sep_lab_list
sep_lab_sid_list = [sort_flat_sids(x, ref_sid_order)
for x in sep_lab_sid_list]
sep_lab_min_sid_list = [x[0] for x in sep_lab_sid_list]
sorted_sep_lab_min_sid_list = list(
sort_flat_sids(sep_lab_min_sid_list, ref_sid_order))
min_sid_sorted_sep_lab_ind_list = [
sep_lab_min_sid_list.index(x)
for x in sorted_sep_lab_min_sid_list
]
sep_lab_list = [sep_lab_list[i]
for i in min_sid_sorted_sep_lab_ind_list]
sep_lab_sid_list = [sep_lab_sid_list[i]
for i in min_sid_sorted_sep_lab_ind_list]
lab_sorted_sid_arr = np.concatenate(sep_lab_sid_list)
lab_sorted_lab_arr = np.concatenate(sep_lab_list)
# check sorted sids are the same set as original
np.testing.assert_array_equal(
np.sort(lab_sorted_sid_arr), np.sort(self._sids))
# check sorted labs are the same set as original
np.testing.assert_array_equal(
np.sort(lab_sorted_lab_arr), np.sort(self._labs))
# check sorted (sid, lab) matchings are the same set as original
np.testing.assert_array_equal(
lab_sorted_lab_arr[np.argsort(lab_sorted_sid_arr)],
self._labs[np.argsort(self._sids)])
return (lab_sorted_sid_arr, lab_sorted_lab_arr)
# See how two clustering criteria match with each other.
# q_slc_samples must be a SingleLabelClassifiedSamples instance whose
# sample ids are all present in this instance.
def cross_labs(self, q_slc_samples):
if not isinstance(q_slc_samples, SingleLabelClassifiedSamples):
raise TypeError("Query should be an instance of "
"SingleLabelClassifiedSamples")
try:
ref_labs = np.array([self._lab_lut[x]
for x in q_slc_samples.sids])
except KeyError as e:
raise ValueError("query sid {} is not in ref sids.".format(e))
query_labs = np.array(q_slc_samples.labs)
uniq_rlabs, uniq_rlab_cnts = np.unique(ref_labs, return_counts=True)
cross_lab_lut = {}
for i in range(len(uniq_rlabs)):
# ref cluster i. query unique labs.
ref_ci_quniq = tuple(map(list, np.unique(
query_labs[np.where(np.array(ref_labs) == uniq_rlabs[i])],
return_counts=True)))
cross_lab_lut[uniq_rlabs[i]] = (uniq_rlab_cnts[i],
tuple(map(tuple, ref_ci_quniq)))
return cross_lab_lut
class MDLSingleLabelClassifiedSamples(SingleLabelClassifiedSamples):
"""
MDLSingleLabelClassifiedSamples inherits SingleLabelClassifiedSamples to
offer MDL operations.
Args:
x (2d number array): data matrix
labs (list of str or int): labels
sids (list of str or int): sample ids
fids (list of str or int): feature ids
encode_type
import random
import string
from datetime import datetime
from lxml.etree import XML
import asyncio
import json
import os
import shutil
import tempfile
import unittest
import unittest.mock
import urllib.request
from cate.core.ds import DATA_STORE_REGISTRY, DataAccessError, DataStoreNotice
from cate.ds.esa_cci_odp import _fetch_file_list_json, _extract_metadata_from_odd, _extract_metadata_from_odd_url, \
_extract_metadata_from_descxml, _extract_metadata_from_descxml_url, _harmonize_info_field_names, \
_DownloadStatistics, EsaCciOdpDataStore, find_datetime_format, _retrieve_infos_from_dds
from cate.core.types import PolygonLike, TimeRangeLike, VarNamesLike
from cate.ds.local import LocalDataStore
class EsaCciOdpOsTest(unittest.TestCase):
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_fetch_opensearch_json(self):
file_list = asyncio.run(_fetch_file_list_json(
dataset_id='4eb4e801424a47f7b77434291921f889',
drs_id='esacci.OZONE.mon.L3.NP.multi-sensor.multi-platform.MERGED.fv0002.r1'
))
self.assertEqual('ESACCI-OZONE-L3-NP-MERGED-KNMI-199701-fv0002.nc', file_list[0][0])
self.assertEqual("1997-01-04T00:00:00", file_list[0][1])
self.assertEqual("1997-01-04T00:00:00", file_list[0][2])
self.assertEqual(14417751, file_list[0][3])
self.assertEqual(2, len(file_list[0][4]))
self.assertTrue("Download" in file_list[0][4])
self.assertEqual('http://data.cci.ceda.ac.uk/thredds/fileServer/esacci/ozone/data/nadir_profiles/l3/'
'merged/v0002/1997/ESACCI-OZONE-L3-NP-MERGED-KNMI-199701-fv0002.nc',
file_list[0][4].get("Download"))
self.assertEqual('http://data.cci.ceda.ac.uk/thredds/dodsC/esacci/ozone/data/nadir_profiles/l3/merged/'
'v0002/1997/ESACCI-OZONE-L3-NP-MERGED-KNMI-199701-fv0002.nc',
file_list[0][4].get("Opendap"))
def test_extract_metadata_from_odd_file(self):
odd_file = os.path.join(os.path.dirname(__file__), 'resources/odd.xml')
with open(odd_file) as odd:
json_obj = _extract_metadata_from_odd(XML(odd.read()))
self.assertFalse('query' in json_obj)
self.assertTrue('ecv' in json_obj)
self.assertEqual('LC', json_obj['ecv'])
self.assertTrue('time_frequencies' in json_obj)
self.assertEqual(['day', 'year'], json_obj['time_frequencies'])
self.assertTrue('institute' in json_obj)
self.assertEqual('Universite Catholique de Louvain', json_obj['institute'])
self.assertTrue('processing_level' in json_obj)
self.assertEqual('L4', json_obj['processing_level'])
self.assertTrue('product_string' in json_obj)
self.assertEqual('Map', json_obj['product_string'])
self.assertTrue('product_version' in json_obj)
self.assertEqual('2.0.7', json_obj['product_version'])
self.assertTrue('data_type' in json_obj)
self.assertEqual('LCCS', json_obj['data_type'])
self.assertFalse('sensor_id' in json_obj)
self.assertFalse('platform_id' in json_obj)
self.assertTrue('file_format' in json_obj)
self.assertEqual('.nc', json_obj['file_format'])
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_extract_metadata_from_odd_url(self):
odd_url = 'https://archive.opensearch.ceda.ac.uk/opensearch/description.xml?' \
'parentIdentifier=4eb4e801424a47f7b77434291921f889'
json_obj = asyncio.run(_extract_metadata_from_odd_url(odd_url=odd_url))
self.assertFalse('query' in json_obj)
self.assertTrue('ecv' in json_obj)
self.assertEqual('OZONE', json_obj['ecv'])
self.assertTrue('time_frequency' in json_obj)
self.assertEqual('month', json_obj['time_frequency'])
self.assertTrue('institute' in json_obj)
self.assertEqual('Royal Netherlands Meteorological Institute', json_obj['institute'])
self.assertTrue('processing_level' in json_obj)
self.assertEqual('L3', json_obj['processing_level'])
self.assertTrue('product_string' in json_obj)
self.assertEqual('MERGED', json_obj['product_string'])
self.assertTrue('product_version' in json_obj)
self.assertEqual('fv0002', json_obj['product_version'])
self.assertTrue('data_type' in json_obj)
self.assertEqual('NP', json_obj['data_type'])
self.assertTrue('sensor_ids' in json_obj)
self.assertEqual(['GOME', 'GOME-2', 'OMI', 'SCIAMACHY'], json_obj['sensor_ids'])
self.assertTrue('platform_ids' in json_obj)
self.assertEqual(['Aura', 'ERS-2', 'Envisat', 'Metop-A'], json_obj['platform_ids'])
self.assertTrue('file_formats' in json_obj)
self.assertEqual(['.nc', '.txt'], json_obj['file_formats'])
self.assertTrue('drs_id' in json_obj)
self.assertEqual('esacci.OZONE.mon.L3.NP.multi-sensor.multi-platform.MERGED.fv0002.r1', json_obj['drs_id'])
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_extract_metadata_from_descxml_url(self):
desc_url = 'https://catalogue.ceda.ac.uk/export/xml/49bcb6f29c824ae49e41d2d3656f11be.xml'
json_obj = asyncio.run(_extract_metadata_from_descxml_url(None, desc_url))
self.assert_json_obj_from_desc_xml(json_obj)
def test_extract_metadata_from_descxml(self):
desc_file = os.path.join(os.path.dirname(__file__), 'resources/49bcb6f29c824ae49e41d2d3656f11be.xml')
with open(desc_file) as desc:
json_obj = _extract_metadata_from_descxml(XML(desc.read()))
self.assert_json_obj_from_desc_xml(json_obj)
def test_extract_metadata_from_descxml_faulty_url(self):
desc_url = 'http://brockmann-consult.de'
json_obj = asyncio.run(_extract_metadata_from_descxml_url(None, desc_url))
self.assertIsNotNone(json_obj)
self.assertEqual(0, len(json_obj.keys()))
def test_retrieve_dimensions_from_dds(self):
dds_file = os.path.join(os.path.dirname(__file__),
"resources/ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19861125000000-fv04.4.nc.dds")
with open(dds_file) as dds:
    dimensions, variable_infos = _retrieve_infos_from_dds(dds.readlines())
self.assertEqual(4, len(dimensions))
self.assertTrue('xc' in dimensions)
self.assertEqual(216, dimensions['xc'])
self.assertTrue('yc' in dimensions)
self.assertEqual(216, dimensions['yc'])
self.assertTrue('time' in dimensions)
self.assertEqual(1, dimensions['time'])
self.assertTrue('nv' in dimensions)
self.assertEqual(2, dimensions['nv'])
self.assertEqual(13, len(variable_infos))
self.assertTrue('Lambert_Azimuthal_Grid' in variable_infos)
self.assertEqual('Int32', variable_infos['Lambert_Azimuthal_Grid']['data_type'])
self.assertEqual(0, len(variable_infos['Lambert_Azimuthal_Grid']['dimensions']))
self.assertTrue('time_bnds' in variable_infos)
self.assertEqual('Float64', variable_infos['time_bnds']['data_type'])
self.assertEqual(2, len(variable_infos['time_bnds']['dimensions']))
self.assertTrue('time' in variable_infos['time_bnds']['dimensions'])
self.assertTrue('nv' in variable_infos['time_bnds']['dimensions'])
self.assertTrue('ice_conc' in variable_infos)
self.assertEqual('Int32', variable_infos['ice_conc']['data_type'])
self.assertEqual(3, len(variable_infos['ice_conc']['dimensions']))
self.assertTrue('time' in variable_infos['ice_conc']['dimensions'])
self.assertTrue('yc' in variable_infos['ice_conc']['dimensions'])
self.assertTrue('xc' in variable_infos['ice_conc']['dimensions'])
def assert_json_obj_from_desc_xml(self, json_obj: dict):
self.assertTrue('abstract' in json_obj)
self.maxDiff = None
self.assertTrue('title' in json_obj)
self.assertTrue('licences' in json_obj)
self.assertTrue('bbox_minx' in json_obj)
self.assertEqual('-180.0', json_obj['bbox_minx'])
self.assertTrue('bbox_miny' in json_obj)
self.assertEqual('-90.0', json_obj['bbox_miny'])
self.assertTrue('bbox_maxx' in json_obj)
self.assertEqual('180.0', json_obj['bbox_maxx'])
self.assertTrue('bbox_maxy' in json_obj)
self.assertEqual('90.0', json_obj['bbox_maxy'])
self.assertTrue('temporal_coverage_start' in json_obj)
self.assertEqual('1997-09-03T23:00:00', json_obj['temporal_coverage_start'])
self.assertTrue('temporal_coverage_end' in json_obj)
self.assertEqual('2013-12-31T23:59:59', json_obj['temporal_coverage_end'])
self.assertTrue('file_formats' in json_obj)
self.assertEqual('.nc', json_obj['file_formats'])
self.assertTrue('publication_date' in json_obj)
self.assertTrue('creation_date' in json_obj)
self.assertEqual('2016-12-12T17:08:42', json_obj['creation_date'])
def test_harmonize_info_field_names(self):
test_catalogue = {'file_format': '.tiff', 'platform_ids': ['dfjh', 'ftrzg6'], 'sensor_id': 'hxfb75z',
'sensor_ids': ['hxfb75z'], 'processing_level': 'L2', 'processing_levels': ['L1'],
'time_frequency': 'gsyhdx', 'time_frequencies': ['gsyhdx', 'zsgsteczh', 'fzgu'],
'field6': 'njgil', 'field6s': ['<dshjbre', 'hsr6u'], 'field7': 'multiple_field7s',
'field7s': ['saedf', 'kihji']}
_harmonize_info_field_names(test_catalogue, 'file_format', 'file_formats')
_harmonize_info_field_names(test_catalogue, 'platform_id', 'platform_ids')
_harmonize_info_field_names(test_catalogue, 'sensor_id', 'sensor_ids')
_harmonize_info_field_names(test_catalogue, 'processing_level', 'processing_levels')
_harmonize_info_field_names(test_catalogue, 'time_frequency', 'time_frequencies')
_harmonize_info_field_names(test_catalogue, 'field6', 'field6s', 'multiple_field6s')
_harmonize_info_field_names(test_catalogue, 'field7', 'field7s', 'multiple_field7s')
self.assertTrue('file_format' in test_catalogue)
self.assertEqual('.tiff', test_catalogue['file_format'])
self.assertFalse('file_formats' in test_catalogue)
self.assertFalse('platform_id' in test_catalogue)
self.assertTrue('platform_ids' in test_catalogue)
self.assertEqual(['dfjh', 'ftrzg6'], test_catalogue['platform_ids'])
self.assertTrue('sensor_id' in test_catalogue)
self.assertFalse('sensor_ids' in test_catalogue)
self.assertEqual('hxfb75z', test_catalogue['sensor_id'])
self.assertFalse('processing_level' in test_catalogue)
self.assertTrue('processing_levels' in test_catalogue)
self.assertEqual(['L1', 'L2'], test_catalogue['processing_levels'])
self.assertFalse('time_frequency' in test_catalogue)
self.assertTrue('time_frequencies' in test_catalogue)
self.assertEqual(['gsyhdx', 'zsgsteczh', 'fzgu'], test_catalogue['time_frequencies'])
self.assertFalse('field6' in test_catalogue)
self.assertTrue('field6s' in test_catalogue)
self.assertEqual(['<dshjbre', 'hsr6u', 'njgil'], test_catalogue['field6s'])
self.assertFalse('field7' in test_catalogue)
self.assertTrue('field7s' in test_catalogue)
self.assertEqual(['saedf', 'kihji'], test_catalogue['field7s'])
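# The assertions above pin down the harmonization rules. A minimal sketch
# (an assumption for illustration, not cate's actual implementation) that is
# consistent with them:
#
#   def harmonize(info, single, multi, sentinel=None):
#       if single in info and info[single] == sentinel:
#           del info[single]                   # sentinel means "see the plural field"
#       if single in info and multi in info:
#           if info[multi] == [info[single]]:
#               del info[multi]                # plural adds nothing; keep the scalar
#           else:
#               if info[single] not in info[multi]:
#                   info[multi].append(info[single])
#               del info[single]               # fold the scalar into the plural list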
@unittest.skip(reason='Because it writes a lot of files')
# @unittest.skipUnless(condition=os.environ.get('CATE_ODP_TEST', None), reason="skipped unless CATE_ODP_TEST=1")
class EsaCciOdpDataStoreIndexCacheTest(unittest.TestCase):
def test_index_cache(self):
self.data_store = EsaCciOdpDataStore(index_cache_used=True, index_cache_expiration_days=1.0e-6)
data_sources = self.data_store.query()
self.assertIsNotNone(data_sources)
for data_source in data_sources:
data_source.update_file_list()
def _create_test_data_store():
with open(os.path.join(os.path.dirname(__file__), 'resources/os-data-list.json')) as fp:
json_text = fp.read()
json_dict = json.loads(json_text)
with open(os.path.join(os.path.dirname(__file__), 'resources/drs_ids.txt')) as fp:
drs_ids = fp.read().split('\n')
for d in DATA_STORE_REGISTRY.get_data_stores():
d.get_updates(reset=True)
metadata_path = os.path.join(os.path.dirname(__file__), 'resources/datasources/metadata')
# The EsaCciOdpDataStore created with an initial json_dict and a metadata dir avoids fetching from remote
data_store = EsaCciOdpDataStore('test-odp', index_cache_json_dict=json_dict, index_cache_update_tag='test1',
meta_data_store_path=metadata_path, drs_ids=drs_ids)
DATA_STORE_REGISTRY.add_data_store(data_store)
return data_store
class EsaCciOdpDataStoreTest(unittest.TestCase):
def setUp(self):
self.data_store = _create_test_data_store()
def tearDown(self):
self.data_store.get_updates(reset=True)
def test_id_title_and_is_local(self):
self.assertEqual(self.data_store.id, 'test-odp')
self.assertEqual(self.data_store.title, 'ESA CCI Open Data Portal')
self.assertEqual(self.data_store.is_local, False)
def test_description(self):
self.assertIsNotNone(self.data_store.description)
self.assertTrue(len(self.data_store.description) > 40)
def test_notices(self):
self.assertIsInstance(self.data_store.notices, list)
self.assertEqual(2, len(self.data_store.notices))
notice0 = self.data_store.notices[0]
self.assertIsInstance(notice0, DataStoreNotice)
self.assertEqual(notice0.id, "terminologyClarification")
self.assertEqual(notice0.title, "Terminology Clarification")
self.assertEqual(notice0.icon, "info-sign")
self.assertEqual(notice0.intent, "primary")
self.assertTrue(len(notice0.content) > 20)
notice1 = self.data_store.notices[1]
self.assertIsInstance(notice0, DataStoreNotice)
self.assertEqual(notice1.id, "dataCompleteness")
self.assertEqual(notice1.title, "Data Completeness")
self.assertEqual(notice1.icon, "warning-sign")
self.assertEqual(notice1.intent, "warning")
self.assertTrue(len(notice1.content) > 20)
def test_query(self):
data_sources = self.data_store.query()
self.assertIsNotNone(data_sources)
self.assertEqual(len(data_sources), 4)
@unittest.skipIf(os.environ.get('CATE_DISABLE_WEB_TESTS', None) == '1', 'CATE_DISABLE_WEB_TESTS = 1')
def test_query_web_access(self):
store = EsaCciOdpDataStore()
all_data_sources = store.query()
self.assertIsNotNone(all_data_sources)
def test_query_with_string(self):
data_sources = self.data_store.query(query_expr='OC')
self.assertIsNotNone(data_sources)
self.assertEqual(len(data_sources), 1)
def test_adjust_json_dict(self):
test_dict = dict(
time_frequencies=['day', 'month'],
processing_level='L1C',
data_types=['abc', 'xyz', 'tfg'],
platform_id='platt',
product_version='1'
)
drs_id = 'esacci.SOILMOISTURE.day.L3S.SSMS.multi - sensor.multi - platform.ACTIVE.04 - 5.r1'
self.data_store._adjust_json_dict(test_dict, drs_id)
self.assertEqual(test_dict['time_frequency'], 'day')
self.assertFalse('time_frequencies' in test_dict)
self.assertEqual(test_dict['processing_level'], 'L3S')
self.assertEqual(test_dict['data_type'], 'SSMS')
self.assertFalse('data_types' in test_dict)
self.assertEqual(test_dict['sensor_id'], 'multi - sensor')
self.assertEqual(test_dict['platform_id'], 'multi - platform')
self.assertEqual(test_dict['product_string'], 'ACTIVE')
self.assertEqual(test_dict['product_version'], '04 - 5')
def test_convert_time_from_drs_id(self):
self.assertEqual('month', self.data_store._convert_time_from_drs_id('mon'))
self.assertEqual('year', self.data_store._convert_time_from_drs_id('yr'))
self.assertEqual('16 years', self.data_store._convert_time_from_drs_id('16-yrs'))
self.assertEqual('32 days', self.data_store._convert_time_from_drs_id('32-days'))
self.assertEqual('satellite-orbit-frequency',
self.data_store._convert_time_from_drs_id('satellite-orbit-frequency'))
class EsaCciOdpDataSourceTest(unittest.TestCase):
def setUp(self):
self.data_store = _create_test_data_store()
oc_data_sources = self.data_store.query(query_expr='OC')
self.assertIsNotNone(oc_data_sources)
self.assertIsNotNone(oc_data_sources[0])
self.first_oc_data_source = oc_data_sources[0]
self.tmp_dir = tempfile.mkdtemp()
self._existing_local_data_store = DATA_STORE_REGISTRY.get_data_store('local')
DATA_STORE_REGISTRY.add_data_store(LocalDataStore('local', self.tmp_dir))
def tearDown(self):
if self._existing_local_data_store:
DATA_STORE_REGISTRY.add_data_store(self._existing_local_data_store)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
self.data_store.get_updates(reset=True)
def test_make_local_and_update(self):
soil_moisture_data_sources = self.data_store.query(
query_expr='esacci.OZONE.mon.L3.NP.multi-sensor.multi-platform.MERGED.fv0002.r1')
soilmoisture_data_source = soil_moisture_data_sources[0]
reference_path = os.path.join(os.path.dirname(__file__),
os.path.normpath('resources/datasources/local/files/'))
def find_files_mock(_, time_range):
def build_file_item(item_name: str, date_from: datetime, date_to: datetime, size: int):
return [item_name, date_from, date_to, size,
{'Opendap': os.path.join(reference_path, item_name),
'Download': 'file:' + urllib.request.pathname2url(os.path.join(reference_path, item_name))}]
reference_files = {
'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781114000000-fv02.2.nc': {
'date_from': datetime(1995, 11, 14, 0, 0),
'date_to': datetime(1995, 11, 14, 23, 59),
'size': 21511378
},
'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781115000000-fv02.2.nc': {
'date_from': datetime(1995, 11, 15, 0, 0),
'date_to': datetime(1995, 11, 15, 23, 59),
'size': 21511378
},
'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781116000000-fv02.2.nc': {
'date_from': datetime(1995, 11, 16, 0, 0),
'date_to': datetime(1995, 11, 16, 23, 59),
'size': 21511378
}
}
reference_files_list = []
for reference_file in reference_files.items():
file_name = reference_file[0]
file_date_from = reference_file[1].get('date_from')
file_date_to = reference_file[1].get('date_to')
file_size = reference_file[1].get('size')
if time_range:
if file_date_from >= time_range[0] and file_date_to <= time_range[1]:
reference_files_list.append(build_file_item(file_name,
file_date_from,
file_date_to,
file_size))
else:
reference_files_list.append(build_file_item(file_name,
file_date_from,
file_date_to,
file_size))
return reference_files_list
with unittest.mock.patch('cate.ds.esa_cci_odp.EsaCciOdpDataSource._find_files', find_files_mock):
with unittest.mock.patch.object(EsaCciOdpDataStore, 'query', return_value=[]):
new_ds_title = 'local_ds_test'
new_ds_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 16, 23, 59)))
# new_ds_time_range = TimeRangeLike.convert((datetime(1997, 5, 10, 0, 0),
# datetime(1997, 5, 12, 23, 59)))
try:
new_ds = soilmoisture_data_source.make_local(new_ds_title, time_range=new_ds_time_range)
except Exception:
raise ValueError(reference_path, os.listdir(reference_path))
self.assertIsNotNone(new_ds)
self.assertEqual(new_ds.id, "local.%s" % new_ds_title)
self.assertEqual(new_ds.temporal_coverage(), new_ds_time_range)
new_ds_w_one_variable_title = 'local_ds_test_var'
new_ds_w_one_variable_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 16, 23, 59)))
new_ds_w_one_variable_var_names = VarNamesLike.convert(['sm'])
new_ds_w_one_variable = soilmoisture_data_source.make_local(
new_ds_w_one_variable_title,
time_range=new_ds_w_one_variable_time_range,
var_names=new_ds_w_one_variable_var_names
)
self.assertIsNotNone(new_ds_w_one_variable)
self.assertEqual(new_ds_w_one_variable.id, "local.%s" % new_ds_w_one_variable_title)
ds = new_ds_w_one_variable.open_dataset()
new_ds_w_one_variable_var_names.extend(['lat', 'lon', 'time'])
self.assertSetEqual(set(ds.variables),
set(new_ds_w_one_variable_var_names))
new_ds_w_region_title = 'from_local_to_local_region'
new_ds_w_region_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 16, 23, 59)))
new_ds_w_region_spatial_coverage = PolygonLike.convert("10,20,30,40")
new_ds_w_region = soilmoisture_data_source.make_local(
new_ds_w_region_title,
time_range=new_ds_w_region_time_range,
region=new_ds_w_region_spatial_coverage)
self.assertIsNotNone(new_ds_w_region)
self.assertEqual(new_ds_w_region.id, "local.%s" % new_ds_w_region_title)
self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)
new_ds_w_region_title = 'from_local_to_local_region_one_var'
new_ds_w_region_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 16, 23, 59)))
new_ds_w_region_var_names = VarNamesLike.convert(['sm'])
new_ds_w_region_spatial_coverage = PolygonLike.convert("10,20,30,40")
new_ds_w_region = soilmoisture_data_source.make_local(
new_ds_w_region_title,
time_range=new_ds_w_region_time_range,
var_names=new_ds_w_region_var_names,
region=new_ds_w_region_spatial_coverage)
self.assertIsNotNone(new_ds_w_region)
self.assertEqual(new_ds_w_region.id, "local.%s" % new_ds_w_region_title)
self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)
data_set = new_ds_w_region.open_dataset()
new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])
self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))
new_ds_w_region_title = 'from_local_to_local_region_two_var_sm_uncertainty'
new_ds_w_region_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 16, 23, 59)))
new_ds_w_region_var_names = VarNamesLike.convert(['sm', 'sm_uncertainty'])
new_ds_w_region_spatial_coverage = PolygonLike.convert("10,20,30,40")
new_ds_w_region = soilmoisture_data_source.make_local(
new_ds_w_region_title,
time_range=new_ds_w_region_time_range,
var_names=new_ds_w_region_var_names,
region=new_ds_w_region_spatial_coverage)
self.assertIsNotNone(new_ds_w_region)
self.assertEqual(new_ds_w_region.id, "local.%s" % new_ds_w_region_title)
self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)
data_set = new_ds_w_region.open_dataset()
new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])
self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))
empty_ds_timerange = (datetime(1917, 12, 1, 0, 0), datetime(1917, 12, 31, 23, 59))
with self.assertRaises(DataAccessError) as cm:
soilmoisture_data_source.make_local('empty_ds', time_range=empty_ds_timerange)
self.assertEqual(f'Data source "{soilmoisture_data_source.id}" does not'
f' seem to have any datasets in given'
f' time range {TimeRangeLike.format(empty_ds_timerange)}',
str(cm.exception))
new_ds_time_range = TimeRangeLike.convert((datetime(1995, 11, 14, 0, 0),
datetime(1995, 11, 14, 23, 59)))
new_ds = soilmoisture_data_source.make_local("title_test_copy", time_range=new_ds_time_range)
self.assertIsNotNone(new_ds)
self.assertEqual(new_ds.meta_info['title'], soilmoisture_data_source.meta_info['title'])
title = "Title Test!"
new_ds = soilmoisture_data_source.make_local("title_test_set", title, time_range=new_ds_time_range)
self.assertIsNotNone(new_ds)
self.assertEqual(new_ds.meta_info['title'], title)
def test_data_store(self):
self.assertIs(self.first_oc_data_source.data_store,
self.data_store)
def test_id(self):
self.assertEqual('esacci.OC.day.L3S.CHLOR_A.multi-sensor.multi-platform.MERGED.3-1.sinusoidal',
self.first_oc_data_source.id)
def test_schema(self):
self.assertEqual(self.first_oc_data_source.schema,
None)
def test_temporal_coverage(self):
self.assertEqual(self.first_oc_data_source.temporal_coverage(),
(datetime(1997, 9, 3, 23, 0, 0), datetime(2016, 12, 31, 23, 59, 59)))
def assert_tf(self, filename: str, expected_time_format: str):
time_format, p1, p2 = find_datetime_format(filename)
self.assertEqual(time_format, expected_time_format)
def test_time_filename_patterns(self):
self.assert_tf('20020730174408-ESACCI-L3U_GHRSST-SSTskin-AATSR-LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')
self.assert_tf('19911107054700-ESACCI-L2P_GHRSST-SSTskin-AVHRR12_G-LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')
self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-NH25kmEASE2-19920610-fv01.11.nc', '%Y%m%d')
self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-SH25kmEASE2-20000101-20001231-fv01.11.nc', '%Y%m%d')
self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-NH25kmEASE2-20070204-fv01.11.nc', '%Y%m%d')
self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-SH25kmEASE2-20040427-fv01.11.nc', '%Y%m%d')
self.assert_tf('19921018120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')
self.assert_tf('19940104120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')
self.assert_tf('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-20090301-fv0100.nc', '%Y%m%d')
self.assert_tf('20070328-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-15-fv1.0.nc', '%Y%m%d')
self.assert_tf('20091002-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-16-fv1.0.nc', '%Y%m%d')
self.assert_tf('20090729-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-18-fv1.0.nc', '%Y%m%d')
self.assert_tf('20070410-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-17-fv1.0.nc', '%Y%m%d')
self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_SIN_PML_KD490_Lee-20000129-fv1.0.nc', '%Y%m%d')
self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_GEO_PML_KD490_Lee-19980721-fv1.0.nc', '%Y%m%d')
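# The assertions above suggest that find_datetime_format maps the digit run in
# a filename to a strptime pattern and returns the pattern with its span. A
# rough sketch of that idea (an illustrative assumption, not cate's actual
# implementation):
#
#   import re
#   _CANDIDATES = [('%Y%m%d%H%M%S', r'\d{14}'), ('%Y%m%d', r'\d{8}')]
#   def guess_datetime_format(name):
#       for fmt, pattern in _CANDIDATES:
#           match = re.search(pattern, name)
#           if match:
#               return fmt, match.start(), match.end()
#       return None, -1, -1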
#!/usr/bin/env python
import os
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import hunts
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import security
from grr.lib.aff4_objects import user_managers
from grr.lib.aff4_objects import users
from grr.lib.authorization import client_approval_auth
from grr.lib.rdfvalues import aff4_rdfvalues
from grr.lib.rdfvalues import client as rdf_client
class GRRUserTest(test_lib.AFF4ObjectTest):
def testUserPasswords(self):
with aff4.FACTORY.Create(
"aff4:/users/test", users.GRRUser, token=self.token) as user:
user.SetPassword("<PASSWORD>")
user = aff4.FACTORY.Open(user.urn, token=self.token)
self.assertFalse(user.CheckPassword("<PASSWORD>"))
self.assertTrue(user.CheckPassword("<PASSWORD>"))
def testLabels(self):
with aff4.FACTORY.Create(
"aff4:/users/test", users.GRRUser, token=self.token) as user:
user.SetLabels("hello", "world", owner="GRR")
user = aff4.FACTORY.Open(user.urn, token=self.token)
self.assertListEqual(["hello", "world"], user.GetLabelsNames())
def testBackwardsCompatibility(self):
"""Old GRR installations used crypt based passwords.
Since crypt is not available on all platforms this has now been removed. We
still support it on those platforms which have crypt. Backwards support
means we can read and verify old crypt encoded passwords, but new passwords
are encoded with sha256.
"""
password = users.CryptedPassword()
# This is crypt.crypt("hello", "ax")
password._value = "<PASSWORD>"
self.assertFalse(password.CheckPassword("<PASSWORD>"))
self.assertTrue(password.CheckPassword("<PASSWORD>"))
class CheckAccessHelperTest(test_lib.GRRBaseTest):
def setUp(self):
super(CheckAccessHelperTest, self).setUp()
self.helper = user_managers.CheckAccessHelper("test")
self.subject = rdfvalue.RDFURN("aff4:/some/path")
def testReturnsFalseByDefault(self):
self.assertRaises(access_control.UnauthorizedAccess,
self.helper.CheckAccess, self.subject, self.token)
def testReturnsFalseOnFailedMatch(self):
self.helper.Allow("aff4:/some/otherpath")
self.assertRaises(access_control.UnauthorizedAccess,
self.helper.CheckAccess, self.subject, self.token)
def testReturnsTrueOnMatch(self):
self.helper.Allow("aff4:/some/path")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testReturnsTrueIfOneMatchFails1(self):
self.helper.Allow("aff4:/some/otherpath")
self.helper.Allow("aff4:/some/path")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testReturnsTrueIfOneMatchFails2(self):
self.helper.Allow("aff4:/some/path")
self.helper.Allow("aff4:/some/otherpath")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testFnmatchFormatIsUsedByDefault1(self):
self.helper.Allow("aff4:/some/*")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testFnmatchFormatIsUsedByDefault2(self):
self.helper.Allow("aff4:/some*")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testFnmatchPatternCorrectlyMatchesFilesBelowDirectory(self):
self.helper.Allow("aff4:/some/*")
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
self.assertRaises(access_control.UnauthorizedAccess,
self.helper.CheckAccess,
rdfvalue.RDFURN("aff4:/some"), self.token)
def testCustomCheckWorksCorrectly(self):
def CustomCheck(unused_subject, unused_token):
return True
self.helper.Allow("aff4:/some/path", CustomCheck)
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
def testCustomCheckFailsCorrectly(self):
def CustomCheck(unused_subject, unused_token):
raise access_control.UnauthorizedAccess("Problem")
self.helper.Allow("aff4:/some/path", CustomCheck)
self.assertRaises(access_control.UnauthorizedAccess,
self.helper.CheckAccess, self.subject, self.token)
def testCustomCheckAcceptsAdditionalArguments(self):
def CustomCheck(subject, unused_token, another_subject):
if subject == another_subject:
return True
else:
raise access_control.UnauthorizedAccess("Problem")
self.helper.Allow("aff4:/*", CustomCheck, self.subject)
self.assertRaises(access_control.UnauthorizedAccess,
self.helper.CheckAccess,
rdfvalue.RDFURN("aff4:/some/other/path"), self.token)
self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
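# Condensed sketch of the rule matching these tests exercise (an illustrative
# assumption, not GRR's actual CheckAccessHelper implementation):
#
#   import fnmatch
#   class AccessHelperSketch(object):
#       def __init__(self):
#           self.rules = []                     # (glob, optional check, extra args)
#       def Allow(self, glob, check=None, *args):
#           self.rules.append((glob, check, args))
#       def CheckAccess(self, subject, token):
#           for glob, check, args in self.rules:
#               if fnmatch.fnmatch(str(subject), glob):
#                   if check is None or check(subject, token, *args):
#                       return True
#           raise access_control.UnauthorizedAccess(
#               "No rule allowed access to %s" % subject)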
class AdminOnlyFlow(flow.GRRFlow):
AUTHORIZED_LABELS = ["admin"]
# Flow has to have a category otherwise FullAccessControlManager won't
# let non-supervisor users to run it at all (it will be considered
# externally inaccessible).
category = "/Test/"
class BasicAccessControlManagerTest(test_lib.GRRBaseTest):
"""Unit tests for FullAccessControlManager."""
def setUp(self):
super(BasicAccessControlManagerTest, self).setUp()
self.access_manager = user_managers.BasicAccessControlManager()
def testUserWithoutAuthorizedLabelsCanNotStartFlow(self):
self.CreateUser("nonadmin")
nonadmin_token = access_control.ACLToken(
username="noadmin", reason="testing")
with self.assertRaises(access_control.UnauthorizedAccess):
self.access_manager.CheckIfCanStartFlow(
nonadmin_token, AdminOnlyFlow.__name__, with_client_id=False)
with self.assertRaises(access_control.UnauthorizedAccess):
self.access_manager.CheckIfCanStartFlow(
nonadmin_token, AdminOnlyFlow.__name__, with_client_id=True)
def testUserWithAuthorizedLabelsCanStartFlow(self):
self.CreateAdminUser("admin")
admin_token = access_control.ACLToken(username="admin", reason="testing")
self.access_manager.CheckIfCanStartFlow(
admin_token, AdminOnlyFlow.__name__, with_client_id=False)
self.access_manager.CheckIfCanStartFlow(
admin_token, AdminOnlyFlow.__name__, with_client_id=True)
class FullAccessControlManagerTest(test_lib.GRRBaseTest):
"""Unit tests for FullAccessControlManager."""
def setUp(self):
super(FullAccessControlManagerTest, self).setUp()
self.access_manager = user_managers.FullAccessControlManager()
def Ok(self, subject, access="r"):
self.assertTrue(
self.access_manager.CheckDataStoreAccess(self.token, [subject], access))
def NotOk(self, subject, access="r"):
self.assertRaises(access_control.UnauthorizedAccess,
self.access_manager.CheckDataStoreAccess, self.token,
[subject], access)
def testReadSomePaths(self):
"""Tests some real world paths."""
access = "r"
self.Ok("aff4:/", access)
self.Ok("aff4:/users", access)
self.NotOk("aff4:/users/randomuser", access)
self.Ok("aff4:/blobs", access)
self.Ok("aff4:/blobs/12345678", access)
self.Ok("aff4:/FP", access)
self.Ok("aff4:/FP/12345678", access)
self.Ok("aff4:/files", access)
self.Ok("aff4:/files/12345678", access)
self.Ok("aff4:/ACL", access)
self.Ok("aff4:/ACL/randomuser", access)
self.Ok("aff4:/stats", access)
self.Ok("aff4:/stats/FileStoreStats", access)
self.Ok("aff4:/config", access)
self.Ok("aff4:/config/drivers", access)
self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
self.Ok("aff4:/flows", access)
self.Ok("aff4:/flows/F:12345678", access)
self.Ok("aff4:/hunts", access)
self.Ok("aff4:/hunts/H:12345678/C.1234567890123456", access)
self.Ok("aff4:/hunts/H:12345678/C.1234567890123456/F:AAAAAAAA", access)
self.Ok("aff4:/cron", access)
self.Ok("aff4:/cron/OSBreakDown", access)
self.Ok("aff4:/audit", access)
self.Ok("aff4:/audit/log", access)
self.Ok("aff4:/audit/logs", access)
self.Ok("aff4:/C.0000000000000001", access)
self.NotOk("aff4:/C.0000000000000001/fs/os", access)
self.NotOk("aff4:/C.0000000000000001/flows/F:12345678", access)
self.Ok("aff4:/tmp", access)
self.Ok("aff4:/tmp/C8FAFC0F", access)
def testQuerySomePaths(self):
"""Tests some real world paths."""
access = "rq"
self.NotOk("aff4:/", access)
self.NotOk("aff4:/users", access)
self.NotOk("aff4:/users/randomuser", access)
self.NotOk("aff4:/blobs", access)
self.NotOk("aff4:/FP", access)
self.NotOk("aff4:/files", access)
self.Ok("aff4:/files/hash/generic/sha256/" + "a" * 64, access)
self.Ok("aff4:/ACL", access)
self.Ok("aff4:/ACL/randomuser", access)
self.NotOk("aff4:/stats", access)
self.Ok("aff4:/config", access)
self.Ok("aff4:/config/drivers", access)
self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
self.NotOk("aff4:/flows", access)
self.Ok("aff4:/flows/W:12345678", access)
self.Ok("aff4:/hunts", access)
self.Ok("aff4:/hunts/H:12345678/C.1234567890123456", access)
self.Ok("aff4:/hunts/H:12345678/C.1234567890123456/F:AAAAAAAA", access)
self.Ok("aff4:/cron", access)
self.Ok("aff4:/cron/OSBreakDown", access)
self.NotOk("aff4:/audit", access)
self.Ok("aff4:/audit/logs", access)
self.Ok("aff4:/C.0000000000000001", access)
self.NotOk("aff4:/C.0000000000000001/fs/os", access)
self.NotOk("aff4:/C.0000000000000001/flows", access)
self.NotOk("aff4:/tmp", access)
def testSupervisorCanDoAnything(self):
token = access_control.ACLToken(username="unknown", supervisor=True)
self.assertTrue(
self.access_manager.CheckClientAccess(token,
"aff4:/C.0000000000000001"))
self.assertTrue(
self.access_manager.CheckHuntAccess(token, "aff4:/hunts/H:12344"))
self.assertTrue(
self.access_manager.CheckCronJobAccess(token, "aff4:/cron/blah"))
self.assertTrue(
self.access_manager.CheckIfCanStartFlow(
token, "SomeFlow", with_client_id=True))
self.assertTrue(
self.access_manager.CheckIfCanStartFlow(
token, "SomeFlow", with_client_id=False))
self.assertTrue(
self.access_manager.CheckDataStoreAccess(
token, ["aff4:/foo/bar"], requested_access="w"))
def testEmptySubjectShouldRaise(self):
token = access_control.ACLToken(username="unknown")
with self.assertRaises(ValueError):
self.access_manager.CheckClientAccess(token, "")
with self.assertRaises(ValueError):
self.access_manager.CheckHuntAccess(token, "")
with self.assertRaises(ValueError):
self.access_manager.CheckCronJobAccess(token, "")
with self.assertRaises(ValueError):
self.access_manager.CheckDataStoreAccess(
token, [""], requested_access="r")
def testCheckIfCanStartFlowReturnsTrueForClientFlowOnClient(self):
self.assertTrue(
self.access_manager.CheckIfCanStartFlow(
self.token, ClientFlowWithCategory.__name__, with_client_id=True))
def testCheckIfCanStartFlowRaisesForClientFlowWithoutCategoryOnClient(self):
with self.assertRaises(access_control.UnauthorizedAccess):
self.access_manager.CheckIfCanStartFlow(
self.token, ClientFlowWithoutCategory.__name__, with_client_id=True)
def testCheckIfCanStartFlowReturnsTrueForNotEnforcedFlowOnClient(self):
self.assertTrue(
self.access_manager.CheckIfCanStartFlow(
self.token, NotEnforcedFlow.__name__, with_client_id=True))
def testCheckIfCanStartFlowRaisesForClientFlowAsGlobal(self):
with self.assertRaises(access_control.UnauthorizedAccess):
self.access_manager.CheckIfCanStartFlow(
self.token, ClientFlowWithCategory.__name__, with_client_id=False)
def testCheckIfCanStartFlowRaisesForGlobalFlowWithoutCategoryAsGlobal(self):
with self.assertRaises(access_control.UnauthorizedAccess):
self.access_manager.CheckIfCanStartFlow(
self.token, GlobalFlowWithoutCategory.__name__, with_client_id=False)
def testCheckIfCanStartFlowReturnsTrueForGlobalFlowWithCategoryAsGlobal(self):
self.assertTrue(
self.access_manager.CheckIfCanStartFlow(
self.token, GlobalFlowWithCategory.__name__, with_client_id=False))
def testNoReasonShouldSearchForApprovals(self):
token_without_reason = access_control.ACLToken(username="unknown")
token_with_reason = access_control.ACLToken(
username="unknown", reason="I have one!")
client_id = "aff4:/C.0000000000000001"
self.RequestAndGrantClientApproval(client_id, token=token_with_reason)
self.access_manager.CheckClientAccess(token_without_reason, client_id)
# Check that token's reason got modified in the process:
self.assertEqual(token_without_reason.reason, "I have one!")
class ValidateTokenTest(test_lib.GRRBaseTest):
"""Tests for ValidateToken()."""
def testTokenWithUsernameAndReasonIsValid(self):
token = access_control.ACLToken(username="test", reason="For testing")
user_managers.ValidateToken(token, "aff4:/C.0000000000000001")
def testNoneTokenIsNotValid(self):
with self.assertRaises(access_control.UnauthorizedAccess):
user_managers.ValidateToken(None, "aff4:/C.0000000000000001")
def testTokenWithoutUsernameIsNotValid(self):
token = access_control.ACLToken(reason="For testing")
with self.assertRaises(access_control.UnauthorizedAccess):
user_managers.ValidateToken(token, "aff4:/C.0000000000000001")
class ClientFlowWithoutCategory(flow.GRRFlow):
pass
class ClientFlowWithCategory(flow.GRRFlow):
category = "/Test/"
class NotEnforcedFlow(flow.GRRFlow):
ACL_ENFORCED = False
class GlobalFlowWithoutCategory(flow.GRRGlobalFlow):
pass
class GlobalFlowWithCategory(flow.GRRGlobalFlow):
category = "/Test/"
class FullAccessControlManagerIntegrationTest(test_lib.GRRBaseTest):
"""Integration tests for FullAccessControlManager.
This test differs from FullAccessControlManagerTest, as it doesn't call
FullAccessControlManager's methods directly, but checks it through
calls to GRR's functionality that has to check access via access control
manager.
"""
install_mock_acl = False
def setUp(self):
super(FullAccessControlManagerIntegrationTest, self).setUp()
data_store.DB.security_manager = user_managers.FullAccessControlManager()
def ACLChecksDisabled(self):
return test_lib.ACLChecksDisabledContextManager()
def RevokeClientApproval(self, approval_urn, token, remove_from_cache=True):
with aff4.FACTORY.Open(
approval_urn, mode="rw", token=self.token.SetUID()) as approval_request:
approval_request.DeleteAttribute(approval_request.Schema.APPROVER)
if remove_from_cache:
data_store.DB.security_manager.acl_cache.ExpireObject(
rdfvalue.RDFURN(approval_urn.Dirname()))
def CreateHuntApproval(self, hunt_urn, token, admin=False):
approval_urn = aff4.ROOT_URN.Add("ACL").Add(hunt_urn.Path()).Add(
token.username).Add(utils.EncodeReasonString(token.reason))
with aff4.FACTORY.Create(
approval_urn,
security.HuntApproval,
mode="rw",
token=self.token.SetUID()) as approval_request:
approval_request.AddAttribute(
approval_request.Schema.APPROVER("Approver1"))
approval_request.AddAttribute(
approval_request.Schema.APPROVER("Approver2"))
if admin:
self.CreateAdminUser("Approver1")
def CreateSampleHunt(self):
"""Creats SampleHunt, writes it to the data store and returns it's id."""
with hunts.GRRHunt.StartHunt(
hunt_name="SampleHunt", token=self.token.SetUID()) as hunt:
return hunt.session_id
def testSimpleAccess(self):
"""Tests that simple access requires a token."""
client_urn = rdf_client.ClientURN("C.%016X" % 0)
# These should raise for a lack of token
for urn, mode in [("aff4:/ACL", "r"), ("aff4:/config/drivers", "r"),
("aff4:/", "rw"), (client_urn, "r")]:
with self.assertRaises(access_control.UnauthorizedAccess):
aff4.FACTORY.Open(urn, mode=mode)
# These should raise for trying to get write access.
for urn, mode in [("aff4:/ACL", "rw"), (client_urn, "rw")]:
fd = aff4.FACTORY.Open(urn, mode=mode, token=self.token)
# Force cache flush.
fd._dirty = True
self.assertRaises(access_control.UnauthorizedAccess, fd.Close)
# These should raise for access without a token:
for urn, mode in [(client_urn.Add("flows").Add("W:1234"), "r"),
(client_urn.Add("/fs"), "r")]:
with self.assertRaises(access_control.UnauthorizedAccess):
aff4.FACTORY.Open(urn, mode=mode)
# Even if a token is provided - it is not authorized.
with self.assertRaises(access_control.UnauthorizedAccess):
aff4.FACTORY.Open(urn, mode=mode, token=self.token)
def testSupervisorToken(self):
"""Tests that the supervisor token overrides the approvals."""
urn = rdf_client.ClientURN("C.%016X" % 0).Add("/fs/os/c")
self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn)
super_token = access_control.ACLToken(username="test")
super_token.supervisor = True
aff4.FACTORY.Open(urn, mode="rw", token=super_token)
def testExpiredTokens(self):
"""Tests that expired tokens are rejected."""
urn = rdf_client.ClientURN("C.%016X" % 0).Add("/fs/os/c")
self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open, urn)
with test_lib.FakeTime(100):
# Token expires in 5 seconds.
super_token = access_control.ACLToken(username="test", expiry=105)
super_token.supervisor = True
# This should work since token is a super token.
aff4.FACTORY.Open(urn, mode="rw", token=super_token)
# Change the time to 200
with test_lib.FakeTime(200):
# Should be expired now.
with self.assertRaises(access_control.ExpiryError):
aff4.FACTORY.Open(urn, token=super_token, mode="rw")
def testApprovalExpiry(self):
"""Tests that approvals expire after the correct time."""
client_id = "C.%016X" % 0
urn = rdf_client.ClientURN(client_id).Add("/fs/os/c")
token = access_control.ACLToken(username="test", reason="For testing")
with self.assertRaises(access_control.UnauthorizedAccess):
aff4.FACTORY.Open(urn, mode="rw", token=token)
with test_lib.FakeTime(100.0, increment=1e-3):
self.RequestAndGrantClientApproval(client_id, token)
# This should work now.
aff4.FACTORY.Open(urn, mode="rw", token=token)
token_expiry = config_lib.CONFIG["ACL.token_expiry"]
# This is close to expiry but should still work.
data_store.DB.security_manager.acl_cache.Flush()
with test_lib.FakeTime(100.0 + token_expiry - 100.0):
aff4.FACTORY.Open(urn, mode="rw", token=token)
# Past expiry, should fail.
data_store.DB.security_manager.acl_cache.Flush()
with test_lib.FakeTime(100.0 + token_expiry + 100.0):
with self.assertRaises(access_control.UnauthorizedAccess):
aff4.FACTORY.Open(urn, mode="rw", token=token)
def testClientApproval(self):
"""Tests that we can create an approval object to access clients."""
client_id = "C.%016X" % 0
urn = rdf_client.ClientURN(client_id).Add("/fs")
token = access_control.ACLToken(username="test", reason="For testing")
self.assertRaises(
access_control.UnauthorizedAccess,
aff4.FACTORY.Open,
urn,
None,
"rw",
token=token)
approval_urn = self.RequestAndGrantClientApproval(client_id, token)
fd = aff4.FACTORY.Open(urn, None, "rw", token=token)
fd.Close()
self.RevokeClientApproval(approval_urn, token)
self.assertRaises(
access_control.UnauthorizedAccess,
aff4.FACTORY.Open,
urn,
None,
"rw",
token=token)
def testHuntApproval(self):
"""Tests that we can create an approval object to run hunts."""
token = access_control.ACLToken(username="test", reason="For testing")
hunt_urn = self.CreateSampleHunt()
self.assertRaisesRegexp(
access_control.UnauthorizedAccess,
"No approval found for",
flow.GRRFlow.StartFlow,
flow_name="StartHuntFlow",
token=token,
hunt_urn=hunt_urn)
self.CreateHuntApproval(hunt_urn, token, admin=False)
self.assertRaisesRegexp(
access_control.UnauthorizedAccess,
r"At least 1 approver\(s\) should have 'admin' label.",
flow.GRRFlow.StartFlow,
flow_name="StartHuntFlow",
token=token,
hunt_urn=hunt_urn)
self.CreateHuntApproval(hunt_urn, token, admin=True)
flow.GRRFlow.StartFlow(
flow_name="StartHuntFlow", token=token, hunt_urn=hunt_urn)
def testUserAccess(self):
"""Tests access to user objects."""
token = access_control.ACLToken(username="test", reason="For testing")
urn = aff4.ROOT_URN.Add("users")
# We cannot open any user account.
self.assertRaises(access_control.UnauthorizedAccess, aff4.FACTORY.Open,
urn.Add("some_user"), None, "rw", False, token)
# But we can open our own.
aff4.FACTORY.Open(urn.Add("test"), mode="rw", token=token)
# And we can also access our labels.
label_urn = urn.Add("test").Add("labels")
labels = aff4.FACTORY.Open(label_urn, mode="rw", token=token)
# But we cannot write to them.
l = labels.Schema.LABELS()
l.AddLabel(aff4_rdfvalues.AFF4ObjectLabel(name="admin", owner="GRR"))
labels.Set(labels.Schema.LABELS, l)
self.assertRaises(access_control.UnauthorizedAccess, labels.Close)
def testForemanAccess(self):
"""Test admin users can access the foreman."""
token = access_control.ACLToken(username="test", reason="For testing")
self.assertRaises(
access_control.UnauthorizedAccess,
aff4.FACTORY.Open,
"aff4:/foreman",
token=token)
| |
<filename>backend/resources/packageManagement.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:jingtongyu
# datetime: 2020/6/8 10:14 PM
# software: PyCharm
from flask import g
from models.packages import PackageModel
from models.carts import CartsModel
from flask_restful import Resource, inputs
from models.mallUsers import MallUsersModel
from flask_restful.reqparse import RequestParser
from common import code, pretty_result
from common.decorators import login_required
from models.logs import LogsModel
from werkzeug.datastructures import FileStorage
from config import config
from models.products import ProductsModel
import datetime
import os
filePath = r'./download/package/'
if not os.path.exists(filePath):
os.makedirs(filePath)
class PackageManagementResource(Resource):
"""
Package management resource class.
"""
def __init__(self):
self.parser = RequestParser()
# @login_required
def get(self):
"""
Get the package management list information.
:return: json
"""
self.parser.add_argument("pageNo", type=int, required=True, location="args",
help='pageNo is required')
self.parser.add_argument("pageSize", type=int, required=True, location="args", help='pageSize is required')
self.parser.add_argument("name", type=str, required=True, location="args", help='name is required')
self.parser.add_argument("Pclass[]", type=str, location="args", action='append', help='Pclass is required')
self.parser.add_argument("age", type=str, required=True, location="args", help='age is required')
self.parser.add_argument("size", type=str, required=True, location="args", help='size is required')
self.parser.add_argument("gender", type=str, required=True, location="args", help='gender is required')
args = self.parser.parse_args()
# package_list = PackageModel.paginate(PackageModel, args.pageNo, args.pageSize)
items = []
# totalCount = package_list.total
# package_list = package_list.items
# if args.name and args.Pclass:
Pclass = args['Pclass[]']
if(not args['Pclass[]']):
Pclass = ['coat', 'pants', 'skirt']
package_list = PackageModel.filter_by_name_gender_size_age_Pclass(PackageModel, args.name, args.gender, args.size, args.age, Pclass)
# package_list = PackageModel.filter_by_name_Pclass(PackageModel, args.name, args.Pclass)
totalCount = len(package_list)
for package in package_list:
fileList = []
fileList2 = []
url = config.domain +"/api/v1/pictureManagement/get?type=package&id=" + package.picture
fileList.append({"name": package.picture, "url": url})
description = package.description.split(",")
for i in description:
url2 = config.domain + "/api/v1/pictureManagement/get?type=package&id=" + i
fileList2.append({"name": i, "url": url2})
items.append(
{
'id': package.id,
'name': package.name,
'no': package.no,
'type': package.type,
'gender': package.gender,
'size': package.size,
'age': package.age,
'Pclass': package.Pclass,
'count': package.count,
# 'price': package.price,
# 'total': package.total,
'point': package.point,
'description': fileList2,
'picture': fileList,
'remark': package.remark,
'updateUser': package.updateUser,
'updateTime': package.update_time.strftime("%m/%d/%Y %H:%M:%S")
}
)
data = {
'pageNo': args.pageNo,
'pageSize': args.pageSize,
'totalCount': totalCount,
'items': items
}
return pretty_result(code.OK, data=data, msg='Package configuration information retrieved successfully!')
@login_required
def post(self):
self.parser.add_argument("name", type=str, required=True, location="form", help='name is required')
self.parser.add_argument("no", type=str, required=True, location="form", help='no is required')
self.parser.add_argument("type", type=str, required=True, location="form", help='type is required')
self.parser.add_argument("gender", type=str, required=True, location="form", help='gender is required')
self.parser.add_argument("size", type=str, required=True, location="form", help='size is required')
self.parser.add_argument("age", type=str, required=True, location="form", help='age is required')
self.parser.add_argument("Pclass", type=str, required=True, location="form", help='Pclass is required')
self.parser.add_argument("count", type=str, required=True, location="form", help='count is required')
# self.parser.add_argument("price", type=int, required=True, location="form", help='price is required')
# self.parser.add_argument("total", type=int, required=True, location="form", help='total is required')
self.parser.add_argument("point", type=int, required=True, location="form", help='point is required')
self.parser.add_argument("picture", type=FileStorage, required=True, location='files', action='append',
help='picture is required')
self.parser.add_argument("description", type=FileStorage, required=True, location='files', action='append',
help='description is required')
self.parser.add_argument("remark", type=str, location="form", help='remark is required')
self.parser.add_argument("updateUser", type=str, required=True, location="form", help='updateUser is required')
self.parser.add_argument("removeList", type=str, required=True, location="form", help='removelist is required')
self.parser.add_argument("removeList2", type=str, required=True, location="form", help='removelist2 is required')
args = self.parser.parse_args()
packageInfo = PackageModel.query.filter_by(name=args.name).all()
if packageInfo:
return pretty_result(code.ERROR, msg='A package with this name has already been added!')
removeList = args.removeList.split(",")
pictureList = ''
for item in args.picture:
if item.filename in removeList:
continue
new_fname = filePath + str(item.filename) + '.png'
item.save(new_fname)
pictureList = pictureList + str(item.filename) + ","
pictureList = pictureList[:-1]
removeList2 = args.removeList2.split(",")
pictureList2 = ''
for item in args.description:
if item.filename in removeList2:
continue
new_fname = filePath + str(item.filename) + '.png'
item.save(new_fname)
pictureList2 = pictureList2 + str(item.filename) + ","
pictureList2 = pictureList2[:-1]
Package = PackageModel(name=args.name, no=args.no, gender=args.gender, size=args.size, age=args.age,
Pclass=args.Pclass, count=args.count, type=args.type, description=pictureList2,
point=args.point, picture=pictureList, remark=args.remark, updateUser=args.updateUser)
PackageModel.add(PackageModel, Package)
if Package.id:
content = str({"name": args.name, "gender": args.gender,"no":args.no, "size": args.size, "age": args.age,
"Pclass": args.Pclass, "count":args.count, "type": args.type, "description": pictureList2,
"remark": args.remark, "point": args.point, "picture":pictureList,"updateUser": args.updateUser})
log = LogsModel(username=args.updateUser, model="package", action="add", content=content)
LogsModel.add(LogsModel, log)
return pretty_result(code.OK, msg='Package information added successfully!')
else:
return pretty_result(code.ERROR, msg='Failed to add package information!')
@login_required
def put(self):
self.parser.add_argument("id", type=int, required=True, location="form", help='id is required')
self.parser.add_argument("no", type=str, required=True, location="form", help='no is required')
self.parser.add_argument("name", type=str, required=True, location="form", help='name is required')
self.parser.add_argument("type", type=str, required=True, location="form", help='type is required')
self.parser.add_argument("gender", type=str, required=True, location="form", help='gender is required')
self.parser.add_argument("size", type=str, required=True, location="form", help='size is required')
self.parser.add_argument("age", type=str, required=True, location="form", help='age is required')
self.parser.add_argument("Pclass", type=str, required=True, location="form", help='Pclass is required')
self.parser.add_argument("count", type=str, required=True, location="form", help='count is required')
# self.parser.add_argument("price", type=int, required=True, location="form", help='price is required')
# self.parser.add_argument("total", type=int, required=True, location="form", help='total is required')
self.parser.add_argument("picture", type=FileStorage, location='files', action='append',
help='picture is file')
self.parser.add_argument("description", type=FileStorage, location='files', action='append',
help='description is file')
self.parser.add_argument("point", type=int, required=True, location="form", help='point is required')
self.parser.add_argument("remark", type=str, location="form", help='remark is required')
self.parser.add_argument("updateUser", type=str, required=True, location="form", help='updateUser is required')
self.parser.add_argument("removeList", type=str, required=True, location="form", help='removelist is required')
self.parser.add_argument("removeList2", type=str, required=True, location="form", help='removelist2 is required')
args = self.parser.parse_args()
packageInfo = PackageModel.query.filter_by(name=args.name).all()
for item in packageInfo:
if item.id != args.id:
return pretty_result(code.ERROR, msg='This package has already been added!')
packageInfo = PackageModel.query.filter_by(id=args.id).first()
packagePictureList = packageInfo.picture.split(",")
removeList = args.removeList.split(",")
pictureList = ''
for j in removeList:
if j in packagePictureList:
packagePictureList.remove(j)
old_fname = filePath + str(j) + '.png'
if os.path.exists(old_fname):
os.remove(old_fname)
else:
print(str(j) + " the file does not exist")
if args.picture:
for item in args.picture:
if item.filename in removeList:
continue
new_fname = filePath + str(item.filename) + '.png'
item.save(new_fname)
packagePictureList.append(str(item.filename))
pictureList = ','.join(packagePictureList)
packagePictureList2 = packageInfo.description.split(",")
removeList2 = args.removeList2.split(",")
pictureList2 = ''
for j in removeList2:
if j in packagePictureList2:
packagePictureList2.remove(j)
old_fname = filePath + str(j) + '.png'
if os.path.exists(old_fname):
os.remove(old_fname)
else:
print(str(j) + " the file does not exist")
if args.description:
for item in args.description:
if item.filename in removeList2:
continue
new_fname = filePath + str(item.filename) + '.png'
item.save(new_fname)
packagePictureList2.append(str(item.filename))
pictureList2 = ','.join(packagePictureList2)
packageInfo.id = args.id
packageInfo.no = args.no
packageInfo.name = args.name
packageInfo.type = args.type
packageInfo.gender = args.gender
packageInfo.size = args.size
packageInfo.age = args.age
packageInfo.Pclass = args.Pclass
# packageInfo.price = args.price
# packageInfo.total = args.total
packageInfo.point = args.point
packageInfo.picture = pictureList
packageInfo.description = pictureList2
packageInfo.remark = args.remark
packageInfo.updateUser = args.updateUser
PackageModel.update(packageInfo)
content = str({"name": args.name, "no":args.no, "type": args.type, "gender": args.gender,
"size": args.size, "age": args.age, "Pclass": args.Pclass,
"point": args.point,"picture": pictureList, "description": pictureList2, "remark": args.remark,
"updateUser": args.updateUser})
log = LogsModel(username=args.updateUser, model="package", action="edit", content=content)
LogsModel.add(LogsModel, log)
return pretty_result(code.OK, msg='Package information updated successfully!')
@login_required
def delete(self):
self.parser.add_argument("ids", type=list, required=True, location="json", help='ids is required')
self.parser.add_argument("updateUser", type=str, required=True, location="json", help='updateUser is required')
self.parser.add_argument("content", type=list, required=True, location="json", help='content is required')
args = self.parser.parse_args()
PackageModel.delete(PackageModel, args.ids)
for item in args.ids:
packageInfo = PackageModel.query.filter_by(id=item).first()
packagePictureList = packageInfo.picture.split(",")
for j in packagePictureList:
old_fname = filePath + str(j) + '.png'
if os.path.exists(old_fname):
os.remove(old_fname)
else:
print(str(j) + " the file does not exist")
packagePictureList2 = packageInfo.description.split(",")
for j in packagePictureList2:
old_fname = filePath + str(j) + '.png'
if os.path.exists(old_fname):
os.remove(old_fname)
else:
print(str(j) + " the file does not exist")
content = str(args.content)
if len(str(args.content)) > 500:
content = str(args.ids)
log = LogsModel(username=args.updateUser, model="package", action="delete", content=content)
LogsModel.add(LogsModel, log)
return pretty_result(code.OK, msg='Package information deleted successfully!')
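# A minimal client-side sketch of the query string the GET handler above
# parses. The host and route below are assumptions; the real URL prefix is
# registered elsewhere and is not part of this file.
def _example_list_packages(base_url='http://localhost:5000/api/v1/packageManagement'):
    import requests  # assumed to be available in the calling environment
    params = {
        'pageNo': 1, 'pageSize': 10, 'name': '',
        'gender': 'male', 'size': 'M', 'age': '3',
        # Omit 'Pclass[]' to fall back to ['coat', 'pants', 'skirt'] on the server.
        'Pclass[]': ['coat', 'pants'],
    }
    resp = requests.get(base_url, params=params)
    return resp.json()['data']['items']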
class PackageManagementNameResource(Resource):
"""
Package name resource class.
"""
def __init__(self):
self.parser = RequestParser()
@login_required
def get(self):
"""
Check whether the package name already exists.
:return: json
"""
self.parser.add_argument("name", type=str, required=True, location="args",
help='name is required')
args = self.parser.parse_args()
packageInfo = PackageModel.query.filter_by(name=args.name).all()
if packageInfo:
return pretty_result(code.OK, data=False, msg='This package name has already been added!')
return pretty_result(code.OK, data=True, msg='This package name does not exist!')
class PackageDetailResource(Resource):
"""
Package detail resource class.
"""
def __init__(self):
self.parser = RequestParser()
def get(self):
"""
Get package detail information.
:return: json
"""
self.parser.add_argument("no", type=str, required=True, location="args",
help='no is required')
args = self.parser.parse_args()
package_list = PackageModel.query.filter_by(no=args.no).all()
items = []
for package in package_list:
fileList = []
fileList2 = []
url = config.domain + "/api/v1/pictureManagement/get?type=package&id=" + package.picture
fileList.append({"name": package.picture, "url": url})
description = package.description.split(",")
for i in description:
url2 = config.domain + "/api/v1/pictureManagement/get?type=package&id=" + i
fileList2.append({"name": i, "url": url2})
items.append(
{
'id': package.id,
'no': package.no,
'name': package.name,
'type': package.type,
'gender': package.gender,
'size': package.size,
'age': package.age,
'Pclass': package.Pclass,
'count': package.count,
# 'price': package.price,
# 'total': package.total,
'point': package.point,
'picture': fileList,
'description': fileList2,
'remark': package.remark,
'updateUser': package.updateUser,
'updateTime': package.update_time.strftime("%m/%d/%Y %H:%M:%S")
}
)
if items:
return pretty_result(code.OK, data=items, msg='Package detail retrieved successfully!')
else:
return pretty_result(code.ERROR, data=[], msg='The package no does not exist!')
class PackageIDResource(Resource):
"""
Package No generation resource class.
"""
def __init__(self):
self.parser = RequestParser()
@login_required
def get(self):
"""
Generate the next package No.
:return: json
"""
self.parser.add_argument("type", type=str, required=True, location="args", help='ids is required')
args = self.parser.parse_args()
id = PackageModel.get_id(PackageModel)
no = ""
if not id[0][0]:
no = args.type + "%04d" % 1
else:
no = args.type + "%04d" % (id[0][0] + 1)
return pretty_result(code.OK, data=no, msg='Package No generated successfully!')
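# Worked example of the numbering scheme above, with a hypothetical prefix:
# if the largest stored id is 12 and args.type is 'PKG', the result is
# 'PKG' + '%04d' % 13 == 'PKG0013'; with no rows yet it falls back to 'PKG0001'.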
class PackageAwardResource(Resource):
"""
Package award (lottery draw) resource class.
"""
def __init__(self):
self.parser = RequestParser()
@login_required
def post(self):
"""
Lottery draw information.
:return: json
"""
self.parser.add_argument("gender", type=str, required=True, location="json",
help='gender is required')
self.parser.add_argument("size", type=str, required=True, location="json",
help='size is required')
self.parser.add_argument("age", type=str, required=True, location="json",
help='age is required')
self.parser.add_argument("Pclass", type=str, required=True, location="json",
help='Pclass is required')
self.parser.add_argument("count", type=int, required=True, location="json",
help='count is required')
self.parser.add_argument("point", type=int, required=True, location="json",
help='point is required')
from lib.game_agent import GameAgent
from lib.machine_learning.context_classification.context_classifiers import CNNInceptionV3ContextClassifier
from lib.sprite import Sprite
import lib.cv
import lib.ocr
from .helpers.ocr import preprocess as ocr_preprocess
from .helpers.game import parse_game_board, generate_game_board_deltas, score_game_board, score_game_board_vector, generate_boolean_game_board_deltas, display_game_board
import offshoot
import numpy as np
import h5py
import xtermcolor
import skimage.io
import sklearn
from datetime import datetime, timedelta
import time
import uuid
import random
import collections
import pickle
import os
import subprocess
import shlex
class YouMustBuildABoatGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers["PLAY"] = self.handle_play
self.frame_handlers["PLAY_BOT"] = self.handle_play_bot
self.frame_handlers["PLAY_RANDOM"] = self.handle_play_random
self.frame_handler_setups["PLAY"] = self.setup_play
self.frame_handler_setups["PLAY_BOT"] = self.setup_play_bot
self.analytics_client = None
@property
def game_contexts(self):
return dict(
)
@property
def rows(self):
return ["A", "B", "C", "D", "E", "F"]
@property
def columns(self):
return [1, 2, 3, 4, 5, 6, 7, 8]
@property
def match_milestone_sfx_mapping(self):
return {
10: "/home/serpent/SFX/first_blood.wav",
20: "/home/serpent/SFX/Double_Kill.wav",
30: "/home/serpent/SFX/Killing_Spree.wav",
40: "/home/serpent/SFX/Dominating.wav",
50: "/home/serpent/SFX/MegaKill.wav",
60: "/home/serpent/SFX/Unstoppable.wav",
70: "/home/serpent/SFX/WhickedSick.wav",
80: "/home/serpent/SFX/MonsterKill.wav",
90: "/home/serpent/SFX/GodLike.wav",
100: "/home/serpent/SFX/Combowhore.wav"
}
def setup_play(self):
plugin_path = offshoot.config["file_paths"]["plugins"]
ocr_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_ocr.model"
self.machine_learning_models["ocr_classifier"] = self.load_machine_learning_model(ocr_classifier_path)
context_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_context.model"
context_classifier = CNNInceptionV3ContextClassifier(input_shape=(384, 512, 3))
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
self.ocr_policy = lib.ocr.OCRPolicy(
ocr_classifier=self.machine_learning_models["ocr_classifier"],
character_window_shape="rectangle",
character_window_size=(7, 2),
word_window_shape="rectangle",
word_window_size=(1, 10),
preprocessing_function=ocr_preprocess,
preprocessing_options=dict(
contrast_stretch_percentiles=(80, 100)
)
)
self.game_board = np.zeros((6, 8))
self.previous_game_board = np.zeros((6, 8))
self.mode = "PREDICT" # "RANDOM"
self.current_run = 0
self.current_run_started_at = None
self.current_attempts = 0
self.current_matches = 0
self.last_run_duration = 0
self.last_attempts = 0
self.last_matches = 0
self.record_random_duration = 0
self.record_random_duration_run = 0
self.record_random_matches = 0
self.record_random_matches_run = 0
self.record_random_duration_values = collections.deque(maxlen=1000)
self.record_random_matches_values = collections.deque(maxlen=1000)
self.record_predict_duration = 0
self.record_predict_duration_run = 0
self.record_predict_matches = 0
self.record_predict_matches_run = 0
self.record_predict_duration_values = collections.deque(maxlen=10)
self.record_predict_matches_values = collections.deque(maxlen=10)
self.game_boards = list()
if os.path.isfile("datasets/ymbab_matching.model"):
with open("datasets/ymbab_matching.model", "rb") as f:
self.model = pickle.loads(f.read())
else:
self.model = sklearn.linear_model.SGDRegressor()
def setup_play_bot(self):
plugin_path = offshoot.config["file_paths"]["plugins"]
ocr_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_ocr.model"
self.machine_learning_models["ocr_classifier"] = self.load_machine_learning_model(ocr_classifier_path)
context_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_context.model"
context_classifier = CNNInceptionV3ContextClassifier(input_shape=(384, 512, 3))
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
self.ocr_policy = lib.ocr.OCRPolicy(
ocr_classifier=self.machine_learning_models["ocr_classifier"],
character_window_shape="rectangle",
character_window_size=(7, 2),
word_window_shape="rectangle",
word_window_size=(1, 10),
preprocessing_function=ocr_preprocess,
preprocessing_options=dict(
contrast_stretch_percentiles=(80, 100)
)
)
self.game_board = np.zeros((6, 8))
self.previous_game_board = np.zeros((6, 8))
def handle_play(self, game_frame):
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
if context is None:
return
if context == "game_over":
self.last_run_duration = (datetime.utcnow() - self.current_run_started_at).seconds if self.current_run_started_at else 0
self.last_attempts = self.current_attempts if self.current_attempts > 0 else 1
self.last_matches = self.current_matches
if self.current_run > 0:
if self.mode == "RANDOM":
self.record_random_duration_values.appendleft(self.last_run_duration)
self.record_random_matches_values.appendleft(self.last_matches)
if self.last_run_duration > self.record_random_duration:
self.record_random_duration = self.last_run_duration
self.record_random_duration_run = self.current_run
if self.last_matches > self.record_random_matches:
self.record_random_matches = self.last_matches
self.record_random_matches_run = self.current_run
elif self.mode == "PREDICT":
self.record_predict_duration_values.appendleft(self.last_run_duration)
self.record_predict_matches_values.appendleft(self.last_matches)
record = False
if self.last_run_duration > self.record_predict_duration:
record = True
self.record_predict_duration = self.last_run_duration
self.record_predict_duration_run = self.current_run
if self.last_matches > self.record_predict_matches:
record = True
self.record_predict_matches = self.last_matches
self.record_predict_matches_run = self.current_run
if record:
subprocess.Popen(shlex.split(f"play -v 0.45 /home/serpent/SFX/HolyShit_F.wav"))
if self.last_matches < 10:
subprocess.Popen(shlex.split(f"play -v 0.45 /home/serpent/SFX/Humiliating_defeat.wav"))
print("\033c")
game_board_vector_data = list()
scores = list()
if len(self.game_boards):
print(f"GENERATING TRAINING DATASETS: 0 / 1")
print(f"NEXT RUN: {self.current_run + 1}")
game_board_deltas = generate_game_board_deltas(self.game_boards[-1])
boolean_game_board_deltas = generate_boolean_game_board_deltas(game_board_deltas)
for game_move, boolean_game_boards in boolean_game_board_deltas.items():
for boolean_game_board in boolean_game_boards:
for i in range(6):
row = boolean_game_board[i, :]
game_board_vector_data.append(row)
scores.append(score_game_board_vector(row))
for i in range(8):
column = boolean_game_board[:, i]
column = np.append(column, [0, 0])
game_board_vector_data.append(column)
scores.append(score_game_board_vector(column))
print("\033c")
print(f"GENERATING TRAINING DATASETS: 1 / 1")
print(f"NEXT RUN: {self.current_run + 1}")
with h5py.File(f"datasets/ymbab/ymbab_run_{self.current_run}.h5", "w") as f:
for index, data in enumerate(game_board_vector_data):
f.create_dataset(f"{index}", data=data)
for index, data in enumerate(scores):
f.create_dataset(f"{index}_score", data=data)
self.game_boards = list()
self.current_run += 1
if self.current_run % 10 == 0:
self.mode = "PREDICT"
print("\033c")
print("UPDATING MODEL WITH LATEST COLLECTED DATA...")
print(f"NEXT RUN: {self.current_run}")
for i in range(9 if self.current_run <= 10 else 10):
data_file_path = f"datasets/ymbab/ymbab_run_{self.current_run - (i + 1)}.h5"
data = list()
scores = list()
with h5py.File(data_file_path, "r") as f:
count = len(f.items()) // 2
for ii in range(count):
data.append(f[f"{ii}"][:])
scores.append(f[f"{ii}_score"].value)
if len(data):
self.model.partial_fit(data, scores)
serialized_model = pickle.dumps(self.model)
with open("datasets/ymbab_matching.model", "wb") as f:
f.write(serialized_model)
else:
self.mode = "PREDICT"
print("\033c")
self.input_controller.click_screen_region(screen_region="GAME_OVER_RUN_AGAIN", game=self.game)
time.sleep(2)
self.current_run_started_at = datetime.utcnow()
self.current_attempts = 0
self.current_matches = 0
elif context.startswith("level_"):
self.previous_game_board = self.game_board
self.game_board = parse_game_board(game_frame.frame)
unknown_tile_coordinates = np.argwhere(self.game_board == 0)
if 0 < unknown_tile_coordinates.size <= 10:
coordinates = random.choice(unknown_tile_coordinates)
tile_screen_region = f"GAME_BOARD_{self.rows[coordinates[0]]}{self.columns[coordinates[1]]}"
self.input_controller.click_screen_region(screen_region=tile_screen_region, game=self.game)
self.current_attempts += 1
game_board_deltas = generate_game_board_deltas(self.game_board)
if self.game_board[self.game_board == 0].size < 3:
self.game_boards.append(self.game_board)
if self.mode == "PREDICT":
boolean_game_board_deltas = generate_boolean_game_board_deltas(game_board_deltas, obfuscate=False)
top_game_move_score = -10
top_game_move = None
game_move_scores = dict()
for game_move, boolean_game_boards in boolean_game_board_deltas.items():
split_game_move = game_move.split(" to ")
axis = "ROW" if split_game_move[0][0] == split_game_move[1][0] else "COLUMN"
total_score = 0
for boolean_game_board in boolean_game_boards:
input_vectors = list()
if axis == "ROW":
row_index = self.rows.index(split_game_move[0][0])
row = boolean_game_board[row_index, :]
input_vectors.append(row)
for ii in range(8):
column = boolean_game_board[:, ii]
column = np.append(column, [False, False])
input_vectors.append(column)
elif axis == "COLUMN":
for ii in range(6):
row = boolean_game_board[ii, :]
input_vectors.append(row)
column_index = self.columns.index(int(split_game_move[0][1]))
column = boolean_game_board[:, column_index]
column = np.append(column, [False, False])
input_vectors.append(column)
prediction = self.model.predict(input_vectors)
total_score += max(prediction)
game_move_scores[game_move] = total_score
if total_score > top_game_move_score:
top_game_move_score = total_score
top_game_move = game_move
if top_game_move is None:
return False
start_coordinate, end_coordinate = top_game_move.split(" to ")
start_screen_region = f"GAME_BOARD_{start_coordinate}"
end_screen_region = f"GAME_BOARD_{end_coordinate}"
elif self.mode == "RANDOM":
axis = random.choice(["ROW", "COLUMN"])
if axis == "ROW":
row = random.choice(self.rows)
column = 1
end_column = 1 + (random.choice(range(7)) + 1)
start_screen_region = f"GAME_BOARD_{row}{column}"
end_screen_region = f"GAME_BOARD_{row}{end_column}"
else:
column = random.choice(self.columns)
row = "A"
end_row = self.rows[random.choice(range(5)) + 1]
start_screen_region = f"GAME_BOARD_{row}{column}"
end_screen_region = f"GAME_BOARD_{end_row}{column}"
start_coordinate = start_screen_region.split('_')[-1]
end_coordinate = end_screen_region.split('_')[-1]
game_board_key = f"{start_coordinate} to {end_coordinate}"
game_board_delta = None
for board_delta in game_board_deltas:
if board_delta[0] == game_board_key:
game_board_delta = board_delta[1]
break
if score_game_board(game_board_delta) > 0:
self.current_matches += 1
if self.current_matches in self.match_milestone_sfx_mapping:
subprocess.Popen(shlex.split(f"play -v 0.45 {self.match_milestone_sfx_mapping[self.current_matches]}"))
print("\033c")
print(f"CURRENT RUN: {self.current_run}")
print(f"CURRENT MODE: {self.mode}\n")
print("BOARD STATE:\n")
display_game_board(self.game_board)
print("")
print(xtermcolor.colorize(f" Moving {game_board_key}... ", ansi=0, ansi_bg=39))
print(f"\nCurrent Run Duration: {(datetime.utcnow() - self.current_run_started_at).seconds} seconds")
print(f"Current Run Matches (Approximate): {self.current_matches}/{self.current_attempts}")
print(f"\nLast Run Duration: {self.last_run_duration} seconds")
print(f"Last Run Matches (Approximate): {self.last_matches}/{self.last_attempts}")
print("")
print(xtermcolor.colorize(" RECORDS ", ansi=29, ansi_bg=15))
print("")
# print(f"Duration (RANDOM): {self.record_random_duration} seconds (Run #{self.record_random_duration_run})")
print(f"Duration (PREDICT): {self.record_predict_duration} seconds (Run #{self.record_predict_duration_run})")
# print(f"Matches (RANDOM - Approximate): {self.record_random_matches} (Run #{self.record_random_matches_run})")
print(f"Matches (PREDICT - Approximate): {self.record_predict_matches} (Run #{self.record_predict_matches_run})")
print("")
print(xtermcolor.colorize(" PREDICT AVERAGES (Last 10 runs)", ansi=29, ansi_bg=15))
print("")
print(f"Duration: {round(np.mean(self.record_predict_duration_values), 2)} seconds")
print(f"{', '.join([str(v) for v in list(self.record_predict_duration_values)])}")
print(f"\nMatches (Approximate): {np.mean(self.record_predict_matches_values)}")
print(f"{', '.join([str(int(v)) for v in list(self.record_predict_matches_values)])}")
game_move_direction = "ROW" if self.game.screen_regions[start_screen_region][0] == self.game.screen_regions[end_screen_region][0] else "COLUMN"
if game_move_direction == "ROW":
game_move_distance = int(end_coordinate[1]) - int(start_coordinate[1])
else:
game_move_distance = self.rows.index(end_coordinate[0]) - self.rows.index(start_coordinate[0])
self.input_controller.drag_screen_region_to_screen_region(
start_screen_region=start_screen_region,
end_screen_region=end_screen_region,
duration=(0.1 + (game_move_distance * 0.05)),
game=self.game
)
def handle_play_bot(self, game_frame):
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
if context is None:
return
# if context == "game_over":
# self.input_controller.click_screen_region(screen_region="GAME_OVER_RUN_AGAIN", game=self.game)
# time.sleep(2)
# elif context.startswith("level_"):
# print("\033c")
# print(context)
# print("BOARD STATE:\n")
#
# self.previous_game_board = self.game_board
# self.game_board = parse_game_board(game_frame.frame)
# print(self.game_board)
#
# # Click the Unknown Tiles
# unknown_tile_coordinates = np.argwhere(self.game_board == 0)
#
# if 0 < unknown_tile_coordinates.size <= 10:
# coordinates = random.choice(unknown_tile_coordinates)
# tile_screen_region = f"GAME_BOARD_{self.rows[coordinates[0]]}{self.columns[coordinates[1]]}"
#
# self.input_controller.click_screen_region(screen_region=tile_screen_region, game=self.game)
#
# if not np.array_equal(self.game_board, self.previous_game_board):
# return
#
# game_board_deltas = generate_game_board_deltas(self.game_board)
# game_board_delta_matches = detect_game_board_delta_matches(game_board_deltas)
#
# game_move = None
#
# for i in [5, 4, 3]:
# if not len(game_board_delta_matches[i]):
# continue
#
# game_move = random.choice(game_board_delta_matches[i])
# break
#
# if game_move is None:
# time.sleep(0.1)
# return
#
# game_move_start_cell, game_move_end_cell = game_move.split(" to ")
#
# start_screen_region = f"GAME_BOARD_{game_move_start_cell}"
# end_screen_region = f"GAME_BOARD_{game_move_end_cell}"
#
# game_move_direction = "ROW" if self.game.screen_regions[start_screen_region][0] == self.game.screen_regions[end_screen_region][0] else "COLUMN"
#
# if game_move_direction == "ROW":
# game_move_distance = int(game_move_end_cell[1]) - int(game_move_start_cell[1])
# else:
# game_move_distance = self.rows.index(game_move_end_cell[0]) - self.rows.index(game_move_start_cell[0])
#
# print(f"\nMoving {game_move_start_cell} to {game_move_end_cell}...")
#
# print(game_board_delta_matches)
#
# self.input_controller.drag_screen_region_to_screen_region(
# start_screen_region=start_screen_region,
# end_screen_region=end_screen_region,
# duration=(0.1 + (game_move_distance * 0.05)),
# game=self.game
# )
def handle_play_random(self, game_frame):
rows = ["A", "B", "C", "D", "E", "F"]
columns = [1, 2, 3, 4, 5, 6, 7, 8]
row = random.choice(rows)
column = random.choice(columns)
start_screen_region = f"GAME_BOARD_{row}{column}"
axis = "row" if random.randint(0, 1) else "column"
if axis == "row":
end_column = random.choice(columns)
while end_column == column:
end_column = random.choice(columns)
end_screen_region = f"GAME_BOARD_{row}{end_column}"
else:
end_row = random.choice(rows)
while end_row == row:
end_row = random.choice(rows)
end_screen_region = f"GAME_BOARD_{end_row}{column}"
print(f"\nMoving {start_screen_region.split('_')[-1]} to {end_screen_region.split('_')[-1]}...")
self.input_controller.drag_screen_region_to_screen_region(
start_screen_region=start_screen_region,
end_screen_region=end_screen_region,
duration=0.3,
game=self.game
)
| |
dense], axis=-1)
return x + x_init
def clown_conv(x, channels, opt, use_bias=True, scope='clown', z=None):
split_ch = channels//8
half_split_ch = split_ch//2
other_half_split_ch = split_ch - half_split_ch
rest_split = channels - split_ch*7
deconv4_ch = split_ch + rest_split
conv5_ch = split_ch
no_deconv2 = opt.get("mixed_conv_no_deconv2", False)
if no_deconv2:
deconv4_ch += half_split_ch
conv5_ch += other_half_split_ch
with tf.variable_scope(scope):
splits = []
splits.append(deconv(x, deconv4_ch, kernel=4, stride=1, use_bias=use_bias, scope="deconv4", opt=opt))
splits.append(deconv(x, split_ch * 2, kernel=3, stride=1, use_bias=use_bias, scope="deconv3", opt=opt))
if not no_deconv2:
splits.append(deconv(x, split_ch, kernel=2, stride=1, use_bias=use_bias, scope="deconv2", opt=opt))
splits.append(conv(x, split_ch, kernel=3, stride=1, pad=1, use_bias=use_bias, scope="conv3", opt=opt))
splits.append(conv(x, conv5_ch, kernel=5, stride=1, pad=2, use_bias=use_bias, scope="conv5", opt=opt))
splits.append(conv(x, split_ch, kernel=5, stride=1, pad=4, dilation=2, use_bias=use_bias, scope="dilconv5", opt=opt))
concat = tf.concat(splits, axis=-1)
if z is not None:
concat = cond_bn(concat, z, opt=opt)
else:
concat = bn(concat, opt=opt)
concat = prelu(concat)
return concat
def mixed_resblock(x, inner_channels, out_channels, opt, use_bias=False, z=None, scope='res_mixed'):
with tf.variable_scope(scope):
res = clown_conv(x, inner_channels, scope="clown", opt=opt, z=z)
res = conv(res, channels=out_channels, kernel=1, stride=1, pad=0, use_bias=False, opt=opt, scope='proj')
return x + res
def self_attention(x, channels, opt, scope='self_attention'):
with tf.variable_scope(scope):
use_bias = opt.get("self_attention_bias", False)
f = conv(x, channels // 8, kernel=1, stride=1, opt=opt, scope='f_conv', use_bias=use_bias) # [bs, h, w, c']
g = conv(x, channels // 8, kernel=1, stride=1, opt=opt, scope='g_conv', use_bias=use_bias) # [bs, h, w, c']
h = conv(x, channels, kernel=1, stride=1, opt=opt, scope='h_conv', use_bias=use_bias) # [bs, h, w, c]
# N = h * w
s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]
beta = tf.nn.softmax(s) # attention map
o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]
gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0), dtype=gan_dtype)
o = tf.reshape(o, shape=x.shape) # [bs, h, w, C]
x = gamma * o + x
return x
def self_attention_2(x, channels, opt, scope='self_attention'):
with tf.variable_scope(scope):
use_bias = opt.get("self_attention_bias", False)
f = conv(x, channels // 8, kernel=1, stride=1, opt=opt, scope='f_conv', use_bias=use_bias) # [bs, h, w, c']
f = max_pooling(f)
g = conv(x, channels // 8, kernel=1, stride=1, opt=opt, scope='g_conv', use_bias=use_bias) # [bs, h, w, c']
h = conv(x, channels // 2, kernel=1, stride=1, opt=opt, scope='h_conv', use_bias=use_bias) # [bs, h, w, c]
h = max_pooling(h)
# N = h * w
s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]
beta = tf.nn.softmax(s) # attention map
o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]
gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0), dtype=gan_dtype)
o = tf.reshape(o, shape=[x.shape[0], x.shape[1], x.shape[2], channels // 2]) # [bs, h, w, C]
o = conv(o, channels, kernel=1, stride=1, opt=opt, scope='attn_conv', use_bias=use_bias)
x = gamma * o + x
return x
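# Shape sketch for the pooled variant above (self_attention_2), assuming a
# [4, 32, 32, 64] input: f and g project to 8 channels, h to 32; max pooling
# halves f and h spatially, so hw_flatten gives g -> [4, 1024, 8],
# f -> [4, 256, 8], h -> [4, 256, 32]. The attention map s is then
# [4, 1024, 256], o is [4, 1024, 32], reshaped back to [4, 32, 32, 32] and
# projected to 64 channels before the residual add.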
##################################################################################
# Sampling
##################################################################################
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2])
return gap
def global_sum_pooling(x) :
gsp = tf.reduce_sum(x, axis=[1, 2])
return gsp
def max_pooling(x) :
x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME')
return x
def avg_pooling(x):
x = tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
return x
def up_sample(x, scale_factor=2):
_, h, w, _ = x.get_shape().as_list()
new_size = [h * scale_factor, w * scale_factor]
return tf.image.resize_nearest_neighbor(x, size=new_size)
##################################################################################
# Activation function
##################################################################################
def lrelu(x, alpha=0.2):
return tf.nn.leaky_relu(x, alpha)
def relu(x):
return tf.nn.relu(x)
def prelu(x, scope=None, init_val=0.0):
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
alphas = tf.get_variable('alpha', x.get_shape()[-1], initializer=tf.constant_initializer(init_val), dtype=gan_dtype)
pos = tf.nn.relu(x)
neg = alphas * (x - abs(x)) * 0.5
return pos + neg
def tanh(x):
return tf.tanh(x)
##################################################################################
# Normalization function
##################################################################################
def bn(x, opt={}, scope='batch_norm'):
type = opt.get("bn",{}).get("type","bn")
if type=='batch_norm_broken_renorm':
type = 'batch_norm'
if scope=='batch_norm':
scope = 'batch_renorm'
if type=='bn' or type=='batch_norm':
return batch_norm(x, opt=opt, scope=scope)
elif type=='batch_renorm':
if scope=='batch_norm':
scope = 'batch_renorm'
return batch_renorm(x, opt=opt, scope=scope)
else:
raise ValueError("Unknown BN type: "+str(type))
def cond_bn(x, z, opt={}, scope='batch_norm'):
type = opt.get("bn",{}).get("type","bn")
if type=='batch_norm_broken_renorm':
type = 'batch_norm'
if scope=='batch_norm':
scope = 'batch_renorm'
if type=='bn' or type=='batch_norm':
return condition_batch_norm(x, z, opt=opt, scope=scope)
elif type=='batch_renorm':
if scope=='batch_norm':
scope = 'batch_renorm'
return condition_batch_renorm(x, z, opt=opt, scope=scope)
else:
raise ValueError("Unknown BN type: "+str(type))
def batch_norm(x, opt={}, scope='batch_norm'):
return tf.layers.batch_normalization(x,
momentum=opt.get("bn", {}).get("momentum", 0.98),
epsilon=1e-05,
training=opt["is_training"],
name=scope)
def normalize_renorm_clipping_params(renorm_clipping):
if "rmax" not in renorm_clipping:
renorm_clipping["rmax"] = 1.5
if "dmax" not in renorm_clipping:
renorm_clipping["dmax"] = 0.5
if "rmax" in renorm_clipping and not "rmin" in renorm_clipping:
renorm_clipping["rmin"] = 1.0/renorm_clipping["rmax"]
return renorm_clipping
def batch_renorm(x, opt={}, scope='batch_renorm'):
renorm_clipping = normalize_renorm_clipping_params(opt.get("bn", {}).get("renorm_clipping", {}))
return tf.layers.batch_normalization(x,
momentum=opt.get("bn", {}).get("momentum", 0.98),
epsilon=1e-05,
training=opt["is_training"],
name=scope,
renorm=True,
renorm_momentum=opt.get("bn", {}).get("renorm_momentum", 0.9),
renorm_clipping=renorm_clipping)
def condition_batch_norm(x, z, opt={}, scope='batch_norm'):
with tf.variable_scope(scope) :
_, _, _, c = x.get_shape().as_list()
fake = False
decay = opt.get("bn", {}).get("momentum", 0.98)
epsilon = 1e-05
test_mean = tf.get_variable("pop_mean", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(0.0), trainable=False)
test_var = tf.get_variable("pop_var", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(1.0), trainable=False)
beta = fully_connected(z, units=c, scope='beta', opt=opt)
gamma = fully_connected(z, units=c, scope='gamma', opt=opt)
beta = tf.reshape(beta, shape=[-1, 1, 1, c])
gamma = tf.reshape(gamma, shape=[-1, 1, 1, c])
if opt["is_training"]:
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
ema_mean = tf.assign(test_mean, test_mean * decay + batch_mean * (1 - decay))
ema_var = tf.assign(test_var, test_var * decay + batch_var * (1 - decay))
with tf.control_dependencies([ema_mean, ema_var]):
if fake:
batch_mean = 0.0
batch_var = 1.0
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)
else:
if fake:
test_mean = 0.0
test_var = 1.0
return tf.nn.batch_normalization(x, test_mean, test_var, beta, gamma, epsilon)
def condition_batch_renorm(x, z, opt={}, scope='batch_renorm'):
with tf.variable_scope(scope) :
_, _, _, c = x.get_shape().as_list()
fake=False
renorm_clipping = normalize_renorm_clipping_params(opt.get("bn", {}).get("renorm_clipping", {}))
test_decay = opt.get("bn", {}).get("momentum", 0.98)
renorm_decay = opt.get("bn", {}).get("renorm_momentum", 0.9)
shared = opt.get("bn", {}).get("shared_renorm", False)
renorm_fadein_decay = opt.get("bn", {}).get("renorm_fadein_decay", 0.9999)
if not shared:
test_decay = renorm_decay
epsilon = 1e-05
test_mean = tf.get_variable("pop_mean", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(0.0), trainable=False)
test_var = tf.get_variable("pop_var", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(1.0), trainable=False)
if not shared:
renorm_mean = tf.get_variable("renorm_mean", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(0.0), trainable=False)
renorm_var = tf.get_variable("renorm_var", shape=[c], dtype=gan_dtype, initializer=tf.constant_initializer(1.0), trainable=False)
renorm_weight = tf.get_variable("renorm_weight", shape=[], dtype=gan_dtype, initializer=tf.constant_initializer(0.0), trainable=False)
else:
renorm_mean = test_mean
renorm_var = test_var
renorm_weight = 1.0
beta = fully_connected(z, units=c, scope='beta', opt=opt)
gamma = fully_connected(z, units=c, scope='gamma', opt=opt)
beta = tf.reshape(beta, shape=[-1, 1, 1, c])
gamma = tf.reshape(gamma, shape=[-1, 1, 1, c])
if opt["is_training"]:
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
rmax = renorm_clipping["rmax"]
rmin = renorm_clipping["rmin"]
dmax = renorm_clipping["dmax"]
sigma = tf.sqrt(batch_var + epsilon)
renorm_sigma = tf.sqrt(renorm_var + epsilon)
weighted_renorm_sigma = renorm_weight*renorm_sigma + (1 - renorm_weight)*sigma
weighted_renorm_mean = renorm_weight*renorm_mean + (1 - renorm_weight)*batch_mean
r = tf.stop_gradient(tf.clip_by_value(sigma/weighted_renorm_sigma, rmin, rmax))
d = tf.stop_gradient(tf.clip_by_value((batch_mean - weighted_renorm_mean)/weighted_renorm_sigma, -dmax, dmax))
test_mean_op = tf.assign(test_mean, test_mean * test_decay + batch_mean * (1 - test_decay))
test_var_op = tf.assign(test_var, test_var * test_decay + batch_var * (1 - test_decay))
ema_ops = [test_mean_op, test_var_op]
if not shared:
renorm_mean_op = tf.assign(renorm_mean, renorm_mean*renorm_decay + batch_mean*(1 - renorm_decay))
renorm_var_op = tf.assign(renorm_var, renorm_var*renorm_decay + batch_var*(1 - renorm_decay))
renorm_w_op = tf.assign(renorm_weight, renorm_weight*renorm_fadein_decay + 1.0*(1 - renorm_fadein_decay))
ema_ops += [renorm_mean_op, renorm_var_op, renorm_w_op]
with tf.control_dependencies(ema_ops):
if fake:
return tf.nn.batch_normalization(x, 0.0, 1.0, beta, gamma, epsilon)
else:
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta + d*gamma, r*gamma, epsilon)
else:
if fake:
test_mean = 0.0
test_var = 1.0
return tf.nn.batch_normalization(x, test_mean, test_var, beta, gamma, epsilon)
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False, dtype=gan_dtype)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
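# A minimal sketch of how spectral_norm is typically applied: wrap a layer's
# weight right before the convolution/matmul. This is an illustrative pattern,
# not a description of how `conv`/`deconv` in this project wire it up.
#
#   with tf.variable_scope("sn_conv"):
#       w = tf.get_variable("kernel", shape=[3, 3, 64, 128], dtype=gan_dtype)
#       y = tf.nn.conv2d(x, spectral_norm(w), strides=[1, 1, 1, 1], padding="SAME")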
##################################################################################
# Loss function
##################################################################################
def discriminator_loss(loss_func, real, fake, flood_level=0):
real_loss = 0
fake_loss = 0
if 'wgan' in loss_func :
real_loss = -tf.reduce_mean(real)
fake_loss = tf.reduce_mean(fake)
if loss_func == 'lsgan' :
real_loss = tf.reduce_mean(tf.squared_difference(real, 1.0))
fake_loss = tf.reduce_mean(tf.square(fake))
if loss_func == 'ra-lsgan' :
d_xr = real - tf.reduce_mean(fake)
d_xf = fake - tf.reduce_mean(real)
real_loss = tf.reduce_mean(tf.squared_difference(d_xr, 1.0))
fake_loss = tf.reduce_mean(tf.squared_difference(d_xf, -1.0))
if loss_func == 'gan' or loss_func == 'dragan' :
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=real))
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=fake))
if loss_func == 'ra-gan' or loss_func == 'ra-dragan':
d_xr = real - tf.reduce_mean(fake)
d_xf = fake - tf.reduce_mean(real)
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=d_xr))
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=d_xf))
if loss_func == 'ra-hinge':
d_xr = real - tf.reduce_mean(fake)
d_xf = fake - tf.reduce_mean(real)
real_loss = tf.reduce_mean(relu(1.0 - d_xr))
fake_loss = tf.reduce_mean(relu(1.0 + d_xf))
if loss_func == 'hinge' :
real_loss = tf.reduce_mean(relu(1.0 - real))
fake_loss = tf.reduce_mean(relu(1.0 + fake))
loss = real_loss + fake_loss
if flood_level:
loss = flood_loss(loss, flood_level)
return loss
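# Hedged usage sketch, assuming `real_logits` and `fake_logits` are the
# discriminator outputs for a real and a generated batch:
#
#   d_loss = discriminator_loss('hinge', real=real_logits, fake=fake_logits)
#
# Any of the strings handled above ('gan', 'lsgan', 'ra-hinge', a 'wgan'
# variant, ...) selects the corresponding formulation; a flood_level > 0
# additionally applies flood_loss to the summed loss.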
def generator_loss(loss_func, fake, real, flood_level=0):
| |
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TR-069 has mandatory attribute names that don't comply with policy
# pylint:disable=invalid-name
"""Implementation of tr-181 MoCA objects for Broadcom chipsets."""
__author__ = '<EMAIL> (<NAME>)'
import re
import subprocess
import pynetlinux
import tr.basemodel
import tr.cwmptypes
import tr.session
import tr.x_catawampus_tr181_2_0
import netdev
BASE181MOCA = tr.basemodel.Device.MoCA
CATA181MOCA = tr.x_catawampus_tr181_2_0.X_CATAWAMPUS_ORG_Device_v2_0.Device.MoCA
MOCACTL = 'mocactl'
PYNETIFCONF = pynetlinux.ifconfig.Interface
# Regexps to parse mocactl output
MAC_RE = re.compile(r'^MAC Address\s+: ((?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})')
PNC_RE = re.compile(r'Preferred NC\s+: (\d+)')
PTX_RE = re.compile(r'\ATxUc.+?(\d+[.]?\d*)\s+dBm.*?(\d+)\s+bps')
PRX_RE = re.compile(r'\ARxUc.+?(\d+[.]?\d*)\s+dBm.*?(\d+)\s+bps'
r'\s+(\d+[.]?\d*) dB')
RXB_RE = re.compile(r'\ARxBc.+?(\d+[.]?\d*)\s+dBm.*?(\d+)\s+bps')
QAM_RE = re.compile(r'256 QAM capable\s+:\s+(\d+)')
AGG_RE = re.compile(r'Aggregated PDUs\s+:\s+(\d+)')
BTL_RE = re.compile(r'\s*([0-9a-fA-F]{32})\s+([0-9a-fA-F]{32})')
TX_RE = re.compile(r'Unicast Tx Pkts To Node\s+: (\d+)')
RX_RE = re.compile(r'Unicast Rx Pkts From Node\s+: (\d+)')
E1_RE = re.compile(r'Rx CodeWord ErrorAndUnCorrected\s+: (\d+)')
E2_RE = re.compile(r'Rx NoSync Errors\s+: (\d+)')
NODE_RE = re.compile(r'\ANode\s*: (\d+)')
def IsMoca1_1():
"""Check for existence of the MoCA 1.1 utilities."""
cmd = [MOCACTL, '--version']
try:
rc = subprocess.call(cmd)
return rc == 0
except OSError:
return False
def IntOrZero(arg):
try:
return int(arg)
except (ValueError, TypeError):
return 0
def FloatOrZero(arg):
try:
return float(arg)
except (ValueError, TypeError):
return 0.0
def _CombineBitloading(bitlines):
"""Combine bitloading information into one string.
Args:
bitlines: a list of lines of bitloading info:
00008888888888888888888888888888 00008888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888000000000000 88888888888888888888000000000000
00000000000008888888888888888888 00000000000008888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888888 88888888888888888888888888888888
88888888888888888888888888888000 88888888888888888888888888888000
Returns:
a tuple of two contiguous strings, '00008888888...888888000',
for the left-hand and right-hand bitloading.
"""
left = []
right = []
for line in bitlines:
(l, r) = line.split()
left.append(l.strip())
right.append(r.strip())
return (''.join(left), ''.join(right))
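# Example (illustrative values only, shorter than real mocactl output):
#   _CombineBitloading(['00008888 11112222', 'aaaabbbb ccccdddd'])
#   -> ('00008888aaaabbbb', '11112222ccccdddd')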
class BrcmMocaInterface(BASE181MOCA.Interface):
"""An implementation of tr181 Device.MoCA.Interface for Broadcom chipsets."""
# TODO(dgentry) Supposed to be read/write, but we don't disable yet.
Enable = tr.cwmptypes.ReadOnlyBool(True)
Name = tr.cwmptypes.ReadOnlyString('')
# In theory LowerLayers is writeable, but it is nonsensical to write to it.
LowerLayers = tr.cwmptypes.ReadOnlyString('')
X_CATAWAMPUS_ORG_ExtraTracing = tr.cwmptypes.ReadOnlyBool(False)
MAX_NODES_MOCA1 = 8
MAX_NODES_MOCA2 = 16
MaxNodes = tr.cwmptypes.ReadOnlyInt(0)
AssociatedDeviceCount = tr.cwmptypes.ReadOnlyUnsigned(0)
Upstream = tr.cwmptypes.ReadOnlyBool(False)
def __init__(self, ifname, upstream=False, qfiles=None, numq=0, hipriq=0):
BASE181MOCA.Interface.__init__(self)
type(self).MaxNodes.Set(self, self.MAX_NODES_MOCA1)
type(self).Name.Set(self, ifname)
type(self).Upstream.Set(self, bool(upstream))
self._pynet = PYNETIFCONF(ifname)
self._Stats = BrcmMocaInterfaceStatsLinux26(ifname=ifname, qfiles=qfiles,
numq=numq, hipriq=hipriq)
self.Unexport(['Alias', 'MaxBitRate', 'MaxIngressBW', 'MaxEgressBW',
'PreferredNC', 'PrivacyEnabledSetting', 'FreqCapabilityMask',
'FreqCurrentMaskSetting', 'FreqCurrentMask',
'KeyPassphrase', 'TxPowerLimit', 'PowerCntlPhyTarget',
'BeaconPowerLimit', 'NetworkTabooMask', 'NodeTabooMask',
'TxBcastRate', 'TxBcastPowerReduction'])
self.Unexport(objects=['QoS'])
@property
def Stats(self):
return self._Stats
@tr.session.cache
def _MocaCtlShowStatus(self):
"""Return output of mocactl show --status."""
mc = subprocess.Popen([MOCACTL, 'show', '--status'], stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
return out.splitlines()
@tr.session.cache
def _MocaCtlShowInitParms(self):
"""Return output of mocactl show --initparms."""
mc = subprocess.Popen([MOCACTL, 'show', '--initparms'],
stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
return out.splitlines()
@tr.session.cache
def _MocaCtlShowConfig(self):
"""Return output of mocactl show --config."""
mc = subprocess.Popen([MOCACTL, 'show', '--config'], stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
return out.splitlines()
def _MocaCtlGetField(self, outfcn, field):
"""Look for one field in a mocactl command.
ex: field='SwVersion' would return 5.6.789 from
vendorId : 999999999 HwVersion : 0x12345678
SwVersion : 5.6.789 self MoCA Version : 0x11
Args:
outfcn: a function to call, which must return a list of text lines.
field: the text string to look for.
Returns:
The value of the field, or None.
"""
m_re = re.compile(field + r'\s*:\s+(\S+)')
for line in outfcn():
mr = m_re.search(line)
if mr is not None:
return mr.group(1)
return None
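# Illustrative example (the line below is made up, not captured mocactl output):
#   given outfcn() == ['SwVersion        : 5.6.789   self MoCA Version : 0x11']
#   _MocaCtlGetField(outfcn, 'SwVersion') returns '5.6.789', because the regex
#   captures the first run of non-whitespace after the colon.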
@property
def Status(self):
if not self._pynet.is_up():
return 'Down'
(_, _, _, link_up) = self._pynet.get_link_info()
if link_up:
return 'Up'
else:
return 'Dormant'
@property
def LastChange(self):
"""Parse linkUpTime y:m:d:h:m:s, return seconds."""
up = self._MocaCtlGetField(self._MocaCtlShowStatus, 'linkUpTime')
if up is None:
    return 0
up = up.split(':')
secs = 0
for t in up:
# linkUpTime ex: '23h:41m:30s'
num = IntOrZero(t[:-1])
if t[-1] == 'y':
secs += int(num * (365.25 * 24.0 * 60.0 * 60.0))
elif t[-1] == 'w':
secs += num * (7 * 24 * 60 * 60)
elif t[-1] == 'd':
secs += num * (24 * 60 * 60)
elif t[-1] == 'h':
secs += num * (60 * 60)
elif t[-1] == 'm':
secs += num * 60
elif t[-1] == 's':
secs += num
return secs
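# Worked example (illustrative): linkUpTime '23h:41m:30s' parses to
# 23*3600 + 41*60 + 30 = 85290 seconds.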
@property
def MACAddress(self):
return self._pynet.get_mac()
@property
def FirmwareVersion(self):
ver = self._MocaCtlGetField(self._MocaCtlShowStatus, 'SwVersion')
return ver if ver else '0'
def _RegToMoCA(self, regval):
moca = {'0x10': '1.0', '0x11': '1.1', '0x20': '2.0', '0x21': '2.1'}
return moca.get(regval, '0.0')
@property
def HighestVersion(self):
reg = self._MocaCtlGetField(self._MocaCtlShowStatus, 'self MoCA Version')
return self._RegToMoCA(reg)
@property
def CurrentVersion(self):
reg = self._MocaCtlGetField(self._MocaCtlShowStatus, 'networkVersionNumber')
return self._RegToMoCA(reg)
@property
def NetworkCoordinator(self):
nodeid = self._MocaCtlGetField(self._MocaCtlShowStatus, 'ncNodeId')
return IntOrZero(nodeid)
@property
def NodeID(self):
nodeid = self._MocaCtlGetField(self._MocaCtlShowStatus, 'nodeId')
return IntOrZero(nodeid)
@property
def BackupNC(self):
bnc = self._MocaCtlGetField(self._MocaCtlShowStatus, 'backupNcId')
return bnc if bnc else ''
@property
def PrivacyEnabled(self):
private = self._MocaCtlGetField(self._MocaCtlShowInitParms, 'Privacy')
return private == 'enabled'
@property
def CurrentOperFreq(self):
freq = self._MocaCtlGetField(self._MocaCtlShowStatus, 'rfChannel')
if freq:
mhz = IntOrZero(freq.split()[0])
return int(mhz * 1e6)
return 0
@property
def LastOperFreq(self):
last = self._MocaCtlGetField(self._MocaCtlShowInitParms,
'Nv Params - Last Oper Freq')
if last:
return IntOrZero(last.split()[0])
return 0
@property
def QAM256Capable(self):
qam = self._MocaCtlGetField(self._MocaCtlShowInitParms, 'qam256Capability')
return qam == 'on'
@property
def PacketAggregationCapability(self):
# example: "maxPktAggr : 10 pkts"
pkts = self._MocaCtlGetField(self._MocaCtlShowConfig, 'maxPktAggr')
if pkts:
return IntOrZero(pkts.split()[0])
return 0
@property
def AssociatedDeviceNumberOfEntries(self):
return len(self.AssociatedDeviceList)
def _MocaCtlGetNodeIDs(self):
"""Return a list of active MoCA Node IDs."""
mc = subprocess.Popen([MOCACTL, 'showtbl', '--nodestats'],
stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
nodes = set()
for line in out.splitlines():
node = NODE_RE.search(line)
if node is not None:
nodes.add(int(node.group(1)))
node_list = list(nodes)
length = len(node_list)
if int(self.AssociatedDeviceCount) != length:
type(self).AssociatedDeviceCount.Set(self, length)
return node_list
@property
@tr.session.cache
def AssociatedDeviceList(self):
mocanodes = self._MocaCtlGetNodeIDs()
result = {}
for idx, nodeid in enumerate(mocanodes, start=1):
result[str(idx)] = BrcmMocaAssociatedDevice(nodeid)
return result
class BrcmMocaInterfaceStatsLinux26(netdev.NetdevStatsLinux26,
CATA181MOCA.Interface.Stats):
"""tr181 Device.MoCA.Interface.Stats for Broadcom chipsets."""
def __init__(self, ifname, qfiles=None, numq=0, hipriq=0):
netdev.NetdevStatsLinux26.__init__(self, ifname, qfiles, numq, hipriq)
CATA181MOCA.Interface.Stats.__init__(self)
if not qfiles:
self.Unexport(['X_CATAWAMPUS-ORG_DiscardFrameCnts',
'X_CATAWAMPUS-ORG_DiscardPacketsReceivedHipri'])
class BrcmMocaAssociatedDevice(CATA181MOCA.Interface.AssociatedDevice):
"""tr-181 Device.MoCA.Interface.AssociatedDevice for Broadcom chipsets."""
Active = tr.cwmptypes.ReadOnlyBool(True)
MACAddress = tr.cwmptypes.ReadOnlyString('')
NodeID = tr.cwmptypes.ReadOnlyInt(-1)
PacketAggregationCapability = tr.cwmptypes.ReadOnlyInt(0)
PHYTxRate = tr.cwmptypes.ReadOnlyInt(0)
PHYRxRate = tr.cwmptypes.ReadOnlyInt(0)
PreferredNC = tr.cwmptypes.ReadOnlyBool(False)
QAM256Capable = tr.cwmptypes.ReadOnlyInt(0)
RxBcastPowerLevel = tr.cwmptypes.ReadOnlyInt(0)
RxErroredAndMissedPackets = tr.cwmptypes.ReadOnlyInt(0)
RxPackets = tr.cwmptypes.ReadOnlyInt(0)
RxPowerLevel = tr.cwmptypes.ReadOnlyInt(0)
RxSNR = tr.cwmptypes.ReadOnlyInt(0)
TxBcastRate = tr.cwmptypes.ReadOnlyInt(0)
TxPackets = tr.cwmptypes.ReadOnlyInt(0)
TxPowerControlReduction = tr.cwmptypes.ReadOnlyInt(0)
X_CATAWAMPUS_ORG_RxBcastPowerLevel_dBm = tr.cwmptypes.ReadOnlyFloat(0.0)
X_CATAWAMPUS_ORG_RxPowerLevel_dBm = tr.cwmptypes.ReadOnlyFloat(0.0)
X_CATAWAMPUS_ORG_RxSNR_dB = tr.cwmptypes.ReadOnlyFloat(0.0)
X_CATAWAMPUS_ORG_RxNBAS = tr.cwmptypes.ReadOnlyFloat(0.0)
X_CATAWAMPUS_ORG_RxBitloading = tr.cwmptypes.ReadOnlyString('')
X_CATAWAMPUS_ORG_TxBitloading = tr.cwmptypes.ReadOnlyString('')
def __init__(self, nodeid):
super(BrcmMocaAssociatedDevice, self).__init__()
type(self).NodeID.Set(self, int(nodeid))
self.Unexport(['HighestVersion',
'X_CATAWAMPUS-ORG_RxPrimaryCwCorrected',
'X_CATAWAMPUS-ORG_RxPrimaryCwUncorrected',
'X_CATAWAMPUS-ORG_RxPrimaryCwNoErrors',
'X_CATAWAMPUS-ORG_RxPrimaryCwNoSync',
'X_CATAWAMPUS-ORG_RxSecondaryCwCorrected',
'X_CATAWAMPUS-ORG_RxSecondaryCwUncorrected',
'X_CATAWAMPUS-ORG_RxSecondaryCwNoErrors',
'X_CATAWAMPUS-ORG_RxSecondaryCwNoSync',
])
self.ParseNodeStatus()
self.ParseNodeStats()
@tr.session.cache
def ParseNodeStatus(self):
"""Run mocactl show --nodestatus for this node, parse the output."""
mc = subprocess.Popen([MOCACTL, 'show', '--nodestatus', str(self.NodeID)],
stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
bitloading = [[], []]
bitloadidx = 0
for line in out.splitlines():
mac = MAC_RE.search(line)
if mac is not None:
type(self).MACAddress.Set(self, mac.group(1))
pnc = PNC_RE.search(line)
if pnc is not None:
preferred = pnc.group(1) != '0'
type(self).PreferredNC.Set(self, preferred)
ptx = PTX_RE.search(line)
if ptx is not None:
type(self).PHYTxRate.Set(self, (IntOrZero(ptx.group(2)) / 1000000))
txpowercontrol = int(FloatOrZero(ptx.group(1)))
type(self).TxPowerControlReduction.Set(self, txpowercontrol)
prx = PRX_RE.search(line)
if prx is not None:
type(self).PHYRxRate.Set(self, (IntOrZero(prx.group(2)) / 1000000))
rxpower = FloatOrZero(prx.group(1))
type(self).RxPowerLevel.Set(self, abs(int(rxpower)))
type(self).X_CATAWAMPUS_ORG_RxPowerLevel_dBm.Set(self, rxpower)
rxsnr = FloatOrZero(prx.group(3))
type(self).RxSNR.Set(self, abs(int(rxsnr)))
type(self).X_CATAWAMPUS_ORG_RxSNR_dB.Set(self, rxsnr)
rxb = RXB_RE.search(line)
if rxb is not None:
type(self).TxBcastRate.Set(self, (IntOrZero(rxb.group(2)) / 1000000))
rxbpower = FloatOrZero(rxb.group(1))
type(self).RxBcastPowerLevel.Set(self, abs(int(rxbpower)))
type(self).X_CATAWAMPUS_ORG_RxBcastPowerLevel_dBm.Set(self, rxbpower)
qam = QAM_RE.search(line)
if qam is not None:
qam256 = qam.group(1) != '0'
type(self).QAM256Capable.Set(self, qam256)
agg = AGG_RE.search(line)
if agg is not None:
aggcapable = IntOrZero(agg.group(1))
type(self).PacketAggregationCapability.Set(self, aggcapable)
if 'Unicast Bit Loading Info' in line:
bitloadidx = 0
if 'Broadcast Bit Loading Info' in line:
bitloadidx = 1
btl = BTL_RE.search(line)
if btl is not None:
bitloading[bitloadidx].append(line)
(txbitl, rxbitl) = _CombineBitloading(bitloading[0])
type(self).X_CATAWAMPUS_ORG_RxBitloading.Set(self, '$BRCM1$' + rxbitl)
type(self).X_CATAWAMPUS_ORG_TxBitloading.Set(self, '$BRCM1$' + txbitl)
@tr.session.cache
def ParseNodeStats(self):
"""Run mocactl show --nodestats for this node, parse the output."""
mc = subprocess.Popen([MOCACTL, 'show', '--nodestats', str(self.NodeID)],
stdout=subprocess.PIPE)
out, _ = mc.communicate(None)
rx_err = 0
for line in out.splitlines():
tx = TX_RE.search(line)
if tx is not None:
type(self).TxPackets.Set(self, IntOrZero(tx.group(1)))
rx = RX_RE.search(line)
if rx is not None:
type(self).RxPackets.Set(self, IntOrZero(rx.group(1)))
e1 = E1_RE.search(line)
if e1 is not None:  # remainder of this file is truncated in the source
    rx_err += IntOrZero(e1.group(1))
'Uniprot']
sharma = sharma.drop(['Protein names', 'PEP', 'Sequence coverage [%]', 'Protein IDs'], axis=1)
sharma4o_df = pd.wide_to_long(sharma4o, stubnames='Log2LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
sharma4o_df = sharma4o_df.reset_index()
sharma4o_df['raw_data'] = 2 ** sharma4o_df['Log2LFQintensity']
sharma4o_df['Study'] = 'Sharma 2015, isolated'
sharma4o_df['Organism'] = 'mouse'
sharma4o_df['raw_data_units'] = 'LFQintensity'
sharma4o_df = sharma4o_df.drop('Log2LFQintensity', axis=1)
sharma_df = pd.wide_to_long(sharma, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
sharma_df = sharma_df.reset_index()
sharma_df['Study'] = 'Sharma 2015, cultured'
sharma_df['Organism'] = 'mouse'
sharma_df['raw_data_units'] = 'LFQintensity'
sharma_df['Age_days'] = 0 # 'cultured cells'
sharma_df = sharma_df.rename(columns={'LFQintensity': 'raw_data'})
sharma_df.loc[sharma_df['sample_id'].isin(['Neuronsdiv051',
'Neuronsdiv052', 'Neuronsdiv053', 'Neuronsdiv101', 'Neuronsdiv102',
'Neuronsdiv103', 'Neuronsdiv151', 'Neuronsdiv152',
'Neuronsdiv153']), 'location'] = 'neurons'
sharma_df.loc[sharma_df['sample_id'].isin(['Astrocytes1', 'Astrocytes2', 'Astrocytes3']), 'location'] = 'astrocytes'
sharma_df.loc[sharma_df['sample_id'].isin(['adultMicroglia1', 'adultMicroglia2', 'adultMicroglia3',
'youngMicroglia1', 'youngMicroglia2',
'youngMicroglia3']), 'location'] = 'microglia'
sharma_df.loc[sharma_df['sample_id'].isin(['Oligodendrocytesdiv11', 'Oligodendrocytesdiv12',
'Oligodendrocytesdiv13', 'Oligodendrocytesdiv251',
'Oligodendrocytesdiv252', 'Oligodendrocytesdiv253',
'Oligodendrocytesdiv41', 'Oligodendrocytesdiv42',
'Oligodendrocytesdiv43']), 'location'] = 'oligodendrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedAstrocytes', 'IsolatedMicroglia', 'IsolatedNeurons',
'IsolatedOligodendrocytes']), 'Age_days'] = 29 # 8 + 21 # 'P8'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brain', 'Brainstem', 'Cerebellum',
'CorpusCallosum', 'MotorCortex', 'OlfactoryBulb', 'OpticNerve',
'PrefrontalCortex', 'Striatum', 'Thalamus',
'VentralHippocampus', ]), 'Age_days'] = 81 # 60 + 21 'P60'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP05']), 'Age_days'] = 26 # 5 + 21 # 'P5'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP14']), 'Age_days'] = 35 # 14 + 21 # 'P14'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CerebellumP24']), 'Age_days'] = 45 # 24 + 21 # 'P24'
###
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedAstrocytes']), 'location'] = 'astrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedMicroglia']), 'location'] = 'microglia'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedNeurons']), 'location'] = 'neurons'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['IsolatedOligodendrocytes']), 'location'] = 'oligodendrocytes'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brain']), 'location'] = 'brain'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Brainstem']), 'location'] = 'brainstem'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(
['Cerebellum', 'CerebellumP05', 'CerebellumP14', 'CerebellumP24']), 'location'] = 'cerebellum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['CorpusCallosum']), 'location'] = 'corpus callosum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['MotorCortex', 'PrefrontalCortex']), 'location'] = 'cortex'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['VentralHippocampus']), 'location'] = 'hippocampus'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['OlfactoryBulb']), 'location'] = 'olfactory bulb'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Striatum']), 'location'] = 'striatum'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['OpticNerve']), 'location'] = 'optic nerve'
sharma4o_df.loc[sharma4o_df['sample_id'].isin(['Thalamus']), 'location'] = 'thalamus'
return sharma4o_df, sharma_df
def get_wisniewski_2015_dataframe():
"""
Return pandas dataframe for Wiśniewski 2015
:return:
pandas.core.frame.DataFrame: dataframe containing Wisniewski 2015 data.
"""
print("Importing Wisńiewski 2015 pandas dataframe")
wisniewskif = pd.ExcelFile('../data/source_data/pr5b00276_si_001.xlsx')
wisniewski = wisniewskif.parse('Sheet1')
wisniewski = wisniewski[~((wisniewski['Protein concentration (mol/g protein) Brain1'] == 0) &
(wisniewski['Protein concentration (mol/g protein) Brain2'] == 0) &
(wisniewski['Protein concentration (mol/g protein) Brain3'] == 0))]
wisniewski = wisniewski.drop(
['Protein IDs', 'Protein names', 'Total protein Brain1', 'Total protein Brain2', 'Total protein Brain3'],
axis=1)
wisniewski = wisniewski.rename(columns={'Majority protein IDs': 'Uniprot',
'Gene names': 'gene_names',
'Protein concentration (mol/g protein) Brain1': 'Conc_1',
'Protein concentration (mol/g protein) Brain2': 'Conc_2',
'Protein concentration (mol/g protein) Brain3': 'Conc_3'})
wisniewski_df = pd.wide_to_long(wisniewski, stubnames='Conc',
i=['Uniprot', 'gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
wisniewski_df = wisniewski_df.reset_index()
wisniewski_df['Study'] = 'Wisniewski 2015'
wisniewski_df['Organism'] = 'mouse'
wisniewski_df['location'] = 'brain'
wisniewski_df['Age_days'] = 10 * 7 + 21 # adult
wisniewski_df['raw_data_units'] = 'Protein concentration (mol/g protein)'
wisniewski_df = wisniewski_df.rename(columns={'Conc': 'raw_data'})
return wisniewski_df
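# Illustrative example of the pd.wide_to_long reshaping used throughout these
# loaders (toy data, not from the study):
#   df = pd.DataFrame({'Uniprot': ['P1'], 'gene_names': ['G1'],
#                      'Conc_1': [0.1], 'Conc_2': [0.2]})
#   pd.wide_to_long(df, stubnames='Conc', i=['Uniprot', 'gene_names'],
#                   j='sample_id', sep='_', suffix=r'\w+')
#   # -> long format with a single 'Conc' column and sample_id values '1' and '2'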
def get_han_2014_dataframe():
"""
Return pandas dataframe for Han 2014
:return:
pandas.core.frame.DataFrame: dataframe containing Han 2014 data.
"""
print("Importing Han 2014 pandas dataframe")
hanf = pd.ExcelFile('../data/source_data/pmic7746-sup-0001-tables1.xlsx')
han = hanf.parse('Sheet1')
# six technical replicates of two samples (conditioned media (CM) and whole-cell lysates (WCL))
han = han.rename(columns={'Gene symbol': 'gene_names',
'LFQ intensity WCL_set1_tech1': 'LFQintensity_WCLset1tech1',
'LFQ intensity WCL_set1_tech2': 'LFQintensity_WCLset1tech2',
'LFQ intensity WCL_set1_tech3': 'LFQintensity_WCLset1tech3',
'LFQ intensity WCL_set2_tech1': 'LFQintensity_WCLset2tech1',
'LFQ intensity WCL_set2_tech2': 'LFQintensity_WCLset2tech2',
'LFQ intensity WCL_set2_tech3': 'LFQintensity_WCLset2tech3',
'LFQ intensity WCL_set3_tech1': 'LFQintensity_WCLset3tech1',
'LFQ intensity WCL_set3_tech2': 'LFQintensity_WCLset3tech2',
'LFQ intensity WCL_set3_tech3': 'LFQintensity_WCLset3tech3'
})
han = han.drop(['Protein IDs', 'Majority protein IDs', 'Leading protein', 'Intensity', 'Intensity CM_set1_tech1',
'Intensity CM_set1_tech2', 'Intensity CM_set1_tech3',
'Intensity CM_set2_tech1', 'Intensity CM_set2_tech2',
'Intensity CM_set2_tech3', 'Intensity CM_set3_tech1',
'Intensity CM_set3_tech2', 'Intensity CM_set3_tech3', 'LFQ intensity CM_set1_tech1',
'LFQ intensity CM_set1_tech2', 'LFQ intensity CM_set1_tech3',
'LFQ intensity CM_set2_tech1', 'LFQ intensity CM_set2_tech2',
'LFQ intensity CM_set2_tech3', 'LFQ intensity CM_set3_tech1',
'LFQ intensity CM_set3_tech2', 'LFQ intensity CM_set3_tech3', 'MS/MS Count CM_set1_tech1',
'MS/MS Count CM_set1_tech2', 'MS/MS Count CM_set1_tech3',
'MS/MS Count CM_set2_tech1', 'MS/MS Count CM_set2_tech2',
'MS/MS Count CM_set2_tech3', 'MS/MS Count CM_set3_tech1',
'MS/MS Count CM_set3_tech2', 'MS/MS Count CM_set3_tech3',
'MS/MS Count WCL_set1_tech1', 'MS/MS Count WCL_set1_tech2',
'MS/MS Count WCL_set1_tech3', 'MS/MS Count WCL_set2_tech1',
'MS/MS Count WCL_set2_tech2', 'MS/MS Count WCL_set2_tech3',
'MS/MS Count WCL_set3_tech1', 'MS/MS Count WCL_set3_tech2',
'MS/MS Count WCL_set3_tech3', 'Intensity WCL_set1_tech1',
'Intensity WCL_set1_tech2', 'Intensity WCL_set1_tech3', 'Intensity WCL_set2_tech1',
'Intensity WCL_set2_tech2', 'Intensity WCL_set2_tech3', 'Intensity WCL_set3_tech1',
'Intensity WCL_set3_tech2', 'Intensity WCL_set3_tech3'], axis=1)
han = han[han['gene_names'] != '-']
han_df = pd.wide_to_long(han, stubnames='LFQintensity',
i=['Uniprot', 'gene_names'],
j='sample_id', sep='_', suffix=r'\w+')
han_df = han_df.reset_index()
han_df['Study'] = 'Han 2014'
han_df['Organism'] = 'mouse'
han_df['raw_data_units'] = 'LFQintensity'
han_df['Age_days'] = 0 # 'cultured cells'
han_df['location'] = 'astrocytes'
han_df = han_df.rename(columns={'LFQintensity': 'raw_data'})
return han_df
def get_geiger_2013_dataframe():
"""
Return pandas dataframe for Geiger 2013
:return:
pandas.core.frame.DataFrame: dataframe containing Geiger 2013 data.
"""
print("Importing Geiger 2013 pandas dataframe. This operation can last a while.")
geigerf = pd.ExcelFile('../data/source_data/mcp.M112.024919-2.xlsx')
geiger = geigerf.parse('Suppl Table S1', skiprows=1, index_col=None)
geiger = geiger.drop(['Protein IDs', 'Protein names', 'Peptides', 'Razor + unique peptides', 'Unique peptides',
'Sequence coverage [%]', 'PEP',
'Ratio H/L normalized', 'Ratio H/L normalized Adrenal gland',
'Ratio H/L normalized Brain cortex',
'Ratio H/L normalized Brain medulla', 'Ratio H/L normalized Brown fat',
'Ratio H/L normalized Cerebellum', 'Ratio H/L normalized Colon',
'Ratio H/L normalized Diaphragm', 'Ratio H/L normalized Duodenum',
'Ratio H/L normalized Embryonic tissue', 'Ratio H/L normalized Eye',
'Ratio H/L normalized Heart', 'Ratio H/L normalized Ileum',
'Ratio H/L normalized Jejunum', 'Ratio H/L normalized Kidney cortex',
'Ratio H/L normalized Kidney medulla', 'Ratio H/L normalized Liver',
'Ratio H/L normalized Lung', 'Ratio H/L normalized Midbrain',
'Ratio H/L normalized Muscle', 'Ratio H/L normalized Olfactory bulb',
'Ratio H/L normalized Ovary', 'Ratio H/L normalized Pancreas',
'Ratio H/L normalized Salivary gland', 'Ratio H/L normalized Spleeen',
'Ratio H/L normalized Stomach', 'Ratio H/L normalized Thymus',
'Ratio H/L normalized Uterus', 'Ratio H/L normalized White fat',
'Intensity L Adrenal gland', 'Intensity L Brown fat', 'Intensity L Colon',
'Intensity L Diaphragm',
'Intensity L Duodenum', 'Intensity L Embryonic tissue',
'Intensity L Eye', 'Intensity L Heart', 'Intensity L Ileum',
'Intensity L Jejunum', 'Intensity L Kidney cortex',
'Intensity L Kidney medulla', 'Intensity L Liver', 'Intensity L Lung', 'Intensity L Muscle',
'Intensity L Ovary',
'Intensity L Pancreas', 'Intensity L Salivary gland',
'Intensity L Spleeen', 'Intensity L Stomach', 'Intensity L Thymus',
'Intensity L Uterus', 'Intensity L White fat'], axis=1)
geiger = geiger.rename(columns={'Majority protein IDs': 'Uniprot',
'Gene names': 'gene_names',
'Mol. weight [kDa]': 'molecular_weight_kDa',
'Intensity L Brain cortex': 'IntensityL_cortex',
'Intensity L Brain medulla': 'IntensityL_medulla',
'Intensity L Cerebellum': 'IntensityL_cerebellum',
'Intensity L Midbrain': 'IntensityL_midbrain',
'Intensity L Olfactory bulb': 'IntensityL_olfactorybulb'
})
# Lys-C
geiger = geiger[~geiger['Uniprot'].isna()]
geiger_df = pd.wide_to_long(geiger, stubnames='IntensityL',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='location', sep='_', suffix=r'\w+')
geiger_df = geiger_df.reset_index()
geiger_df['Study'] = 'Geiger 2013'
geiger_df['Organism'] = 'mouse'
geiger_df['raw_data_units'] = 'IntensityL'
geiger_df['Age_days'] = 10 * 7 + 21 # adult
geiger_df.loc[geiger_df['location'] == 'olfactorybulb', 'location'] = 'olfactory bulb'
geiger_df = geiger_df.rename(columns={'IntensityL': 'raw_data'})
return geiger_df
def get_bai_2020_dataframe():
"""
Return pandas dataframe for Bai 2020
:return:
pandas.core.frame.DataFrame: dataframe containing Bai 2020 data.
"""
print("Importing Bai 2020 pandas dataframe.")
# mouse samples:
bai2020_f = pd.ExcelFile('../data/source_data/1-s2.0-S089662731931058X-mmc7.xlsx')
bai2020 = bai2020_f.parse('Sheet1', skiprows=4)
bai2020 = bai2020.drop(['LPC1', 'LPC2', 'HPC1', 'HPC2', 'MCI1', 'MCI2', 'AD1', 'AD2', 'PSP1', 'PSP2'],
axis=1) # these are human samples
bai2020 = bai2020.rename(columns={'Human gene name': 'gene_names',
'Human protein accession': 'Uniprot_human',
'Mouse protein accession': 'Uniprot_mouse'
})
# bai2020[bai2020['Uniprot_mouse'].str.contains('CON_ENSBTAP00000024146')] #Q61838
bai2020['Uniprot_human'] = bai2020['Uniprot_human'].str.split("|").str[1]
bai2020['Uniprot_mouse'] = bai2020['Uniprot_mouse'].str.split("|").str[1]
# [i for i in bai2020['Uniprot_mouse'].unique() if len(i)>6 ]
bai2020.loc[bai2020['Uniprot_mouse']=='CON_ENSBTAP00000024146','Uniprot_mouse'] = 'Q61838' #CON_ENSBTAP00000024146 fix by human uniprot
# [i for i in bai2020['Uniprot_human'].unique() if len(i)>6 ]
bai2020['Uniprot'] = bai2020['Uniprot_mouse'] + ";" + bai2020['Uniprot_human']
bai2020 = bai2020.drop(['Uniprot_mouse','Uniprot_human'],axis=1)
bai2020 = bai2020.drop_duplicates(keep='first')
bai2020[bai2020[['Uniprot','gene_names']].duplicated(keep=False)]
bai2020_df = pd.wide_to_long(bai2020, stubnames='tmt',
                             i=['Uniprot', 'gene_names'],
                             j='sample_id', sep='_', suffix=r'\w+')
bai2020_df = bai2020_df.reset_index()
bai2020_df['Organism'] = 'mouse'  # the source contains both human and mouse data; only the mouse samples are used here, and the human samples are imported by get_human_samples_bai_2020_dataframe()
bai2020_df['location'] = 'cortex'
bai2020_df['raw_data_units'] = 'Protein Abundance (Summerized TMT Reporter Ion Intensities)'
bai2020_df['Study'] = 'Bai 2020'
bai2020_df = bai2020_df.rename(columns = {'tmt':'raw_data'})
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT3M1', 'WT3M2','AD3M1', 'AD3M2']),'Age_days'] = 111 # 3*30+21 # 3 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT6M1', 'WT6M2', 'WT6M3', 'WT6M4', 'AD6M1', 'AD6M2', 'AD6M3', 'AD6M4']),'Age_days'] = 201 # 6*30+21 # 6 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT12M1', 'WT12M2', 'AD12M1', 'AD12M2']),'Age_days'] = 386 # 365+21 # 12 months
bai2020_df.loc[bai2020_df['sample_id'].isin(['WT3M1', 'WT3M2', 'WT6M1', 'WT6M2', 'WT6M3', 'WT6M4', 'WT12M1', 'WT12M2']),'condition'] = 'control'
bai2020_df.loc[bai2020_df['sample_id'].isin(['AD3M1', 'AD3M2', 'AD6M1', 'AD6M2', 'AD6M3', 'AD6M4','AD12M1', 'AD12M2']),'condition'] = 'AD' # 5xFAD mice Alzheimer model
return bai2020_df
def get_human_samples_bai_2020_dataframe():
"""
Return pandas dataframe for human samples Bai 2020
:return:
pandas.core.frame.DataFrame: dataframe containing human samples 2020 data.
"""
print("Importing human samples Bai 2020 pandas dataframe.")
bai2020human_f = pd.ExcelFile('../data/source_data/1-s2.0-S089662731931058X-mmc7.xlsx')
bai2020human = bai2020human_f.parse('Sheet1', skiprows=4)
bai2020human = bai2020human.drop(['tmt_WT3M1', 'tmt_WT3M2', 'tmt_WT6M1', 'tmt_WT6M2', 'tmt_WT6M3',
'tmt_WT6M4', 'tmt_WT12M1', 'tmt_WT12M2', 'tmt_AD3M1', 'tmt_AD3M2',
'tmt_AD6M1', 'tmt_AD6M2', 'tmt_AD6M3', 'tmt_AD6M4', 'tmt_AD12M1',
'tmt_AD12M2'], axis=1) # these are mouse samples
bai2020human = bai2020human.rename(columns={'Human gene name': 'gene_names',
'Human protein | |
params=params, timeout=300)
lock_status.raise_for_status()
if lock_status.json().get('data'):
if count == 0:
LOGGER.warning('Study %s has been locked', study)
LOGGER.warning('Lock info:\n%s', lock_status.json())
return True
return False
except (requests.exceptions.HTTPError,
requests.exceptions.ConnectionError) as err:
LOGGER.error('Unable to detect lock status for %s', study)
LOGGER.error('ERROR message: %s', lock_status.text)
LOGGER.exception(err)
#return True #Should I raise here?
raise
def force_notab_unlock(self, study, dv_url, apikey=None):
'''
Checks for a study lock and forcibly unlocks and uningests
to prevent tabular file processing. Required if mime and filename
spoofing is not sufficient.
**Forcible unlocks require a superuser API key.**
----------------------------------------
Parameters:
study : str
— Persistent identifier of study.
dv_url : str
— URL to base Dataverse installation.
apikey : str
— API key for user.
If not present authorization defaults to self.auth.
----------------------------------------
'''
if dv_url.endswith('/'):
dv_url = dv_url[:-1]
if apikey:
headers = {'X-Dataverse-key': apikey}
else:
headers = self.auth
params = {'persistentId': study}
lock_status = self.session.get(f'{dv_url}/api/datasets/:persistentId/locks',
headers=headers,
params=params, timeout=300)
lock_status.raise_for_status()
if lock_status.json()['data']:
LOGGER.warning('Study %s has been locked', study)
LOGGER.warning('Lock info:\n%s', lock_status.json())
force_unlock = self.session.delete(f'{dv_url}/api/datasets/:persistentId/locks',
params=params, headers=headers,
timeout=300)
force_unlock.raise_for_status()
LOGGER.warning('Lock removed for %s', study)
LOGGER.warning('Lock status:\n %s', force_unlock.json())
#This is what the file ID was for, in case it can
#be implemented again.
#According to Harvard, you can't remove the progress bar
#for uploaded tab files that squeak through unless you
#let them ingest first then reingest them. Oh well.
#See:
#https://groups.google.com/d/msgid/dataverse-community/
#74caa708-e39b-4259-874d-5b6b74ef9723n%40googlegroups.com
#Also, you can't uningest it because it hasn't been
#ingested once it's been unlocked. So the commented
#code below is useless (for now)
#uningest = requests.post(f'{dv_url}/api/files/{fid}/uningest',
# headers=headers,
# timeout=300)
#LOGGER.warning('Ingest halted for file %s for study %s', fid, study)
#uningest.raise_for_status()
def upload_file(self, dryadUrl=None, filename=None,
mimetype=None, size=None, descr=None,
md5=None, studyId=None, dest=None,
fprefix=None, force_unlock=False, timeout=300):
'''
Uploads file to Dataverse study. Returns a tuple of the
dryadFid (or None) and Dataverse JSON from the POST request.
Failures produce JSON with different status messages
rather than raising an exception.
----------------------------------------
Parameters:
filename : str
— Filename (not including path).
mimetype : str
— Mimetype of file.
size : int
— Size in bytes.
studyId : str
— Persistent Dataverse study identifier.
Defaults to Transfer.dvpid.
dest : str
— Destination dataverse installation url.
Defaults to constants.DVURL.
md5 : str
— md5 checksum for file.
fprefix : str
— Path to file, not including a trailing slash.
timeout : int
- Timeout in seconds for POST request. Default 300.
dryadUrl : str
- Dryad download URL if you want to include a Dryad file id.
force_unlock : bool
— Attempt forcible unlock instead of waiting for tabular
file processing.
Defaults to False.
The Dataverse `/locks` endpoint blocks POST and DELETE requests
from non-superusers (undocumented as of 31 March 2021).
**Forcible unlock requires a superuser API key.**
----------------------------------------
'''
if not studyId:
studyId = self.dvpid
if not dest:
dest = constants.DVURL
if not fprefix:
fprefix = constants.TMP
if dryadUrl:
fid = dryadUrl[:-len('/download')] if dryadUrl.endswith('/download') else dryadUrl  # str.strip would remove characters, not the '/download' suffix
fid = int(fid[fid.rfind('/')+1:])
else:
fid = 0 #dummy fid for non-Dryad use
params = {'persistentId' : studyId}
upfile = fprefix + os.sep + filename[:]
badExt = filename[filename.rfind('.'):].lower()
#Descriptions are technically possible, although how to add
#them is buried in Dryad's API documentation
dv4meta = {'label' : filename[:], 'description' : descr}
#if mimetype == 'application/zip' or filename.lower().endswith('.zip'):
if mimetype == 'application/zip' or badExt in constants.NOTAB:
mimetype = 'application/octet-stream' # stop unzipping automatically
filename += '.NOPROCESS' # Also screw with their naming convention
#debug log about file names to see what is up with XSLX
#see doi:10.5061/dryad.z8w9ghxb6
LOGGER.debug('File renamed to %s for upload', filename)
if size >= constants.MAX_UPLOAD:
fail = (fid, {'status' : 'Failure: MAX_UPLOAD size exceeded'})
self.fileUpRecord.append(fail)
LOGGER.warning('%s: File %s of '
'size %s exceeds '
'Dataverse MAX_UPLOAD size. Skipping.', self.doi, filename, size)
return fail
fields = {'file': (filename, open(upfile, 'rb'), mimetype)}
fields.update({'jsonData': f'{dv4meta}'})
multi = MultipartEncoder(fields=fields)
ctype = {'Content-type' : multi.content_type}
tmphead = self.auth.copy()
tmphead.update(ctype)
url = dest + '/api/datasets/:persistentId/add'
try:
upload = self.session.post(url, params=params,
headers=tmphead,
data=multi, timeout=timeout)
#print(upload.text)
upload.raise_for_status()
self.fileUpRecord.append((fid, upload.json()))
upmd5 = upload.json()['data']['files'][0]['dataFile']['checksum']['value']
if md5 and upmd5 != md5:
try:
raise exceptions.HashError(f'md5sum mismatch:\nlocal: {md5}\nuploaded: {upmd5}')
except exceptions.HashError as e:
LOGGER.exception(e)
raise
#Make damn sure that the study isn't locked because of
#tab file processing
##SPSS files still process despite spoofing MIME and extension
##so there's also a forcible unlock check
#fid = upload.json()['data']['files'][0]['dataFile']['id']
#fid not required for unlock
#self.force_notab_unlock(studyId, dest, fid)
if force_unlock:
self.force_notab_unlock(studyId, dest)
else:
count = 0
wait = True
while wait:
wait = self.file_lock_check(studyId, dest, count=count)
if wait:
time.sleep(15) # Don't hit it too often
count += 1
return (fid, upload.json())
except Exception as e:
LOGGER.exception(e)
try:
reason = upload.json()['message']
LOGGER.warning(upload.json())
return (fid, {'status' : f'Failure: {reason}'})
except Exception as e:
LOGGER.warning('Further exceptions!')
LOGGER.exception(e)
LOGGER.warning(upload.text)
return (fid, {'status' : f'Failure: Reason {upload.reason}'})
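# Usage sketch (hypothetical values; the Transfer instance and its constructor
# are defined elsewhere in this module/package):
#   fid, resp = transfer.upload_file(dryadUrl='https://example.org/api/v2/files/12345/download',
#                                    filename='readme.txt', mimetype='text/plain',
#                                    size=2048, descr='Study readme', md5=None,
#                                    studyId='doi:10.xxxx/FK2/ABCDEF')
#   if resp.get('status', '').startswith('Failure'):
#       pass  # failures are reported in the returned JSON, not raised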
def upload_files(self, files=None, pid=None, fprefix=None, force_unlock=False):
'''
Uploads multiple files to study with persistentId pid.
Returns a list of the original tuples plus JSON responses.
----------------------------------------
Parameters:
files : list
— List contains tuples with
(dryadDownloadURL, filename, mimetype, size).
pid : str
— Defaults to self.dvpid, which is generated by calling
dryad2dataverse.transfer.Transfer.upload_study().
fprefix : str
— File location prefix.
Defaults to dryad2dataverse.constants.TMP
force_unlock : bool
— Attempt forcible unlock instead of waiting for tabular
file processing.
Defaults to False.
The Dataverse `/locks` endpoint blocks POST and DELETE requests
from non-superusers (undocumented as of 31 March 2021).
**Forcible unlock requires a superuser API key.**
----------------------------------------
'''
if not files:
files = self.files
if not fprefix:
fprefix = constants.TMP
out = []
for f in files:
#out.append(self.upload_file(f[0], f[1], f[2], f[3],
# f[4], f[5], pid, fprefix=fprefix))
out.append(self.upload_file(*[x for x in f], pid, fprefix=fprefix,
force_unlock=force_unlock))
return out
def upload_json(self, studyId=None, dest=None):
'''
Uploads Dryad json as a separate file for archival purposes.
----------------------------------------
Parameters:
studyId : str
— Dataverse persistent identifier.
Default dryad2dataverse.transfer.Transfer.dvpid,
which is only generated on
dryad2dataverse.transfer.Transfer.upload_study()
dest : str
— Base URL for transfer.
Default dryad2datavese.constants.DVURL
----------------------------------------
'''
if not studyId:
studyId = self.dvpid
if not dest:
dest = constants.DVURL
if not self.jsonFlag:
url = dest + '/api/datasets/:persistentId/add'
pack = io.StringIO(json.dumps(self.dryad.dryadJson))
desc = {'description':f'Original JSON from Dryad',
'categories':['Documentation', 'Code']}
fname = self.doi[self.doi.rfind('/')+1:].replace('.', '_')
payload = {'file': (f'{fname}.json', pack, 'text/plain;charset=UTF-8'),
'jsonData':f'{desc}'}
params = {'persistentId':studyId}
try:
meta = self.session.post(f'{url}',
params=params,
headers=self.auth,
files=payload)
#0 because no dryad fid will be zero
meta.raise_for_status()
self.fileUpRecord.append((0, meta.json()))
self.jsonFlag = (0, meta.json())
LOGGER.debug('Successfully uploaded Dryad JSON to %s', studyId)
#JSON uploads randomly fail with a Dataverse server.log error of
#"A system exception occurred during an invocation on EJB . . ."
#Not reproducible, so errors will only be written to the log.
#Jesus.
except (requests.exceptions.HTTPError,
requests.exceptions.ConnectionError) as err:
LOGGER.error('Unable to upload Dryad JSON to %s', studyId)
LOGGER.error('ERROR message: %s', meta.text)
LOGGER.exception(err)
#And further checking as to what is happening
self.fileUpRecord.append((0, {'status':'Failure: Unable to upload Dryad JSON'}))
if not isinstance(self.dryad.dryadJson, dict):
LOGGER.error('Dryad JSON is not a dictionary')
except Exception as err:
LOGGER.error('Unable to upload Dryad JSON')
LOGGER.exception(err)
def delete_dv_file(self, dvfid, dvurl=None, key=None):
#WTAF curl -u $API_TOKEN: -X DELETE
#https://$HOSTNAME/dvn/api/data-deposit/v1.1/swordv2/edit-media/file/123
'''
Deletes files from Dataverse target given a dataverse file ID.
This information is unknowable unless discovered by
dryad2dataverse.monitor.Monitor or by other methods.
Returns 1 on success (204 response), or 0 on other response.
----------------------------------------
Parameters:
dvurl : str
— Base URL of dataverse instance.
Defaults to dryad2dataverse.constants.DVURL.
dvfid : str
— Dataverse file ID number.
----------------------------------------
'''
if not dvurl:
dvurl = constants.DVURL
if not key:
key = constants.APIKEY
delme = self.session.delete(f'{dvurl}/dvn/api/data-deposit/v1.1/swordv2/edit-media'
f'/file/{dvfid}',
auth=(key, ''))
if delme.status_code == 204:
self.fileDelRecord.append(dvfid)
return 1
return 0
def delete_dv_files(self, dvfids=None, dvurl=None, key=None):
'''
Deletes all files in list of Dataverse file ids from
a Dataverse installation.
----------------------------------------
Parameters:
dvfids : list
— List of Dataverse file ids.
Defaults to dryad2dataverse.transfer.Transfer.fileDelRecord.
dvurl : str
— Base URL of Dataverse. Defaults to dryad2dataverse.constants.DVURL.
key : str
— API key for Dataverse. Defaults to dryad2dataverse.constants.APIKEY.
----------------------------------------
'''
#if not dvfids:
# dvfids = self.fileDelRecord
if not dvurl:
dvurl = constants.DVURL
if not key:
key | |
def __init__(self, norm_df, paras, window_s=5.0, speed_range=(1,60),
plot_final=False, plot_progress=False, save_dir='./'):
"""
Initialize the algorithm
:param norm_df: the data used for detecting a vehicle. Columns are:
'pir_0x0', 'pir_1x0', ..., 'ultra', 'Tamb_1', 'Tamb_2'
:param paras: the parameters used for this algorithm
:param window_s: the time window of norm_df
:param speed_range: the speed range
:param plot_final: True or False, plot the final fitted result
:param save_dir: directory for saving the result
:return:
"""
# ------------------------------------------------------------
# Important properties
self.paras = paras
self.window_s = window_s
self.speed_range = speed_range
self.mps2mph = 2.23694 # 1 m/s = 2.23694 mph
self.save_dir = save_dir
self.plot_final = plot_final
self.plot_progress = plot_progress
# ------------------------------------------------------------
# determine the direction of the slopes; only consider the correct direction
if self.speed_range[0] >= 0 and self.speed_range[1] >= 0:
# positive direction
self.direction = 'positive'
elif self.speed_range[0] <= 0 and self.speed_range[1] <= 0:
# negative direction
self.direction = 'negative'
else:
# both direction
self.direction = 'both'
# ------------------------------------------------------------
# Nonlinear transform
# x_grid is 128 x 1, with each 4 row duplicated to the same position
self.x_grid = self._new_nonlinear_transform()
self.init_dt, self.end_dt = norm_df.index[0], norm_df.index[-1]
self.t_grid = np.asarray([(t - norm_df.index[0]).total_seconds() for t in norm_df.index])
# ------------------------------------------------------------
# Convert the PIR matrix (n_samples x 128) to a list of data points tuples
_time = []
_space = []
i = 0
for cur_t, row in norm_df.iterrows():
for col in range(0, self.paras['pir_res'][0] * self.paras['pir_res'][1]):
if ~np.isnan(row.values[col]):
_time.append(self.t_grid[i])
_space.append(self.x_grid[col])
i += 1
# remove duplicated data points
pts = set(zip(_time, _space))
l_pts = np.asarray([list(i) for i in pts])
self.pts_time = l_pts[:, 0]
self.pts_space = l_pts[:, 1]
# ------------------------------------------------------------
# construct a conversion between the point list and point matrix
# - Given clus as the index of points in the list.
# Then self.idx_time[clus] gives the column index, and self.idx_space[clus] gives the row index
# - Given t_idx, and x_idx tuples of the entries.
# Then self.mat2list[x_idx, t_idx] gives the index for pts in the list
self._t_grid = sorted(list(set(self.t_grid)))
self._x_grid = sorted(list(set(self.x_grid)))
self.mat2list = -np.ones((self.paras['pir_res'][1], len(self._t_grid))).astype(int)
# construct a time space domain with len(x_grid) x len(t_grid) and save the unique data points
# For self.tx_cor:
# np.nan - no data point;
# 0.0 - not labeled yet
# < 0 ~ initial DBSCAN cluster, can be merged
# > 0 ~ final expanded cluster, can not be merged
self.tx_cor = np.ones((len(self._x_grid), len(self._t_grid)))*np.nan
self.idx_time = []
self.idx_space = []
for i in xrange(0, len(self.pts_time)):
t_idx, s_idx = self._t_grid.index( self.pts_time[i] ), self._x_grid.index( self.pts_space[i] )
# save the index of points for clustering
self.idx_time.append( t_idx )
self.idx_space.append( s_idx )
# mark the points in the matrix for expanding the trajectory
self.tx_cor[s_idx, t_idx] = 0.0
self.mat2list[s_idx, t_idx] = i
self.idx_time = np.asarray(self.idx_time)
self.idx_space = np.asarray(self.idx_space)
# ------------------------------------------------------------
# save the final trajectories and information
# a list of pts index for each trajectory
self.all_traj = []
# a list of list of int, one for each trajectory, if no median, set as np.nan
self.all_medians = []
# a list of list, one for each trajectory, if no data row, set as 0 or np.nan
self.all_widths = []
# a list of int, one for each trajectory
self.all_traj_num_rows = []
# a list of list of int, one for each trajectory, gives the percentile
self.all_percentile_lb = []
# a list of list of int, one for each trajectory, gives the percentile
self.all_percentile_ub = []
# the structure saving all vehicles in correct format.
def estimate_slope(self):
"""
This function estimates the speed of the vehicle
:return:
"""
# ================================================================================
# First use DBSCAN on the index space of points to cluster the initial inliers
clusters = self._init_clusters_idx()
# self._plot_clusters(clusters, title='DBSCAN', save_name= None, option='index')
# ================================================================================
# Second, expand each cluster to include all data point in its trajectory
for i, clus in enumerate(clusters):
print('\n$$$ Expanding cluster {0}...'.format(i+1))
self.expand_traj(clus, i+1)
# ================================================================================
# Third, convert each trajectory to the predefined format
# ================================================================================
# Finally, print and visualize the result
print('\nFinalized {0} trajectories'.format(len(self.all_traj)))
for i, clus in enumerate(self.all_traj):
print('------ trajectory {0}: {1} pts'.format(i, len(clus)))
if self.plot_final:
self._plot_fitted_clusters(clusters=None, title='{0}'.format(time2str(self.init_dt)),
save_name='{0}'.format(time2str_file(self.init_dt)))
def convert_to_veh(self):
"""
This function converts the detected trajectories into veh dicts with the predefined keys
- 'line': (k,c), s = kt+c, where t is in seconds, and s is relative space after nonlinear transformation
- 't_in': datetime, vehicle enter time
- 't_out': datetime, vehicle exit time
- 'detection_window': tuple of datetime, the start and end time of the detection window
- 'medians': a list of tuples, each tuple is (t,x), t is datetime
- 'inliers': [(t,s)], a list of tuples, each tuple is (t,x), t is datetime
:return:
"""
all_vehs = []
for i, clus in enumerate(self.all_traj):
veh = OrderedDict()
# ---------------------------------------------------------------
# compute the line through the median: line = (k,c), where x = kt+c
line = self.run_lr(self.all_medians[i])
veh['line'] = line
# ---------------------------------------------------------------
# compute the t_in and t_out
t_l, t_r = (self._x_grid[0]-line[1])/line[0], (self._x_grid[-1]-line[1])/line[0]
if t_l > t_r:
t_l, t_r = t_r, t_l
veh['t_in'] = self.init_dt + timedelta(seconds=t_l)
veh['t_out'] = self.init_dt + timedelta(seconds=t_r)
# ---------------------------------------------------------------
# save the time window for this detection
veh['detection_window'] = (self.init_dt, self.end_dt)
# ---------------------------------------------------------------
# save the medians in tuples (t datetime, x relative)
medians_tx = []
for row, col in enumerate(self.all_medians[i]):
if ~np.isnan(col):
# convert units to datetime from seconds
t_sec, x_loc = self.rowcol_to_loc(row, col)
medians_tx.append( [ self.init_dt + timedelta(seconds=t_sec) , x_loc ] )
veh['medians'] = np.asarray(medians_tx)
# ---------------------------------------------------------------
# save the list of inliers in (t datetime, x relative)
pts_t = [ self.init_dt + timedelta(seconds=self.pts_time[pt_idx]) for pt_idx in self.all_traj[i] ]
# for pt_idx in self.all_traj[i]:
# inliers.append( [ self.init_dt + timedelta(seconds=self.pts_time[pt_idx]),
# self.pts_space[pt_idx]] )
veh['inliers'] = zip( pts_t, self.pts_space[self.all_traj[i]] )
# ---------------------------------------------------------------
# append to all vehicles
all_vehs.append(veh)
return all_vehs
def run_lr(self, medians):
"""
This function runs linear regression through the medians, and returns a line
line = (k,c) where x = kt+c
:param medians: 32 x1 array, each row contains the time index of the median in that row
:return:
line = (k,c) where x = kt+c
"""
# convert the medians to the actual time space locations in units
# (seconds, relative space after nonlinear transform)
tx = []
for x, t in enumerate(medians):
if ~np.isnan(t):
tx.append( self.rowcol_to_loc(x, t) )
tx = np.asarray(tx)
t, x = tx[:,0], tx[:,1]
# t = _slope*s + _intercept
_slope, _intercept, _r_value, _p_value, _std_err = scipy.stats.linregress(x, t)
# convert to the line format: s = kt + c
line = np.array([1 / _slope, -_intercept / _slope])
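# (Algebra check: linregress fits t = _slope*x + _intercept, so
#  x = (t - _intercept)/_slope, i.e. k = 1/_slope and c = -_intercept/_slope.)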
return line
def _init_clusters_idx(self):
"""
This function performs DBSCAN in the index space of data points to identify initial inliers
:return: [cluster_1, cluster_2], each cluster is a list of int (indices) of idx_time, idx_space
"""
clusters = []
samples = np.vstack([self.idx_time, self.idx_space]).T
if len(samples) == 0:
return []
y_pre = DBSCAN(eps=self.paras['DBSCAN_r'], min_samples=self.paras['DBSCAN_min_pts']).fit_predict(samples)
num_clusters = len(set(y_pre)) - (1 if -1 in y_pre else 0)
y_pre = np.asarray(y_pre)
# print out the clustering information
print('{0} clusters:'.format(num_clusters))
for i in range(0, num_clusters):
print('-- Cluster {0}: {1} pts'.format(i, sum(y_pre == i)))
# convert clusters to list of indices
for cluster_label in range(0, num_clusters):
clus = (y_pre == cluster_label)
clusters.append([i for i, x in enumerate(clus) if x])
# ----------------------------------------------------------------------------------
# set the clusters in tx_cor
for i, clus in enumerate(clusters):
self.tx_cor[self.idx_space[clus], self.idx_time[clus]] = -(i+1)
return clusters
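# Note (descriptive, not from the original source): DBSCAN here operates purely
# in (column, row) index units, so eps=self.paras['DBSCAN_r'] is a radius in
# grid cells and min_samples=self.paras['DBSCAN_min_pts'] is the minimum number
# of PIR hits within that radius; label -1 marks noise points, which is why it
# is excluded when counting clusters above.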
def expand_traj(self, clus, cluster_idx):
"""
This function expands the data points in each cluster to include the full trajectory
:param clus: index of pts
:param cluster_idx: the index for this cluster
:return: updated_clus, num_data_rows
"""
clus = list(clus)
updated_clus = []
_, num_cols = self.tx_cor.shape
# ----------------------------------------------------------------------------------
# save the width and medians of each data row in 0~32 rows in the array
# traj_data is a 32 x n list. Each row contains the time index at that row
traj_data = [[] for i in xrange(self.paras['pir_res'][1])]
widths = np.ones(self.paras['pir_res'][1])*np.nan
medians = np.ones(self.paras['pir_res'][1])*np.nan
percentile_lb = np.ones(self.paras['pir_res'][1])*np.nan
| |
ksize=20,
do_partition=False,
annotate_partitions=False,
stop_big_traverse=False):
script = 'load-graph.py'
args = ['-x', str(min_hashsize), '-N', str(n_hashes), '-k', str(ksize)]
outfile = utils.get_temp_filename('out')
infile = infilename
args.extend([outfile, infile])
utils.runscript(script, args)
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert os.path.exists(tagset_file), tagset_file
if do_partition:
script = 'partition-graph.py'
args = [outfile]
if stop_big_traverse:
args.insert(0, '--no-big-traverse')
utils.runscript(script, args)
script = 'merge-partitions.py'
args = [outfile, '-k', str(ksize)]
utils.runscript(script, args)
final_pmap_file = outfile + '.pmap.merged'
assert os.path.exists(final_pmap_file)
if annotate_partitions:
script = 'annotate-partitions.py'
args = ["-k", str(ksize), outfile, infilename]
in_dir = os.path.dirname(outfile)
utils.runscript(script, args, in_dir)
baseinfile = os.path.basename(infilename)
assert os.path.exists(os.path.join(in_dir, baseinfile + '.part'))
return outfile
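# Pipeline overview for this helper (as exercised by the tests below):
# load-graph.py builds the nodegraph and .tagset from the input reads;
# partition-graph.py writes partition-map subsets; merge-partitions.py merges
# them into <base>.pmap.merged; and annotate-partitions.py writes <input>.part
# with the partition ID appended (tab-separated) to each sequence name.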
def test_partition_graph_1():
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))
utils.runscript('partition-graph.py', [graphbase])
utils.runscript('merge-partitions.py', [graphbase, '-k', '20'])
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x == (1, 0), x # should be exactly one partition.
def test_partition_graph_nojoin_k21():
# test with K=21
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'), ksize=21)
script = 'partition-graph.py'
args = [graphbase]
utils.runscript(script, args)
script = 'merge-partitions.py'
args = [graphbase, '-k', str(21)]
utils.runscript(script, args)
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x == (99, 0), x # should be 99 partitions at K=21
def test_partition_load_empty_pmap():
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'), ksize=24)
script = 'partition-graph.py'
args = [graphbase, '-s', '10']
utils.runscript(script, args)
script = 'merge-partitions.py'
args = [graphbase, '-k', '24']
status, out, err = utils.runscript(script, args, fail_ok=True)
assert status == -1
assert 'only a header and no partition IDs' in err
def test_partition_graph_nojoin_stoptags():
# test with stoptags
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))
# add in some stop tags
ht = khmer.load_nodegraph(graphbase)
ht.add_stop_tag('TTGCATACGTTGAGCCAGCG')
stoptags_file = graphbase + '.stoptags'
ht.save_stop_tags(stoptags_file)
del ht
# run script with stoptags option
script = 'partition-graph.py'
args = ['--stoptags', stoptags_file, graphbase]
utils.runscript(script, args)
script = 'merge-partitions.py'
args = [graphbase, '-k', str(20)]
utils.runscript(script, args)
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x == (2, 0), x # should be 2 partitions
def test_partition_graph_big_traverse():
graphbase = _make_graph(utils.get_test_data('biglump-random-20-a.fa'),
do_partition=True, stop_big_traverse=False)
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x == (1, 0), x # should be exactly one partition.
def test_partition_graph_no_big_traverse():
# do NOT exhaustively traverse
graphbase = _make_graph(utils.get_test_data('biglump-random-20-a.fa'),
do_partition=True, stop_big_traverse=True)
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x[0] == 4, x # should be four partitions, broken at knot.
def test_partition_find_knots_execute():
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))
script = 'partition-graph.py'
args = [graphbase]
utils.runscript(script, args)
script = 'find-knots.py'
args = [graphbase]
utils.runscript(script, args)
stoptags_file = graphbase + '.stoptags'
assert os.path.exists(stoptags_file)
def test_partition_find_knots_existing_stoptags():
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))
script = 'partition-graph.py'
args = [graphbase]
utils.runscript(script, args)
script = 'make-initial-stoptags.py'
args = [graphbase]
utils.runscript(script, args)
script = 'find-knots.py'
args = [graphbase]
(status, out, err) = utils.runscript(script, args)
stoptags_file = graphbase + '.stoptags'
assert os.path.exists(stoptags_file)
assert "loading stoptags" in err, err
assert "these output stoptags will include the already" in err, err
def test_partition_graph_too_many_threads():
graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))
utils.runscript('partition-graph.py', [graphbase, '--threads', '100'])
utils.runscript('merge-partitions.py', [graphbase, '-k', '20'])
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
ht = khmer.load_nodegraph(graphbase)
ht.load_tagset(graphbase + '.tagset')
ht.load_partitionmap(final_pmap_file)
x = ht.count_partitions()
assert x == (1, 0), x # should be exactly one partition.
def test_annotate_partitions():
seqfile = utils.get_test_data('random-20-a.fa')
graphbase = _make_graph(seqfile, do_partition=True)
in_dir = os.path.dirname(graphbase)
# get the final pmap file
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
script = 'annotate-partitions.py'
args = ["-k", "20", graphbase, seqfile]
utils.runscript(script, args, in_dir)
partfile = os.path.join(in_dir, 'random-20-a.fa.part')
parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
parts = set(parts)
assert '2' in parts
assert len(parts) == 1
def test_annotate_partitions_2():
# test with K=21 (no joining of sequences)
seqfile = utils.get_test_data('random-20-a.fa')
graphbase = _make_graph(seqfile, do_partition=True,
ksize=21)
in_dir = os.path.dirname(graphbase)
# get the final pmap file
final_pmap_file = graphbase + '.pmap.merged'
assert os.path.exists(final_pmap_file)
script = 'annotate-partitions.py'
args = ["-k", "21", graphbase, seqfile]
utils.runscript(script, args, in_dir)
partfile = os.path.join(in_dir, 'random-20-a.fa.part')
parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
parts = set(parts)
print(parts)
assert len(parts) == 99, len(parts)
def test_extract_partitions():
seqfile = utils.get_test_data('random-20-a.fa')
graphbase = _make_graph(
seqfile, do_partition=True, annotate_partitions=True)
in_dir = os.path.dirname(graphbase)
# get the final part file
partfile = os.path.join(in_dir, 'random-20-a.fa.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['extracted', partfile]
utils.runscript(script, args, in_dir)
distfile = os.path.join(in_dir, 'extracted.dist')
groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
assert os.path.exists(distfile)
assert os.path.exists(groupfile)
dist = open(distfile).readline()
assert dist.strip() == '99 1 1 99'
parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
assert len(parts) == 99, len(parts)
parts = set(parts)
assert len(parts) == 1, len(parts)
def test_extract_paired_inconsistent_formats():
fa_seqfile = utils.get_test_data('random-20-a.fa')
fq_seqfile = utils.get_test_data('random-20-a.fq')
graphbase = _make_graph(
fa_seqfile, do_partition=True, annotate_partitions=True)
fa_in_dir = os.path.dirname(graphbase)
graphbase = _make_graph(
fq_seqfile, do_partition=True, annotate_partitions=True)
fq_in_dir = os.path.dirname(graphbase)
# XXX
# get the final part file
fa_partfile = os.path.join(fa_in_dir, 'random-20-a.fa.part')
fq_partfile = os.path.join(fq_in_dir, 'random-20-a.fq.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['extracted', fa_partfile, fq_partfile]
failed = True
try:
utils.runscript(script, args, fa_in_dir)
failed = False
except AssertionError as err:
assert "Input files must have consistent format." in str(err), err
assert failed, "Expected to fail"
def test_extract_partitions_header_whitespace():
seqfile = utils.get_test_data('test-overlap2.fa')
graphbase = _make_graph(
seqfile, do_partition=True, annotate_partitions=True)
in_dir = os.path.dirname(graphbase)
# get the final part file
partfile = os.path.join(in_dir, 'test-overlap2.fa.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['extracted', partfile]
utils.runscript(script, args, in_dir)
distfile = os.path.join(in_dir, 'extracted.dist')
groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
assert os.path.exists(distfile)
assert os.path.exists(groupfile)
dist = open(distfile).readline()
assert dist.strip() == '1 11960 11960 11960', dist.strip()
parts = [r.name.split('\t')[1]
for r in screed.open(partfile)]
assert len(parts) == 13538, len(parts)
parts = set(parts)
assert len(parts) == 12602, len(parts)
def test_extract_partitions_fq():
seqfile = utils.get_test_data('random-20-a.fq')
graphbase = _make_graph(
seqfile, do_partition=True, annotate_partitions=True)
in_dir = os.path.dirname(graphbase)
# get the final part file
partfile = os.path.join(in_dir, 'random-20-a.fq.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['extracted', partfile]
utils.runscript(script, args, in_dir)
distfile = os.path.join(in_dir, 'extracted.dist')
groupfile = os.path.join(in_dir, 'extracted.group0000.fq')
assert os.path.exists(distfile)
assert os.path.exists(groupfile)
dist = open(distfile).readline()
assert dist.strip() == '99 1 1 99'
screed_iter = screed.open(partfile)
names = [r.name.split('\t')[0] for r in screed_iter]
assert '35 1::FOO' in names
assert '46 1::FIZ' in names
screed_iter = screed.open(partfile)
parts = [r.name.split('\t')[1] for r in screed_iter]
assert len(parts) == 99, len(parts)
parts = set(parts)
assert len(parts) == 1, len(parts)
quals = set([r.quality for r in screed.open(partfile)])
quals = list(quals)
assert quals[0], quals
def test_extract_partitions_output_unassigned():
seqfile = utils.get_test_data('random-20-a.fa')
graphbase = _make_graph(
seqfile, do_partition=True, annotate_partitions=True)
in_dir = os.path.dirname(graphbase)
# get the final part file
partfile = os.path.join(in_dir, 'random-20-a.fa.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['-U', 'extracted', partfile]
utils.runscript(script, args, in_dir)
distfile = os.path.join(in_dir, 'extracted.dist')
groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
unassigned_file = os.path.join(in_dir, 'extracted.unassigned.fa')
assert os.path.exists(distfile)
assert os.path.exists(groupfile)
assert os.path.exists(unassigned_file)
dist = open(distfile).readline()
assert dist.strip() == '99 1 1 99'
parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
assert len(parts) == 99, len(parts)
parts = set(parts)
assert len(parts) == 1, len(parts)
def test_extract_partitions_no_output_groups():
seqfile = utils.get_test_data('random-20-a.fq')
graphbase = _make_graph(
seqfile, do_partition=True, annotate_partitions=True)
in_dir = os.path.dirname(graphbase)
# get the final part file
partfile = os.path.join(in_dir, 'random-20-a.fq.part')
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['-n', 'extracted', partfile]
# We expect a sys.exit -> we need the test to be tolerant
status, out, err = utils.runscript(script, args, in_dir)
assert "NOT outputting groups! Beware!" in err
# Group files are created after output_groups is
# checked. They should not exist in this scenario
groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
assert not os.path.exists(groupfile)
def test_extract_partitions_pid_0():
partfile = utils.copy_test_data('random-20-a.fa.part')
in_dir = os.path.dirname(partfile)
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['-U', 'extracted', partfile]
utils.runscript(script, args, in_dir)
distfile = os.path.join(in_dir, 'extracted.dist')
groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
unassigned_file = os.path.join(in_dir, 'extracted.unassigned.fa')
assert os.path.exists(distfile)
assert os.path.exists(groupfile)
assert os.path.exists(unassigned_file)
# Assert unassigned file not empty
unassigned_content = open(unassigned_file).readline()
assert unassigned_content.strip().split('\t')[0] != ''
def test_extract_partitions_multi_groups():
partfile = utils.copy_test_data('random-20-a.fa.part')
in_dir = os.path.dirname(partfile)
# ok, now run extract-partitions.
script = 'extract-partitions.py'
args = ['-m', '1', '-X', '1', 'extracted', partfile]
utils.runscript(script, args, in_dir)
    # Multiple group files should be created
groupfile1 = os.path.join(in_dir, 'extracted.group0000.fa')
groupfile2 = os.path.join(in_dir, 'extracted.group0001.fa')
groupfile3 = os.path.join(in_dir, 'extracted.group0002.fa')
assert os.path.exists(groupfile1)
assert os.path.exists(groupfile2)
#!/usr/bin/env python3
#Works on at least Python 3.6 from what I've tried, although most testing
#has been done on Python 3.7.
import sys
import os
#version string
#this changed significantly, but I've been standardizing them now
#X.Y-release will always be newer than X.Y-beta
#2.9 will always be earlier than 2.10
ezpyle_major_version=2
ezpyle_minor_version=0
ezpyle_extra_version="beta 6"
ezpyle_version=f"v{ezpyle_major_version}.{ezpyle_minor_version}-{ezpyle_extra_version}"
"""
ezpyle -- a friendly line editor written in Python
(C) 2021, 2022 B.M.Deeal
Distributed under the ISC license, see the provided isc-license.txt or
visit <https://opensource.org/licenses/ISC> for details.
This was written entirely so I could edit text with my old Windows CE
system while hooked up to a raspi over serial, while using a nicer,
more user friendly editor than ed or ex. The VT100 emulation on the
system doesn't play nicely with nano or vi or anything much.
At some point, I might consider porting this to C or C++, so it can
be used on systems without Python 3.x.
This has been used at the astonishingly low rate of 1200 baud. In
fact, not merely used, but comfortably used. At least, as comfortably
as one might expect a line editor to be.
Also, I wish I wrote this twelve years ago, back when I had a
Blackberry, and had to attempt to use SSH over a particularly poor
terminal emulation with a particularly iffy EDGE internet connection.
Would have made it a lot more bearable. :P
Could juuuust barely use vi on it, emphasis on barely.
Terrible, terrible screen update issues everywhere.
Hopefully, this thing is useful to you in some way.
Cheers!
-- B.M.Deeal.
changelog:
2.0 (in progress):
* TODO: investigate a bug in mv
- this is the show stopper before 2.0-release
- it's almost certainly something simple
- I think it's just an issue with the preview
* revamped file display, made more interactive
- need to add configurable view length
* initial implementation of replace
* line split command
* handles ^C and ^D better
* several new commands, line join, line move, file navigation, etc
0001r1:
* made all the prompts friendlier, some were quite terse
it turns out that even at 1200baud, just being a little friendly
doesn't take THAT much time
* changed qqq to qq
0001r0:
* initial version
design notes:
* split is a bit weird
- the obvious way is to just specify the character to split at
- we do a search for a given string and split there
- I like it a lot, it's VERY convenient
- but I don't know how a new user would deal with it
* the way we handle the last line in a file is odd
- there should also be a way to save a file without a trailing
newline probably
* how to handle save/save as?
- right now, we just make all saving done as save as
- which means you need to re-type the filename
- we could just ask whether to use the current filename (TODO)
* something something ls
- we used to have how much text to display be a runtime option
definite TODO list:
* file info (number of lines, characters, words)
* multi-split (to make inter-line editing easier)
* text search, replace
* append/prepend text to an existing line (vs just re-typing it)
* really, every commented out command in the help
* backups (really, just rename the existing file before save)
* refactoring functions
wishlist that I may or may not get to:
* save vs save as
* cut/paste buffer that works something like the nano one
* undo (even if it's only like, single level undo)
* possibly multiple file loading (but like, if I have to edit that
much text under line editor conditions, I'd begin to scream and
would not stop)
* make it so that commands can take arguments, you currently have
to hit enter between the command and the data because I'm lazy
* refactor -- nearly everything happens outside of the object,
which doesn't really make sense
* test this with a text to speech system lol, I'm fairly sure it
should be entirely usable without a display of any variety, but at
the same time, I might need to make it so that I can alter prompts
to make them more friendly
* any work done on that would also allow for localization
"""
class Filedata:
"""
Holds the data for a text file.
"""
def clear(self):
"""
Reset the file to be empty.
"""
self.dirty=False
self.name=""
self.data=[]
self.line=0
def loadfile(self, filename):
"""
Read a file from disk.
Complains if the file cannot be read.
"""
try:
#expand ~, and this even works on Windows
loadpath=os.path.expanduser(filename)
#read and split lines
            #we don't use readlines because we don't want a trailing \n
with open(loadpath, "r") as f:
self.clear()
self.data=f.read().splitlines()
self.line=len(self.data)
self.name=filename
print(f"Loaded {len(self.data)} lines from disk.")
except IOError:
print("error: could not load file!")
def writefile(self, filename):
"""
Write the file to disk.
Complains if the file cannot be written.
"""
try:
#expand ~, and this even works on Windows apparently
savepath=os.path.expanduser(filename)
#write each line; I think this automagically adds a newline to
#the last line of the file, as is proper
with open(savepath, "w") as f:
for line in self.data:
f.write(line)
f.write("\n")
print(f"Saved {len(self.data)} lines to disk.")
self.name=filename
self.dirty=False
except IOError:
print("error: could not write file!")
def __init__(self):
"""
Initialize the file, just clears it out.
"""
self.clear()
def showfile(self, start=0):
"""
Displays the file to the screen, starting at a given line.
"""
if start<0:
start=0
        view=self.data[start:]
#display whole file
#may or may not be useful depending on your device
#on a real paper TTY, this would be dead useful
#check if file isn't empty
if len(view)==0:
print("Nothing to show.")
return
#display file
num=0
for line in view:
marker=":"
#indicate current line
            if num+start==self.line:
marker="*"
print(f"{num+start+1}{marker} {line}")
#pause every few lines
#TODO: probably should be a configurable option
if num%10 == 9:
#confirm=input("Show more? [y]/n? > ").lower()
#if confirm in ("n", "no"):
confirm=ynprompt('y', "Show more?")
if not confirm:
break
num=num+1
c_file=Filedata() #current file
def helptext():
"""Show help to the user."""
#TODO: display this in pages, like if the user used list
print("commands:")
print("* new, newfile - start a new, empty file")
print("* st, stats, info - show file statistics") #TODO: incomplete
print("* i, ins, insert - insert a line before the current")
print("* a, app, append - insert a line after the current")
#print("* add, atch, attach - add text before/after the current line") #TODO
print("* j, jmp, jump - jump to a given line")
print("* p, [, prev, previous - go back one line")
print("* n, ], next - go forward one line")
#print("* s, search, find - locate a string in the file") #TODO
print("* r, replace - replace a string in this line with another")
print("* jn, join, cat - join line with the next")
print("* sp, split - split a line into two")
print("* mv, move - move a line")
print("* dd, delete - delete the current line")
print("* wf, write, save - write the file to disk")
print("* lf, load, open - load a file from disk")
print("* l, ls, list - show a given amount of lines")
print("* sl, showline, ll - show the current line between quotes")
print("* la, al, als, listall - show the entire file")
print("* qq, quit, exit - exit program")
print()
print("All commands must be on their own lines.")
print("Commands will prompt for data as needed.")
print()
#TODO: replace all prompts with this
#TODO: might add a way to cancel?
#Current design is that operations are always canceled at the end...
#...but that's a bit clunky.
def ynprompt(default=None, message=""):
"""
Ask a yes/no question.
Returns True for yes, False for no.
message is any text to show at the prompt.
"""
#handle defaults
if default in ("y", "yes"):
default=True
prompt="[y]/n"
elif default in ("n", "no"):
default=False
prompt="y/[n]"
else:
default=None
prompt="y/n"
#add a space for the message if present
if message!="":
message=f"{message} "
#loop until a valid result is given
while True:
result_orig=input(f"{message}{prompt} > ")
result=result_orig.lower().strip()
#normal y/n + aliases
if result in ("y", "yes"):
return True
elif result in ("n", "no"):
return False
#empty string with no default
        elif result=="" and default is not None:
return default
#show message, loop again
print(f"error: could not understand '{result_orig}'!")
print("Valid options are yes or no.")
def main():
"""
Show the intro, handle arguments, and then start accepting commands.
"""
#show intro, clear file data
c_file.clear()
print(f"Welcome to ezpyle {ezpyle_version}.")
print("(C) 2021, 2022 B.M.Deeal.")
print("Type ? for help.")
#load a file passed on the command line
#TODO: any kind of real argument handling
if len(sys.argv) > 1:
print (f"Opening file '{sys.argv[1]}'...")
c_file.loadfile(sys.argv[1])
#run the main loop, deal with ^C/^D
while True:
try:
mainloop()
#TODO: better handling of this
#also, no command should depend on ^C to escape!
except KeyboardInterrupt:
print("\nKeyboard interrupt. Type qq to quit.")
except EOFError:
print("\nEnd of file detected. Type qq to quit.")
def cmd_quit():
"""
Quit command.
"""
#check dirty file indicator, prompt to quit if dirty
if c_file.dirty:
print("File not saved! Quit anyway?")
confirm=ynprompt('n')
if not confirm:
print("Did not quit.")
return
print("Exiting.")
sys.exit()
def cmd_replace(thisline):
"""
Text replace command.
Currently only replaces the first found on the line.
Doesn't search the whole file, only the current line.
(both are TODO, naturally -- might be added to a different command)
"""
#bounds check
if thisline<0:
print("Nothing to replace.")
return
#ask for what to replace
print("Replacing in line:")
print(f"{thisline+1}* {c_file.data[thisline]}") #TODO: this should be a method of | |
            lines = filter(lambda x: len(x[:-1]) > 0, lines)
for line in lines:
data = re.split("\s+", line)
if data[0] == "//":
break
if line[0] == '#':
if data[0] == "#=GC":
id, fragment = data[1:3]
else:
self.mComments.append(line)
continue
if id not in annotations:
annotations[id] = []
annotations[id].append(fragment)
else:
if len(data) > 2:
raise ValueError, "parsing error in line %s" % line
elif len(data) == 1:
# treat empty alignments/lines
id = data[0]
fragment = ""
else:
id, fragment = data
if id not in fragments:
fragments[id] = []
self.mIdentifiers.append(id)
fragments[id].append(fragment)
n = []
for id in self.mIdentifiers:
f = fragments[id]
s = re.sub("\s", "", string.join(f, ""))
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
n.append(id)
self.mMali[id] = AlignedString(id, fr, to, s)
self.mIdentifiers = n
for id, f in annotations.items():
s = re.sub("\s", "", string.join(f, ""))
annotations[id] = s
self.mAnnotations = annotations
else:
raise "unknown alignment format %s" % format
if len(self.mMali) == 0:
self.mLength = 0
else:
self.mLength = min(
map(lambda x: len(x.mString), self.mMali.values()))
def writeToFile(self, outfile, write_ranges=True, format="plain", options=None):
"""write alignment to file.
If options is given, these lines are output into the multiple alignment.
"""
if format == "plain-fasta":
format = "fasta"
write_ranges = False
write_ranges = write_ranges and self.mWriteRanges
if format == "plain":
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
outfile.write("%i\t%s\t%i\t%s\n" % (
m.mFrom + 1, m.mString, m.mTo, identifier))
elif format == "fasta":
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
if write_ranges:
outfile.write(">%s/%i-%i\n%s\n" %
(identifier, m.mFrom + 1, m.mTo, m.mString))
else:
outfile.write(">%s\n%s\n" % (identifier, m.mString))
elif format == "stockholm":
outfile.write("# STOCKHOLM 1.0\n")
if options:
for o in options:
outfile.write("%s\n" % o)
# calculate offset:
max_l = 0
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
# tab does not work as separator
if m.mTo and write_ranges:
x = "%s/%i-%i" % (identifier, m.mFrom + 1, m.mTo)
else:
x = "%s" % (identifier)
max_l = max(max_l, len(x))
for identifier in self.mAnnotations.keys():
x = "#=GC %s" % identifier
max_l = max(max_l, len(x))
format = "%-" + str(max_l) + "s %s\n"
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
# tab does not work as separator
if m.mTo and write_ranges:
x = "%s/%i-%i" % (identifier, m.mFrom + 1, m.mTo)
else:
x = "%s" % (identifier)
outfile.write(format % (x, m.mString))
for identifier, value in self.mAnnotations.items():
x = "#=GC %s" % identifier
outfile.write(format % (x, value))
outfile.write("//\n")
elif format == "phylip":
outfile.write("%i %i\n" % (self.getLength(), self.getWidth()))
for identifier in self.mIdentifiers:
outfile.write("%s %s\n" %
(identifier, self.mMali[identifier].mString))
elif format.lower() == "profile":
if self.mName:
name = self.mName
else:
name = ",".join(self.mIdentifiers)
outfile.write(">profile=%s length=%i width=%i\n" %
(name, self.getWidth(), self.getLength()))
for identifier in self.mIdentifiers:
outfile.write("%s\n" % (self.mMali[identifier].mString))
elif format == "nexus":
# nexus formatted output - MrBayes conformant.
outfile.write("#NEXUS\n")
outfile.write("begin data;\n")
outfile.write(" dimensions ntax=%i nchar=%i;\n" %
(self.getLength(), self.getWidth()))
outfile.write(
" format datatype=dna interleave=no gap=%s;\n" % (self.mGapChar))
outfile.write(" matrix\n")
max_len = max(map(lambda x: len(x), self.mIdentifiers))
format = " %-" + str(max_len) + "s %s\n"
for identifier in self.mIdentifiers:
outfile.write(format %
(identifier, self.mMali[identifier].mString))
outfile.write(" ;\n")
outfile.write("end;\n")
else:
raise "unknown alignment format %s" % format
def removeUnalignedEnds(self):
"""remove unaligned ends in the multiple alignment.
unaligned ends correspond to lower-case characters.
"""
pattern_start = re.compile("^([- .a-z]+)")
pattern_unaligned = re.compile("[a-z]")
for s in self.mMali.values():
first = pattern_start.match(s.mString)
if first:
first = first.groups()[0]
nchars = len(pattern_unaligned.findall(first))
s.mFrom += nchars
s.mString = self.mGapChar * len(first) + s.mString[len(first):]
# search from the back end by reversing. This is much faster than
# using $ from the back.
last = pattern_start.match(s.mString[::-1])
if last:
last = last.groups()[0]
nchars = len(pattern_unaligned.findall(last))
s.mTo -= nchars
l = len(s) - len(last)
                s.mString = s.mString[:l] + self.mGapChar * len(last)
def upperCase(self):
"""set all characters to upper case."""
for k, s in self.mMali.items():
s.mString = s.mString.upper()
def lowerCase(self):
"""set all characters to lower case."""
for k, s in self.mMali.items():
s.mString = s.mString.lower()
def removeEndGaps(self):
"""remove end gaps.
end gaps do not include any characters and thus
the alignment coordinates won't change.
"""
pattern_start_gaps = re.compile("^([- ]+)")
min_from = self.mLength
max_to = 0
for s in self.mMali.values():
first = pattern_start_gaps.match(s.mString)
if first:
first = first.groups()[0]
min_from = min(min_from, len(first))
# search from the back end by reversing. This is much faster than
# using $ from the back.
last = pattern_start_gaps.search(s.mString[::-1])
if last:
last = last.groups()[0]
max_to = max(max_to, len(s) - len(last))
for s in self.mMali.values():
s.mString = s.mString[min_from:max_to]
        self.mLength = min(map(lambda x: len(x.mString), self.mMali.values()))
def insertColumns(self, position, num_gaps, keep_fixed=None, char="-"):
"""insert gaps at position into multiple alignment.
        if keep_fixed is a list of identifiers, those are kept fixed;
        their gaps are added to the end instead.
"""
last_pos = min(self.getWidth(), position + num_gaps)
for id, seq in self.mMali.items():
if keep_fixed and id in keep_fixed:
seq.insertColumns(last_pos, num_gaps, char)
else:
seq.insertColumns(position, num_gaps, char)
def removeGaps(self,
allowed_gaps=0,
minimum_gaps=1,
frame=1):
"""remove gappy columns.
allowed_gaps: number of gaps allowed for column to be kept
minimum_gaps: number of gaps for column to be removed
set minimum_gaps to the number of sequences to remove columns
with all gaps.
If frame is > 1 (3 most likely), then a whole codon is removed
as soon as there is one column to be removed.
"""
self.removePattern(
match_function=lambda x: x in self.mGapChars,
allowed_matches=allowed_gaps,
minimum_matches=minimum_gaps,
delete_frame=frame)
def removePattern(self,
match_function,
allowed_matches=0,
minimum_matches=1,
delete_frame=1,
search_frame=1):
"""remove columns (or group of columns), that match a certain pattern.
allowed_matches: number of matches allowed so that column is still kept
minimum_matches: number of matches required for column to be removed
set minimum_matches to the number of sequences to remove columns
with all gaps.
Patterns are matches in search_frame. For example, if frame is 3,
whole codons are supplied to match_function.
delete_frame specifies the frame for deletion. If it is set to 3,
codons are removed if already one column matches.
Example: remove all columns that contain at least one stop-codon:
removePattern( lambda x: x.upper() in ("TAG", "TAA", "TGA"),
allowed_matches = 0,
minimum_matches = 1,
search_frame = 3,
delete_frame = 3)
"""
nmatches = [0] * self.getWidth()
for s in map(lambda x: x.mString, self.mMali.values()):
for x in range(0, len(s), search_frame):
segment = s[x:x + search_frame]
if match_function(segment):
nmatches[x] += 1
columns = []
delete_columns = []
for x in range(len(nmatches)):
if nmatches[x] >= allowed_matches and nmatches[x] < minimum_matches:
columns.append(x)
else:
delete_columns.append(x)
if delete_frame != 1:
s = set(columns)
for x in delete_columns:
start = int(math.floor(float(x) / delete_frame) * delete_frame)
end = start + delete_frame
for c in range(start, end):
if c in s:
s.remove(c)
columns = list(s)
columns.sort()
self.takeColumns(columns)
def removeEmptySequences(self):
"""remove sequences that are completely empty.
"""
new_ids = []
for id in self.mIdentifiers:
if self.countCharacters(self.mMali[id].mString) == 0:
del self.mMali[id]
continue
new_ids.append(id)
self.mIdentifiers = new_ids
def upper(self):
"""convert all characters in mali to uppercase."""
for s in self.mMali.values():
s.mString = s.mString.upper()
def lower(self):
"""convert all characters in mali to lowercase."""
for s in self.mMali.values():
s.mString = s.mString.lower()
def shiftAlignment(self, map_id2offset):
"""shift alignment by offset."""
for identifier, m in self.mMali.items():
if identifier in map_id2offset:
o = map_id2offset[identifier]
m.mFrom += o
m.mTo += o
def markCodons(self, mode="case"):
"""mark codons.
"""
for identifier, m in self.mMali.items():
s = m.mString
if len(s) % 3 != 0:
raise "sequence %s not divisible by 3" % (m.mId)
is_upper = True
sequence = []
for x in range(0, len(s), 3):
if is_upper:
sequence.append(s[x:x + 3].upper())
is_upper = False
else:
sequence.append(s[x:x + 3].lower())
is_upper = True
m.mString = "".join(sequence)
def markTransitions(self, map_id2transitions, mode="case"):
"""mark transitions in the multiple alignment.
if mode == case, then upper/lower case is used for the transitions
Otherwise, a character given by mode is inserted.
"""
if mode in ("case", "keep-odd", "keep-even"):
# check, if the whole alignment needs to be masked/marked:
if "mali" in map_id2transitions:
transitions = map_id2transitions["mali"]
for identifier, s in self.mMali.items():
new_chars = []
is_upper = True
is_first = False
for c in range(len(s)):
if c in transitions:
is_first = True
if | |
import pdb
import openmdao
import openmdao.api as om
import numpy as np
from typing import Dict, Any, Tuple
class Fan:
@staticmethod
def inlet_broadband(settings:Dict[str, Any], theta: np.float64, M_tip: np.float64, tsqem: np.float64, M_d_fan: np.float64, RSS_fan: np.float64) -> np.float64:
"""
Compute the broadband component of the fan inlet mean-square acoustic pressure (msap).
:param settings: pyna settings.
:type settings: Dict[str, Any]
:param theta: polar directivity angle [deg]
:type theta: np.float64
:param M_tip: relative (i.e., helical) tip Mach number [-]
:type M_tip: np.float64
:param tsqem: broadband temperature-flow power base term [-]
:type tsqem: np.float64
:param M_d_fan: fan rotor relative tip Mach number at design [-]
:type M_d_fan: np.float64
:param RSS_fan: fan rotor-stator spacing [%]
:type RSS_fan: np.float64
:return: bblv_I
:rtype: np.float64
"""
# Fan inlet broadband noise component:
if settings.fan_BB_method == 'original':
# Tip Mach-dependent term (F1 of Eqn 4 in report, Figure 4A):
if M_d_fan <= 1:
if M_tip <= 0.9:
F1IB = 58.5
else:
F1IB = 58.5 - 20 * np.log10(M_tip / 0.9)
else:
if M_tip <= 0.9:
F1IB = 58.5 + 20 * np.log10(M_d_fan)
else:
F1IB = 58.5 + 20 * np.log10(M_d_fan) - 20 * np.log10(M_tip / 0.9)
# Rotor-stator correction term (F2 of Eqn 4, Figure 6B):
if not settings.fan_id:
F2IB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2IB = -5 * np.log10(RSS_fan / 300)
else:
F2IB = -5 * np.log10(100 / 300) # This is set to a constant 2.3856
elif settings.fan_BB_method == 'allied_signal':
# Tip Mach-dependent term (F1 of Eqn 4 in report, Figure 4A, modified by AlliedSignal):
if M_d_fan <= 1:
if M_tip <= 0.9:
F1IB = 55.5
else:
F1IB = 55.5 - 20 * np.log10(M_tip / 0.9)
else:
if M_tip <= 0.9:
F1IB = 55.5 + 20 * np.log10(M_d_fan)
else:
F1IB = 55.5 + 20 * np.log10(M_d_fan) - 20 * np.log10(M_tip / 0.9)
# Rotor-stator spacing correction term (F2 of Eqn 4, Figure 6B):
if not settings.fan_id:
F2IB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2IB = -5 * np.log10(RSS_fan / 300)
else:
F2IB = -5 * np.log10(100. / 300) # This is set to a constant 2.3856
elif settings.fan_BB_method == 'geae':
# Tip Mach-dependent term (F1 of Eqn 4 in report, Figure 4A, modified by GE):
if M_d_fan <= 1:
if M_tip <= 0.9:
F1IB = 58.5
else:
F1IB = 58.5 - 50 * np.log10(M_tip / 0.9)
else:
if M_tip <= 0.9:
F1IB = 58.5 + 20 * np.log10(M_d_fan)
else:
F1IB = 58.5 + 20 * np.log10(M_d_fan) - 50 * np.log10(M_tip / 0.9)
# Rotor-stator spacing correction term (F2 of Eqn 4, Figure 6B):
F2IB = 0
elif settings.fan_BB_method == 'kresja':
# Tip Mach-dependent term (F1, of Eqn 4 in report, Figure 4A, modified by Krejsa):
if M_d_fan <= 1:
if M_tip < 0.72:
F1IB = 34 + 20 * np.log10(1. / 1.245)
else:
F1IB = 34 - 43 * (M_tip - 0.72) + 20 * np.log10(1. / 1.245)
else:
if M_tip < 0.72:
F1IB = 34 + 20 * np.log10(M_d_fan / 1.245)
else:
F1IB = 34 - 43 * (M_tip - 0.72) + 20 * np.log10(M_d_fan/ 1.245)
# Rotor-stator spacing correction term (F2, of Eqn 4, Figure 6B):
if not settings.fan_id:
F2IB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2IB = -5 * np.log10(RSS_fan / 300)
else:
F2IB = -5 * np.log10(100 / 300) # This is set to a constant 2.3856
else:
raise ValueError('Invalid fan_BB_method specified. Specify: original / allied_signal / geae / kresja.')
# Theta correction term (F3 of Eqn 4, Figure 7A):
if settings.fan_BB_method == 'kresja':
# Krejsa method:
THET7A = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190])
FIG7A = np.array([-0.5, -1, -1.25, -1.41, -1.4, -2.2, -4.5, -8.5, -13, -18.5, -24, -30, -36, -42, -48, -54, -60,-66, -73, -66])
F3IB = np.interp(theta, THET7A, FIG7A)
else:
# All other methods:
THET7A = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 180, 250])
FIG7A = np.array([-2, -1, 0, 0, 0, -2, -4.5, -7.5, -11, -15, -19.5, -25, -63.5, -25])
F3IB = np.interp(theta, THET7A, FIG7A)
# Component value:
bblv_I = tsqem + F1IB + F2IB + F3IB
return bblv_I
@staticmethod
def discharge_broadband(settings:Dict[str, Any], theta: np.float64, M_tip: np.float64, tsqem: np.float64, M_d_fan: np.float64, RSS_fan: np.float64) -> np.float64:
"""
Compute the broadband component of the fan discharge mean-square acoustic pressure (msap).
:param settings: pyna settings
:type settings: Dict[str, Any]
:param theta: polar directivity angle [deg]
:type theta: np.float64
:param M_tip: relative (i.e., helical) tip Mach number [-]
:type M_tip: np.float64
:param tsqem: broadband temperature-flow power base term [-]
:type tsqem: np.float64
:param M_d_fan: fan rotor relative tip Mach number at design [-]
:type M_d_fan: np.float64
:param RSS_fan: fan rotor-stator spacing [%]
:type RSS_fan: np.float64
:return: bblv_D
:rtype: np.float64
"""
# Fan discharge broadband noise component
if settings.fan_BB_method == 'original':
# Tip Mach-dependent term (F1 of Eqn 10 in report, Figure 4B):
if M_d_fan <= 1:
if M_tip <= 1:
F1DB = 60
else:
F1DB = 60 - 20 * np.log10(M_tip / 1)
else:
if M_tip <= 1:
F1DB = 60 + 20 * np.log10(M_d_fan)
else:
F1DB = 60 + 20 * np.log10(M_d_fan) - 20 * np.log10(M_tip / 1)
# Rotor-stator correction term (F2 of Eqn 4, Figure 6B):
if not settings.fan_id:
F2DB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2DB = -5 * np.log10(RSS_fan / 300)
else:
F2DB = -5 * np.log10(100 / 300) # This is set to a constant 2.3856
elif settings.fan_BB_method == 'allied_signal':
# Tip Mach-dependent term (F1 of Eqn 10 in report, Figure 4B, modified by AlliedSignal):
if M_d_fan <= 1:
if M_tip <= 1:
F1DB = 58
else:
F1DB = 58 - 20 * np.log10(M_tip / 1)
else:
if M_tip <= 1:
F1DB = 58 + 20 * np.log10(M_d_fan)
else:
F1DB = 58 + 20 * np.log10(M_d_fan) - 20 * np.log10(M_tip / 1)
# Rotor-stator spacing correction term (F2 of Eqn 10, Figure 6B, modified by AlliedSignal):
if not settings.fan_id:
F2DB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2DB = -5 * np.log10(RSS_fan / 300)
else:
F2DB = -5 * np.log10(100 / 300) # This is set to a constant 2.3856
elif settings.fan_BB_method == 'geae':
# Tip Mach-dependent term (F1 of Eqn 10 in report, Figure 4B, modified by GE):
if M_d_fan <= 1:
if M_tip <= 1:
F1DB = 63
else:
F1DB = 63 - 30 * np.log10(M_tip / 1)
else:
if M_tip <= 1:
F1DB = 63 + 20 * np.log10(M_d_fan)
else:
F1DB = 63 + 20 * np.log10(M_d_fan) - 30 * np.log10(M_tip / 1)
# Rotor-stator spacing correction term (F2 of Eqn 10, Figure 6B, modified by GE):
F2DB = -5 * np.log10(RSS_fan / 300)
elif settings.fan_BB_method == 'kresja':
# Tip Mach-dependent term (F1, of Eqn 10 in report, Figure 4B, modified by Krejsa):
if M_d_fan <= 1:
# If M_tip < 0.65 Then
# F1DBBkrejsa = 34 + 20 * np.log10(1 / 1.245)
# Else
F1DB = 34 - 17 * (M_tip - 0.65) + 20 * np.log10(1 / 1.245)
else:
# If M_tip < 0.65 Then
# F1DBBkrejsa = 34 + 20 * np.log10(M_d_fan / 1.245)
# Else
F1DB = 34 - 17 * (M_tip - 0.65) + 20 * np.log10(M_d_fan / 1.245)
# Rotor-stator spacing correction term (F2, of Eqn 10, Figure 6B):
if not settings.fan_id:
F2DB = -5 * np.log10(RSS_fan / 300) # If no distortion
else:
if RSS_fan <= 100:
F2DB = -5 * np.log10(RSS_fan / 300)
else:
F2DB = -5 * np.log10(100 / 300) # This is set to a constant 2.3856
else:
            raise ValueError('Invalid fan_BB_method specified. Specify: original / allied_signal / geae / kresja.')
from pypy.conftest import gettestobjspace
import sys
import py
from pypy.tool.udir import udir
def setup_module(mod):
mod.space = gettestobjspace(usemodules=['_socket'])
global socket
import socket
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
mod.raises = py.test.raises # make raises available from app-level tests
mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
assert space.unwrap(host) == socket.gethostname()
def test_gethostbyname():
host = "localhost"
ip = space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyname(host)")
assert space.unwrap(ip) == socket.gethostbyname(host)
def test_gethostbyname_ex():
host = "localhost"
ip = space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyname_ex(host)")
assert isinstance(space.unwrap(ip), tuple)
assert space.unwrap(ip) == socket.gethostbyname_ex(host)
def test_gethostbyaddr():
host = "localhost"
ip = space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
assert space.unwrap(ip) == socket.gethostbyaddr(host)
host = "127.0.0.1"
ip = space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
assert space.unwrap(ip) == socket.gethostbyaddr(host)
def test_getservbyname():
name = "smtp"
# 2 args version
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name, 'tcp')")
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
py.test.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
py.test.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
"(_socket, port): return _socket.getservbyport(port, 'tcp')")
assert space.unwrap(name) == "smtp"
name = space.appexec([w_socket, space.wrap(port)],
"""(_socket, port):
try:
return _socket.getservbyport(port, 42)
except TypeError:
return 'OK'
""")
assert space.unwrap(name) == 'OK'
# 1 arg version
name = space.appexec([w_socket, space.wrap(port)],
"(_socket, port): return _socket.getservbyport(port)")
assert space.unwrap(name) == "smtp"
def test_getprotobyname():
name = "tcp"
w_n = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getprotobyname(name)")
assert space.unwrap(w_n) == socket.IPPROTO_TCP
def test_fromfd():
# XXX review
if not hasattr(socket, 'fromfd'):
py.test.skip("No socket.fromfd on this platform")
orig_fd = path.open()
fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM),
space.wrap(0)],
"""(_socket, fd, family, type, proto):
return _socket.fromfd(fd, family, type, proto)""")
assert space.unwrap(space.call_method(fd, 'fileno'))
fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM)],
"""(_socket, fd, family, type):
return _socket.fromfd(fd, family, type)""")
assert space.unwrap(space.call_method(fd, 'fileno'))
def test_ntohs():
w_n = space.appexec([w_socket, space.wrap(125)],
"(_socket, x): return _socket.ntohs(x)")
assert space.unwrap(w_n) == socket.ntohs(125)
def test_ntohl():
w_n = space.appexec([w_socket, space.wrap(125)],
"(_socket, x): return _socket.ntohl(x)")
assert space.unwrap(w_n) == socket.ntohl(125)
def test_htons():
w_n = space.appexec([w_socket, space.wrap(125)],
"(_socket, x): return _socket.htons(x)")
assert space.unwrap(w_n) == socket.htons(125)
def test_htonl():
w_n = space.appexec([w_socket, space.wrap(125)],
"(_socket, x): return _socket.htonl(x)")
assert space.unwrap(w_n) == socket.htonl(125)
def test_aton_ntoa():
ip = '172.16.31.10'
packed = socket.inet_aton(ip)
w_p = space.appexec([w_socket, space.wrap(ip)],
"(_socket, ip): return _socket.inet_aton(ip)")
assert space.unwrap(w_p) == packed
w_ip = space.appexec([w_socket, space.wrap(packed)],
"(_socket, p): return _socket.inet_ntoa(p)")
assert space.unwrap(w_ip) == ip
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
py.test.skip('No socket.inet_pton on this platform')
tests = [
("172.16.31.10", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
("255.255.255.255", "\xff" * 4),
]
for ip, packed in tests:
w_p = space.appexec([w_socket, space.wrap(ip)],
"(_socket, ip): return _socket.inet_pton(_socket.AF_INET, ip)")
assert space.unwrap(w_p) == packed
w_ip = space.appexec([w_socket, w_p],
"(_socket, p): return _socket.inet_ntop(_socket.AF_INET, p)")
assert space.unwrap(w_ip) == ip
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
py.test.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
py.test.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
("\x00\x00\x10\x10" * 4, None), #"::1010:" + ":".join(["0:1010"] * 3)),
("\x00" * 12 + "\x01\x02\x03\x04", "::1.2.3.4"),
("\x00" * 10 + "\xff\xff\x01\x02\x03\x04", "::ffff:172.16.31.10"),
]
for packed, ip in tests:
w_ip = space.appexec([w_socket, space.wrap(packed)],
"(_socket, packed): return _socket.inet_ntop(_socket.AF_INET6, packed)")
if ip is not None: # else don't check for the precise representation
assert space.unwrap(w_ip) == ip
w_packed = space.appexec([w_socket, w_ip],
"(_socket, ip): return _socket.inet_pton(_socket.AF_INET6, ip)")
assert space.unwrap(w_packed) == packed
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
py.test.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
py.test.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
("\x00\x01" + "\x00" * 12 + "\x00\x02", "fc00:e968:6179::de52:7100"),
("\x00" * 4 + "\x00\x01" * 6, "::1:1:1:1:1:1"),
("\x00\x01" * 6 + "\x00" * 4, "1:1:1:1:1:1::"),
("\xab\xcd\xef\00" + "\x00" * 12, "ABCD:EF00::"),
("\xab\xcd\xef\00" + "\x00" * 12, "abcd:ef00::"),
("\x00\x00\x10\x10" * 4, "::1010:" + ":".join(["0:1010"] * 3)),
("\x00" * 12 + "\x01\x02\x03\x04", "::1.2.3.4"),
("\x00" * 10 + "\xff\xff\x01\x02\x03\x04", "::ffff:1.2.3.4"),
]
for packed, ip in tests:
w_packed = space.appexec([w_socket, space.wrap(ip)],
"(_socket, ip): return _socket.inet_pton(_socket.AF_INET6, ip)")
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
py.test.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
def test_getaddrinfo():
host = "localhost"
port = 25
info = socket.getaddrinfo(host, port)
w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, port)")
assert space.unwrap(w_l) == info
py.test.skip("Unicode conversion is too slow")
w_l = space.appexec([w_socket, space.wrap(unicode(host)), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, port)")
assert space.unwrap(w_l) == info
def test_getnameinfo():
host = "127.0.0.1"
port = 25
info = socket.getnameinfo((host, port), 0)
w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)],
"(_socket, host, port): return _socket.getnameinfo((host, port), 0)")
assert space.unwrap(w_l) == info
def test_timeout():
space.appexec([w_socket, space.wrap(25.4)],
"(_socket, timeout): _socket.setdefaulttimeout(timeout)")
w_t = space.appexec([w_socket],
"(_socket): return _socket.getdefaulttimeout()")
assert space.unwrap(w_t) == 25.4
space.appexec([w_socket, space.w_None],
"(_socket, timeout): _socket.setdefaulttimeout(timeout)")
w_t = space.appexec([w_socket],
"(_socket): return _socket.getdefaulttimeout()")
assert space.unwrap(w_t) is None
# XXX also need tests for other connection and timeout errors
class AppTestSocket:
def setup_class(cls):
cls.space = space
cls.w_udir = space.wrap(str(udir))
def test_ntoa_exception(self):
import _socket
raises(_socket.error, _socket.inet_ntoa, "ab")
def test_aton_exceptions(self):
import _socket
tests = ["127.0.0.256", "127.0.0.255555555555555555", "127.2b.0.0",
"127.2.0.0.1", "127.2.0."]
for ip in tests:
raises(_socket.error, _socket.inet_aton, ip)
def test_ntop_exceptions(self):
import _socket
if not hasattr(_socket, 'inet_ntop'):
skip('No socket.inet_pton on this platform')
for family, packed, exception in \
[(_socket.AF_INET + _socket.AF_INET6, "", _socket.error),
(_socket.AF_INET, "a", ValueError),
(_socket.AF_INET6, "a", ValueError),
(_socket.AF_INET, u"aa\u2222a", UnicodeEncodeError)]:
raises(exception, _socket.inet_ntop, family, packed)
def test_pton_exceptions(self):
import _socket
if not hasattr(_socket, 'inet_pton'):
skip('No socket.inet_pton on this platform')
tests = [
(_socket.AF_INET + _socket.AF_INET6, ""),
(_socket.AF_INET, "127.0.0.256"),
(_socket.AF_INET, "127.0.0.255555555555555555"),
(_socket.AF_INET, "127.2b.0.0"),
(_socket.AF_INET, "127.2.0.0.1"),
(_socket.AF_INET, "127.2..0"),
(_socket.AF_INET6, "127.0.0.1"),
(_socket.AF_INET6, "fc00:e968:6179::de52:7100::3"),
(_socket.AF_INET6, "1:1:1:1:1:1:1:1:1"),
(_socket.AF_INET6, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b::"),
(_socket.AF_INET6, "fc00:e968:6179::de52:7100:1:1:1:1"),
(_socket.AF_INET6, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b222:1"),
(_socket.AF_INET6, "1::eg"),
]
for family, ip in tests:
raises(_socket.error, _socket.inet_pton, family, ip)
def test_newsocket_error(self):
import _socket
raises(_socket.error, _socket.socket, 10001, _socket.SOCK_STREAM, 0)
def test_socket_fileno(self):
import _socket
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
assert s.fileno() > -1
assert isinstance(s.fileno(), int)
def test_socket_close(self):
import _socket, errno
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
fileno = s.fileno()
s.close()
s.close()
try:
s.fileno()
except _socket.error, ex:
            assert ex.args[0] == errno.EBADF
else:
assert 0
def test_socket_close_error(self):
import _socket, os
if os.name == 'nt':
skip("Windows sockets are not files")
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
os.close(s.fileno())
raises(_socket.error, s.close)
def test_socket_connect(self):
import _socket, os
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
# XXX temporarily we use codespeak to test, will have more robust tests in
        # the absence of a network connection later when more parts of the socket
# API are implemented.
s.connect(("codespeak.net", 80))
name = s.getpeername() # Will raise socket.error if not connected
assert name[1] == 80
s.close()
def test_socket_connect_typeerrors(self):
tests = [
"",
("80"),
("80", "80"),
(80, 80),
]
import _socket
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
for args in tests:
raises(TypeError, s.connect, args)
s.close()
def test_NtoH(self):
import _socket as socket
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1L<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
assert i & mask == func(func(i&mask)) & mask
swapped = func(mask)
assert swapped & mask == mask
try:
func(1L<<34)
except OverflowError:
pass
else:
assert False
def test_newsocket(self):
import socket
s = socket.socket()
def test_getsetsockopt(self):
import _socket as socket
import struct
        # A socket should start with reuse == 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
assert reuse == 0
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
assert reuse != 0
# String case
intsize = struct.calcsize('i')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
reusestr = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
intsize)
(reuse,) = struct.unpack('i', reusestr)
assert reuse == 0
reusestr = struct.pack('i', 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, reusestr)
reusestr = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
intsize)
(reuse,) = struct.unpack('i', reusestr)
assert reuse != 0
def test_dup(self):
import _socket as socket
if not hasattr(socket.socket, 'dup'):
skip('No dup() on this platform')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', 50007))
s2 = s.dup()
assert s.fileno() != s2.fileno()
assert s.getsockname() == s2.getsockname()
def test_buffer_or_unicode(self):
    @property
    def exception_status(self) -> dict:
return self.__exception_status
@exception_status.setter
def exception_status(self, value: dict):
self._property_changed('exception_status')
self.__exception_status = value
@property
def sales_coverage(self) -> dict:
return self.__sales_coverage
@sales_coverage.setter
def sales_coverage(self, value: dict):
self._property_changed('sales_coverage')
self.__sales_coverage = value
@property
def short_exposure(self) -> dict:
return self.__short_exposure
@short_exposure.setter
def short_exposure(self, value: dict):
self._property_changed('short_exposure')
self.__short_exposure = value
@property
def tcm_cost_participation_rate10_pct(self) -> dict:
return self.__tcm_cost_participation_rate10_pct
@tcm_cost_participation_rate10_pct.setter
def tcm_cost_participation_rate10_pct(self, value: dict):
self._property_changed('tcm_cost_participation_rate10_pct')
self.__tcm_cost_participation_rate10_pct = value
@property
def event_time(self) -> dict:
return self.__event_time
@event_time.setter
def event_time(self, value: dict):
self._property_changed('event_time')
self.__event_time = value
@property
def position_source_name(self) -> dict:
return self.__position_source_name
@position_source_name.setter
def position_source_name(self, value: dict):
self._property_changed('position_source_name')
self.__position_source_name = value
@property
def arrival_haircut_vwap(self) -> dict:
return self.__arrival_haircut_vwap
@arrival_haircut_vwap.setter
def arrival_haircut_vwap(self, value: dict):
self._property_changed('arrival_haircut_vwap')
self.__arrival_haircut_vwap = value
@property
def interest_rate(self) -> dict:
return self.__interest_rate
@interest_rate.setter
def interest_rate(self, value: dict):
self._property_changed('interest_rate')
self.__interest_rate = value
@property
def execution_days(self) -> dict:
return self.__execution_days
@execution_days.setter
def execution_days(self, value: dict):
self._property_changed('execution_days')
self.__execution_days = value
@property
def side(self) -> dict:
return self.__side
@side.setter
def side(self, value: dict):
self._property_changed('side')
self.__side = value
@property
def compliance_restricted_status(self) -> dict:
return self.__compliance_restricted_status
@compliance_restricted_status.setter
def compliance_restricted_status(self, value: dict):
self._property_changed('compliance_restricted_status')
self.__compliance_restricted_status = value
@property
def forward(self) -> dict:
return self.__forward
@forward.setter
def forward(self, value: dict):
self._property_changed('forward')
self.__forward = value
@property
def borrow_fee(self) -> dict:
return self.__borrow_fee
@borrow_fee.setter
def borrow_fee(self, value: dict):
self._property_changed('borrow_fee')
self.__borrow_fee = value
@property
def strike(self) -> dict:
return self.__strike
@strike.setter
def strike(self, value: dict):
self._property_changed('strike')
self.__strike = value
@property
def loan_spread(self) -> dict:
return self.__loan_spread
@loan_spread.setter
def loan_spread(self, value: dict):
self._property_changed('loan_spread')
self.__loan_spread = value
@property
def tcm_cost_horizon12_hour(self) -> dict:
return self.__tcm_cost_horizon12_hour
@tcm_cost_horizon12_hour.setter
def tcm_cost_horizon12_hour(self, value: dict):
self._property_changed('tcm_cost_horizon12_hour')
self.__tcm_cost_horizon12_hour = value
@property
def dew_point(self) -> dict:
return self.__dew_point
@dew_point.setter
def dew_point(self, value: dict):
self._property_changed('dew_point')
self.__dew_point = value
@property
def research_commission(self) -> dict:
return self.__research_commission
@research_commission.setter
def research_commission(self, value: dict):
self._property_changed('research_commission')
self.__research_commission = value
@property
def leg_one_delivery_point(self) -> dict:
return self.__leg_one_delivery_point
@leg_one_delivery_point.setter
def leg_one_delivery_point(self, value: dict):
self._property_changed('leg_one_delivery_point')
self.__leg_one_delivery_point = value
@property
def asset_classifications_risk_country_code(self) -> dict:
return self.__asset_classifications_risk_country_code
@asset_classifications_risk_country_code.setter
def asset_classifications_risk_country_code(self, value: dict):
self._property_changed('asset_classifications_risk_country_code')
self.__asset_classifications_risk_country_code = value
@property
def event_status(self) -> dict:
return self.__event_status
@event_status.setter
def event_status(self, value: dict):
self._property_changed('event_status')
self.__event_status = value
@property
def asset_parameters_settlement(self) -> dict:
return self.__asset_parameters_settlement
@asset_parameters_settlement.setter
def asset_parameters_settlement(self, value: dict):
self._property_changed('asset_parameters_settlement')
self.__asset_parameters_settlement = value
@property
def return_(self) -> dict:
return self.__return
@return_.setter
def return_(self, value: dict):
self._property_changed('return_')
self.__return = value
@property
def max_temperature(self) -> dict:
return self.__max_temperature
@max_temperature.setter
def max_temperature(self, value: dict):
self._property_changed('max_temperature')
self.__max_temperature = value
@property
def acquirer_shareholder_meeting_date(self) -> dict:
return self.__acquirer_shareholder_meeting_date
@acquirer_shareholder_meeting_date.setter
def acquirer_shareholder_meeting_date(self, value: dict):
self._property_changed('acquirer_shareholder_meeting_date')
self.__acquirer_shareholder_meeting_date = value
@property
def notional_amount(self) -> dict:
return self.__notional_amount
@notional_amount.setter
def notional_amount(self, value: dict):
self._property_changed('notional_amount')
self.__notional_amount = value
@property
def arrival_rt_normalized(self) -> dict:
return self.__arrival_rt_normalized
@arrival_rt_normalized.setter
def arrival_rt_normalized(self, value: dict):
self._property_changed('arrival_rt_normalized')
self.__arrival_rt_normalized = value
@property
def report_type(self) -> dict:
return self.__report_type
@report_type.setter
def report_type(self, value: dict):
self._property_changed('report_type')
self.__report_type = value
@property
def source_url(self) -> dict:
return self.__source_url
@source_url.setter
def source_url(self, value: dict):
self._property_changed('source_url')
self.__source_url = value
@property
def estimated_return(self) -> dict:
return self.__estimated_return
@estimated_return.setter
def estimated_return(self, value: dict):
self._property_changed('estimated_return')
self.__estimated_return = value
@property
def high(self) -> dict:
return self.__high
@high.setter
def high(self, value: dict):
self._property_changed('high')
self.__high = value
@property
def source_last_update(self) -> dict:
return self.__source_last_update
@source_last_update.setter
def source_last_update(self, value: dict):
self._property_changed('source_last_update')
self.__source_last_update = value
@property
def quantity_mw(self) -> dict:
return self.__quantity_mw
@quantity_mw.setter
def quantity_mw(self, value: dict):
self._property_changed('quantity_mw')
self.__quantity_mw = value
@property
def pnode_id(self) -> dict:
return self.__pnode_id
@pnode_id.setter
def pnode_id(self, value: dict):
self._property_changed('pnode_id')
self.__pnode_id = value
@property
def event_name(self) -> dict:
return self.__event_name
@event_name.setter
def event_name(self, value: dict):
self._property_changed('event_name')
self.__event_name = value
@property
def indication_of_other_price_affecting_term(self) -> dict:
return self.__indication_of_other_price_affecting_term
@indication_of_other_price_affecting_term.setter
def indication_of_other_price_affecting_term(self, value: dict):
self._property_changed('indication_of_other_price_affecting_term')
self.__indication_of_other_price_affecting_term = value
@property
def unadjusted_bid(self) -> dict:
return self.__unadjusted_bid
@unadjusted_bid.setter
def unadjusted_bid(self, value: dict):
self._property_changed('unadjusted_bid')
self.__unadjusted_bid = value
@property
def backtest_type(self) -> dict:
return self.__backtest_type
@backtest_type.setter
def backtest_type(self, value: dict):
self._property_changed('backtest_type')
self.__backtest_type = value
@property
def gsdeer(self) -> dict:
return self.__gsdeer
@gsdeer.setter
def gsdeer(self, value: dict):
self._property_changed('gsdeer')
self.__gsdeer = value
@property
def g_regional_percentile(self) -> dict:
return self.__g_regional_percentile
@g_regional_percentile.setter
def g_regional_percentile(self, value: dict):
self._property_changed('g_regional_percentile')
self.__g_regional_percentile = value
@property
def prev_close_ask(self) -> dict:
return self.__prev_close_ask
@prev_close_ask.setter
def prev_close_ask(self, value: dict):
self._property_changed('prev_close_ask')
self.__prev_close_ask = value
@property
def level(self) -> dict:
return self.__level
@level.setter
def level(self, value: dict):
self._property_changed('level')
self.__level = value
@property
def mnav(self) -> dict:
return self.__mnav
@mnav.setter
def mnav(self, value: dict):
self._property_changed('mnav')
self.__mnav = value
@property
def es_momentum_score(self) -> dict:
return self.__es_momentum_score
@es_momentum_score.setter
def es_momentum_score(self, value: dict):
self._property_changed('es_momentum_score')
self.__es_momentum_score = value
@property
def curr_yield7_day(self) -> dict:
return self.__curr_yield7_day
@curr_yield7_day.setter
def curr_yield7_day(self, value: dict):
self._property_changed('curr_yield7_day')
self.__curr_yield7_day = value
@property
def pressure(self) -> dict:
return self.__pressure
@pressure.setter
def pressure(self, value: dict):
self._property_changed('pressure')
self.__pressure = value
@property
def short_description(self) -> dict:
return self.__short_description
@short_description.setter
def short_description(self, value: dict):
self._property_changed('short_description')
self.__short_description = value
@property
def feed(self) -> dict:
return self.__feed
@feed.setter
def feed(self, value: dict):
self._property_changed('feed')
self.__feed = value
@property
def net_weight(self) -> dict:
return self.__net_weight
@net_weight.setter
def net_weight(self, value: dict):
self._property_changed('net_weight')
self.__net_weight = value
@property
def portfolio_managers(self) -> dict:
return self.__portfolio_managers
@portfolio_managers.setter
def portfolio_managers(self, value: dict):
self._property_changed('portfolio_managers')
self.__portfolio_managers = value
@property
def asset_parameters_commodity_sector(self) -> dict:
return self.__asset_parameters_commodity_sector
@asset_parameters_commodity_sector.setter
def asset_parameters_commodity_sector(self, value: dict):
self._property_changed('asset_parameters_commodity_sector')
self.__asset_parameters_commodity_sector = value
@property
def bos_in_ticks(self) -> dict:
return self.__bos_in_ticks
@bos_in_ticks.setter
def bos_in_ticks(self, value: dict):
self._property_changed('bos_in_ticks')
self.__bos_in_ticks = value
@property
def price_notation2(self) -> dict:
return self.__price_notation2
@price_notation2.setter
def price_notation2(self, value: dict):
self._property_changed('price_notation2')
self.__price_notation2 = value
@property
def market_buffer_threshold(self) -> dict:
return self.__market_buffer_threshold
@market_buffer_threshold.setter
def market_buffer_threshold(self, value: dict):
self._property_changed('market_buffer_threshold')
self.__market_buffer_threshold = value
@property
def price_notation3(self) -> dict:
return self.__price_notation3
@price_notation3.setter
def price_notation3(self, value: dict):
self._property_changed('price_notation3')
self.__price_notation3 = value
@property
def cap_floor_vol(self) -> dict:
return self.__cap_floor_vol
@cap_floor_vol.setter
def cap_floor_vol(self, value: dict):
self._property_changed('cap_floor_vol')
self.__cap_floor_vol = value
@property
def submitter(self) -> dict:
return self.__submitter
@submitter.setter
def submitter(self, value: dict):
self._property_changed('submitter')
self.__submitter = value
@property
def bond_risk_premia(self) -> dict:
return self.__bond_risk_premia
@bond_risk_premia.setter
def bond_risk_premia(self, value: dict):
self._property_changed('bond_risk_premia')
self.__bond_risk_premia = value
@property
def notional(self) -> dict:
return self.__notional
@notional.setter
def notional(self, value: dict):
self._property_changed('notional')
self.__notional = value
@property
def es_disclosure_percentage(self) -> dict:
return self.__es_disclosure_percentage
@es_disclosure_percentage.setter
def es_disclosure_percentage(self, value: dict):
self._property_changed('es_disclosure_percentage')
self.__es_disclosure_percentage = value
@property
def investment_income(self) -> dict:
return self.__investment_income
@investment_income.setter
def investment_income(self, value: dict):
self._property_changed('investment_income')
self.__investment_income = value
@property
def forward_point_imm(self) -> dict:
return self.__forward_point_imm
@forward_point_imm.setter
def forward_point_imm(self, value: dict):
self._property_changed('forward_point_imm')
self.__forward_point_imm = value
@property
def client_short_name(self) -> dict:
return self.__client_short_name
@client_short_name.setter
def client_short_name(self, value: dict):
self._property_changed('client_short_name')
self.__client_short_name = value
@property
def group_category(self) -> dict:
return self.__group_category
@group_category.setter
def group_category(self, value: dict):
self._property_changed('group_category')
self.__group_category = value
@property
def bid_plus_ask(self) -> dict:
return self.__bid_plus_ask
@bid_plus_ask.setter
def bid_plus_ask(self, value: dict):
self._property_changed('bid_plus_ask')
self.__bid_plus_ask = value
@property
def election_odds(self) -> dict:
return self.__election_odds
@election_odds.setter
def election_odds(self, value: dict):
self._property_changed('election_odds')
self.__election_odds = value
@property
def require_anon_client_name(self) -> dict:
return self.__require_anon_client_name
@require_anon_client_name.setter
def require_anon_client_name(self, value: dict):
self._property_changed('require_anon_client_name')
self.__require_anon_client_name = value
@property
def total(self) -> dict:
return self.__total
@total.setter
def total(self, value: dict):
self._property_changed('total')
self.__total = value
@property
def asset_id(self) -> dict:
return self.__asset_id
@asset_id.setter
def asset_id(self, value: dict):
self._property_changed('asset_id')
self.__asset_id = value
@property
def mkt_type(self) -> dict:
return self.__mkt_type
@mkt_type.setter
def mkt_type(self, value: dict):
self._property_changed('mkt_type')
self.__mkt_type = value
@property
def pricing_location(self) -> dict:
return self.__pricing_location
@pricing_location.setter
def pricing_location(self, value: dict):
self._property_changed('pricing_location')
self.__pricing_location = value
@property
def yield30_day(self) -> dict:
return self.__yield30_day
@yield30_day.setter
def yield30_day(self, value: dict):
self._property_changed('yield30_day')
self.__yield30_day = value
@property
def beta(self) -> dict:
return self.__beta
@beta.setter
def beta(self, value: dict):
self._property_changed('beta')
self.__beta = value
@property
def long_exposure(self) -> dict:
return self.__long_exposure
@long_exposure.setter
def long_exposure(self, value: dict):
self._property_changed('long_exposure')
self.__long_exposure = value
@property
def tcm_cost_participation_rate20_pct(self) -> dict:
return self.__tcm_cost_participation_rate20_pct
@tcm_cost_participation_rate20_pct.setter
def tcm_cost_participation_rate20_pct(self, value: dict):
self._property_changed('tcm_cost_participation_rate20_pct')
self.__tcm_cost_participation_rate20_pct = value
@property
def multi_asset_class_swap(self) -> dict:
return self.__multi_asset_class_swap
import asyncio
from typing import Callable, Iterable, Optional, Tuple, Union
import aiohttp
from discord.ext import commands
try:
import discord
import youtube_dl
has_voice = True
except ImportError:
has_voice = False
if has_voice:
youtube_dl.utils.bug_reports_message = lambda: ""
ydl = youtube_dl.YoutubeDL({
"format": "bestaudio/best",
"restrictfilenames": True,
"noplaylist": True,
"nocheckcertificate": True,
"ignoreerrors": True,
"logtostderr": False,
"quiet": True,
"no_warnings": True,
"source_address": "0.0.0.0",
})
class EmptyQueue(Exception):
"""Cannot skip because queue is empty"""
class NotConnectedToVoice(Exception):
"""Cannot create the player because bot is not connected to voice"""
class NotPlaying(Exception):
"""Cannot <do something> because nothing is being played"""
async def ytbettersearch(query) -> str:
'''Resolve a search query to the URL of the first matching YouTube result'''
url = f"https://www.youtube.com/results?search_query={query}"
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
html = await resp.text()
index = html.find("watch?v")
url = ""
while True:
char = html[index]
if char == '"':
break
url += char
index += 1
url = f"https://www.youtube.com/{url}"
return url
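# Illustrative usage (added sketch, not part of the original library): resolving a
# search query to the first matching watch URL. The query string is an assumption
# for demonstration only; the helper is awaited from an async context.
async def _example_bettersearch() -> str:
    # Returns something like "https://www.youtube.com/watch?v=<id>"
    return await ytbettersearch("lofi hip hop radio")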
class Song:
"""The requested song data
"""
__slots__ = [
'source',
'url',
'title',
'description',
'views',
'duration',
'thumbnail',
'channel',
'channel_url',
'loop',
'name',
'is_looping'
]
def __init__(
self,
source: str,
url: str,
title: str,
description: str,
views: int,
duration: Union[str,int],
thumbnail: str,
channel: str,
channel_url: str,
loop: bool,
):
self.source = source
self.url = url
self.title = title
self.description = description
self.views = views
self.name = title
self.duration = duration
self.thumbnail = thumbnail
self.channel = channel
self.channel_url = channel_url
self.is_looping = loop
async def get_video_data(url, search: bool, bettersearch:bool, loop: Optional[asyncio.AbstractEventLoop]) -> Song:
"""It returns required video data after searching `YouTube`
:raises RuntimeError: Is raised when the package is install without the .[voice] parameters
:return: The song data in a formatted way
:rtype: :class:`Song`
"""
if not has_voice:
raise RuntimeError("DiscordUtils[voice] install needed in order to use voice")
if not search and not bettersearch:
data = await loop.run_in_executor(
None, lambda: ydl.extract_info(url, download=False))
source = data.get("url")
url = "https://www.youtube.com/watch?v=" + data.get("id")
title = data.get("title")
description = data.get("description")
views = data.get("view_count")
duration = data.get("duration")
thumbnail = data.get("thumbnail")
channel = data.get("uploader")
channel_url = data.get("uploader_url")
return Song(
source,
url,
title,
description,
views,
duration,
thumbnail,
channel,
channel_url,
False,
)
if bettersearch:
url = await ytbettersearch(url)
data = await loop.run_in_executor(None, lambda: ydl.extract_info(url, download=False))
source = data.get("url")
url = "https://www.youtube.com/watch?v=" + data.get("id")
title = data.get("title")
description = data.get("description")
views = data.get("view_count")
duration = data.get("duration")
thumbnail = data.get("thumbnail")
channel = data.get("uploader")
channel_url = data.get("uploader_url")
return Song(
source,
url,
title,
description,
views,
duration,
thumbnail,
channel,
channel_url,
False,
)
ytdl = youtube_dl.YoutubeDL({
"format": "bestaudio/best",
"restrictfilenames": True,
"noplaylist": True,
"nocheckcertificate": True,
"ignoreerrors": True,
"logtostderr": False,
"quiet": True,
"no_warnings": True,
"default_search": "auto",
"source_address": "0.0.0.0",
})
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=False))
try:
data = data["entries"][0]
except (KeyError, TypeError):
pass
source = data.get("url")
url = "https://www.youtube.com/watch?v=" + data.get("id")
title = data.get("title")
description = data.get("description")
views = data.get("view_count")
duration = data.get("duration")
thumbnail = data.get("thumbnail")
channel = data.get("uploader")
channel_url = data.get("uploader_url")
return Song(
source,
url,
title,
description,
views,
duration,
thumbnail,
channel,
channel_url,
False,
)
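# A minimal sketch (added, not from the original source) of driving get_video_data
# directly. The example URL is an assumption; inside the library the bot's own
# event loop is passed in by MusicPlayer.queue().
async def _example_get_video_data() -> Song:
    loop = asyncio.get_running_loop()
    # Fetch metadata for a direct URL (search=False, bettersearch=False).
    return await get_video_data(
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ", False, False, loop
    )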
def check_queue(ctx: commands.Context, opts: dict, music: 'Music', after: Callable, on_play: Callable, loop: Optional[asyncio.AbstractEventLoop]) -> None:
"""It checks the music queue
:param ctx: The commands `context`
:type ctx: commands.Context
:param opts: A set of options for `ffmpeg`
:type opts: dict
:param music: The master class where the all the players data is stored
:type music: Music
:param after: The function (normally :func:`check_queue` itself) to call again when the current song ends
:type after: Callable
:param on_play: :func:`MusicPlayer.on_play` function
:type on_play: MusicPlayer.on_play
:param loop: The event loop in which the :class:`~discord.ext.commands.Bot` is running
:type loop: Optional[asyncio.AbstractEventLoop]
:raises RuntimeError: Raised when the package is installed without the .[voice] extra
"""
if not has_voice:
raise RuntimeError("DiscordUtils[voice] install needed in order to use voice")
try:
song = music.queue[ctx.guild.id][0]
except IndexError:
return
if not song.is_looping:
try:
music.queue[ctx.guild.id].pop(0)
except IndexError:
return
if len(music.queue[ctx.guild.id]) > 0:
source = discord.PCMVolumeTransformer(
discord.FFmpegPCMAudio(music.queue[ctx.guild.id][0].source,
**opts))
ctx.voice_client.play(
source,
after=lambda error: after(ctx, opts, music, after, on_play,loop),
)
song = music.queue[ctx.guild.id][0]
if on_play:
loop.create_task(on_play(ctx, song))
else:
source = discord.PCMVolumeTransformer(
discord.FFmpegPCMAudio(music.queue[ctx.guild.id][0].source,**opts))
ctx.voice_client.play(
source,
after=lambda error: after(ctx, opts, music, after, on_play, loop))
song = music.queue[ctx.guild.id][0]
if on_play:
loop.create_task(on_play(ctx, song))
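# Added sketch of the intended wiring inside a discord.ext.commands command (not
# part of this file). `music` is assumed to be a shared registry object exposing
# the `queue` dict and `players` list that MusicPlayer (defined below) relies on.
async def _example_play_command(ctx: commands.Context, music: 'Music', query: str) -> None:
    player = MusicPlayer(ctx, music)          # requires ctx.voice_client to be connected
    await player.queue(query, search=True)    # resolve the query and append a Song
    if not ctx.voice_client.is_playing():
        await player.play()                   # start playback; check_queue() chains the rest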
class MusicPlayer:
"""The class which acts a music controller/player
:raises RuntimeError: Raised when the package is installed without the .[voice] extra
:raises NotPlaying: See :func:`skip`, :func:`stop`, :func:`resume`, :func:`pause`, :func:`toggle_song_loop`, :func:`change_volume`, :func:`remove_from_queue`
:raises EmptyQueue: See :func:`skip`, :func:`current_queue`
"""
__slots__ = [
'ctx',
'voice',
'loop',
'music',
'after_func',
'on_play_func',
'on_queue_func',
'on_skip_func',
'on_stop_func',
'on_pause_func',
'on_resume_func',
'on_loop_toggle_func',
'on_volume_change_func',
'on_remove_from_queue_func',
'ffmpeg_opts'
]
def __init__(self, ctx:commands.Context , music: 'Music', **kwargs):
if not has_voice:
raise RuntimeError("DiscordUtils[voice] install needed in order to use voice")
self.ctx = ctx
self.voice: Optional[discord.VoiceProtocol] = ctx.voice_client
self.loop: Optional[asyncio.AbstractEventLoop] = ctx.bot.loop
self.music = music
if self.ctx.guild.id not in self.music.queue:
self.music.queue[self.ctx.guild.id] = []
self.after_func: Callable = check_queue
self.on_play_func: Optional[Callable] = None
self.on_queue_func: Optional[Callable] = None
self.on_skip_func: Optional[Callable] = None
self.on_stop_func: Optional[Callable] = None
self.on_pause_func: Optional[Callable] = None
self.on_resume_func: Optional[Callable] = None
self.on_loop_toggle_func: Optional[Callable] = None
self.on_volume_change_func: Optional[Callable] = None
self.on_remove_from_queue_func: Optional[Callable] = None
ffmpeg_error = kwargs.get("ffmpeg_error_betterfix", kwargs.get("ffmpeg_error_fix"))
if ffmpeg_error and "ffmpeg_error_betterfix" in kwargs:
self.ffmpeg_opts: dict = {
"options":
"-vn -loglevel quiet -hide_banner -nostats",
"before_options":
"-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin",
}
elif ffmpeg_error:
self.ffmpeg_opts: dict = {
"options":
"-vn",
"before_options":
"-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 0 -nostdin",
}
else:
self.ffmpeg_opts: dict = {"options": "-vn", "before_options": "-nostdin"}
def disable(self):
'''It disables the `Music Player`'''
self.music.players.remove(self)
def on_queue(self, func: Callable) -> None:
"""The event when the song is `queued`
:param func:
:type func: Callable
"""
self.on_queue_func = func
def on_play(self, func: Callable) -> None:
"""The event when the song is `played`
:param func:
:type func: Callable
"""
self.on_play_func = func
def on_skip(self, func: Callable) -> None:
"""The event when the song is `skipped`
:param func:
:type func: Callable
"""
self.on_skip_func = func
def on_stop(self, func: Callable) -> None:
"""The event when the player is `stopped`
:param func:
:type func: Callable
"""
self.on_stop_func = func
def on_pause(self, func: Callable) -> None:
"""The event when the song is `paused`
:param func:
:type func: Callable
"""
self.on_pause_func = func
def on_resume(self, func: Callable) -> None:
"""The event when the song is `resumed`
:param func:
:type func: Callable
"""
self.on_resume_func = func
def on_loop_toggle(self, func: Callable) -> None:
"""The event when the `looping` is `enabled`
:param func:
:type func: Callable
"""
self.on_loop_toggle_func = func
def on_volume_change(self, func: Callable) -> None:
"""The event when the `volume` is `changed`
:param func:
:type func: Callable
"""
self.on_volume_change_func = func
def on_remove_from_queue(self, func: Callable) -> None:
"""The event when the song is `removed from the queue`
:param func:
:type func: Callable
"""
self.on_remove_from_queue_func = func
async def queue(self, url: str, search: bool = False, bettersearch: bool = False) -> Song:
"""The song to queue
:param url: The URL of the song, or a search query when `search` or `bettersearch` is True
:type url: str
:param search: Whether to treat `url` as a YouTube search query, defaults to False
:type search: bool, optional
:param bettersearch: Whether to use the more precise search that resolves the first result URL, defaults to False
:type bettersearch: bool, optional
:return: The song with the minimum required data
:rtype: Song
"""
song = await get_video_data(url, search, bettersearch, self.loop)
self.music.queue[self.ctx.guild.id].append(song)
if self.on_queue_func:
await self.on_queue_func(self.ctx, song)
return song
async def play(self) -> Song:
"""Determines which song to play from the queue
:return: The song at the front of the queue, which is now being played
:rtype: Song
"""
source = discord.PCMVolumeTransformer(
discord.FFmpegPCMAudio(self.music.queue[self.ctx.guild.id][0].source,**self.ffmpeg_opts)
)
self.voice.play(
source,
after=lambda error: self.after_func(
self.ctx,
self.ffmpeg_opts,
self.music,
self.after_func,
self.on_play_func,
self.loop,
),
)
song = self.music.queue[self.ctx.guild.id][0]
if self.on_play_func:
await self.on_play_func(self.ctx, song)
return song
async def skip(self, force: bool = False) -> Union[Tuple[Song, Song], Song]:
"""Skips the current song which is being played
:param force: Force skip or not, defaults to False
:type force: bool, optional
:raises NotPlaying: Raised when nothing is being played
:raises EmptyQueue: Raised when the queue is empty
:return: It returns (old song, new song) or just (song) depending on the situation
:rtype: Union[Tuple[Song, Song], Song]
"""
if len(self.music.queue[self.ctx.guild.id]) == 0:
raise NotPlaying("Cannot skip because nothing is being played")
elif not len(self.music.queue[self.ctx.guild.id]) > 1 and not force:
raise EmptyQueue("Cannot skip because queue is empty")
old = self.music.queue[self.ctx.guild.id][0]
old.is_looping = False if old.is_looping else
#!/usr/bin/env python
"""
streamxmlwriter - A simple library for incrementally writing XML
files of arbitrary size. Supports pretty-printing and custom attribute
ordering. Experimental namespace support.
In development; poor documentation and tests; may eat your children.
The latest development version is available from the git repository [1].
[1] http://github.com/filipsalomonsson/streamxmlwriter
Comments and/or patches are always welcome.
"""
# Copyright (c) 2009-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "<NAME> <<EMAIL>>"
__version__ = "1.0"
INDENT = " "
def escape_attribute(value, encoding):
"""Escape an attribute value using the given encoding."""
if "&" in value:
value = value.replace("&", "&")
if "<" in value:
value = value.replace("<", "<")
if '"' in value:
value = value.replace('"', """)
return value.encode(encoding, "xmlcharrefreplace")
def escape_cdata(data, encoding):
"""Escape character data using the given encoding."""
if "&" in data:
data = data.replace("&", "&")
if "<" in data:
data = data.replace("<", "<")
if ">" in data:
data = data.replace(">", ">")
return data.encode(encoding, "xmlcharrefreplace")
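# Quick illustration (added, not in the original module) of what the two escaping
# helpers produce; both return bytes in the requested encoding.
def _escaping_example() -> None:
    assert escape_attribute('a<b & "c"', "utf-8") == b'a&lt;b &amp; &quot;c&quot;'
    assert escape_cdata("x < y & z > w", "utf-8") == b"x &lt; y &amp; z &gt; w"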
def _nssplitname(name):
if name is None:
return None
if not name[0] == "{":
return ("", name)
return tuple(name[1:].split("}", 1))
def _cname(name, nsmap, cnames):
"""Return a cname from its {ns}tag form."""
if not isinstance(name, tuple):
name = _nssplitname(name)
if name in cnames:
return cnames[name]
uri, ncname = name
if not uri:
for uri in nsmap:
if not nsmap[uri]:
break
else:
uri = ""
prefix = nsmap.setdefault(uri, "ns" + str(len(nsmap) + 1))
if prefix:
cname = prefix + ":" + ncname
else:
cname = ncname
cnames[name] = cname
return cname
def sorter_factory(attrib_order):
"""Return a function that sorts a list of (key, value) pairs.
The sort order is determined by the `attrib_order` dictionary,
whose format is described in the documentation for the `XMLWriter`
class.
"""
items = attrib_order.items()
attrib_order = {}
for tag, names in items:
tag = _nssplitname(tag)
attrib_order[tag] = dict(
[(_nssplitname(name), n) for (n, name) in enumerate(names)]
)
for tag, order in attrib_order.items():
order.setdefault(None, len(order))
def asort(pairs, tag):
"""Sort a list of ``(name, cname, value)`` tuples), using the
custom sort order for the given `tag` name."""
def key(item):
"""Return a sort key for a ``(name, cname, value)`` tuple."""
(ncname, cname, value) = item
if tag not in attrib_order:
return ncname
keys = attrib_order[tag]
return keys.get(ncname, keys[None]), ncname
pairs.sort(key=key)
return asort
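# Added sketch of the sorter in isolation. The (name, cname, value) tuples below
# mirror the internal format used by XMLWriter.start(); attribute names are
# (uri, local) pairs as produced by _nssplitname().
def _sorter_example() -> None:
    asort = sorter_factory({"person": ["id", "first_name", "last_name"]})
    pairs = [
        (("", "last_name"), "last_name", "Doe"),
        (("", "id"), "id", "42"),
        (("", "first_name"), "first_name", "Jane"),
    ]
    asort(pairs, ("", "person"))  # sorts in place using the custom order
    assert [cname for (_, cname, _) in pairs] == ["id", "first_name", "last_name"]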
def tostring(element, *args, **kwargs):
"""Serialize an element to its string representation using an
`XMLWriter`.
`element` is an Element instance. All additional positional and
keyword arguments are passed on to the underlying `XMLWriter`.
"""
import io
out = io.BytesIO()
writer = XMLWriter(out, *args, **kwargs)
writer.element(element)
writer.close()
return out.getvalue()
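# Added usage sketch: serializing a standard ElementTree element with a custom
# attribute order. The exact byte output (spacing, self-closing form) depends on
# the writer options, so no literal result is asserted here.
def _tostring_example() -> bytes:
    import xml.etree.ElementTree as ET
    person = ET.Element("person", {"last_name": "Doe", "id": "42", "first_name": "Jane"})
    return tostring(person, sort={"person": ["id", "first_name", "last_name"]})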
class XMLSyntaxError(Exception):
"""XML syntactic errors, such as ill-nestedness."""
class XMLWriter(object):
"""Stream XML writer"""
def __init__(
self, file, encoding="utf-8", pretty_print=False, sort=True, abbrev_empty=True
):
"""
Create an `XMLWriter` that writes its output to `file`.
`encoding` is the output encoding (default: utf-8). If
`pretty_print` is true, the output will be written in indented
form.
If `sort` is true (which is the default), attributes will be
sorted alphabetically.
Optionally, `sort` can be a dictionary specifying a custom
sort order for attributes. The dictionary keys are tag names,
and each value is a list of attribute names in the order they
should appear when sorted. If `None` appears in the list, it
acts as a wildcard for all attributes not explicitly named in
the list. (By default, they will be placed last.)
Example::
attrib_order = {
"person": ["id", "first_name", "last_name"],
"foo": ["id", None, "put_me_last"],
}
"""
self.file = file
self.encoding = encoding
self._pretty_print = pretty_print
self._sort = sort
if isinstance(sort, dict):
self._sort = sorter_factory(sort)
elif sort:
self._sort = lambda attributes, tag: attributes.sort()
self._abbrev_empty = abbrev_empty
self._tags = []
self._start_tag_open = False
self._new_namespaces = {}
self._started = False
self._wrote_declaration = False
if self.encoding not in ("us-ascii", "utf-8"):
self.declaration()
self._wrote_data = False
def write(self, *data):
for datum in data:
if not isinstance(datum, bytes):
datum = bytes(datum, self.encoding)
self.file.write(datum)
def start(self, tag, attributes=None, nsmap=None, **kwargs):
"""Open a new `tag` element.
Attributes can be given as a dictionary (`attributes`), or as
keyword arguments.
`nsmap` is an optional dictionary mapping namespace prefixes
to URIs. It is intended mainly for lxml compatibility.
"""
self._started = True
if self._start_tag_open:
self.write(">")
self._start_tag_open = False
if self._pretty_print and self._tags and not self._wrote_data:
self.write("\n", INDENT * len(self._tags))
# Copy old namespaces and cnames
if self._tags:
_, old_namespaces, _ = self._tags[-1]
else:
old_namespaces = {"": ""}
namespaces = old_namespaces.copy()
if nsmap:
self._new_namespaces.update(reversed(item) for item in nsmap.items())
values = self._new_namespaces.values()
for uri, prefix in list(namespaces.items()):
if prefix in values:
del namespaces[uri]
namespaces.update(self._new_namespaces)
cnames = {}
# Write tag name (cname)
tag = _nssplitname(tag)
self.write("<", _cname(tag, namespaces, cnames))
# Make cnames for the attributes
if attributes:
kwargs.update(attributes)
attributes = [(_nssplitname(name), value) for (name, value) in kwargs.items()]
attributes = [
(name, _cname(name, namespaces, cnames), value)
for (name, value) in attributes
]
# Write namespace declarations for all new mappings
for (uri, prefix) in sorted(namespaces.items(), key=lambda x: x[1]):
if uri not in old_namespaces or old_namespaces.get(uri) != prefix:
value = escape_attribute(uri, self.encoding)
if prefix:
self.write(
" xmlns:", bytes(prefix, self.encoding), '="', value, '"'
)
else:
self.write(' xmlns="', value, '"')
# Write the attributes
if self._sort:
self._sort(attributes, tag)
for (name, cname, value) in attributes:
value = escape_attribute(value, self.encoding)
self.write(" ", cname, '="', value, '"')
self._new_namespaces = {}
self._start_tag_open = True
self._wrote_data = False
self._tags.append((tag, namespaces, cnames))
def end(self, tag=None):
"""Close the most recently opened element.
If `tag` is given, it must match the tag name of the open
element, or an `XMLSyntaxError` will be raised.
"""
open_tag, namespaces, cnames = self._tags.pop()
if tag is not None:
tag = _nssplitname(tag)
if open_tag != tag:
raise XMLSyntaxError(
"Start and end tag mismatch: %s and /%s." % (open_tag, tag)
)
if self._start_tag_open:
if self._abbrev_empty:
self.write(" />")
else:
self.write("></", _cname(open_tag, namespaces, cnames), ">")
self._start_tag_open = False
else:
if self._pretty_print and not self._wrote_data:
self.write("\n", INDENT * len(self._tags))
self.write("</", _cname(open_tag, namespaces, cnames), ">")
self._wrote_data = False
def start_ns(self, prefix, uri):
"""Add a namespace declaration to the scope of the next
element."""
self._new_namespaces[uri] = prefix
def end_ns(self):
"""End a namespace scope."""
pass
def data(self, data):
"""Add character data."""
if not (self._pretty_print and not data.strip()):
if self._start_tag_open:
self.write(">")
self._start_tag_open = False
self.write(escape_cdata(data, self.encoding))
self._wrote_data = True
def element(self, element, attributes=None, data=None, **kwargs):
if hasattr(element, "tag"):
attrib = dict(element.attrib)
if attributes:
attrib.update(attributes)
if hasattr(element, "nsmap"):
self.start(element.tag, attrib, element.nsmap, **kwargs)
else:
self.start(element.tag, attrib, **kwargs)
if data is not None or element.text:
if data is not None:
self.data(data)
else:
self.data(element.text)
for child in element:
self.element(child)
self.end()
if element.tail:
self.data(element.tail)
else:
self.start(element, attributes, **kwargs)
if data:
self.data(data)
self.end(element)
def _close_start(self):
"""Make sure the start tag is finished."""
if self._start_tag_open:
self.write(">")
self._start_tag_open = False
def declaration(self):
"""Write an XML declaration."""
if self._started:
raise XMLSyntaxError(
"Can't write XML declaration after root element has been started."
)
if not self._wrote_declaration:
self.pi("xml", "version='1.0' encoding='" + self.encoding + "'")
self._wrote_declaration = True
xml = declaration
def _comment_or_pi(self, *data):
"""Write a comment or PI, using special rules for
pretty-printing."""
self._close_start()
if self._pretty_print:
if (self._tags and not self._wrote_data) or (
self._started and not self._tags
):
self.write("\n", INDENT * len(self._tags))
self.write(*data)
if self._pretty_print and not self._started:
self.write("\n")
def comment(self, data):
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage of Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
from collections.abc import Iterable
# High Chart
try:
from highcharts import Highchart, Highstock
except:
raise ImportError(
"The highcharts module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it.\n[Tips] Run: 'pip3 install python-highcharts' in your terminal to install the module."
)
# VerticaPy Modules
from verticapy.utilities import *
from verticapy.toolbox import *
#
##
#
# ___ ___ ________ ___ ___ ________ ________ _________
# |\ \|\ \|\ ____\|\ \|\ \|\ __ \|\ __ \|\___ ___\
# \ \ \\\ \ \ \___|\ \ \\\ \ \ \|\ \ \ \|\ \|___ \ \_|
# \ \ __ \ \ \ \ \ __ \ \ __ \ \ _ _\ \ \ \
# \ \ \ \ \ \ \____\ \ \ \ \ \ \ \ \ \ \\ \| \ \ \
# \ \__\ \__\ \_______\ \__\ \__\ \__\ \__\ \__\\ _\ \ \__\
# \|__|\|__|\|_______|\|__|\|__|\|__|\|__|\|__|\|__| \|__|
#
##
#
# Functions used by vDataFrames to draw graphics using High Chart API.
#
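# Added note (not part of the original module): hchart_from_vdf is normally
# reached through the vDataFrame.hchart() interface rather than called directly.
# A direct call would look roughly like the sketch below, where `vdf` is an
# existing vDataFrame and the column name is a placeholder:
#
#   chart = hchart_from_vdf(vdf, x="category_col", y="COUNT(*)", kind="bar")
#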
# ---#
def hchart_from_vdf(
vdf,
x=None,
y=None,
z=None,
c=None,
aggregate: bool = True,
kind="boxplot",
width: int = 600,
height: int = 400,
options: dict = {},
h: float = -1,
max_cardinality: int = 10,
limit=10000,
drilldown: bool = False,
stock: bool = False,
alpha: float = 0.25,
):
if not (x):
x = vdf.numcol()
x = (
vdf_columns_names([x], vdf)[0]
if (isinstance(x, str))
else vdf_columns_names(x, vdf)
)
cursor = vdf._VERTICAPY_VARIABLES_["cursor"]
if drilldown:
if not (z):
z = "COUNT(*)"
if kind == "hist":
kind = "column"
check_types([("y", y, [str, list],)])
if isinstance(x, Iterable) and not (isinstance(x, str)):
x = x[0]
columns_check([x], vdf)
if isinstance(y, str):
columns_check([y], vdf)
y = vdf_columns_names([y], vdf)[0]
else:
columns_check(y, vdf)
y = vdf_columns_names(y, vdf)[0]
query = [
"SELECT {}, {} FROM {} GROUP BY 1 LIMIT {}".format(
x, z, vdf.__genSQL__(), limit
),
"SELECT {}, {}, {} FROM {} GROUP BY 1, 2 LIMIT {}".format(
x, y, z, vdf.__genSQL__(), limit
),
]
elif (kind in ("pie_half", "pie", "donut", "pie3d", "donut3d")) or (
kind in ("bar", "hist") and not (z)
):
if not (y):
y = "COUNT(*)"
if isinstance(x, Iterable) and not (isinstance(x, str)):
x = x[0]
columns_check([x], vdf)
unique = vdf[x].nunique()
is_num = vdf[x].isnum()
order_by = " ORDER BY 2 DESC "
if unique > max_cardinality:
if is_num:
order_by = " ORDER BY MIN({}) DESC ".format(x)
x = vdf[x].discretize(h=h, return_enum_trans=True)[0].replace(
"{}", x
) + " AS {}".format(x)
else:
query = "SELECT {}, {} FROM {} GROUP BY 1 ORDER BY 2 DESC LIMIT {}".format(
x, y, vdf.__genSQL__(), max_cardinality
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
result = vdf._VERTICAPY_VARIABLES_["cursor"].fetchall()
result = [elem[0] for elem in result]
result = [
"NULL" if elem == None else "'{}'".format(elem) for elem in result
]
x = "(CASE WHEN {} IN ({}) THEN {} ELSE 'Others' END) AS {}".format(
x, ", ".join(result), x, x
)
query = "SELECT {}, {} FROM {}{}{}LIMIT {}".format(
x,
y,
vdf.__genSQL__(),
" GROUP BY 1" if (aggregate) else "",
order_by,
limit,
)
elif kind in (
"bar",
"hist",
"stacked_bar",
"stacked_hist",
"heatmap",
"negative_bar",
):
where = ""
if kind == "heatmap" and not (z):
if not (y):
y = "COUNT(*)"
z = y
y = "'' AS \"_\""
else:
if not (z):
z = "COUNT(*)"
check_types([("y", y, [str, list],)])
if isinstance(x, Iterable) and not (isinstance(x, str)):
x = x[0]
columns_check([x], vdf)
if isinstance(y, str):
columns_check([y], vdf)
y = vdf_columns_names([y], vdf)[0]
else:
columns_check(y, vdf)
y = vdf_columns_names(y, vdf)[0]
# y
unique = vdf[y].nunique()
is_num = vdf[y].isnum()
if unique > max_cardinality:
if is_num:
where = " WHERE {} IS NOT NULL ".format(y)
y = vdf[y].discretize(h=h, return_enum_trans=True)[0].replace(
"{}", y
) + " AS {}".format(y)
else:
y = vdf[y].discretize(
k=max_cardinality, method="topk", return_enum_trans=True
)[0].replace("{}", y) + " AS {}".format(y)
# x
unique = vdf[x].nunique()
is_num = vdf[x].isnum()
if unique > max_cardinality:
if is_num:
x = vdf[x].discretize(h=h, return_enum_trans=True)[0].replace(
"{}", x
) + " AS {}".format(x)
else:
x = vdf[x].discretize(
k=max_cardinality, method="topk", return_enum_trans=True
)[0].replace("{}", x) + " AS {}".format(x)
query = "SELECT {}, {}, {} FROM {} {}{}LIMIT {}".format(
x,
y,
z,
vdf.__genSQL__(),
where,
" GROUP BY 1, 2 " if (aggregate) else "",
limit,
)
elif kind in ("area", "area_ts", "line", "spline"):
check_types([("y", y, [str, list],)])
if isinstance(x, Iterable) and not (isinstance(x, str)):
x = x[0]
if isinstance(y, Iterable) and not (isinstance(y, str)) and kind == "area_ts":
y = y[0]
columns_check([x], vdf)
cast = "::timestamp" if (vdf[x].isdate()) else ""
if not (z):
if not (aggregate):
if isinstance(y, str):
columns_check([y], vdf)
y = vdf_columns_names([y], vdf)[0]
else:
columns_check(y, vdf)
y = vdf_columns_names(y, vdf)
if not (isinstance(y, str)):
y = ", ".join(y)
kind = "multi_" + kind
query = "SELECT {}{}, {} FROM {} WHERE {} IS NOT NULL{}{} LIMIT {}".format(
x,
cast,
y,
vdf.__genSQL__(),
x,
" GROUP BY 1 " if (aggregate) else "",
" ORDER BY 1 " if (vdf[x].isdate() or vdf[x].isnum()) else "",
limit,
)
else:
check_types([("y", y, [str, list],)])
check_types([("z", z, [str, list],)])
if isinstance(y, str):
columns_check([y], vdf)
y = vdf_columns_names([y], vdf)[0]
else:
columns_check(y, vdf)
y = vdf_columns_names(y, vdf)[0]
if isinstance(z, str):
columns_check([z], vdf)
z = vdf_columns_names([z], vdf)[0]
else:
columns_check(z, vdf)
z = vdf_columns_names(z, vdf)[0]
# z
unique = vdf[z].nunique()
is_num = vdf[z].isnum()
z_copy = z
if unique > max_cardinality:
if is_num:
z = vdf[z].discretize(h=h, return_enum_trans=True)[0].replace(
"{}", z
) + " AS {}".format(z)
else:
z = vdf[z].discretize(
k=max_cardinality, method="topk", return_enum_trans=True
)[0].replace("{}", z) + " AS {}".format(z)
query = "SELECT {}{}, {}, {} FROM {} WHERE {} IS NOT NULL AND {} IS NOT NULL LIMIT {} OVER (PARTITION BY {} ORDER BY {} DESC)".format(
x,
cast,
y,
z,
vdf.__genSQL__(),
x,
y,
max(int(limit / unique), 1),
z_copy,
x,
)
elif kind in ("scatter", "bubble"):
check_types([("y", y, [str, list],)])
if isinstance(y, str):
columns_check([y], vdf)
y = vdf_columns_names([y], vdf)[0]
else:
columns_check(y, vdf)
y = vdf_columns_names(y, vdf)[0]
if isinstance(x, Iterable) and not (isinstance(x, str)):
x = x[0]
cast = "::timestamp" if (vdf[x].isdate()) else ""
if not (z) and not (c) and (kind == "scatter"):
query = "SELECT {}{}, {} FROM {} WHERE {} IS NOT NULL AND {} IS NOT NULL LIMIT {}".format(
x, cast, y, vdf.__genSQL__(), x, y, limit
)
elif not (c) and (z):
check_types([("z", z, [str, list],)])
try:
z = (
vdf_columns_names([z], vdf)[0]
if (isinstance(z, str))
else vdf_columns_names(z, vdf)[0]
)
except:
pass
query = "SELECT {}{}, {}, {} FROM {} WHERE {} IS NOT NULL AND {} IS NOT NULL AND {} IS NOT NULL | |
verify=False)
if phantom.is_fail(ret_val):
return ret_val, None
phantom_base_url = resp_json.get('base_url')
if not phantom_base_url:
return action_result.set_status(phantom.APP_ERROR, status_message=MATTERMOST_BASE_URL_NOT_FOUND_MSG), None
return phantom.APP_SUCCESS, phantom_base_url
def _get_asset_name(self, action_result):
""" Get name of the asset using Phantom URL.
:param action_result: object of ActionResult class
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message), asset name
"""
mattermost_phantom_base_url = self.get_phantom_base_url()
asset_id = self.get_asset_id()
rest_endpoint = MATTERMOST_PHANTOM_ASSET_INFO_URL.format(
asset_id=asset_id)
url = '{}rest{}'.format(mattermost_phantom_base_url, rest_endpoint)
ret_val, resp_json = self._make_rest_call(
action_result=action_result, url=url, verify=False)
if phantom.is_fail(ret_val):
return ret_val, None
asset_name = resp_json.get('name')
if not asset_name:
return action_result.set_status(phantom.APP_ERROR, status_message='Asset Name for id: {0} not found.'
.format(asset_id)), None
return phantom.APP_SUCCESS, asset_name
def _wait(self, action_result):
""" This function is used to hold the action till user login for 105 seconds.
:param action_result: Object of ActionResult class
:return: status (success/failed)
"""
app_dir = os.path.dirname(os.path.abspath(__file__))
# file to check whether the request has been granted or not
auth_status_file_path = '{0}/{1}_{2}'.format(
app_dir, self.get_asset_id(), MATTERMOST_TC_FILE)
# wait-time while request is being granted for 105 seconds
for _ in range(0, 35):
self.send_progress('Waiting...')
# If file is generated
if os.path.isfile(auth_status_file_path):
os.unlink(auth_status_file_path)
break
time.sleep(MATTERMOST_TC_STATUS_SLEEP)
else:
self.send_progress('')
return action_result.set_status(phantom.APP_ERROR, status_message='Timeout. Please try again later.')
self.send_progress('Authenticated')
return phantom.APP_SUCCESS
def _validate_date(self, date_timestamp):
""" This function is used to validate date timestamp as per YYYY-MM-DD format or valid ISO 8601 format.
:param date_timestamp: Value of the date timestamp
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
regex = r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):' \
r'([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$'
match_iso8601 = re.compile(regex).match
try:
if match_iso8601(date_timestamp) is not None:
return phantom.APP_SUCCESS
elif datetime.strptime(date_timestamp, '%Y-%m-%d'):
return phantom.APP_SUCCESS
except Exception:
return phantom.APP_ERROR
return phantom.APP_ERROR
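        # Illustrative inputs (added comment): both of the following pass validation,
        # the first as plain YYYY-MM-DD and the second as full ISO 8601 with a zone:
        #   self._validate_date("2024-01-31")
        #   self._validate_date("2024-01-31T10:15:00Z")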
def _convert_time(self, time_stamp):
""" This function is used to convert formatted timestamp into millisecond epoch.
:param time_stamp: formatted timestamp of start_time or end_time
:return: status success/failure, epoch time in milliseconds
"""
try:
epoch = datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=dateutil.tz.UTC)
epoch = epoch.astimezone(dateutil.tz.tzlocal())
parsed_time = dateutil.parser.parse(time_stamp)
if not parsed_time.tzinfo:
parsed_time = parsed_time.replace(tzinfo=dateutil.tz.UTC)
parsed_time = parsed_time.astimezone(dateutil.tz.tzlocal())
epoch_time = int((parsed_time - epoch).total_seconds() * 1000)
except Exception as e:
self.debug_print("conversion failed")
return phantom.APP_ERROR, self._get_error_message_from_exception(e)
return phantom.APP_SUCCESS, epoch_time
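        # Added example: with an ISO 8601 UTC timestamp the conversion yields epoch
        # milliseconds, e.g. "2024-01-31T00:00:00Z" -> (phantom.APP_SUCCESS, 1706659200000).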
def _verify_time(self, time_value):
""" This function is used to verify time parameters.
:param time_value: start_time or end_time epoch
:return: status success/failure with appropriate message
"""
# Validate time parameter
try:
time_value = int(float(time_value))
except Exception:
self.debug_print(MATTERMOST_INVALID_TIME)
return phantom.APP_ERROR, MATTERMOST_INVALID_TIME
# Validate start_time and end_time for negation
if time_value < 0:
self.debug_print(MATTERMOST_NEGATIVE_TIME)
return phantom.APP_ERROR, MATTERMOST_NEGATIVE_TIME
return phantom.APP_SUCCESS, MATTERMOST_VALID_TIME
def _process_posts(self, action_result, url, params, start_time, end_time):
""" This function is used to process posts for a given channel.
:param action_result: Object of ActionResult class
:param url: url for making REST call
:param params: dictionary of query parameters
:param start_time: start time in epoch
:param end_time: end time in epoch
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
if not end_time:
if start_time:
params.update({
'since': start_time
})
# Get posts for given channel
post_status, post_list = self._get_posts(
action_result, url, params)
if phantom.is_fail(post_status):
return action_result.get_status()
if not post_list:
return action_result.set_status(phantom.APP_SUCCESS, MATTERMOST_NO_POSTS_FOUND)
for each_post in post_list:
action_result.add_data(each_post)
elif not start_time and end_time:
params.update({
'since': end_time
})
# Get posts for given channel
post_status, post_list = self._get_posts(
action_result, url, params)
if phantom.is_fail(post_status):
return action_result.get_status()
params = {}
if post_list:
end_time_id = post_list[-1]['id']
params.update({
'before': end_time_id
})
# Get posts for given channel
post_status, post_list = self._get_posts(
action_result, url, params)
if phantom.is_fail(post_status):
return action_result.get_status()
if not post_list:
return action_result.set_status(phantom.APP_SUCCESS, MATTERMOST_NO_POSTS_FOUND)
for each_post in post_list:
action_result.add_data(each_post)
elif start_time and end_time:
params.update({
'since': start_time
})
# Get posts for given channel
post_status, post_list = self._get_posts(
action_result, url, params)
if phantom.is_fail(post_status):
return action_result.get_status()
if not post_list:
return action_result.set_status(phantom.APP_SUCCESS, MATTERMOST_NO_POSTS_FOUND)
for each_post in reversed(post_list):
if each_post['create_at'] <= end_time:
action_result.add_data(each_post)
else:
break
return phantom.APP_SUCCESS
def _get_posts(self, action_result, url, params):
""" This function is used to list posts for a given channel.
:param action_result: Object of ActionResult class
:param url: url for making REST call
:param params: dictionary of query parameters
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), list of posts
"""
page_number = 0
params.update({
'page': page_number
})
post_list = []
while True:
# make rest call
ret_val, response_json = self._handle_update_request(url=url, action_result=action_result,
params=params)
if phantom.is_fail(ret_val):
return action_result.get_status(), post_list
# If empty list then break
if not response_json['posts']:
break
# Add post to the post list
for each_post in response_json['order']:
post_list.append(response_json.get('posts', "")[each_post])
# Increment page_number for fetching next page in upcoming cycle
if not params.get('since'):
page_number += 1
params.update({
'page': page_number
})
else:
break
return phantom.APP_SUCCESS, post_list
def _create_post(self, action_result, request_data):
""" This function is used to create post in a channel.
:param action_result: Object of ActionResult class
:param request_data: Dictionary of request body
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), response dict
"""
# Endpoint for creating post
url = '{0}{1}'.format(MATTERMOST_API_BASE_URL.format(server_url=self._server_url),
MATTERMOST_SEND_MESSAGE_ENDPOINT)
# make rest call
ret_val, response_json = self._handle_update_request(url=url, action_result=action_result,
data=json.dumps(request_data), method="post")
if phantom.is_fail(ret_val):
return action_result.get_status(), None
return phantom.APP_SUCCESS, response_json
def _verify_channel(self, action_result, team_id, channel):
""" This function is used to verify given channel and list channels.
:param action_result: Object of ActionResult class
:param team_id: ID of the team
:param channel: ID or name of the channel
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), ID of the channel
"""
channel_id = None
# Endpoint for fetching channels
url = '{0}{1}'.format(MATTERMOST_API_BASE_URL.format(server_url=self._server_url),
MATTERMOST_LIST_CHANNELS_ENDPOINT.format(team=team_id))
# make rest call
ret_val, response_json = self._handle_update_request(
url=url, action_result=action_result)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
# If an empty list of channels, then return
if not response_json and not self.get_action_identifier() == 'list_channels':
return phantom.APP_ERROR, MATTERMOST_CONST_NOT_FOUND
# For any action other than list channel
if not self.get_action_identifier() == 'list_channels':
# Fetch Channel ID from Channel name
for each_channel in response_json:
# Check if either channel name or channel ID matches
if channel.lower() == each_channel.get('id').lower() or channel.lower() == each_channel.get('name').lower():
channel_id = each_channel.get('id')
return phantom.APP_SUCCESS, channel_id
else:
for each_channel in response_json:
# Allow public(O) and private(P) channels only
if each_channel.get('type').lower() in ['o', 'p']:
action_result.add_data(each_channel)
if not channel_id and not self.get_action_identifier() == 'list_channels':
return phantom.APP_ERROR, MATTERMOST_CONST_NOT_FOUND
return phantom.APP_SUCCESS, channel_id
def _verify_team(self, action_result, team):
""" This function is used to verify given team and list teams.
:param action_result: Object of ActionResult class
:param team: ID or name of the team
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), ID of the team
"""
team_id = None
params = {}
# Endpoint for fetching teams
url = '{0}{1}'.format(MATTERMOST_API_BASE_URL.format(server_url=self._server_url),
MATTERMOST_TEAMS_ENDPOINT)
page_number = 0
params.update({
'page': page_number
})
duplicate_entry = 0
previous_teams = []
while True:
# make rest call
ret_val, response_json = self._handle_update_request(url=url, action_result=action_result,
params=params)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
# If an empty list of teams, then break
if not response_json:
break
# For any action other than list teams
if not self.get_action_identifier() == 'list_teams':
# Fetch Team ID from Team name
for each_team in response_json:
# Check if either team name or team ID matches
if team.lower() == each_team.get('id').lower() or team.lower() == each_team.get('name').lower():
team_id = each_team.get('id')
return phantom.APP_SUCCESS, team_id
else:
new_team = []
if previous_teams:
duplicate_entry = len(
[value for value in response_json if value in previous_teams])
new_team = [
value for value in response_json if value not in previous_teams]
previous_teams = response_json
if not new_team and page_number == 0:
for each_team in response_json:
action_result.add_data(each_team)
else:
for each_team in new_team:
action_result.add_data(each_team)
# Increment page_number for fetching next page in upcoming cycle
page_number += 1 + duplicate_entry
params.update({
'page': page_number
})
if not self.get_action_identifier() == 'list_teams':
return phantom.APP_ERROR, MATTERMOST_CONST_NOT_FOUND
return phantom.APP_SUCCESS, team_id
def _handle_list_users(self, param):
""" This function is used to handle list users action.
:param param: Dictionary of input parameters
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
self.save_progress("In action handler for: {0}".format(
self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
# If neither personal token nor access token are present, action fails
if not self._personal_token and not self._access_token:
return action_result.set_status(phantom.APP_ERROR, status_message=MATTERMOST_CONFIG_PARAMS_REQUIRED_MSG)
team = param.get(MATTERMOST_JSON_TEAM, "")
params = {}
if team:
# Verify valid team name or team ID
team_status, team_id = self._verify_team(action_result, team)
if phantom.is_fail(team_status):
if team_id == MATTERMOST_CONST_NOT_FOUND:
return action_result.set_status(phantom.APP_ERROR, MATTERMOST_TEAM_NOT_FOUND_MSG)
return action_result.get_status()
params.update({
'in_team': team_id
})
# Endpoint for fetching users
url = '{0}{1}'.format(MATTERMOST_API_BASE_URL.format(
server_url=self._server_url), MATTERMOST_USERS_ENDPOINT)
page_number = 0
params.update({
'page': page_number
})
while True:
# make rest call
ret_val, response_json = self._handle_update_request(
url=url, action_result=action_result, params=params)
if phantom.is_fail(ret_val):
return action_result.get_status()
# If empty list then break
if not response_json:
break
# Add user to action result data
for
# Copyright (c) 2017-2019, <NAME>
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""iocage events collection."""
import typing
from timeit import default_timer as timer
import libioc.errors
# MyPy
import libzfs
EVENT_STATUS = (
"pending",
"done",
"failed"
)
class Scope(list):
"""An independent event history scope."""
PENDING_COUNT: int
def __init__(self) -> None:
self.PENDING_COUNT = 0
super().__init__([])
class IocEvent:
"""The base event class of liblibioc."""
_scope: Scope
identifier: typing.Optional[str]
_started_at: float
_stopped_at: float
_pending: bool
skipped: bool
done: bool
reverted: bool
error: typing.Optional[typing.Union[bool, BaseException]]
_rollback_steps: typing.List[typing.Callable[[], typing.Optional[
typing.Generator['IocEvent', None, None]
]]]
_child_events: typing.List['IocEvent']
def __init__(
self,
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
"""Initialize an IocEvent."""
self.scope = scope
for event in self.scope:
if event.__hash__() == self.__hash__():
return event # type: ignore
self._pending = False
self.skipped = False
self.done = True
self.reverted = False
self.error = None
self._rollback_steps = []
self._child_events = []
self._rollback_steps = []
self.number = len(self.scope) + 1
self.parent_count = self.scope.PENDING_COUNT
self.message = message
@property
def scope(self) -> Scope:
"""Return the currently used event scope."""
return self._scope
@scope.setter
def scope(self, scope: typing.Optional[Scope]) -> None:
if scope is None:
self._scope = Scope()
else:
self._scope = scope
def get_state_string(
self,
error: str="failed",
skipped: str="skipped",
done: str="done",
pending: str="pending"
) -> str:
"""Get a humanreadable string according to the jail state."""
if self.error is not None:
return error
if self.skipped is True:
return skipped
if self.done is True:
return done
return pending
def child_event(self, event: 'IocEvent') -> 'IocEvent':
"""Append the event to the child_events for later notification."""
self._child_events.append(event)
return event
def add_rollback_step(self, method: typing.Callable[[], None]) -> None:
"""Add a rollback step that is executed when the event fails."""
self._rollback_steps.append(method)
def rollback(
self
) -> typing.Optional[typing.Generator['IocEvent', None, None]]:
"""Rollback all rollback steps in reverse order."""
if self.reverted is True:
return
self.reverted = True
# Notify child_events in reverse order
for event in reversed(self._child_events):
rollback_actions = event.rollback()
if rollback_actions is not None:
for rollback_action in rollback_actions:
yield rollback_action
# Execute rollback steps in reverse order
reversed_rollback_steps = reversed(self._rollback_steps)
self._rollback_steps = []
for revert_step in reversed_rollback_steps:
revert_events = revert_step()
if revert_events is not None:
for event in revert_events:
yield event
@property
def type(self) -> str:
"""
Return the event's type.
The event type is obtained from an IocEvent's class name.
"""
return type(self).__name__
@property
def pending(self) -> bool:
"""Return True if the event is pending."""
return self._pending
@pending.setter
def pending(self, state: bool) -> None:
"""
Set the pending state.
Changes invoke internal processing as for example the calculation of
the event duration and the global PENDING_COUNT.
"""
current = self._pending
new_state = (state is True)
if current == new_state:
return
if new_state is True:
try:
self._started_at
raise libioc.errors.EventAlreadyFinished(event=self)
except AttributeError:
self._started_at = float(timer())
if new_state is False:
self._stopped_at = float(timer())
self._pending = new_state
self.scope.PENDING_COUNT += 1 if (state is True) else -1
@property
def duration(self) -> typing.Optional[float]:
"""Return the duration of finished events."""
try:
return self._stopped_at - self._started_at
except AttributeError:
return None
def _update_message(
self,
message: typing.Optional[str]=None,
) -> None:
self.message = message
def begin(self, message: typing.Optional[str]=None) -> 'IocEvent':
"""Begin an event."""
self._update_message(message)
self.pending = True
self.done = False
self.parent_count = self.scope.PENDING_COUNT - 1
return self
def end(self, message: typing.Optional[str]=None) -> 'IocEvent':
"""Successfully finish an event."""
self._update_message(message)
self.done = True
self.pending = False
self.parent_count = self.scope.PENDING_COUNT
return self
def step(self, message: typing.Optional[str]=None) -> 'IocEvent':
"""Reflect partial event progress."""
self._update_message(message)
self.parent_count = self.scope.PENDING_COUNT
return self
def skip(self, message: typing.Optional[str]=None) -> 'IocEvent':
"""Mark an event as skipped."""
self._update_message(message)
self.skipped = True
self.pending = False
self.parent_count = self.scope.PENDING_COUNT
return self
def fail(
self,
exception: bool=True,
message: typing.Optional[str]=None
) -> 'IocEvent':
"""End an event with a failure."""
list(self.fail_generator(exception=exception, message=message))
return self
def fail_generator(
self,
exception: bool=True,
message: typing.Optional[str]=None
) -> typing.Generator['IocEvent', None, None]:
"""End an event with a failure via a generator of rollback steps."""
self._update_message(message)
self.error = exception
actions = self.rollback()
if isinstance(actions, typing.Generator):
for action in actions:
yield action
self.pending = False
self.parent_count = self.scope.PENDING_COUNT
yield self
def __hash__(self) -> typing.Any:
"""Compare an event by its type and identifier."""
has_identifier = ("identifier" in self.__dir__()) is True
identifier = "generic" if has_identifier is False else self.identifier
return hash((self.type, identifier))
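# Added sketch (not part of libioc) of the basic event lifecycle against a fresh
# Scope; the messages used here are illustrative only.
def _example_event_lifecycle() -> None:
    scope = Scope()
    ev = IocEvent(message="demo", scope=scope)
    ev.begin("demo started")        # marks the event pending and starts the timer
    assert scope.PENDING_COUNT == 1
    ev.end("demo finished")         # stops the timer and resolves the event
    assert ev.done is True and ev.duration is not None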
# Jail
class JailEvent(IocEvent):
"""Any event related to a jail."""
jail: 'libioc.Jail.JailGenerator'
identifier: typing.Optional[str]
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
try:
self.identifier = jail.full_name
except AttributeError:
self.identifier = None
self.jail = jail
IocEvent.__init__(self, message=message, scope=scope)
class JailRename(JailEvent):
"""Change the name of a jail."""
current_name: str
new_name: str
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
current_name: str,
new_name: str,
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
self.current_name = current_name
self.new_name = new_name
JailEvent.__init__(self, jail=jail, message=message, scope=scope)
class JailStop(JailEvent):
"""Destroy the jail."""
pass
class JailRemove(JailEvent):
"""Remove the jail(2)."""
pass
class TeardownSystemMounts(JailStop):
"""Teardown a jails mountpoints."""
pass
class JailResourceLimitAction(JailEvent):
"""Set or unset a jails resource limits."""
pass
class VnetEvent(JailEvent):
"""A group of events around VNET operations."""
pass
class JailNetworkSetup(VnetEvent):
"""Start VNET networks."""
pass
class JailNetworkTeardown(JailStop):
"""Teardown a jails network."""
pass
class VnetInterfaceConfig(JailNetworkSetup):
"""Configure VNET network interfaces and firewall."""
pass
class VnetSetupLocalhost(JailNetworkSetup):
"""Configure a VNET jails localhost."""
pass
class VnetSetRoutes(JailNetworkSetup):
"""Set a VNET jails network routes."""
pass
class JailAttach(JailEvent):
"""Remove the jail(2)."""
pass
class DevFSEvent(JailEvent):
"""Group of events that occor on DevFS operations."""
pass
class MountDevFS(DevFSEvent):
"""Mount /dev into a jail."""
pass
class MountFdescfs(DevFSEvent):
"""Mount /dev/fd into a jail."""
pass
class FstabEvent(JailEvent):
"""Group of events that occor on Fstab operations."""
pass
class MountFstab(FstabEvent):
"""Mount entries from a jails fstab file."""
pass
class UnmountFstab(FstabEvent):
"""Unmount entries from a jails fstab file."""
pass
class JailHook(JailEvent):
"""Run jail hook."""
stdout: typing.Optional[str]
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
self.stdout = None
super().__init__(
jail=jail,
message=message,
scope=scope
)
def end(
self,
message: typing.Optional[str]=None,
stdout: str=""
) -> 'IocEvent':
"""Successfully finish an event."""
self.stdout = stdout
return super().end(message)
def fail(
self,
exception: bool=True,
message: typing.Optional[str]=None,
stdout: str=""
) -> 'IocEvent':
"""Successfully finish an event."""
self.stdout = stdout
return super().fail(
exception=exception,
message=message
)
class JailHookPrestart(JailHook):
"""Run jail prestart hook."""
pass
class JailHookStart(JailHook):
"""Run jail start hook."""
pass
class JailCommand(JailHook):
"""Run command in a jail."""
stdout: typing.Optional[str]
stderr: typing.Optional[str]
code: typing.Optional[int]
class JailHookCreated(JailHook):
"""Run jail created hook."""
pass
class JailHookPoststart(JailHook):
"""Run jail poststart hook."""
pass
class JailHookPrestop(JailHook):
"""Run jail prestop hook."""
pass
class JailHookStop(JailHook):
"""Run jail stop hook."""
pass
class JailHookPoststop(JailHook):
"""Run jail poststop hook."""
pass
class JailFstabUpdate(JailEvent):
"""Update a jails fstab file."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
JailEvent.__init__(self, jail=jail, message=message, scope=scope)
class JailResolverConfig(JailEvent):
"""Update a jails /etc/resolv.conf file."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> None:
JailEvent.__init__(self, jail=jail, message=message, scope=scope)
class JailZFSShare(JailEvent):
"""Group of events that mounts or unmounts shared ZFS datasets."""
pass
class BasejailStorageConfig(JailEvent):
"""Mount or unmount basejail storage of a jail."""
pass
class AttachZFSDataset(JailZFSShare):
"""Mount an individual dataset when starting a jail with shared ZFS."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
dataset: libzfs.ZFSDataset,
scope: typing.Optional[Scope]=None
) -> None:
msg = f"Dataset {dataset.name} was attached to Jail {jail.full_name}"
JailEvent.__init__(self, jail=jail, message=msg, scope=scope)
class JailClone(JailEvent):
"""Clone a jail."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: typing.Optional[str]=None,
scope: typing.Optional[Scope]=None
) -> | |
warmup_begin_lr=self.warmup_begin_lr, warmup_steps=self.warmup_steps,
),
'poly': mx.lr_scheduler.PolyScheduler(
max_update=self.cycle_length, base_lr=self.base_lr, pwr=2, final_lr=self.min_lr,
),
'cycle': CyclicalSchedule(
TriangularSchedule, min_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length,
inc_fraction=self.inc_fraction,
cycle_length_decay=self.cycle_length_decay,
cycle_magnitude_decay=self.cycle_magnitude_decay,
# stop_decay_iter=self.stop_decay_iter,
final_drop_iter=self.final_drop_iter,
),
'cosine': LinearWarmUp(
OneCycleSchedule(start_lr=self.min_lr, max_lr=self.max_lr, cycle_length=self.cycle_length,
cooldown_length=self.cooldown_length, finish_lr=self.finish_lr),
start_lr=self.warmup_begin_lr,
length=self.warmup_steps,
)
}
self._lr_scheduler = schedules[self.lr_scheduler]
def compare_unsup(self):
"""Get unsupervised loss"""
if self.compare_embedding_unsup:
self.fake_out_unsup = [nd.squeeze(self.netGE(A_unsup)) for A_unsup in self.A_unsup]
self.fake_out_unsup_aug = [nd.squeeze(self.netGE(A_rp_unsup)) for A_rp_unsup in self.A_rp_unsup]
if self.lambda_aux > 0:
self.fake_out_unsup = [fake_out_unsup[0] for fake_out_unsup in self.fake_out_unsup]
self.fake_out_unsup_aug = [fake_out_unsup_aug[0] for fake_out_unsup_aug in self.fake_out_unsup_aug]
self.unsup_loss = [
self.density_unsup(
fake_out_unsup,
fake_out_unsup_aug,
nd.ones(fake_out_unsup.shape[0], ctx=fake_out_unsup.context),
)
for fake_out_unsup, fake_out_unsup_aug
in zip(self.fake_out_unsup, self.fake_out_unsup_aug, )]
else:
self.fake_out_unsup = [self.netG(A_unsup) for A_unsup in self.A_unsup]
self.fake_out_unsup_aug = [nd.flip(self.netG(A_rp_unsup), 3) for A_rp_unsup in self.A_rp_unsup]
if self.lambda_aux > 0:
self.fake_out_unsup = [fake_out_unsup[0] for fake_out_unsup in self.fake_out_unsup]
self.fake_out_unsup_aug = [fake_out_unsup_aug[0] for fake_out_unsup_aug in self.fake_out_unsup_aug]
self.fake_out_unsup = [nd.where(wp_unsup, fake_out_unsup, wp_unsup - 1) for wp_unsup, fake_out_unsup in
zip(self.wp_unsup, self.fake_out_unsup)]
self.fake_out_unsup_aug = [nd.where(wp_unsup, fake_out_unsup_aug, wp_unsup - 1) for
wp_unsup, fake_out_unsup_aug in zip(self.wp_unsup, self.fake_out_unsup_aug)]
self.unsup_loss = [
self.density_unsup(
fake_out_unsup,
fake_out_unsup_aug,
wp_unsup,
# _margin_unsup / self.C_thr,
None,
)
for fake_out_unsup, fake_out_unsup_aug, wp_unsup, _margin_unsup
in zip(self.fake_out_unsup, self.fake_out_unsup_aug, self.wp_unsup, self._margin_unsup)]
if self.monitor_unsup_outputs:
im = np.hstack(
(montage(self.fake_out_unsup[0].asnumpy()[:9, 0]),
montage(self.fake_out_unsup_aug[0].asnumpy()[:9, 0]),
montage(
np.abs(
self.fake_out_unsup[0].asnumpy()[:9, 0] - self.fake_out_unsup_aug[0].asnumpy()[:9, 0]))))
[plt.imsave('%s/ep%04d_%02d_%d' % (
self.result_folder_figure_train_unsup, self.current_epoch, self.current_it, i), im) for i in
range(1)]
def optimize_D(self):
if hasattr(self, 'A_rp_unsup'): # choose unsup data if avail.
tmp_input = self.A_rp_unsup
else:
tmp_input = self.A_rp
fake_out = [self.netG(A_rp) for A_rp in tmp_input]
fake_out = [fo[0] if self.lambda_aux > 0 else fo for fo in fake_out]
if hasattr(self, 'wp_unsup'):
tmp_wp = self.wp_unsup
else:
tmp_wp = self.wp
fake_out = [nd.where(wp, fo, wp - 1) for wp, fo in zip(tmp_wp, fake_out)]
fake_concat = [self.image_pool.query(nd.concat(A_rp, fo, dim=1)) for A_rp, fo in zip(self.A_rp, fake_out)]
with autograd.record():
# Train with fake image
# Use image pooling to utilize history images
output = [self.netD(fc) for fc in fake_concat]
fake_label = [nd.zeros_like(op) for op in output]
err_DB_fake = [self.criterionGAN(op, fl) for op, fl in zip(output, fake_label)]
[self.metric.update([fl, ], [op, ]) for fl, op in zip(fake_label, output)]
# self.metric.update([fake_label[0], ], [output[0], ])
# Train with real image
real_concat = [nd.concat(A_rp, _C, dim=1) for A_rp, _C in zip(self.A_rp, self.C)]
output = [self.netD(rc) for rc in real_concat]
real_label = [nd.ones_like(op) for op in output]
err_DB_real = [self.criterionGAN(op, rl) for op, rl in zip(output, real_label)]
self.err_DB = [(edb + edf) * 0.5 for edb, edf in zip(err_DB_real, err_DB_fake)]
[self.metric.update([rl, ], [op, ]) for rl, op in zip(real_label, output)]
for err_DB in self.err_DB:
err_DB.backward()
# with amp.scale_loss(self.err_DB, self.trainerD) as scaled_loss:
# autograd.backward(scaled_loss)
self.trainerD.step(self.batch_size)
def create_net(self, upscale_factor=1):
from mxnet.gluon import nn
import mxnet.gluon.contrib.nn as contrib_nn
def conv_factory(opts, num_filters, kernel_size, stride=1, group=1):
"""A convenience function for convolution with BatchNorm & activation"""
pad = int((kernel_size - 1) / 2)
out = nn.HybridSequential()
out.add(nn.BatchNorm())
if opts.activation == 'leaky':
out.add(nn.LeakyReLU(opts.alpha))
else:
out.add(nn.Activation(opts.activation))
out.add(nn.Conv2D(channels=num_filters, kernel_size=(kernel_size, kernel_size),
strides=(stride, stride), use_bias=opts.use_bias,
padding=(pad, pad), groups=group))
return out
class Options:
""""""
def __init__(self):
super(Options, self).__init__()
self.activation = 'relu'
self.use_bias = False
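# Sketch: conv_factory(Options(), num_filters=64, kernel_size=3) builds a
# pre-activation block (BatchNorm -> activation -> Conv2D) with "same"
# padding, so spatial dimensions are preserved while the channel count
# becomes num_filters.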
class SuperResolutionNet(gluon.HybridBlock):
def __init__(self, upscale_factor, opts):
super(SuperResolutionNet, self).__init__()
with self.name_scope():
self.conv1 = conv_factory(opts, num_filters=64, kernel_size=5, stride=1)
self.conv2 = conv_factory(opts, num_filters=64, kernel_size=3, stride=1)
self.conv3 = conv_factory(opts, num_filters=32, kernel_size=3, stride=1)
self.conv4 = conv_factory(opts, num_filters=upscale_factor ** 2, kernel_size=3, stride=1)
self.pxshuf = contrib_nn.PixelShuffle2D(upscale_factor)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = F.tanh(self.pxshuf(x))
return x
return SuperResolutionNet(upscale_factor, opts=Options())
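# Hedged shape sketch: with upscale_factor=2, conv4 emits 4 channels and
# PixelShuffle2D rearranges them, so a (N, C, H, W) input comes out as
# (N, 1, 2H, 2W) with values in [-1, 1] because of the final tanh.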
def optimize_G(self):
"""Optimize generator"""
if np.array([self.lambda_C, self.lambda_D, self.lambda_consistency, self.lambda_unsup, self.lambda0,
self.lambda_aux]).sum() == 0: # No extra loss
with autograd.record():
self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp]
self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for
C, fake_out, m, margin in
zip(self.C, self.fake_out, self.m, self._margin)]
self.loss_G = self.loss_true_density_train
[loss_G.backward() for loss_G in self.loss_G]
else:
with autograd.record():
self.fake_out = [self.netG(A_rp) for A_rp in self.A_rp]
# Supervised learning
self.var0 = [nd.square(coef) for coef in self.netG.coef_G._data]
self.loss_true_density_train = [self.trueDensity_train(fake_out, C, m, margin) for
fake_out, C, m, margin in
zip(self.fake_out, self.C, self.m, self._margin)]
self.loss_G = [((1 / var) * l + nd.log(var)) for l, var in
zip(self.loss_true_density_train, self.var0)]
############################### Consistency Loss ###############################
if self.lambda_consistency > 0:
fake_out_T2 = [self.netG(A_rp) for A_rp in
[nd.concat(A_rp[:, 0:1], nd.zeros_like(A_rp[:, 0:1]), dim=1) for A_rp in
self.A_rp]] # masked out ADC channel
fake_out_ADC = [self.netG(A_rp) for A_rp in
[nd.concat(nd.zeros_like(A_rp[:, 1:2]), A_rp[:, 1:2], dim=1) for A_rp in
self.A_rp]] # masked out T2 channel
self.loss_consistency_train = [self.density_corr(_fake_out_T2, _fake_out_ADC, wp) for
_fake_out_T2, _fake_out_ADC, wp in
zip(fake_out_T2, fake_out_ADC, self.wp)]
self.var1 = [nd.square(coef) for coef in self.netG.coef_consistency._data]
self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
zip(self.loss_G, self.loss_consistency_train, self.var1)]
############################### Correlation Loss ###############################
if self.lambda_C > 0:
self.var2 = [nd.square(coef) for coef in self.netG.coef_C._data]
self.loss_corr_train = [self.density_corr(fake_out, C, m) for
C, fake_out, m in
zip(self.C, self.fake_out, self.m)]
self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
zip(self.loss_G, self.loss_corr_train, self.var2)]
############################### Unsupervised learning ###############################
if self.lambda_unsup > 0:
self.compare_unsup()
self.var3 = [nd.square(coef) for coef in self.netG.coef_unsup._data]
self.loss_G = [l0 + ((1 / var) * l1 + nd.log(var)) for l0, l1, var in
zip(self.loss_G, self.unsup_loss, self.var3)]
############################## Feature Comparision ###############################
if self.lambda_D > 0:
self.var4 = [nd.square(coef) for coef in self.netG.coef_D._data]
self.loss_features = [self.feature_difference(
self.D_features(nd.where(m, C, m - 1)),
self.D_features(nd.where(m, fake_out, m - 1)),
nd.ones((C.shape[0]), ctx=C.context)
).mean() for m, C, fake_out in zip(self.m, self.C, self.fake_out)]
self.loss_G = [l0 + ((1 / var) * l1 * .1 + nd.log(var)) for l0, l1, var in
zip(self.loss_G, self.loss_features, self.var4)]
[loss_G.backward() for loss_G in self.loss_G]
self.trainerG.step(1, ignore_stale_grad=False)
if self.use_l_coefs:
self.trainer_coefs.step(1, ignore_stale_grad=False)
[self.save_training_outputs(self.A_rp[i], self.fake_out[i], self.C[i], self.m[i], prefix='',
suffix='_%02d_%d' % (self.current_it, i)) if self.monitor_training_outputs else None
for i in range(len(self.ctx))]
def update_running_loss(self, first_iter=False, num_batch=None):
"""Compute running loss"""
if num_batch is None:
if first_iter:
loss_fields = [field for field in self.__dict__.keys() if ('loss' in field) or ('err' in field)]
self.running_loss_fields = ['running_' + field for field in loss_fields]
[self.__setattr__(field, 0.) for field in self.running_loss_fields]
for loss_field in self.running_loss_fields:
_loss = nd.concatenate(list(self.__getattribute__(loss_field.replace('running_', ''))))
self.__setattr__(loss_field, (self.__getattribute__(loss_field) + _loss.mean().asscalar()))
else:
for loss_field in self.running_loss_fields:
self.__setattr__(loss_field, (self.__getattribute__(loss_field) / num_batch))
def update_mxboard(self, sw, epoch, val_data=None):
""" SHOW STATS AND IMAGES ON TENSORBOARD. THIS SHOULD BE RUN AFTER RUnNNING UPDATE_RUNNING_LOSS """
for loss_field in self.running_loss_fields:
_loss = self.__getattribute__(loss_field)
_loss = _loss.mean().asscalar() if isinstance(_loss, nd.NDArray) else _loss.mean()
if 'loss_true_density' in loss_field: # True density
sw.add_scalar('loss/true_density_loss', _loss, global_step=epoch)
else: # GAN loss
loss_type = loss_field.split('_')[0] + '_' + \
loss_field.split('_')[1] + '_' + \
loss_field.split('_')[2]
# sw.add_scalar('loss/' + loss_type, {loss_field: _loss}, global_step=epoch)
sw.add_scalar('loss/' + loss_type, _loss, global_step=epoch)
if hasattr(self, 'running_loss_true_density_val'):
sw.add_scalar('loss/true_density_loss_val', self.running_loss_true_density_val, global_step=epoch)
metric_list = metrics.update_mxboard_metric_v1(sw, data=val_data, global_step=epoch,
metric_names=[
'r_whole', 'l1_whole', 'ssim_whole',
'rmse_whole', 'rmse_log_whole',
't1', 't2', 't3',
'abs_rel_diff', 'sqr_rel_diff',
'ta1', 'ta2',
],
prefix='validation_',
num_input_channels=self.n_A_channel_idx,
c_thr=self.C_thr,
density_range=self.density_range,
root=self.root) # 'r', 'l1', 'ssim', 'nmi',
# if hasattr(self, 'current_margin'):
sw.add_scalar('loss_margin', self.current_margin, global_step=epoch)
#######################################
# Map input data to 0 - 1
for c in range(val_data[0].shape[1]):
val_data[0][:, c] = (val_data[0][:, c] - val_data[0][:, c].min()) / (
val_data[0][:, c].max() - val_data[0][:, c].min()) * val_data[4][:, 0]
""" MULTIPLE CHANNELS OF EACH IMAGE ARE SPLIT INTO SEPARATE IMAGES """
_val_data = []
for i in range(len(val_data)):
for j in range(val_data[i].shape[1]):
_val_data.append(val_data[i][:, j:j + 1])
#######################################
""" NORM TO 0-1 RANGE IF NECESSARY """
if self.to_11: # Normalize image from [-1, 1] to [0, 1]
for i in range(-4, -2): # prediction and label
_val_data[i] = self.normalize_01(_val_data[i], [-1, 1]) * _val_data[-1]
#######################################
""" SAVE FIRST IMAGE TO FOLDER & UPDATE BEST METRICS """
to_save_montage = self.update_best_metrics(metric_list)
print(self.best_metrics)
if to_save_montage:
self.save_montage_im(_val_data)
#######################################
""" DROP LAST CHANNEL (WP) IN _val_data BECAUSE IT IS NO LONGER NECESSARY """
_val_data = _val_data[:-1]
#######################################
return metric_list
@staticmethod
def linear_scale(x, vmin=-1, vmax=1, tmin=0, tmax=1):
return ((x - vmin) / (vmax - vmin)) * (tmax - tmin) + tmin
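# Worked example: linear_scale(0.0, vmin=-1, vmax=1, tmin=0, tmax=1)
# = ((0 - (-1)) / (1 - (-1))) * (1 - 0) + 0 = 0.5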
def _gen_unsup_pred(self):
"""Generate predictions for unsupvised data"""
input_list, pred_list, wp_list = [], [], []
for i, (_, _, C, m, wp, A_rp) in enumerate(self.val_iter):
# Inputs to GPUs (or CPUs)
self.set_inputs(A_rp_val=A_rp, C_val=C, m_val=m, wp_val=wp)
pred = nd.concatenate([self.netG(A_rp_val) for A_rp_val in self.A_rp_val])
# merge data across all used GPUs
self.C_val, self.m_val, | |
#!/usr/bin/env python3
import os
from datetime import datetime, timedelta
import errno
import time
import USBKey
class Files():
import sys
import csv
from datetime import datetime, date, time, timedelta
import os
from os import path
import shutil
# import urllib # for python 3 : import urllib.request
import urllib
import USBKey
import json
import structureConfig as structConfig
import re
import ast
"""
The constructor initializes this class.
IMPORTANT: if the file does not exist yet it will be created. This means we are creating a FILE THAT STORES UIDs AND NOTHING ELSE.
Conversely, to compare files you must create a Files object and pass as argument the location of the file within your structure.
Argument: path to the file (default value is None)
"""
def __init__(self, initialFilePath=None):
self.presents = 0
self.absents = 0
self.wrong_presents = 0
self.errorsRequestAPI = 0
self.initFilePath = initialFilePath
self.folderPathName = ""
self.pathDSIFile = ""
self.read = 'r'
self.append = 'a'
self.write = 'w'
"""
This method adds a student to the file given as initialFilePath.
"""
def addStudentToFile(self, UID, DTnow):
# Used to know whether the UID is already present in the file
if(self.exist(self.initFilePath)):
# The following file read tells whether the card scanned by the user is already present in the file
with open(self.initFilePath, self.read) as UIDFile:
UIDFilereader = self.csv.reader(UIDFile)
for row in UIDFilereader:
if(row[2] == UID):
print("Carte deja scannee")
return False
with open(self.initFilePath, self.append) as UIDFile:
UIDFileWriter = self.csv.writer(
UIDFile, delimiter=',', quotechar='|', quoting=self.csv.QUOTE_MINIMAL)
UIDFileWriter.writerow(
[DTnow.strftime("%d-%m-%Y"), DTnow.strftime("%H-%M-%S"), UID])
return True
else:
with open(self.initFilePath, self.write) as UIDFile:
UIDFileWriter = self.csv.writer(
UIDFile, delimiter=',', quotechar='|', quoting=self.csv.QUOTE_MINIMAL)
UIDFileWriter.writerow(
[DTnow.strftime("%d-%m-%Y"), DTnow.strftime("%H-%M-%S"), UID])
return True
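# Hedged usage sketch (the path, UID and timestamps are illustrative only):
#
#   f = Files("/home/pi/scans/uids.csv")
#   f.addStudentToFile("04A1B2C3", datetime.now())
#
# A successful call appends a CSV row of the form "21-03-2021,14-05-33,04A1B2C3"
# and returns True; re-scanning the same UID returns False.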
"""
This method finds the file that has the same date, within an interval given in the class constructor.
In our case this method is used to search the USB key for the folder that falls within that interval.
Output: name of the file within the interval, otherwise None
"""
def foundSameEventFile(self, pathUSB, DTMain, interval):
'''
More conditions still need to be added to this function, especially since it determines whether there are files with the same date on the key. That remains to be added.
'''
folderList = listDirectory(
pathUSB + "/Fichiers_SCAN", True, False, False)
print("DIR LIST : ", folderList)
for folderName in folderList:
try:
folderDT = self.datetime.strptime(
folderName, "%d-%m-%Y-%H-%M-%S")
Delta = abs(DTMain - folderDT)
if(Delta <= interval):
InnerFiles = listDirectory(
pathUSB+"/Fichiers_SCAN/"+folderName, False, True, True)
StandardFiles = ["report", "total"]
isExtractionComplete = True
for stfile in StandardFiles:
if not stfile+".csv" in InnerFiles:
isExtractionComplete = False
if isExtractionComplete:
print("Complete extraction found on USB key: ", folderName)
return folderName
else:
print("Uncomplete extraction found on USB key: ", folderName)
except ValueError:
print("Warning : Fichiers_SCAN folder contains undesired folders")
return None
"""
This method exports the final folder generated by the method compareDsiFilesToFileCreation().
It is therefore important to call that method before exporting to the USB key, otherwise the value of the variable self.folderPathName
will not match the location of the files on the USB key.
"""
def exportFileToUSB(self, USB_Key, DSIPath):
if(USB_Key):
# this condition checks whether the extraction has been done
if(self.folderPathName + '/' != ""):
if(self.exist(USB_Key + '/' + "Fichiers_SCAN")):
USB_Key += '/' + "Fichiers_SCAN" + '/' + \
self.initFilePath.rsplit(
'/')[-1].replace(".csv", "") + '/'
self.copyFilesFromDirectoryToDirectory(
self.folderPathName + '/', USB_Key)
if(self.exist(USB_Key)):
return True
else:
return False
else:
self.os.mkdir(USB_Key + '/' + "Fichiers_SCAN")
USB_Key += '/' + "Fichiers_SCAN" + '/' + \
self.initFilePath.rsplit(
'/')[-1].replace(".csv", "") + '/'
self.copyFilesFromDirectoryToDirectory(
self.folderPathName + '/', USB_Key)
if(self.exist(USB_Key)):
return True
else:
return False
# This condition checks whether the final file extraction has been done and the UID file is not empty;
# if so, it generates the final files and retries the export to the USB key
elif(self.folderPathName == "" and not self.isEmpty(self.initFilePath)):
self.compareDsiFilesToFileCreation(
DSIPath, self.initFilePath.rsplit('/')[-1].replace(".csv", ""))
self.exportFileToUSB(USB_Key, DSIPath)
else:
print("ERROR : usb key missing or final files not in the folder")
return False
"""
This method links files on the USB key to the files present in the final_extractions folder.
To do so, the files from the final_extractions folder are appended to the file on the USB key.
"""
# to call in case of multiple extraction
def addToUSBKEY(self, pathToUSB, DTnow, interval):
if(pathToUSB):
if(self.folderPathName != ""): # permet de verifier que l'extraction a ete faite
if(self.exist(pathToUSB + '/' + "Fichiers_SCAN/")):
directoryName = self.foundSameEventFile(
pathToUSB, DTnow, interval)
if directoryName:
pathToUSB += '/Fichiers_SCAN/' + directoryName
if(self.exist(self.folderPathName + '/presents.csv')):
with open(self.folderPathName + '/presents.csv', self.read) as presentFile:
presentFileReader = self.csv.reader(
presentFile)
if(self.exist(pathToUSB + '/presents.csv')):
next(presentFileReader)
with open(pathToUSB + '/presents.csv', self.append) as presentFileUSBKey:
fileUSBwriter = self.csv.writer(
presentFileUSBKey)
# This file read checks whether the person is already present in the file on the key;
# that would mean the person came in through two different entrances and scanned both times
with open(pathToUSB + '/presents.csv', self.read) as presentFileUSBReader:
checkerUSBPresent = self.csv.reader(
presentFileUSBReader)
for student in presentFileReader:
presentFileUSBReader.seek(0)
next(checkerUSBPresent)
indicePresent = False
for scannedPresent in checkerUSBPresent:
if(student[5] == scannedPresent[5]):
indicePresent = True
if not indicePresent:
fileUSBwriter.writerow(
student[:])
self.presents = self.__row_count(
pathToUSB + '/presents.csv') - 1
else:
with open(pathToUSB + '/presents.csv', self.write) as presentFileUSBKey:
fileUSBwriter = self.csv.writer(
presentFileUSBKey)
for student in presentFileReader:
fileUSBwriter.writerow(
student[:])
print("Adding present content to USB done")
if(self.exist(self.folderPathName + '/absents.csv')):
with open(self.folderPathName + '/absents.csv', self.read) as absentFile:
absentFileReader = self.csv.reader(absentFile)
if(self.exist(pathToUSB + '/absents.csv') and self.exist(pathToUSB + '/presents.csv')):
self.absents = 0
# Delete the absents file on the USB key so the absents can be regenerated from the presents file
self.deleteFile(pathToUSB + '/absents.csv')
# This then recreates an absents file on the USB key by reading the presents file that is on the key
with open(pathToUSB + '/presents.csv', self.read) as Present_File, open(self.pathDSIFile+".csv", self.read) as DSIfile:
Present_FileReader = self.csv.reader(
Present_File)
DSI_FileReader = self.csv.reader(
DSIfile)
# These lines skip the headers in the files
next(DSI_FileReader)
for DSI_Row in DSI_FileReader:
indice_present = False
Present_File.seek(0)
next(Present_FileReader)
for Present_Row in Present_FileReader:
if(DSI_Row[3] == Present_Row[5]):
indice_present = True
break
if not indice_present:
if(self.exist(pathToUSB + '/absents.csv')):
with open(pathToUSB + '/absents.csv', self.append) as absentsFile:
absentFileWriter = self.csv.writer(
absentsFile, delimiter=',', quotechar='|', quoting=self.csv.QUOTE_MINIMAL)
absentFileWriter.writerow(
DSI_Row[:])
else:
with open(pathToUSB + '/absents.csv', self.write) as absentsFile:
absentFileWriter = self.csv.writer(
absentsFile, delimiter=',', quotechar='|', quoting=self.csv.QUOTE_MINIMAL)
absentFileWriter.writerow(
['NOM', 'PRENOM', 'ETUD_NUMERO', 'NO_INDIVIDU', 'MAIL', 'LOGIN', 'FORMATION', 'NIVEAU'])
absentFileWriter.writerow(
DSI_Row[:])
self.absents += 1
# If there is no presents file on the key but there is an absents file, append the absents we generated to that file
elif(self.exist(pathToUSB + '/absents.csv')):
with open(pathToUSB + '/absents.csv', self.append) as absentFileUSBKey:
fileUSBwriter = self.csv.writer(
absentFileUSBKey)
next(absentFileReader)
for student in absentFileReader:
fileUSBwriter.writerow(
student[:])
self.absents = self.__row_count(
pathToUSB + '/absents.csv') - 1
# If there is no absents file at all on the key, copy the one we generated
elif not self.exist(pathToUSB + '/absents.csv'):
with open(pathToUSB + '/absents.csv', self.write) as absentFileUSBKey:
fileUSBwriter = self.csv.writer(
absentFileUSBKey)
for student in absentFileReader:
fileUSBwriter.writerow(
student[:])
print("Adding absents content to USB done")
# Since false presents are people who are there by mistake, we can append them directly to the end of the file, or create it if it does not exist
if(self.exist(self.folderPathName + '/faux-presents.csv')):
with open(self.folderPathName + '/faux-presents.csv', self.read) as wrong_present_File:
wPresentFileReader = self.csv.reader(
wrong_present_File)
if(self.exist(pathToUSB + '/faux-presents.csv')):
next(wPresentFileReader)
with open(pathToUSB + '/faux-presents.csv', self.append) as wPresentFileUSBKey:
fileUSBwriter = self.csv.writer(
wPresentFileUSBKey)
for student in wPresentFileReader:
fileUSBwriter.writerow(
student[:])
else:
with open(pathToUSB + '/faux-presents.csv', self.write) as wPresentFileUSBKey:
fileUSBwriter = self.csv.writer(
wPresentFileUSBKey)
for student in wPresentFileReader:
fileUSBwriter.writerow(
student[:])
self.wrong_presents = self.__row_count(
pathToUSB + '/faux-presents.csv') - 1
print("Adding faux-present content to USB done")
with open(pathToUSB + '/total.csv', self.write) as totalFileUSBKey:
totalFileWriter = self.csv.writer(
totalFileUSBKey, delimiter=',', quotechar='|', quoting=self.csv.QUOTE_MINIMAL)
totalFileWriter.writerow(
['DATE', 'HEURE', 'NOM', 'PRENOM', 'PRESENCE', 'ETUD_NUMERO', 'NO_INDIVIDU', 'MAIL', 'FORMATION', 'NIVEAU'])
if(self.exist(pathToUSB + '/presents.csv')):
with open(pathToUSB + '/presents.csv', self.read) as presentFile:
presentFileReader = | |
-2, -2]
disaster -3.1 0.83066 [-2, -4, -4, -3, -3, -2, -4, -3, -2, -4]
disasters -2.6 0.8 [-2, -2, -3, -1, -3, -3, -2, -4, -3, -3]
disastrous -2.9 0.53852 [-2, -2, -3, -3, -3, -3, -4, -3, -3, -3]
disbelieve -1.2 0.87178 [-1, -2, -1, -2, -1, 0, 0, -1, -3, -1]
discard -1.0 0.44721 [-1, -1, -1, -1, 0, -1, -1, -1, -2, -1]
discarded -1.4 0.91652 [-1, -1, -1, -1, 0, -1, -2, -3, -3, -1]
discarding -0.7 0.45826 [-1, 0, -1, -1, -1, 0, -1, 0, -1, -1]
discards -1.0 0.63246 [0, -1, -1, -1, -2, 0, -2, -1, -1, -1]
discomfort -1.8 0.6 [-2, -2, -2, -1, -1, -3, -2, -2, -1, -2]
discomfortable -1.6 0.8 [-1, -1, -1, -2, -3, -1, -2, -1, -3, -1]
discomforted -1.6 0.8 [-1, -1, -1, -2, -3, -3, -1, -1, -1, -2]
discomforting -1.6 1.11355 [-1, -2, -1, -1, -2, 1, -3, -2, -3, -2]
discomforts -1.3 0.9 [-2, -1, -2, -1, -1, -2, -2, -1, 1, -2]
disconsolate -2.3 0.78102 [-1, -2, -2, -3, -2, -2, -2, -4, -3, -2]
disconsolation -1.7 0.45826 [-2, -2, -1, -2, -1, -1, -2, -2, -2, -2]
discontented -1.8 0.9798 [-1, -3, -1, -2, -4, -2, -1, -2, -1, -1]
discord -1.7 0.64031 [-3, -2, -2, -2, -2, -1, -1, -1, -1, -2]
discounted 0.2 1.249 [-1, 0, 3, -1, 0, 1, 1, 1, -1, -1]
discourage -1.8 0.6 [-2, -2, -1, -2, -1, -1, -2, -2, -3, -2]
discourageable -1.2 0.9798 [-1, -2, -1, 1, -1, -1, -1, -2, -3, -1]
discouraged -1.7 0.45826 [-2, -1, -2, -2, -2, -2, -1, -1, -2, -2]
discouragement -2.0 0.89443 [-4, -1, -2, -2, -1, -1, -3, -2, -2, -2]
discouragements -1.8 0.6 [-2, -2, -2, -1, -1, -3, -2, -1, -2, -2]
discourager -1.7 0.78102 [-2, -1, -3, -2, -1, -3, -1, -2, -1, -1]
discouragers -1.9 0.53852 [-2, -2, -2, -2, -1, -1, -3, -2, -2, -2]
discourages -1.9 0.53852 [-2, -1, -2, -2, -2, -2, -1, -3, -2, -2]
discouraging -1.9 0.7 [-2, -2, -2, -3, -1, -1, -2, -1, -2, -3]
discouragingly -1.8 0.87178 [-2, -1, -3, -1, -1, -3, -2, -1, -3, -1]
discredited -1.9 0.53852 [-2, -2, -2, -2, -1, -3, -1, -2, -2, -2]
disdain -2.1 0.3 [-3, -2, -2, -2, -2, -2, -2, -2, -2, -2]
disgrace -2.2 0.74833 [-2, -4, -1, -2, -2, -2, -2, -3, -2, -2]
disgraced -2.0 0.44721 [-3, -2, -2, -2, -1, -2, -2, -2, -2, -2]
disguise -1.0 1.09545 [-2, -1, 0, 0, 0, 0, -3, -2, -2, 0]
disguised -1.1 1.04403 [-3, 0, 0, -1, -1, 0, -3, -1, -1, -1]
disguises -1.0 0.63246 [-2, 0, 0, -1, -1, -1, -2, -1, -1, -1]
disguising -1.3 0.78102 [0, -2, -1, -1, -1, -2, -1, -1, -3, -1]
disgust -2.9 0.7 [-3, -3, -4, -2, -3, -3, -4, -2, -3, -2]
disgusted -2.4 0.91652 [-4, -3, -3, -1, -3, -1, -2, -2, -2, -3]
disgustedly -3.0 0.89443 [-2, -3, -4, -4, -2, -4, -4, -2, -3, -2]
disgustful -2.6 0.4899 [-3, -3, -2, -2, -2, -2, -3, -3, -3, -3]
disgusting -2.4 1.11355 [-3, -2, -3, -4, -1, -3, -1, -4, -1, -2]
disgustingly -2.9 0.7 [-3, -3, -4, -3, -3, -2, -2, -4, -2, -3]
disgusts -2.1 0.53852 [-2, -2, -3, -2, -2, -2, -2, -3, -1, -2]
dishearten -2.0 0.63246 [-3, -1, -2, -3, -2, -2, -1, -2, -2, -2]
disheartened -2.2 0.74833 [-2, -2, -2, -1, -2, -2, -4, -3, -2, -2]
disheartening -1.8 1.32665 [-2, -2, -2, -3, -2, 2, -2, -2, -3, -2]
dishearteningly -2.0 0.63246 [-2, -3, -2, -1, -2, -2, -2, -3, -2, -1]
disheartenment -2.3 0.45826 [-3, -2, -3, -2, -2, -2, -2, -3, -2, -2]
disheartenments -2.2 0.87178 [-2, -3, -3, -3, -3, -1, -1, -1, -2, -3]
disheartens -2.2 0.4 [-3, -2, -2, -2, -3, -2, -2, -2, -2, -2]
dishonest -2.7 0.9 [-3, -2, -1, -4, -3, -2, -4, -3, -3, -2]
disillusion -1.0 1.18322 [-2, 0, -2, -1, -2, 1, -2, -1, 1, -2]
disillusioned -1.9 0.7 [-2, -2, -3, -2, -3, -1, -1, -1, -2, -2]
disillusioning -1.3 1.00499 [-2, -2, 1, -2, 0, -2, -2, -1, -1, -2]
disillusionment -1.7 0.78102 [-1, -3, -2, -3, -1, -2, -2, -1, -1, -1]
disillusionments -1.5 1.0247 [-2, 1, -3, -2, -1, -1, -2, -1, -2, -2]
disillusions -1.6 0.4899 [-2, -2, -2, -1, -1, -1, -2, -2, -1, -2]
disinclined -1.1 0.53852 [0, -1, -1, -1, -1, -1, -1, -2, -2, -1]
disjointed -1.3 0.45826 [-1, -1, -2, -1, -1, -2, -1, -2, -1, -1]
dislike -1.6 0.4899 [-2, -1, -1, -2, -2, -1, -2, -1, -2, -2]
disliked -1.7 0.64031 [-2, -3, -2, -1, -1, -1, -2, -2, -1, -2]
dislikes -1.7 0.78102 [-2, -2, -1, -1, -2, -1, -3, -3, -1, -1]
disliking -1.3 0.45826 [-1, -1, -2, -2, -2, -1, -1, -1, -1, -1]
dismal -3.0 1.0 [-2, -1, -4, -4, -3, -2, -3, -4, -4, -3]
dismay -1.8 0.87178 [-3, -1, -1, -3, -1, -1, -3, -1, -2, -2]
dismayed -1.9 0.9434 [-1, -2, -1, -3, -4, -1, -2, -2, -1, -2]
dismaying -2.2 0.9798 [-2, -3, -2, -3, -3, 0, -2, -1, -3, -3]
dismayingly -1.9 0.83066 [-2, -3, -2, -3, -2, -1, -2, -2, 0, -2]
dismays -1.8 1.07703 [-1, -1, -4, -3, -2, -1, -2, 0, -2, -2]
disorder -1.7 0.64031 [-2, -1, -1, -2, -2, -1, -3, -1, -2, -2]
disorganized -1.2 0.4 [-1, -1, -1, -1, -1, -2, -1, -2, -1, -1]
disoriented -1.5 0.67082 [-2, -2, -1, 0, -1, -2, -2, -1, -2, -2]
disparage -2.0 0.44721 [-2, -2, -2, -1, -2, -2, -2, -3, -2, -2]
disparaged -1.4 0.8 [-1, -2, -2, -3, -1, -1, -1, -2, 0, -1]
disparages -1.6 0.8 [-1, -2, -3, -2, -1, -1, -2, -2, 0, -2]
disparaging -2.2 0.6 [-3, -1, -2, -2, -2, -3, -3, -2, -2, -2]
displeased -1.9 0.7 [-3, -2, -1, -1, -3, -2, -2, -1, -2, -2]
dispute -1.7 0.78102 [-1, -3, -1, -1, -2, -1, -2, -2, -3, -1]
disputed -1.4 0.66332 [-2, -2, -2, -2, 0, -1, -1, -1, -1, -2]
disputes -1.1 1.64012 [-2, -2, -2, 2, -3, -1, -2, 2, -1, -2]
disputing -1.7 0.64031 [-2, -2, -2, -2, -1, -1, -3, -1, -1, -2]
disqualified -1.8 0.6 [-1, -2, -1, -2, -1, -2, -2, -3, -2, -2]
disquiet -1.3 0.9 [-1, -2, -2, -1, -1, -1, 1, -2, -2, -2]
disregard -1.1 0.53852 [-1, -1, -2, -1, -1, -2, -1, -1, 0, -1]
disregarded -1.6 0.4899 [-1, -1, -2, -2, -2, -2, -1, -2, -1, -2]
disregarding -0.9 0.53852 [-1, 0, -1, 0, -2, -1, -1, -1, -1, -1]
disregards -1.4 0.4899 [-1, -1, -2, -1, -2, -2, -2, -1, -1, -1]
disrespect -1.8 0.6 [-2, -2, -2, -1, -2, -2, -1, -3, -1, -2]
disrespected -2.0 0.63246 [-2, -2, -2, -2, -2, -3, -3, -1, -1, -2]
disruption -1.5 0.67082 [-1, -1, -1, -2, -1, -3, -2, -2, -1, -1]
disruptions -1.4 0.4899 [-1, -2, -1, -1, -1, -2, -2, -2, -1, -1]
disruptive -1.3 1.00499 [-4, 0, -1, -1, -1, -1, -1, -1, -2, -1]
dissatisfaction -2.2 0.74833 [-4, -2, -2, -2, -1, -3, -2, -2, -2, -2]
dissatisfactions -1.9 0.83066 [-1, -3, -3, -1, -2, -1, -2, -2, -1, -3]
dissatisfactory -2.0 0.63246 [-2, -2, -3, -1, -2, -3, -2, -2, -1, -2]
dissatisfied -1.6 0.66332 [-2, -3, -1, -2, -1, -1, -2, -2, -1, -1]
dissatisfies -1.8 0.74833 [-3, -3, -1, -1, -2, -1, -2, -2, -2, -1]
dissatisfy -2.2 0.6 [-2, -3, -2, -2, -2, -2, -3, -3, -1, -2]
dissatisfying -2.4 0.91652 [-3, -1, -4, -3, -2, -1, -2, -2, -3, -3]
distort -1.3 0.45826 [-2, -1, -1, -1, -2, -1, -1, -1, -1, -2]
distorted -1.7 0.78102 [-3, -1, -3, -1, -2, -1, -2, -2, -1, -1]
distorting -1.1 0.53852 [0, -1, -1, -1, -2, -1, -1, -1, -2, -1]
distorts -1.4 0.4899 [-2, -1, -1, -1, -2, -2, -2, -1, -1, -1]
distract -1.2 0.6 [-1, -1, 0, -2, -1, -1, -1, -2, -1, -2]
distractable -1.3 1.00499 [-2, 0, 1, -2, -2, -1, -1, -2, -2, -2]
distracted -1.4 0.66332 [-1, -3, -1, -2, -2, -1, -1, -1, -1, -1]
distractedly -0.9 0.7 [-1, -1, 0, -2, -1, 0, -1, 0, -1, -2]
distractibility -1.3 1.1 [-1, -1, -3, -1, 0, -3, -2, 0, -2, 0]
distractible -1.5 0.92195 [-1, -2, -1, -1, -4, -1, -1, -1, -2, -1]
distracting -1.2 0.4 [-2, -1, -1, -1, -1, -1, -2, -1, -1, -1]
distractingly -1.4 1.0198 [-4, 0, -1, -1, -1, -2, -1, -1, -2, -1]
distraction -1.6 0.66332 [-1, -2, -2, -1, -1, -3, -1, -1, -2, -2]
distractions -1.0 0.0 [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
distractive -1.6 0.4899 [-2, -2, -1, -1, -1, -1, -2, -2, -2, -2]
distracts -1.3 0.45826 [-1, -1, -2, -1, -1, -2, -1, -2, -1, -1]
distraught -2.6 0.8 [-2, -3, -2, -3, -4, -2, -3, -3, -1, -3]
distress -2.4 0.8 [-1, -2, -2, -3, -3, -4, -3, -2, -2, -2]
distressed -1.8 0.6 [-2, -2, -2, -3, -2, -2, -1, -1, -2, -1]
distresses -1.6 0.66332 [-2, -1, -1, -2, -2, -2, -1, -1, -3, -1]
distressful -2.2 0.6 [-1, -3, -3, -2, -2, -3, -2, -2, -2, -2]
distressfully -1.7 1.1 [-1, -3, -2, 1, -3, -1, -2, -2, -2, -2]
distressfulness -2.4 0.66332 [-2, -3, -2, -3, -3, -3, -2, -1, -3, -2]
distressing -1.7 1.18743 [-3, -3, -1, -1, -2, -2, -3, -2, 1, -1]
distressingly -2.2 0.74833 [-3, -1, -2, -3, -3, -3, -2, -2, -2, -1]
distrust -1.8 0.87178 [-1, -2, -2, -2, -1, -1, -2, -4, -1, -2]
distrusted -2.4 0.66332 [-1, -3, -3, -3, -2, -2, -3, -2, -2, -3]
distrustful -2.1 0.83066 [-1, -3, -2, -2, -3, -1, -1, -3, -2, -3]
distrustfully -1.8 0.6 [-2, -1, -1, -2, -1, -2, -3, -2, -2, -2]
distrustfulness -1.6 0.66332 [-2, -1, -2, -1, -2, -1, -1, -3, -1, -2]
distrusting -2.1 0.83066 [-1, -2, -2, -2, -3, -3, -1, -3, -1, -3]
distrusts -1.3 0.45826 [-1, -1, -2, -1, -2, -1, -2, -1, -1, -1]
disturb -1.7 0.45826 [-2, -1, -1, -2, -2, -2, -1, -2, -2, -2]
disturbance -1.6 0.8 [-1, -2, -1, -2, -2, -3, -1, -2, 0, -2]
disturbances -1.4 0.66332 [-1, -1, -1, -2, -1, -1, -1, -1, -3, -2]
disturbed -1.6 0.4899 [-2, -2, -1, -1, -2, -2, -1, -2, -1, -2]
disturber -1.4 0.4899 [-2, -1, -1, -2, -2, -1, -1, -2, -1, -1]
disturbers -2.1 0.53852 [-2, -2, -2, -2, -2, -3, -3, -2, -1, -2]
disturbing -2.3 0.45826 [-2, -2, -3, -3, -2, -2, -3, -2, -2, -2]
disturbingly -2.3 0.78102 [-2, -2, -1, -3, -4, -3, -2, -2, -2, -2]
disturbs -1.9 0.53852 [-2, -2, -1, -2, -3, -2, -1, -2, -2, -2]
dithering -0.5 0.92195 [0, 0, 0, 0, 1, -1, -2, -2, -1, 0]
divination 1.7 1.1 [2, 3, 0, 1, 2, 1, 3, 3, 2, 0]
divinations 1.1 1.04403 [1, | |
**self.directory._upload_lookup(conn, cur, object, self.job))
def upload_chunk_from_file(self, position, input, client_context, nbytes, metadata={}):
return self.directory.upload_chunk_from_file(self, position, input, client_context, nbytes, metadata)
def get_content(self, client_context, get_data=True):
self.enforce_acl(['owner'], client_context)
metadata = self.metadata.to_http()
body = {
'url': str(self),
'target': str(self.object),
'owner': self.get_acl('owner'),
'chunk-length': self.chunksize,
'content-length': self.nbytes
}
for hdr in {
'content-type',
'content-md5',
'content-sha256',
'content-disposition',
}:
if hdr in metadata:
body[hdr] = metadata[hdr]
body = jsonWriter(body) + b'\n'
return len(body), Metadata({'content-type': 'application/json'}), body
def finalize(self, client_context):
return self.directory.upload_finalize(self, client_context)
def cancel(self, client_context):
return self.directory.upload_cancel(self, client_context)
class connection (psycopg2.extensions.connection):
"""Customized psycopg2 connection factory
"""
def __init__(self, dsn):
psycopg2.extensions.connection.__init__(self, dsn)
try:
self._prepare_hatrac_stmts()
except psycopg2.ProgrammingError:
self.rollback()
def _prepare_hatrac_stmts(self):
cur = self.cursor()
cur.execute("""
DEALLOCATE PREPARE ALL;
PREPARE hatrac_complete_version (int8, text, boolean) AS
UPDATE hatrac.version SET is_deleted = $3, version = $2 WHERE id = $1 ;
PREPARE hatrac_delete_version (int8) AS
UPDATE hatrac.version SET is_deleted = True WHERE id = $1 ;
PREPARE hatrac_delete_name (int8) AS
UPDATE hatrac.name SET is_deleted = True WHERE id = $1 ;
PREPARE hatrac_delete_chunks (int8) AS
DELETE FROM hatrac.chunk WHERE uploadid = $1 ;
PREPARE hatrac_delete_upload (int8) AS
DELETE FROM hatrac.upload WHERE id = $1 ;
PREPARE hatrac_name_lookup (text, boolean) AS
SELECT n.*, %(owner_acl)s, %(update_acl)s, %(read_acl)s, %(create_acl)s
FROM hatrac.name n
WHERE n.name = $1 AND (NOT n.is_deleted OR NOT $2) ;
PREPARE hatrac_version_lookup (int8, text) AS
SELECT v.*, n.name, n.pid, n.ancestors, %(owner_acl)s, %(read_acl)s
FROM hatrac.version v
JOIN hatrac.name n ON (v.nameid = n.id)
WHERE v.nameid = $1 AND v.version = $2 ;
PREPARE hatrac_upload_lookup(int8, text) AS
SELECT u.*, n.name, n.pid, n.ancestors, %(owner_acl)s
FROM hatrac.upload u
JOIN hatrac.name n ON (u.nameid = n.id)
WHERE u.nameid = $1 AND u.job = $2 ;
PREPARE hatrac_version_list(int8, int8) AS
SELECT v.*, n.name, n.pid, n.ancestors
FROM hatrac.name n
JOIN hatrac.version v ON (v.nameid = n.id)
WHERE v.nameid = $1 AND NOT v.is_deleted
ORDER BY v.id DESC
LIMIT $2 ;
PREPARE hatrac_chunk_list (int8, int8) AS
SELECT *
FROM hatrac.chunk
WHERE uploadid = $1 AND ($2 IS NULL OR position = $2)
ORDER BY position ;
PREPARE hatrac_object_enumerate_versions (int8) AS
SELECT n.name, n.pid, n.ancestors, n.subtype, n.update, n."subtree-owner", n."subtree-read", v.*, %(owner_acl)s, %(read_acl)s
FROM hatrac.name n
JOIN hatrac.version v ON (v.nameid = n.id)
WHERE v.nameid = $1 AND NOT v.is_deleted
ORDER BY n.id, v.id ;
PREPARE hatrac_namepattern_enumerate_versions (text) AS
SELECT n.name, n.pid, n.ancestors, n.subtype, n.update, n."subtree-owner", n."subtree-read", v.*, %(owner_acl)s, %(read_acl)s
FROM hatrac.name n
JOIN hatrac.version v ON (v.nameid = n.id)
WHERE n.name ~ $1 AND NOT v.is_deleted
ORDER BY n.name, v.id ;
PREPARE hatrac_namespace_children_noacl (int8) AS
SELECT n.*
FROM hatrac.name p
JOIN hatrac.name n ON (n.pid = p.id)
WHERE p.id = $1 AND NOT n.is_deleted
ORDER BY n.name ;
PREPARE hatrac_namespace_children_acl (int8) AS
SELECT n.*, %(owner_acl)s, %(update_acl)s, %(read_acl)s, %(create_acl)s
FROM hatrac.name p
JOIN hatrac.name n ON (n.pid = p.id)
WHERE p.id = $1 AND NOT n.is_deleted
ORDER BY n.name ;
PREPARE hatrac_namespace_subtree_noacl (int8) AS
SELECT n.*
FROM hatrac.name p
JOIN hatrac.name n ON (p.id = ANY( n.ancestors ))
WHERE p.id = $1 AND NOT n.is_deleted
ORDER BY n.name ;
PREPARE hatrac_namespace_subtree_acl (int8) AS
SELECT n.*, %(owner_acl)s, %(update_acl)s, %(read_acl)s, %(create_acl)s
FROM hatrac.name p
JOIN hatrac.name n ON (p.id = ANY( n.ancestors ))
WHERE p.id = $1 AND NOT n.is_deleted
ORDER BY n.name ;
PREPARE hatrac_object_uploads (int8) AS
SELECT u.*, n.name, n.pid, n.ancestors, %(owner_acl)s
FROM hatrac.name n
JOIN hatrac.upload u ON (u.nameid = n.id)
WHERE n.id = $1
ORDER BY u.id ;
PREPARE hatrac_namespace_uploads (int8) AS
SELECT u.*, n.name, n.pid, n.ancestors, %(owner_acl)s
FROM hatrac.name n
JOIN hatrac.upload u ON (u.nameid = n.id)
WHERE $1 = ANY (n.ancestors)
ORDER BY n.name, u.id ;
PREPARE hatrac_version_aux_url_update (int8, text) AS
UPDATE hatrac.version v
SET aux = ('{"url":"' || $2 || '"}')::jsonb
WHERE id = $1 ;
PREPARE hatrac_version_aux_url_delete (int8) AS
UPDATE hatrac.version
SET aux = aux::jsonb - 'url'
WHERE id = $1 ;
PREPARE hatrac_version_aux_version_update (int8, text) AS
UPDATE hatrac.version
SET aux = CASE WHEN $2 IS NOT NULL THEN
('{"version":"' || $2 || '"}')::jsonb ELSE aux::jsonb - 'version' END
WHERE id = $1 ;
""" % dict(
owner_acl=ancestor_acl_sql('owner'),
update_acl=ancestor_acl_sql('update'),
read_acl=ancestor_acl_sql('read'),
create_acl=ancestor_acl_sql('create')
)
)
cur.close()
self.commit()
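# Hedged usage sketch: the statements prepared above are invoked with Postgres
# EXECUTE; the name value here is illustrative only.
#
#   cur.execute("EXECUTE hatrac_name_lookup (%s, %s)", ("/ns/object1", False))
#   row = cur.fetchone()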
def pool(minconn, maxconn, dsn):
"""Open a thread-safe connection pool with minconn <= N <= maxconn connections to database.
The connections are from the customized connection factory in this module.
"""
return psycopg2.pool.ThreadedConnectionPool(minconn, maxconn, dsn=dsn, connection_factory=connection, cursor_factory=DictCursor)
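# Hedged usage sketch (the DSN is illustrative): the pool hands out connections
# that already have the hatrac statements prepared.
#
#   p = pool(1, 4, "dbname=hatrac")
#   conn = p.getconn()
#   try:
#       ...  # use conn / conn.cursor()
#   finally:
#       p.putconn(conn)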
class PoolManager (object):
"""Manage a set of database connection pools keyed by database name.
"""
def __init__(self):
# map dsn -> [pool, timestamp]
self.pools = dict()
self.max_idle_seconds = 60 * 60 # 1 hour
def __getitem__(self, dsn):
"""Lookup existing or create new pool for database on demand.
May fail transiently and caller should retry.
"""
# abandon old pools so they can be garbage collected
for key in list(self.pools.keys()):
try:
pair = self.pools.pop(key)
delta = (datetime.datetime.now() - pair[1])
try:
delta_seconds = delta.total_seconds()
except:
delta_seconds = delta.seconds + delta.microseconds * math.pow(10,-6)
if delta_seconds < self.max_idle_seconds:
# this pool is sufficiently active so put it back!
boundpair = self.pools.setdefault(key, pair)
# if pair is still removed at this point, let garbage collector deal with it
except KeyError:
# another thread could have purged key before we got to it
pass
try:
pair = self.pools[dsn]
pair[1] = datetime.datetime.now() # update timestamp
return pair[0]
except KeyError:
# atomically get/set pool
newpool = pool(1, 4, dsn)
boundpair = self.pools.setdefault(dsn, [newpool, datetime.datetime.now()])
if boundpair[0] is not newpool:
# someone beat us to it
newpool.closeall()
return boundpair[0]
pools = PoolManager()
class PooledConnection (object):
def __init__(self, dsn):
self.dsn = dsn
def perform(self, bodyfunc, finalfunc=lambda x: x, verbose=False):
"""Run bodyfunc(conn, cur) using pooling, commit, transform with finalfunc, clean up.
Automates handling of errors.
"""
used_pool = pools[self.dsn]
conn = used_pool.getconn()
assert conn is not None
assert conn.status == psycopg2.extensions.STATUS_READY, ("pooled connection status", conn.status)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
cur = conn.cursor(cursor_factory=DictCursor)
try:
try:
result = bodyfunc(conn, cur)
conn.commit()
return finalfunc(result)
except psycopg2.InterfaceError as e:
# reset bad connection
used_pool.putconn(conn, close=True)
conn = None
raise e
except GeneratorExit as e:
# happens normally at end of result yielding sequence
raise
except:
if conn is not None:
conn.rollback()
if verbose:
et, ev, tb = sys.exc_info()
web.debug(u'got exception "%s" during PooledConnection.perform()' % (ev,),
traceback.format_exception(et, ev, tb))
raise
finally:
if conn is not None:
assert conn.status == psycopg2.extensions.STATUS_READY, ("pooled connection status", conn.status)
cur.close()
used_pool.putconn(conn)
conn = None
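# Hedged usage sketch (the DSN is illustrative):
#
#   pc = PooledConnection("dbname=hatrac")
#
#   def body(conn, cur):
#       cur.execute("SELECT count(*) FROM hatrac.name")
#       return cur.fetchone()[0]
#
#   total = pc.perform(body)   # commits on success, rolls back on error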
_name_table_sql = """
CREATE TABLE IF NOT EXISTS hatrac.name (
id bigserial PRIMARY KEY,
pid int8 REFERENCES hatrac."name" (id),
ancestors int8[],
name text NOT NULL UNIQUE,
subtype int NOT NULL,
is_deleted bool NOT NULL,
owner text[],
"create" text[],
"update" text[],
read text[],
"subtree-owner" text[],
"subtree-create" text[],
"subtree-update" text[],
"subtree-read" text[]
);
CREATE INDEX IF NOT EXISTS name_ancestors_idx ON hatrac."name" USING gin (ancestors) WHERE NOT is_deleted;
CREATE INDEX IF NOT EXISTS name_id_idx ON hatrac."name" (id) WHERE "subtree-owner" IS NOT NULL;
CREATE INDEX IF NOT EXISTS name_id_idx1 ON hatrac."name" (id) WHERE "subtree-create" IS NOT NULL;
CREATE INDEX IF NOT EXISTS name_id_idx2 ON hatrac."name" (id) WHERE "subtree-read" IS NOT NULL;
CREATE INDEX IF NOT EXISTS name_id_idx3 ON hatrac."name" (id) WHERE "subtree-update" IS NOT NULL;
INSERT INTO hatrac.name
(name, ancestors, subtype, is_deleted)
VALUES ('/', array[]::int8[], 0, False)
ON CONFLICT (name) DO NOTHING ;
"""
_version_table_sql = """
CREATE TABLE IF NOT EXISTS hatrac.version (
id bigserial PRIMARY KEY,
nameid int8 NOT NULL REFERENCES hatrac.name(id),
version text,
nbytes int8,
metadata jsonb,
is_deleted bool NOT NULL,
owner text[],
read text[],
aux json,
UNIQUE(nameid, version),
CHECK(version IS NOT NULL OR is_deleted)
);
CREATE INDEX IF NOT EXISTS version_nameid_id_idx ON hatrac.version (nameid, id);
DO $aux_upgrade$
BEGIN
IF (SELECT True FROM information_schema.columns
WHERE table_schema = 'hatrac'
AND table_name = 'version'
AND column_name = 'aux') THEN
-- do nothing
ELSE
ALTER TABLE hatrac.version ADD COLUMN aux json;
END IF;
END;
$aux_upgrade$ LANGUAGE plpgsql;
"""
_upload_table_sql = """
CREATE TABLE IF NOT EXISTS hatrac.upload (
id bigserial PRIMARY KEY,
nameid int8 NOT NULL REFERENCES hatrac.name(id),
job text NOT NULL,
nbytes int8 NOT NULL,
chunksize int8 NOT NULL,
metadata jsonb,
owner text[],
UNIQUE(nameid, job),
CHECK(chunksize > 0)
);
DO $created_on_upgrade$
BEGIN
IF (SELECT True FROM information_schema.columns
| |
'''
Verify correct log retention behavior.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test the enforcement of proxy.config.log.max_space_mb_for_logs.
'''
# This test is sensitive to timing issues, especially in the OS CI for some
# reason. We'll leave the test here because it is helpful for when doing
# development on the log rotate code, but make it generally skipped when the
# suite of AuTests are run so it doesn't generate annoying false negatives.
Test.SkipIf(Condition.true("This test is sensitive to timing issues which makes it flaky."))
class TestLogRetention:
__base_records_config = {
# Do not accept connections from clients until cache subsystem is operational.
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'logspace',
# Enable log rotation and auto-deletion, the subjects of this test.
'proxy.config.log.rolling_enabled': 3,
'proxy.config.log.auto_delete_rolled_files': 1,
# 10 MB is the minimum rolling size.
'proxy.config.log.rolling_size_mb': 10,
'proxy.config.log.periodic_tasks_interval': 1,
}
__server = None
__ts_counter = 1
__server_is_started = False
def __init__(self, records_config, run_description, command="traffic_manager"):
"""
Create a TestLogRetention instance.
"""
self.server = TestLogRetention.__create_server()
self.ts = self.__create_ts(records_config, command)
self.__initialize_processes()
self.tr = Test.AddTestRun(run_description)
def __initialize_processes(self):
"""
Create a run to initialize the server and traffic_server processes so
the caller doesn't have to.
"""
tr = Test.AddTestRun("Initialize processes for ts{}".format(TestLogRetention.__ts_counter - 1))
tr.Processes.Default.Command = self.get_curl_command()
tr.Processes.Default.ReturnCode = 0
if not TestLogRetention.__server_is_started:
self.server.StartBefore(self.ts)
tr.Processes.Default.StartBefore(self.server)
TestLogRetention.__server_is_started = True
else:
tr.Processes.Default.StartBefore(self.ts)
tr.StillRunningAfter = self.ts
tr.StillRunningAfter = self.server
@classmethod
def __create_server(cls):
"""
Create and return a server process.
There is only one server process for all the tests. This function is
re-entrant, but subsequent calls to it will return the cached version
of the single server.
"""
if cls.__server:
return cls.__server
server = Test.MakeOriginServer("server")
request_header = {"headers": "GET / HTTP/1.1\r\n"
"Host: does.not.matter\r\n\r\n",
"timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\n"
"Connection: close\r\n"
"Cache-control: max-age=85000\r\n\r\n",
"timestamp": "1469733493.993", "body": "xxx"}
server.addResponse("sessionlog.json", request_header, response_header)
cls.__server = server
return cls.__server
def __create_ts(self, records_config, command="traffic_manager"):
"""
Create an ATS process.
records_config: records_config values for this test.
command: The ATS process to run for the test.
"""
ts_name = "ts{counter}".format(counter=TestLogRetention.__ts_counter)
TestLogRetention.__ts_counter += 1
self.ts = Test.MakeATSProcess(ts_name, command=command)
combined_records_config = TestLogRetention.__base_records_config.copy()
combined_records_config.update(records_config)
self.ts.Disk.records_config.update(combined_records_config)
self.ts.Disk.remap_config.AddLine(
'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(
self.ts.Variables.port, self.server.Variables.Port)
)
return self.ts
def get_curl_command(self):
"""
Generate the appropriate single curl command.
"""
return 'curl "http://127.0.0.1:{0}" --verbose'.format(
self.ts.Variables.port)
def get_command_to_rotate_once(self):
"""
Generate the set of curl commands to trigger a log rotate.
"""
return 'for i in {{1..2500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format(
self.ts.Variables.port)
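# Rough sizing note (assumes the ~5 KB log entry format configured by the
# tests below): 2500 requests x ~5 KB per entry is about 12.5 MB, comfortably
# past the 10 MB rolling_size_mb threshold, so one rotation is expected.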
def get_command_to_rotate_thrice(self):
"""
Generate the set of curl commands to trigger a log rotate.
"""
return 'for i in {{1..7500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format(
self.ts.Variables.port)
#
# Run 1: Verify that log deletion happens when no min_count is specified.
#
twelve_meg_log_space = {
# The following configures a 12 MB log cap with a required 2 MB head room.
# Thus the rotated log of just over 10 MB should be deleted because it
# will not leave enough head room.
'proxy.config.log.max_space_mb_headroom': 2,
'proxy.config.log.max_space_mb_for_logs': 12,
}
test = TestLogRetention(twelve_meg_log_space,
"Verify log rotation and deletion of the configured log file with no min_count.")
# Configure approximately 5 KB entries for a log with no specified min_count.
test.ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: long
format: "{prefix}: %<sssc>"
logs:
- filename: test_deletion
format: long
'''.format(prefix="0123456789" * 500).split("\n")
)
# Verify that each log type was registered for auto-deletion.
test.ts.Streams.stderr = Testers.ContainsExpression(
"Registering rotated log deletion for test_deletion.log with min roll count 0",
"Verify test_deletion.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for error.log with min roll count 0",
"Verify error.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for traffic.out with min roll count 0",
"Verify traffic.out auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for diags.log with min roll count 0",
"Verify diags.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for manager.log with min roll count 0",
"Verify manager.log auto-delete configuration")
# Verify test_deletion was rotated and deleted.
test.ts.Streams.stderr += Testers.ContainsExpression(
"The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed",
"Verify that space was reclaimed")
test.tr.Processes.Default.Command = test.get_command_to_rotate_once()
test.tr.Processes.Default.ReturnCode = 0
test.tr.StillRunningAfter = test.ts
test.tr.StillRunningAfter = test.server
#
# Test 2: Verify log deletion happens with a min_count of 1.
#
test = TestLogRetention(twelve_meg_log_space,
"Verify log rotation and deletion of the configured log file with a min_count of 1.")
# Configure approximately 5 KB entries for a log with a min_count of 1.
test.ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: long
format: "{prefix}: %<sssc>"
logs:
- filename: test_deletion
rolling_min_count: 1
format: long
'''.format(prefix="0123456789" * 500).split("\n")
)
# Verify that each log type was registered for auto-deletion.
test.ts.Streams.stderr = Testers.ContainsExpression(
"Registering rotated log deletion for test_deletion.log with min roll count 1",
"Verify test_deletion.log auto-delete configuration")
# Only the test_deletion should have its min_count overridden.
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for error.log with min roll count 0",
"Verify error.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for traffic.out with min roll count 0",
"Verify traffic.out auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for diags.log with min roll count 0",
"Verify diags.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for manager.log with min roll count 0",
"Verify manager.log auto-delete configuration")
# Verify test_deletion was rotated and deleted.
test.ts.Streams.stderr += Testers.ContainsExpression(
"The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed",
"Verify that space was reclaimed")
test.tr.Processes.Default.Command = test.get_command_to_rotate_once()
test.tr.Processes.Default.ReturnCode = 0
test.tr.StillRunningAfter = test.ts
test.tr.StillRunningAfter = test.server
#
# Test 3: Verify log deletion happens for a plugin's logs.
#
test = TestLogRetention(twelve_meg_log_space,
"Verify log rotation and deletion of plugin logs.")
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'test_log_interface.so'), test.ts)
# Verify that the plugin's logs and other core logs were registered for deletion.
test.ts.Streams.stderr = Testers.ContainsExpression(
"Registering rotated log deletion for test_log_interface.log with min roll count 0",
"Verify test_log_interface.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for error.log with min roll count 0",
"Verify error.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for traffic.out with min roll count 0",
"Verify traffic.out auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for diags.log with min roll count 0",
"Verify diags.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for manager.log with min roll count 0",
"Verify manager.log auto-delete configuration")
# Verify test_log_interface was rotated and deleted.
test.ts.Streams.stderr += Testers.ContainsExpression(
"The rolled logfile.*test_log_interface.log_.*was auto-deleted.*bytes were reclaimed",
"Verify that space was reclaimed")
test.tr.Processes.Default.Command = test.get_command_to_rotate_once()
test.tr.Processes.Default.ReturnCode = 0
test.tr.StillRunningAfter = test.ts
test.tr.StillRunningAfter = test.server
#
# Test 4: Verify log deletion priority behavior.
#
twenty_two_meg_log_space = {
# The following configures a 22 MB log cap with a required 2 MB head room.
# This should allow enough room for two logs being rotated.
'proxy.config.log.max_space_mb_headroom': 2,
'proxy.config.log.max_space_mb_for_logs': 22,
}
test = TestLogRetention(twenty_two_meg_log_space,
"Verify log deletion priority behavior.")
# Configure approximately 5 KB entries for two logs with different min_count values.
test.ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: long
format: "{prefix}: %<sssc>"
logs:
- filename: test_low_priority_deletion
rolling_min_count: 5
format: long
- filename: test_high_priority_deletion
rolling_min_count: 1
format: long
'''.format(prefix="0123456789" * 500).split("\n")
)
# Verify that each log type was registered for auto-deletion.
test.ts.Streams.stderr = Testers.ContainsExpression(
"Registering rotated log deletion for test_low_priority_deletion.log with min roll count 5",
"Verify test_low_priority_deletion.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for test_high_priority_deletion.log with min roll count 1",
"Verify test_high_priority_deletion.log auto-delete configuration")
# Only the two test_*_deletion logs should have their min_count overridden.
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for error.log with min roll count 0",
"Verify error.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for traffic.out with min roll count 0",
"Verify traffic.out auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for diags.log with min roll count 0",
"Verify diags.log auto-delete configuration")
test.ts.Streams.stderr += Testers.ContainsExpression(
"Registering rotated log deletion for manager.log with min roll count 0",
"Verify manager.log auto-delete configuration")
# Verify that the low priority log (higher min_count) was not auto-deleted.
test.ts.Streams.stderr += Testers.ExcludesExpression(
"The rolled logfile.*test_low_priority_deletion.log_.*was auto-deleted.*bytes | |
# code_snippets/sort_example.py
import numbers
from selection_sort import *
from insertion_sort import *
from quick_sort import *
from merge_sort import *
from binary_radix_sort import *
from heap_sort import *
from bubble_sort import *
class StabilityUnit:
def __init__(self, value, stability_value):
self.value = value
self.stability_value = stability_value
def __lt__(self, other):
if isinstance(other, numbers.Number):
return self.value < other
return self.value < other.value
def __le__(self, other):
if isinstance(other, numbers.Number):
return self.value <= other
return self.value <= other.value
def __eq__(self, other):
if isinstance(other, numbers.Number):
return self.value == other
return self.value == other.value
def __ne__(self, other):
if isinstance(other, numbers.Number):
return self.value != other
return self.value != other.value
def __gt__(self, other):
if isinstance(other, numbers.Number):
return self.value > other
return self.value > other.value
def __ge__(self, other):
if isinstance(other, numbers.Number):
return self.value >= other
return self.value >= other.value
def __lshift__(self, other):
return StabilityUnit(self.value << other, self.stability_value)
def __rshift__(self, other):
return StabilityUnit(self.value >> other, self.stability_value)
def __abs__(self):
return StabilityUnit(abs(self.value), self.stability_value)
def __mod__(self, other):
return StabilityUnit(self.value % other, self.stability_value)
def __str__(self):
return str(self.value) + str(self.stability_value)
def print_list(a_list):
print("\t\t".join([str(x) for x in a_list]))
print("Original list: ")
original = [StabilityUnit(1, "a"),
StabilityUnit(3, "a"),
StabilityUnit(5, "a"),
StabilityUnit(6, "b"),
StabilityUnit(1, "b"),
StabilityUnit(1, "c"),
StabilityUnit(1, "d"),
StabilityUnit(44, "a"),
StabilityUnit(6, "b"),
StabilityUnit(4, "a"),
StabilityUnit(234, "a"),
StabilityUnit(234, "b"),
StabilityUnit(24, "a"),
StabilityUnit(11, "a"),
StabilityUnit(1, "e"),
StabilityUnit(1, "f"),
StabilityUnit(1, "g"),
StabilityUnit(102, "a"),
StabilityUnit(10, "a"),
StabilityUnit(10213131, "a"),
StabilityUnit(1201, "a"),
StabilityUnit(102, "b"),
StabilityUnit(10, "b"),
StabilityUnit(10213131, "b"),
StabilityUnit(1201, "b"),
StabilityUnit(102, "c"),
StabilityUnit(10, "c"),
StabilityUnit(10213131, "c"),
StabilityUnit(1201, "c"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(-1, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
StabilityUnit(9999, "a"),
StabilityUnit(99, "a"),
StabilityUnit(999, "a"),
StabilityUnit(999, "a"),
]
def test_properties(correct, tested):
is_stable = True
for i in range(len(correct)):
if correct[i] != tested[i]:
print("INCORRECT: index({0}), {1} should be {2}".format(str(i), str(tested[i]), str(correct[i])))
return
if correct[i].stability_value != tested[i].stability_value:
is_stable = False
print("Correct, " + ("stable" if is_stable else "unstable"))
import time
print_list(original)
print(len(original))
print()
print("Correctly sorted: ")
a = time.time()
# renamed to avoid shadowing the builtin sorted()
correctly_sorted = sorted(original)
b = time.time()
print(b - a)
print_list(correctly_sorted)
print()
print("Selection sorted: ")
selection_sorted_list = list(original)
a = time.time()
selection_sort(selection_sorted_list)
b = time.time()
print(b - a)
print_list(selection_sorted_list)
test_properties(correctly_sorted, selection_sorted_list)
print()
print("Insertion | |
#!/usr/bin/env python
"""
Run tests
NOTE: Running these tests is confusing. The best way I currently have of adjusting parameters
during development is to adjust the parameters inside the DTO object
"""
import io
import os
import sys
import glob
import boto3
import shlex
import docker
import shutil
import pickle
import logging
import warnings
import pytest
import importlib
import subprocess
import numpy as np
warnings.filterwarnings("ignore")
import pandas as pd
from subprocess import call
from Bio import SeqIO
from collections import defaultdict
from inStrain._version import __version__
def estimate_cost(downfolder, upfolder):
'''
downfolder and upfolder can be a string, list, or none
'''
n2c = {}
for var, name in zip([downfolder, upfolder], ['down', 'up']):
if type(var) == type('test'):
cost = gb_to_cost(s3_folder_size(var))
elif type(var) == type([]):
cost = gb_to_cost(sum([s3_folder_size(x) for x in var]))
elif var is None:
cost = 0
n2c[name] = cost
downcost = n2c['down']
upcost = n2c['up']
print("$$$$$$$$$$$$$$\nIt cost {0:.2f}¢ to run this test ({1}¢ for download and {2}¢ for upload)\n$$$$$$$$$$$$$$".format(
downcost + upcost, downcost, upcost))
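# Illustrative usage of estimate_cost (the bucket paths below are assumptions for the
# example, not real test data): a single download folder plus a list of upload folders.
# estimate_cost('s3://example-bucket/inputs/',
#               ['s3://example-bucket/results/run1/', 's3://example-bucket/results/run2/'])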
def s3_folder_size(folder):
'''
return total size of objects in s3 folder in Gb
'''
bucket = folder.split('/')[2]
key = '/'.join(folder.split('/')[3:])
total_size = bytesto(sum([float(o['Size']) for o in get_matching_s3_objects(bucket, key)]), 'g')
return total_size
def bytesto(bytes, to, bsize=1024):
"""convert bytes to megabytes, etc.
sample code:
print('mb= ' + str(bytesto(314575262000000, 'm')))
sample output:
mb= 300002347.946
"""
a = {'k' : 1, 'm': 2, 'g' : 3, 't' : 4, 'p' : 5, 'e' : 6 }
r = float(bytes)
for i in range(a[to]):
r = r / bsize
return(r)
def gb_to_cost(gb):
'''
return cost in cents
'''
return gb * 0.09
def sync_test_data():
cmd = 'aws s3 sync /Users/mattolm/Programs/inStrain/test/docker_tests/s3_test_data/ s3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/'
subprocess.check_call(shlex.split(cmd))
#print(s3_folder_size('s3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/'))
def clear_s3_results():
s3 = boto3.resource('s3')
bucket = s3.Bucket('czbiohub-microbiome')
bucket.objects.filter(Prefix="Sonnenburg_Lab/Software/docker_testing/s3_results/").delete()
def load_s3_results():
"""
Return a list of the objects created during the run
"""
return [f for f in get_matching_s3_keys('czbiohub-microbiome', 'Sonnenburg_Lab/Software/docker_testing/s3_results/')]
def download_s3_results():
"""
Download the results from s3 and return the folder they're at
"""
out_loc = load_random_test_dir()
cmd = f'/Users/mattolm/miniconda3/envs/python3.7/bin/aws s3 sync s3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/s3_results/ {out_loc}'
subprocess.call(cmd, shell=True)
#subprocess.check_call(shlex.split(cmd))
return out_loc
def get_s3_results_folder():
return 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/s3_results/'
def load_random_test_dir():
loc = os.path.join(str(os.getcwd()), \
'test_backend/testdir/')
return loc
def get_credentials():
loc = os.path.expanduser('~/.aws/credentials')
return loc
def get_accessible_test_data():
loc = '/Users/mattolm/Programs/inStrain/test/docker_tests/accessible_test_data/'
# loc = os.path.join(str(os.getcwd()), \
# 'accessible_test_data/')
return loc
def read_s3_file(key, bucketname='czbiohub-microbiome'):
s3 = boto3.resource('s3')
obj = s3.Object(bucketname, key)
body = obj.get()['Body'].read()
return body.decode("utf-8").strip()
def get_matching_s3_objects(bucket, prefix="", suffix=""):
"""
Generate objects in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch objects whose key starts with
this prefix (optional).
:param suffix: Only fetch objects whose keys end with
this suffix (optional).
"""
s3 = boto3.client("s3")
paginator = s3.get_paginator("list_objects_v2")
kwargs = {'Bucket': bucket}
# We can pass the prefix directly to the S3 API. If the user has passed
# a tuple or list of prefixes, we go through them one by one.
if isinstance(prefix, str):
prefixes = (prefix, )
else:
prefixes = prefix
for key_prefix in prefixes:
kwargs["Prefix"] = key_prefix
for page in paginator.paginate(**kwargs):
try:
contents = page["Contents"]
except KeyError:
return
for obj in contents:
key = obj["Key"]
if key.endswith(suffix):
yield obj
def get_matching_s3_keys(bucket, prefix="", suffix=""):
"""
Generate the keys in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch keys that start with this prefix (optional).
:param suffix: Only fetch keys that end with this suffix (optional).
"""
for obj in get_matching_s3_objects(bucket, prefix, suffix):
yield obj["Key"]
def check_s3_file(floc):
'''
Return True if exists and False if it does not
'''
bucket = floc.split('/')[2]
prefix = '/'.join(floc.split('/')[3:])
found = False
for key in get_matching_s3_keys(bucket, prefix):
if prefix in key:
found = True
return found
def run_docker(image, cmd, simulate_aegea=True, overwrite_accessible=False):
'''
Load the image and run the command
'''
# Run the Docker
cred_loc = get_credentials()
test_loc = get_accessible_test_data()
output_loc = load_random_test_dir()
program_loc = '/Users/mattolm/Programs/inStrain/'
mt_loc = os.getcwd() + '/mnt'
if overwrite_accessible:
cmd = 'cp /root/accessible_testing_data/* ./;' + cmd
if simulate_aegea == True:
AEGEA_JUNK = ['/bin/bash', '-c', 'for i in "$@"; do eval "$i"; done; cd /', 'aegea.util.aws.batch', 'set -a', 'if [ -f /etc/environment ]; then source /etc/environment; fi', 'if [ -f /etc/default/locale ]; then source /etc/default/locale; else export LC_ALL=C.UTF-8 LANG=C.UTF-8; fi', 'export AWS_DEFAULT_REGION=us-west-2', 'set +a', 'if [ -f /etc/profile ]; then source /etc/profile; fi', 'set -euo pipefail', 'sed -i -e "s|/archive.ubuntu.com|/us-west-2.ec2.archive.ubuntu.com|g" /etc/apt/sources.list', 'apt-get update -qq', 'apt-get install -qqy --no-install-suggests --no-install-recommends httpie awscli jq lsof python3-virtualenv > /dev/null', 'python3 -m virtualenv -q --python=python3 /opt/aegea-venv', '/opt/aegea-venv/bin/pip install -q argcomplete requests boto3 tweak pyyaml', '/opt/aegea-venv/bin/pip install -q --no-deps aegea==3.4.3']
cmd = AEGEA_JUNK + [cmd]
elif simulate_aegea == 'semi':
AEGEA_JUNK = ['/bin/bash', '-c', 'for i in "$@"; do eval "$i"; done; cd /', 'aegea.util.aws.batch', 'set -a', 'if [ -f /etc/environment ]; then source /etc/environment; fi', 'if [ -f /etc/default/locale ]; then source /etc/default/locale; else export LC_ALL=C.UTF-8 LANG=C.UTF-8; fi', 'export AWS_DEFAULT_REGION=us-west-2', 'set +a', 'if [ -f /etc/profile ]; then source /etc/profile; fi', 'set -euo pipefail', 'sed -i -e "s|/archive.ubuntu.com|/us-west-2.ec2.archive.ubuntu.com|g" /etc/apt/sources.list']
cmd = AEGEA_JUNK + [cmd]
else:
quick_junk = ["/bin/bash", "-c", 'for i in "$@"; do eval "$i"; done; cd /', __name__]
cmd = quick_junk + [cmd]
BASE_CMD = shlex.split(f"docker run -v {program_loc}:/root/whole_program/ -v {cred_loc}:/root/.aws/credentials -v {test_loc}:/root/accessible_testing_data/ -v {output_loc}:/root/accessible_results/ {image}")
FULL_CMD = BASE_CMD + cmd
print(FULL_CMD)
print(' '.join(FULL_CMD))
call(FULL_CMD)
class TestingClass():
def __init__(self):
self.IMAGE = "mattolm/instrain:latest"
self.BAM_S3 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sorted.bam'
self.SAM_S3 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sam'
self.GENES_S3 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa.genes.fna'
self.STB_S3 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1.maxbin2.stb'
self.FASTA_S3 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa'
self.FASTA_S3_GZ = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa.gz'
self.GENOME_LIST = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/genomelist.txt'
self.IS_1 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.forRC.IS/'
self.IS_2 = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G2.forRC.IS/'
self.IS_FIF = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/is_locs.txt'
self.scafflist = 's3://czbiohub-microbiome/Sonnenburg_Lab/Software/docker_testing/test_data/scaffList.txt'
self.setup_cmd = ''
self.test_dir = load_random_test_dir()
def teardown(self):
if os.path.isdir(self.test_dir):
shutil.rmtree(self.test_dir)
os.mkdir(self.test_dir)
clear_s3_results()
importlib.reload(logging)
@pytest.fixture()
def DTO():
"""
Docker test object
This object makes no copies of anything; just has references and does setup / cleanup
"""
# Set up
self = TestingClass()
# ADJUST THIS IF YOU ARE DEVELOPING
self.setup_cmd = "./prepare.sh; conda activate work;"
#self.setup_cmd = "./prepare.sh; conda activate work; cp /root/accessible_testing_data/run_instrain.py /mnt/;"
#self.setup_cmd = "cp /root/accessible_testing_data/run_instrain.py /mnt/; ./prepare.sh; conda activate work; pushd /root/whole_program/; pip install . --upgrade; popd;"
#self.setup_cmd = "./prepare.sh; conda activate work; pip install instrain --upgrade;"
#self.setup_cmd = "./prepare.sh; conda activate work; echo \"hello\";"
self.aegea_simulation = True
if self.setup_cmd != "./prepare.sh; conda activate work;":
print("WANRING! YOURE RUNNING TESTS IN DEVELOPMENT MODE!")
self.teardown()
yield self
self.teardown()
@pytest.mark.skip(reason="This test often fails during development")
def test_docker_0(DTO):
'''
make sure dependencies are working; make sure the right version of inStrain is in there
'''
# Set up command
CMD = "inStrain profile --version > version.txt; aws s3 cp version.txt {0}/"\
.format(get_s3_results_folder())
CMD = DTO.setup_cmd + CMD
# Run command
run_docker(DTO.IMAGE, CMD, simulate_aegea=DTO.aegea_simulation)
# Upload results
output_files = load_s3_results()
reported_version = read_s3_file(output_files[0])
correct_version = "inStrain version {0}".format(__version__)
assert reported_version == correct_version, [correct_version, reported_version]
# Estimate cost
estimate_cost(None, get_s3_results_folder())
def test_docker_1(DTO):
'''
Full on basic test
'''
# Set up command
CMD = "./prepare.sh; conda activate work; pip install inStrain --upgrade; time ./run_instrain.py --bam {0} --fasta {1} --results_directory {2} --wd_name {3} --cmd_args='--skip_plot_generation'".format(DTO.BAM_S3, DTO.FASTA_S3, get_s3_results_folder(), 'test')
CMD = DTO.setup_cmd + CMD
# Run command
print(CMD)
run_docker(DTO.IMAGE, CMD, simulate_aegea=DTO.aegea_simulation)
# Set up intended output
OUTPUT = ['docker_log.log', 'log.log', 'test_genome_info.tsv', 'scaffold_2_mm_2_read_2_snvs.pickle']
# Estimate cost
dls = [DTO.BAM_S3, DTO.FASTA_S3, DTO.BAM_S3 + 'bai']
estimate_cost(dls, get_s3_results_folder())
output_files = load_s3_results()
basenames = [os.path.basename(b) for b in output_files]
for o in OUTPUT:
have = o in basenames
assert have, [o, basenames]
def test_docker_2(DTO):
'''
Test with .sam, genes, and .stb file
'''
# Set up command
CMD = "./prepare.sh; conda activate work; ls; ./run_instrain.py --bam {0} --fasta {1} --results_directory {2} --wd_name {3} --cmd_args='--skip_plot_generation' --genes {4} --stb {5}".format(DTO.SAM_S3, DTO.FASTA_S3, get_s3_results_folder(), 'test', DTO.GENES_S3, DTO.STB_S3)
CMD = DTO.setup_cmd + CMD
# Run command
run_docker(DTO.IMAGE, CMD, simulate_aegea=DTO.aegea_simulation)
# Set up intended output
OUTPUT = ['docker_log.log', 'log.log', 'test_gene_info.tsv', 'scaffold_2_mm_2_read_2_snvs.pickle']
# Estimate cost
dls = [DTO.BAM_S3, DTO.FASTA_S3, DTO.BAM_S3 + 'bai', DTO.GENES_S3, DTO.STB_S3]
estimate_cost(dls, get_s3_results_folder())
output_files = load_s3_results()
basenames = [os.path.basename(b) for b in output_files]
for o in OUTPUT:
have = o in basenames
assert have, [o, basenames]
def test_docker_3(DTO):
'''
Test the timeout functionality
'''
# Set up command
CMD = "./run_instrain.py --bam {0} --fasta {1} --results_directory {2} --wd_name {3} --timeout 5 --cmd_args='--skip_plot_generation'".format(DTO.BAM_S3, DTO.FASTA_S3, get_s3_results_folder(), 'test')
CMD = DTO.setup_cmd + CMD
# Run command
run_docker(DTO.IMAGE, CMD, simulate_aegea=DTO.aegea_simulation)
# Set up intended output
OUTPUT = ['docker_log.log', 'log.log']#,
the selected model.
occurrence_percent : float between [0, 1]
In how many percent of all initialization the word has to appear for printing. 1 means it has to appear in all and 0.5 in half of the initializations.
"""
model = self.models[model_id]
n_runs = len(model.samples[sample_id])
# print("Words per topic appearing at least in ",
# round(occurence_percent*100), "% of all runs(", n_runs, ").")
n_topics = 0
if model.source_lib == "sklearn":
n_topics = len(model.samples[sample_id][0].components_)
if model.source_lib == "gensim":
n_topics = model.samples[sample_id][0].num_topics
# Intersect each topic
for topic in range(n_topics):
word_list = []
for terms in model.topic_terms[sample_id]:
word_list.extend(terms[topic])
counter = Counter(word_list)
selected_words = filter(lambda x: counter[x] >= n_runs *
occurence_percent, counter)
print("Topic " + str(topic))
print_data = {}
for word in selected_words:
count = counter[word]
if count in print_data:
print_data[count].append(word)
else:
print_data[count] = [word]
for count, words in print_data.items():
print("In", count, "runs:", " ".join(words))
def display_run_topics(self, model_id, sample_id, initialization_id):
"""Print the top n words for each topic of a specific initialization.
Parameters
----------
model_id : int
The array index of the model.
sample_id : int
The array index of the sample within the selected model.
initialization_id : int
The array index of the initialization of respective sample and model.
"""
model = self.models[model_id]
sorted_topics = np.array(model.topic_terms[sample_id][initialization_id])[
model.topic_sorting_indices[sample_id][initialization_id]]
for topic_idx, topic in enumerate(sorted_topics):
print("Topic %d:" % (topic_idx))
print(" ".join(topic))
def _compute_param_combinations(self, params, n_samples):
"""Computes the sample parametrizations based on a sobol sequence.
Parameters
----------
params : object
The params object with the parameters and ranges.
n_sample : int
The number of samples taken from the parameter space.
Returns
----------
list
List of n_samples parameter combinations.
"""
seq = []
changing_params = list(
filter(lambda x: params[x]["mode"] != "fixed", params))
fixed_params = list(
filter(lambda x: params[x]["mode"] == "fixed", params))
for vec in sobol_seq.i4_sobol_generate(len(params), n_samples):
sample = {}
for i, name in enumerate(changing_params):
sample[name] = self._param_to_value(
params[name], vec[i])
for name in fixed_params:
sample[name] = params[name]["values"]
seq.append(sample)
return seq
def _param_to_value(self, param, sampling):
"""Map the sampling values to parameter values."""
if param["mode"] == "range":
return self._range_to_value(param["values"], sampling, param["type"])
if param["mode"] == "list":
return self._list_to_value(param["values"], sampling, param["type"])
@staticmethod
def _range_to_value(p_range, sampling, p_type):
value = p_range[0] + (p_range[1] - p_range[0]) * sampling
return int(value) if p_type is int else value
@staticmethod
def _list_to_value(p_values, sampling, p_type):
return p_values[min(math.floor(sampling*len(p_values)), len(p_values)-1)]
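# Illustrative sketch of the sampling-to-value mapping above (the parameter specs are
# assumptions for the example): a "range" parameter maps a Sobol draw in [0, 1] linearly
# onto [low, high], while a "list" parameter picks the bucket the draw falls into.
#   _range_to_value([5, 25], 0.5, int)            -> 15
#   _list_to_value([0.1, 0.5, 0.9], 0.7, float)   -> 0.9   (index floor(0.7 * 3) = 2)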
def _topic_matching(self, n_topics, model, sample_id, terms, term_distributions, ranking_vecs):
"""Compute the coherence scores for each topic."""
# print("Topic Matching for each Initialization")
run_coherences = []
run_sort_indices = []
# print("Compute Topic Coherence:", end="")
for run_number in range(model.n_initializations):
# print(model.n_initializations-run_number, end="")
topic_terms = model.topic_terms[sample_id][run_number]
run_coherences.append(self.compute_tcw2c(n_topics, topic_terms))
best_run = run_coherences.index(max(run_coherences))
reference_topic = terms[best_run]
# print("")
# print("Match Topics:", end="")
# Create mapping for all topics across all runs
for run in range(model.n_initializations):
# print(model.n_initializations-run, end="")
topics = np.concatenate((reference_topic, terms[
run, :, :]), axis=0)
sim = squareform(pdist(topics, self._jaccard_similarity))[
:n_topics, n_topics:]
run_mapping = []
# Map reference topics to run topics based on highest similarity
for topic in range(n_topics):
# [1] is the reference topic index and [0] is the other index
first_highest_index = np.argwhere(sim == sim.max())[0]
run_mapping.append(
[first_highest_index[1], first_highest_index[0]])
# Delete topic with highest value
sim[:, first_highest_index[1]] = -1
sim[first_highest_index[0], :] = -1
run_mapping.sort()
sort_indices = np.array(run_mapping)[:, 1]
run_sort_indices.append(sort_indices)
# Sort all runs
terms[run] = terms[run, sort_indices]
term_distributions[run] = term_distributions[run, sort_indices]
ranking_vecs[run] = ranking_vecs[run, sort_indices]
return np.array(run_coherences), run_sort_indices
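# Toy sketch of the greedy matching above (numbers are made up): with a 2x2 similarity
# matrix between the reference topics and one run's topics, say [[0.8, 0.1], [0.3, 0.6]],
# the largest cell (0.8) pairs the first pair of topics; its row and column are then
# masked with -1, so the next-largest remaining cell (0.6) pairs the second pair.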
# Ideas from here: https://github.com/derekgreene/topic-model-tutorial/blob/master/3%20-%20Parameter%20Selection%20for%20NMF.ipynb
def compute_tcw2c(self, n_topics, topic_terms, max_terms=5):
"""Compute coherence score based on wordvector similarities.
Performance is a huge issue in this method. Be careful!
Example:
# max_terms=5 results in roughly 0.75s per topic
# max_terms=10 results in roughly 4 times the time
# max_terms=20 results in roughly 15 times the time
"""
total_coherence = []
for topic in range(n_topics):
pairs = []
processed_topic_terms = (self.nlp(str(t))
for t in topic_terms[topic][:max_terms])
for pair in combinations(processed_topic_terms, 2):
tokens = pair
if tokens[0].has_vector and tokens[1].has_vector:
pairs.append(tokens[0].similarity(tokens[1]))
else:
print("One of", tokens, "has no vector representation.")
total_coherence.append(sum(pairs) / len(pairs))
return sum(total_coherence) / n_topics
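# Rough cost note for compute_tcw2c (back-of-the-envelope, not a benchmark): each topic
# contributes C(max_terms, 2) pairwise similarity checks, i.e. 10 pairs at max_terms=5,
# 45 at 10 and 190 at 20, which is broadly consistent with the timings in the docstring.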
def _compute_topic_stability(self):
"""Computes the stability for each model."""
# print("Evaluate Models")
for model_id, model in enumerate(self.models):
# print("Model: ", model.topic_model_class)
self._fetch_top_terms(model, 20)
model_distributions = self._fetch_term_distributions(model)
all_ranking_vecs = self._create_ranking_vectors(model)
sample_sorting_indices = []
for sample_id, sample in enumerate(model.samples):
# print("Sample", sample_id+1, "of",
# len(model.samples), " Samples")
n_topics = 0
if model.source_lib == "sklearn":
n_topics = sample[0].n_components
if model.source_lib == "gensim":
n_topics = sample[0].num_topics
terms = model.topic_terms[sample_id]
term_distributions = model_distributions[sample_id]
ranking_vecs = all_ranking_vecs[sample_id]
kendalls = []
jensen = []
jaccard = []
report = {}
report_full = {}
run_coherence, sort_indices = self._topic_matching(
n_topics, model, sample_id, terms, term_distributions, ranking_vecs)
sample_sorting_indices.append(sort_indices)
# Evaluate each topic
for topic in range(n_topics):
sim = pdist(terms[
:, topic, :], self._jaccard_similarity)
jaccard.append(sim)
jen = pdist(term_distributions[
:, topic, :], self._jenson_similarity)
jensen.append(jen)
ken = pdist(ranking_vecs[
:, topic, :], self._kendalls)
kendalls.append(ken)
kendalls_ranking = np.array(kendalls)
jaccard_similarity = np.array(jaccard)
jensen_similarity = np.array(jensen)
report["model"] = model.topic_model_class
report["model_id"] = model_id
report["sample_id"] = sample_id
report["n_topics"] = n_topics
report["params"] = model.sampling_parameters[sample_id]
report["topic_coherence"] = run_coherence.mean()
report["jaccard"] = jaccard_similarity.mean()
report["kendalltau"] = kendalls_ranking.mean()
report["jensenshannon"] = jensen_similarity.mean()
report_full["model"] = model.topic_model_class
report_full["model_id"] = model_id
report_full["sample_id"] = sample_id
report_full["n_topics"] = n_topics
report_full["params"] = model.sampling_parameters[sample_id]
report_full["topic_coherence"] = {
"topic_coherences": run_coherence,
}
report_full["jaccard"] = {
"mean": jaccard_similarity.mean(axis=1),
"std": jaccard_similarity.std(axis=1),
"min": jaccard_similarity.min(axis=1),
"max": jaccard_similarity.max(axis=1),
}
report_full["kendalltau"] = {
"mean": kendalls_ranking.mean(axis=1),
"std": kendalls_ranking.std(axis=1),
"min": kendalls_ranking.min(axis=1),
"max": kendalls_ranking.max(axis=1),
}
report_full["jensenshannon"] = {
"mean": jensen_similarity.mean(axis=1),
"std": jensen_similarity.std(axis=1),
"min": jensen_similarity.min(axis=1),
"max": jensen_similarity.max(axis=1),
}
top_terms = {}
for i, t in enumerate(terms):
top_terms['instance_'+str(i)] = t
report_full["top_words"] = top_terms
model.report.append(report)
model.report_full.append(report_full)
# print("")
model.topic_sorting_indices = sample_sorting_indices
@staticmethod
def _linear_combination_of_reports(weights, report):
"""Compute the linear combination for ranking."""
total_weight = 0
combination = 0
for property, weight in weights.items():
total_weight += weight
combination += weight * report[property]
return combination / total_weight
def _fetch_top_terms(self, model, n_top_terms):
"""Get the top n terms for each topic of the selected model."""
model_terms = []
for sample in model.samples:
terms = []
for instance in sample:
if model.source_lib == "sklearn":
top_terms = self._get_top_terms(
model, instance, n_top_terms)
terms.append(top_terms)
if model.source_lib == "gensim":
top_terms = []
for topic_id in range(instance.num_topics):
top_terms.append([model.word_mapping[x[0]] for x in instance.get_topic_terms(
topic_id, n_top_terms)])
terms.append(top_terms)
model_terms.append(np.array(terms))
model.topic_terms = model_terms
def _fetch_term_distributions(self, model):
"""Get the topic distributions for all topics of the selected model."""
model_distributions = []
for sample in model.samples:
term_distributions = []
for instance in sample:
if model.source_lib == "sklearn":
term_distributions.append(
instance.components_ / instance.components_.sum(axis=1)[:, np.newaxis])
if model.source_lib == "gensim":
term_distributions.append(instance.get_topics())
model_distributions.append(np.array(term_distributions))
return model_distributions
def _create_ranking_vectors(self, model):
"""Create the ranking vectors based on the top terms."""
vocab = set()
sample_terms = []
ranking_vecs = []
for sample in model.samples:
terms = []
for instance in sample:
if model.source_lib == "sklearn":
top_terms = self._get_top_terms(model,
instance, self.n_relevant_top_words)
terms.append(top_terms)
vocab.update([e for l in top_terms for e in l])
if model.source_lib == "gensim":
top_terms = []
for topic_id in range(instance.num_topics):
top_terms.append([x[0] for x in instance.get_topic_terms(
topic_id, self.n_relevant_top_words)])
terms.append(top_terms)
vocab.update([e for l in top_terms for e in l])
sample_terms.append(terms)
vocab_vec = list(vocab)
for sample in sample_terms:
rankings = []
for model_terms in sample:
rankings.append([self._terms_to_ranking(t, vocab_vec)
for t in model_terms])
ranking_vecs.append(np.array(rankings))
return ranking_vecs
@staticmethod
def _jaccard_similarity(a, b):
"""Compute the jaccard similarity between two lists."""
sa = set(a)
sb = set(b)
return len(sa.intersection(sb))/len(sa.union(sb))
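# Example (hypothetical term lists): _jaccard_similarity(['a', 'b', 'c'], ['b', 'c', 'd'])
# has intersection {'b', 'c'} and union {'a', 'b', 'c', 'd'}, giving 2 / 4 = 0.5.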
@staticmethod
def _kendalls(a, b):
"""Compute the rank correlation based on kedalls tau."""
k, _ = kendalltau(a, b)
return k
@staticmethod
def _jenson_similarity(a, b):
"""Compute the correlation between two distributions."""
# Added rounding because without often inf was the result
# Usage of base 2 algorithm so that the range is [0, 1]
distance = jensenshannon(a.round(12), b.round(12), base=2)
return 1 - distance
@staticmethod
def _terms_to_ranking(terms, vocab):
"""Convertthe vocab set to the ranking base vector."""
vec = []
for e in vocab:
if e in terms:
vec.append(terms.index(e))
else:
vec.append(len(vocab))
return vec
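# Example (hypothetical inputs): with vocab ['a', 'b', 'c'] and terms ['c', 'a'],
# _terms_to_ranking returns [1, 3, 0]: 'a' ranks 1, 'b' is absent so it gets
# len(vocab) = 3, and 'c' ranks 0.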
@staticmethod
def _get_top_terms(model, instance, n_terms):
"""Get top n terms for sklearn models."""
feature_names = model.word_mapping.get_feature_names()
topic_terms = []
for topic in instance.components_:
topic_terms.append([feature_names[i]
for i in topic.argsort()[:-n_terms - 1:-1]])
return topic_terms
class TopicModel():
"""A helper class for the robustTopics class."""
topic_sorting_indices = []
def __init__(self, source_lib, topic_model_class, data, word_mapping, parameters, sampling_parameters, n_samples, n_initializations, samples, topic_terms, | |
import re
from itertools import count
from .tools import process_path
_conversions = {'atomicint': 'counter',
'str': 'text',
'bool': 'boolean',
'decimal': 'decimal',
'float': 'float',
'int': 'int',
'tuple': 'tuple',
'list': 'list',
'generator': 'list',
'frozenset': 'set',
'set': 'set',
'dict': 'map',
'long': 'bigint',
'buffer': 'blob',
'bytearray': 'blob',
'counter': 'counter',
'double': 'double',
'StorageDict': 'dict',
'ndarray': 'hecuba.hnumpy.StorageNumpy',
'numpy.ndarray': 'hecuba.hnumpy.StorageNumpy',
'date': 'date',
'time': 'time',
'datetime': 'timestamp'}
class Parser(object):
args_names = ["type_parser"]
split_dtypes_regex = re.compile('^(tuple|set)<(.*)>$')
def _append_values_to_list_after_replace(self, vals):
"""
Receives a list of data types. Strips the outermost data type.
Returns:
typev: list of the outer data types, with the keyword "simple" if not found
finalvars: list of the corresponding internal data types
"""
typev = []
finalvars = []
for var in vals:
res = self.split_dtypes_regex.search(var)
if res:
typev.append(res.group(1))
finalvars.append(res.group(2))
else:
typev.append("simple")
finalvars.append(var)
return typev, finalvars
def _get_elements(self, s):
"""
Args:
s is a string with a type specification containing one or more types
Returns a list of type_specifications
Example:
k1:tuple<int,int>,k2:tuple<int,str>,str
-->
'k1:tuple<int,int>' 'k2:tuple<int,str>' 'str'
"""
elements=[]
n_brackets = 0
pos = 0
lastpos = 0
for pos, c in enumerate(s):
if c == '<':
n_brackets = n_brackets + 1
elif c == '>':
n_brackets = n_brackets - 1
elif c == ',':
if n_brackets == 0: # a new element found
elements.append( s[lastpos:pos] )
lastpos = pos + 1 # skip ','
if lastpos < pos: #add last element
elements.append( s[lastpos:] )
return elements
def _get_name_and_type(self, k):
"""
Args:
k is a string with a single type specification "name:value"
Return:
name and type, or None and type if ":" is not present
Raises Syntax Error in cases: "n:", ":v" , ":"
"""
s = k.split(":")
if len(s) == 2: # case "name:value"
if len(s[0]) > 0 and len(s[1]) > 0:
return s
elif len(s) == 1: # case "value" only
if len(s[0]) > 0: # case ":"
return None, s[0]
raise SyntaxError("Error parsing Type Specification. Trying to parse: '{}'".format(k))
def _get_str_primary_keys_values(self, pk):
"""
Args:
pk is a string with a dict specification "dict<<key_specification>, value_specification>"
Return:
Six lists:
- keys' names,
- values' names,
- keys' types (simple, tuple or set),
- values' types (simple, tuple or set),
- keys' types (int, float, ...),
- values' types (int, float, ...),
"""
pk = pk.replace("dict", "", 1).strip()
# Find point to split keys from values
n_brackets = 0
pos = 0
for pos, c in enumerate(pk):
if c == '<':
n_brackets = n_brackets + 1
elif c == '>':
n_brackets = n_brackets - 1
if n_brackets == 1:
break
keys = pk[2:pos]
values = pk[pos + 2:len(pk) - 1]
if not keys:
raise SyntaxError("Can't detect the keys in the TypeSpec")
# We get the variables
keyList = self._get_elements(keys)
valueList = self._get_elements(values)
# Parse Keys...
keyNamesList = []
keyTypesList = []
for i,k in enumerate(keyList):
myname,mytype = self._get_name_and_type(k)
if not myname: # Generate name "key_0","key_1",...,"key_N"
myname = "key_" + str(i)
keyNamesList.append(myname)
keyTypesList.append(mytype)
# Parse Values...
valueNamesList = []
valueTypesList = []
offset = len(keyNamesList)
for i,v in enumerate(valueList):
myname,mytype = self._get_name_and_type(v)
if not myname: # Generate name "val_N+1","valN+2",...
myname = "val_" + str(i + offset)
valueNamesList.append(myname)
valueTypesList.append(mytype)
# for each type we store if its a 'simple' or a 'tuple/set' type
# (finalvarksk and finalvarsv)
# and for 'set' or 'tuple' types, the type specification is replaced by
# the type of its elements (typek and typev)
#TODO: review if this can be improved
typevk, finalvarsk = self._append_values_to_list_after_replace(keyTypesList)
typevv, finalvarsv = self._append_values_to_list_after_replace(valueTypesList)
return keyNamesList, valueNamesList, finalvarsk, finalvarsv, typevk, typevv
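# Worked example (assumed TypeSpec string, mirroring the docstring): for
#   pk = "dict<<k1:int,k2:str>,tuple<int,float>>"
# this returns roughly
#   (['k1', 'k2'], ['val_2'], ['int', 'str'], ['int,float'],
#    ['simple', 'simple'], ['tuple'])
# i.e. key names, the generated value name, key/value inner types, and their outer kinds.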
def _set_or_tuple(self, type, pk_col, t, t1):
string_str = ""
t = t.split(',')
converted_primary_keys = ", ".join([_conversions.get(w, w) for w in t])
converted_primary_keys = converted_primary_keys.split(',')
converted_primary_keys = [w.replace(' ', '') for w in converted_primary_keys]
aux_list = [] # stores ((var_1, val),(var_2, val),...)
if len(converted_primary_keys) > 1:
counter = count(0)
for type_val in converted_primary_keys:
if type == "set":
aux_list.append((t1 + '_' + str(next(counter)), type_val))
else:
aux_list.append(type_val)
# string_str = ',{"name": "%s", "type": "%s", "%s": ["%s"]}' % (t1, type, pk_col, '","'.join(aux_list))
string_str = ',{"name": "%s", "type": "%s", "%s": %s}' % (t1, type, pk_col, aux_list)
else:
aux_list.append((t1, converted_primary_keys[0]))
string_str = ',{"name": "%s", "type": "%s", "%s": %s}' % (t1, type, pk_col, aux_list)
return string_str
def _get_dict_str(self, varsk, cleank, typek):
concatenated_keys = ""
values = ""
string_str = ""
for t, t1, t2 in zip(cleank, varsk, typek): # first keys
if t2 == 'set':
string_str = self._set_or_tuple('set', 'columns', t, t1)
elif t2 == 'tuple':
string_str = self._set_or_tuple('tuple', 'columns', t, t1)
else:
if t not in _conversions:
route = t
cname, module = process_path(route)
try:
mod = __import__(module, globals(), locals(), [cname], 0)
except (ImportError, ValueError) as ex:
if cname in _conversions:
raise Exception("Error parsing the TypeSpec. Maybe you forgot a comma between the columns.")
raise ImportError("Can't import class {} from module {}".format(cname, module))
string_str = ',("%s", "%s")' % (t1, t)
else:
type = _conversions[t]
string_str = ',("%s", "%s")' % (t1, type)
concatenated_keys = concatenated_keys + string_str
concatenated_keys = concatenated_keys[1:]
return concatenated_keys
def _parse_dict(self, line, this):
split_line = line.split()
if len(split_line) == 2:
pk = split_line[1]
table = None
else:
pk = split_line[2]
table = split_line[1]
varsk, varsv, cleank, cleanv, typek, typevv = self._get_str_primary_keys_values(pk)
pks = self._get_dict_str(varsk, cleank, typek)
values = self._get_dict_str(varsv, cleanv, typevv)
if table == None:
final_dict = '{"primary_keys": [%s], "columns": [%s], "type": "StorageDict"}' % (pks, values)
else:
final_dict = '{"%s": {"primary_keys": [%s], "columns": [%s], "type": "StorageDict"}}' % (table, pks, values)
final_dict = eval(final_dict)
aux = '{"primary_keys": [%s], "columns": [%s], "type": "StorageDict"}' % (pks, values)
if table in this:
this[table].update(eval(aux))
return this
return final_dict
def _parse_set_or_tuple(self, type, line, pk_or_col, this):
split_line = line.split()
table = split_line[1]
line = re.sub('[<>, ]', ' ', split_line[2].replace(str(type), ""))
primary_keys = line.split()
converted_primary_keys = ", ".join([_conversions.get(w, w) for w in primary_keys])
if len(primary_keys) == 1:
string_str = '{"%s":{"%s": "%s","type": "%s"}}' % (table, pk_or_col, converted_primary_keys, str(type))
final_string = eval(string_str)
aux = '{"%s": "%s","type": "%s"}' % (pk_or_col, converted_primary_keys, str(type))
else:
string_str = '{"%s":{"%s": "%s","type": "%s"}}' % (table, pk_or_col, converted_primary_keys, str(type))
final_string = eval(string_str)
aux = '{"%s": {"%s"},"type": "%s"}' % (pk_or_col, converted_primary_keys, str(type))
if table in this:
this[table].update(eval(aux))
return this
return final_string
def _parse_index(self, line, this):
'''Def: parses index declaration, checking for the introduced vars.
Returns: a dict structure with the parsed dict.'''
if self.type_parser == "TypeSpec":
table = "indexed_on"
atributes = line.split(' ', 2)
atributes = atributes[1].replace(" ", '')
else:
table = line.split()[1]
atributes = line.split(' ', 2)
atributes = atributes[2].replace(" ", '')
atributes = atributes.split(',')
converted_atributes = ", ".join([_conversions.get(w, w) for w in atributes])
converted_atributes = converted_atributes.split(',')
converted_atributes = [w.replace(" ", "") for w in converted_atributes]
if self.type_parser == "TypeSpec":
this[table] = converted_atributes
else:
if table in this:
this[table].update({'indexed_on': converted_atributes})
else:
this[table] = {'indexed_on': converted_atributes}
return this
def _parse_file(self, line, new):
'''Def: Checks if the file declaration is correct.
Returns: the file declaration with a dict structure'''
line = line.split(" ")
output = {}
table_name = line[1]
route = line[2]
cname, module = process_path(route)
try:
mod = __import__(module, globals(), locals(), [cname], 0)
except (ImportError, ValueError) as ex:
raise ImportError("Can't import class {} from module {}".format(cname, module))
output["type"] = str(route)
if table_name in new:
new[table_name].update(output)
else:
new[table_name] = output
return new
def _parse_set_tuple_list(self, line, this):
if line.count('set') > 0:
return self._parse_set_or_tuple('set', line, 'primary_keys', this)
elif line.count('tuple') > 0:
return self._parse_set_or_tuple('tuple', line, 'columns', this)
elif line.count('list') > 0:
return self._parse_set_or_tuple('list', line, 'columns', this)
def _parse_simple(self, line, this):
split_line = line.split()
table = split_line[1]
try:
type = _conversions[split_line[2]]
except KeyError as ex:
raise Exception(f"Type '{split_line[2]}' not identified.")
simple = '{"%s":{"type":"%s"}}' % (table, type)
simple = eval(simple)
if table in this:
this[table].update(simple)
return simple
def _input_type(self, line, this):
if line.count('<') == 1: # is tuple, set, list
aux = self._parse_set_tuple_list(line, this)
elif line.count('<') == 0 and line.count('Index_on') == 0 and line.count('.') == 0 or (
line.count('numpy.ndarray') and line.count(' dict') == 0): # is simple type
aux = self._parse_simple(line, this)
elif line.count('Index_on') == 1:
aux = self._parse_index(line, this)
elif line.count('.') > 0
# ##############################################################################
# This file is part of df_config #
# #
# Copyright (C) 2020 <NAME> <<EMAIL>> #
# All Rights Reserved #
# #
# You may use, distribute and modify this code under the #
# terms of the (BSD-like) CeCILL-B license. #
# #
# You should have received a copy of the CeCILL-B license with #
# this file. If not, please visit: #
# https://cecill.info/licences/Licence_CeCILL-B_V1-en.txt (English) #
# or https://cecill.info/licences/Licence_CeCILL-B_V1-fr.txt (French) #
# #
# ##############################################################################
"""Default values for all Django settings
======================================
Define settings for deploying most of df_config-based websites or for running them in `DEBUG` mode.
Most of them are used by Django, some of them by common third-party packages and the other ones are
used by DjangoFloor.
df_config also allows references between settings: for example, you only define `SERVER_BASE_URL`
(like 'https://www.example.com/site/' ) and `SERVER_NAME` ('www.example.com'), `SERVER_PORT` ('443'),
`USE_SSL` ('True'), `SERVER_PROTOCOL` ('https') and `URL_PREFIX` ('/site/') are deduced.
These settings are defined in :mod:`df_config.config.defaults`.
Settings that should be customized on each installation (like the server name or the database password) can be
written in .ini files. The mapping between the Python setting and the [section/option] system is defined in
:mod:`df_config.iniconf`.
.. literalinclude:: ../../../../../df_config/config/defaults.py
:language: python
:lines: 41-1000
"""
import os
from df_config.guesses.djt import guess_djt_panels
from django.utils.translation import gettext_lazy as _
# ######################################################################################################################
#
# detect if some external packages are available, to automatically customize some settings
#
# ######################################################################################################################
from df_config.config.dynamic_settings import (
AutocreateFile,
CallableSetting,
Directory,
ExpandIterable,
Path,
SettingReference,
)
from df_config.guesses.apps import allauth_provider_apps, installed_apps, middlewares
from df_config.guesses.auth import (
authentication_backends,
ldap_attribute_map,
ldap_boolean_attribute_map,
ldap_group_class,
ldap_group_search,
ldap_user_search,
CookieName,
)
from df_config.guesses.databases import (
cache_redis_url,
cache_setting,
celery_broker_url,
databases,
session_redis_dict,
websocket_redis_dict,
websocket_redis_channels, celery_result_url,
)
from df_config.guesses.log import log_configuration
from df_config.guesses.misc import (
DefaultListenAddress,
allowed_hosts,
excluded_django_commands,
project_name,
required_packages,
secure_hsts_seconds,
smart_hostname,
template_setting,
url_parse_prefix,
url_parse_server_name,
url_parse_server_port,
url_parse_server_protocol,
url_parse_ssl,
use_x_forwarded_for,
AutocreateSecretKey,
get_asgi_application,
get_wsgi_application,
use_sentry,
web_server, csp_connect,
)
from df_config.guesses.pipeline import (
pipeline_compilers,
pipeline_css_compressor,
pipeline_js_compressor,
)
from df_config.guesses.staticfiles import (
pipeline_enabled,
static_finder,
static_storage,
)
from df_config.utils import guess_version, is_package_present
USE_CELERY = is_package_present("celery")
USE_REDIS_SESSIONS = is_package_present("redis_sessions")
USE_PIPELINE = is_package_present("pipeline")
USE_DEBUG_TOOLBAR = is_package_present("debug_toolbar")
USE_ALL_AUTH = is_package_present("allauth")
USE_WEBSOCKETS = is_package_present("df_websockets")
USE_SITE = is_package_present("df_site")
USE_WHITENOISE = is_package_present("whitenoise")
USE_CSP = is_package_present("csp")
# ######################################################################################################################
#
# settings that could be kept as-is for most projects
# of course, you can override them in your default settings
#
# ######################################################################################################################
ADMINS = (("admin", "{ADMIN_EMAIL}"),)
ALLOWED_HOSTS = CallableSetting(allowed_hosts)
CACHE_URL = CallableSetting(cache_redis_url)
CACHES = CallableSetting(cache_setting)
CSRF_COOKIE_DOMAIN = "{SERVER_NAME}"
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_NAME = CallableSetting(CookieName("csrftoken"))
CSRF_COOKIE_SAMESITE = "Lax"
CSRF_COOKIE_SECURE = SettingReference("USE_SSL")
CSRF_TRUSTED_ORIGINS = ["{SERVER_BASE_URL}", "{SERVER_NAME}", "{SERVER_NAME}:{SERVER_PORT}"]
DATABASES = CallableSetting(databases)
DEBUG = False
# you should create a "local_settings.py" with "DEBUG = True" at the root of your project
DEVELOPMENT = True
# display all commands (like "migrate" or "runserver") in manage.py
# if False, development-specific commands are hidden
DEFAULT_FROM_EMAIL = "webmaster@{SERVER_NAME}"
FILE_UPLOAD_TEMP_DIR = Directory("{LOCAL_PATH}/tmp-uploads")
INSTALLED_APPS = CallableSetting(installed_apps)
LANGUAGE_COOKIE_NAME = CallableSetting(CookieName("django_language"))
LANGUAGE_COOKIE_DOMAIN = "{SERVER_NAME}"
LANGUAGE_COOKIE_SAMESITE = "Lax"
LANGUAGE_COOKIE_SECURE = SettingReference("USE_SSL")
LOGGING = CallableSetting(log_configuration)
MANAGERS = SettingReference("ADMINS")
MEDIA_ROOT = Directory("{LOCAL_PATH}/media", mode=0o755)
MEDIA_URL = "/media/"
MIDDLEWARE = CallableSetting(middlewares)
ROOT_URLCONF = "df_config.root_urls"
SECRET_KEY = AutocreateSecretKey("{LOCAL_PATH}/secret_key.txt")
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = SettingReference("USE_SSL")
SECURE_HSTS_PRELOAD = SettingReference("USE_SSL")
SECURE_HSTS_SECONDS = CallableSetting(secure_hsts_seconds)
SECURE_PROXY_SSL_HEADER = (
"HTTP_X_FORWARDED_PROTO",
"https",
) # X-Forwarded-Proto or None
SECURE_SSL_REDIRECT = SettingReference("USE_SSL")
SECURE_FRAME_DENY = SettingReference("USE_SSL")
SERVER_EMAIL = "{ADMIN_EMAIL}"
SESSION_COOKIE_AGE = 1209600
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_NAME = CallableSetting(CookieName("sessionid"))
SESSION_COOKIE_DOMAIN = "{SERVER_NAME}"
SESSION_COOKIE_SAMESITE = "Lax"
SESSION_COOKIE_SECURE = SettingReference("USE_SSL")
TEMPLATES = CallableSetting(template_setting)
TEMPLATE_DEBUG = SettingReference("DEBUG")
TEMPLATE_DIRS = ()
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"df_config.context_processors.config",
ExpandIterable("DF_TEMPLATE_CONTEXT_PROCESSORS"),
]
TEST_RUNNER = "django.test.runner.DiscoverRunner"
USE_I18N = True
USE_L10N = True
USE_THOUSAND_SEPARATOR = True
USE_TZ = True
USE_X_FORWARDED_HOST = True # X-Forwarded-Host
X_FRAME_OPTIONS = "SAMEORIGIN"
WSGI_APPLICATION = CallableSetting(get_wsgi_application)
# django.contrib.auth
AUTHENTICATION_BACKENDS = CallableSetting(authentication_backends)
LOGIN_URL = "/admin/login/"
LOGIN_REDIRECT_URL = "{URL_PREFIX}"
# LOGOUT_REDIRECT_URL = '{URL_PREFIX}'
# django.contrib.sessions
SESSION_ENGINE = "django.contrib.sessions.backends.db"
if USE_REDIS_SESSIONS:
SESSION_ENGINE = "redis_sessions.session"
SESSION_COOKIE_SECURE = SettingReference("USE_SSL")
CSRF_COOKIE_SECURE = SettingReference("USE_SSL")
# django.contrib.sites
SITE_ID = 1
# django.contrib.staticfiles
STATIC_ROOT = Directory("{LOCAL_PATH}/static", mode=0o755)
STATIC_URL = "/static/"
STATICFILES_STORAGE = CallableSetting(static_storage)
STATICFILES_FINDERS = CallableSetting(static_finder)
# celery
BROKER_URL = CallableSetting(celery_broker_url)
CELERY_DEFAULT_QUEUE = "celery"
CELERY_TIMEZONE = "{TIME_ZONE}"
CELERY_RESULT_EXCHANGE = "{DF_MODULE_NAME}_results"
CELERY_RESULT_BACKEND = CallableSetting(celery_result_url)
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ["json", "yaml", "msgpack"]
CELERY_APP = "df_websockets"
CELERY_CREATE_DIRS = True
CELERY_TASK_SERIALIZER = "json"
# df_config
DF_PROJECT_VERSION = CallableSetting(guess_version)
DATA_PATH = Directory("{LOCAL_PATH}/data")
SERVER_NAME = CallableSetting(url_parse_server_name) # ~ www.example.org
SERVER_PORT = CallableSetting(url_parse_server_port) # ~ 443
SERVER_PROTOCOL = CallableSetting(url_parse_server_protocol) # ~ "https"
URL_PREFIX = CallableSetting(url_parse_prefix)
# something like "/prefix/" (but probably just "/")
USE_SSL = CallableSetting(url_parse_ssl) # ~ True
USE_X_SEND_FILE = False # Apache module
X_ACCEL_REDIRECT = [] # paths used by nginx
USE_HTTP_BASIC_AUTH = False # HTTP-Authorization
USE_X_FORWARDED_FOR = CallableSetting(use_x_forwarded_for) # X-Forwarded-For
DF_FAKE_AUTHENTICATION_USERNAME = None
DF_ALLOW_USER_CREATION = True
DF_SERVER = CallableSetting(
web_server
) # must be "gunicorn" or "daphne" / used by the server command
DF_REMOVED_DJANGO_COMMANDS = CallableSetting(excluded_django_commands)
DF_ALLOW_LOCAL_USERS = True
DF_CHECKED_REQUIREMENTS = CallableSetting(required_packages)
# df_websockets
WEBSOCKET_URL = "/ws/" # set to None if you do not use websockets
# by default, use the same Redis as django-channels
WEBSOCKET_REDIS_CONNECTION = CallableSetting(websocket_redis_dict)
WEBSOCKET_SIGNAL_DECODER = "json.JSONDecoder"
WEBSOCKET_TOPIC_SERIALIZER = "df_websockets.topics.serialize_topic"
WEBSOCKET_SIGNAL_ENCODER = "django.core.serializers.json.DjangoJSONEncoder"
WEBSOCKET_REDIS_PREFIX = "ws"
WEBSOCKET_REDIS_EXPIRE = 36000
WINDOW_INFO_MIDDLEWARES = [
"df_websockets.ws_middleware.WindowKeyMiddleware",
"df_websockets.ws_middleware.DjangoAuthMiddleware",
"df_websockets.ws_middleware.Djangoi18nMiddleware",
"df_websockets.ws_middleware.BrowserMiddleware",
]
ASGI_APPLICATION = CallableSetting(get_asgi_application)
# django-channels
# noinspection PyUnresolvedReferences
CHANNEL_REDIS = CallableSetting(websocket_redis_channels)
CHANNEL_LAYERS = {"default": SettingReference("CHANNEL_REDIS")}
# django-pipeline
PIPELINE = {
"PIPELINE_ENABLED": SettingReference("PIPELINE_ENABLED"),
"JAVASCRIPT": SettingReference("PIPELINE_JS"),
"STYLESHEETS": SettingReference("PIPELINE_CSS"),
"CSS_COMPRESSOR": SettingReference("PIPELINE_CSS_COMPRESSOR"),
"JS_COMPRESSOR": SettingReference("PIPELINE_JS_COMPRESSOR"),
"COMPILERS": SettingReference("PIPELINE_COMPILERS"),
}
PIPELINE_COMPILERS = CallableSetting(pipeline_compilers)
PIPELINE_CSS_COMPRESSOR = CallableSetting(pipeline_css_compressor)
PIPELINE_JS_COMPRESSOR = CallableSetting(pipeline_js_compressor)
PIPELINE_CSS = {
"django.admin": {
"source_filenames": ["admin/css/base.css", "admin/css/responsive.css"],
"output_filename": "css/django-admin.css",
"extra_context": {"media": "all"},
},
"default": {
"source_filenames": [ExpandIterable("DF_CSS")],
"output_filename": "css/default.css",
"extra_context": {"media": "all"},
},
}
PIPELINE_ENABLED = CallableSetting(pipeline_enabled)
PIPELINE_JS = {
"django.admin": {
"source_filenames": [],
"output_filename": "js/django-admin.js",
"integrity": "sha384",
"crossorigin": "anonymous",
},
"default": {
"source_filenames": [ExpandIterable("DF_JS")],
"output_filename": "js/default.js",
"integrity": "sha384",
"crossorigin": "anonymous",
},
}
LIVE_SCRIPT_BINARY = "lsc"
LESS_BINARY = "lessc"
SASS_BINARY = "sass"
STYLUS_BINARY = "stylus"
BABEL_BINARY = "babel"
YUGLIFY_BINARY = "yuglify"
YUI_BINARY = "yuicompressor"
CLOSURE_BINARY = "closure"
UGLIFYJS_BINARY = "uglifyjs"
CSSTIDY_BINARY = "csstidy"
COFFEE_SCRIPT_BINARY = "coffee"
CSSMIN_BINARY = "cssmin"
TYPESCRIPT_BINARY = "tsc"
TYPESCRIPT_ARGUMENTS = []
CSSNANO_BINARY = "cssnano"
CSSNANO_ARGUMENTS = []
TERSER_BINARY = "terser"
TERSER_ARGUMENTS = []
# Django-All-Auth
ACCOUNT_EMAIL_SUBJECT_PREFIX = "[{SERVER_NAME}] "
ACCOUNT_EMAIL_VERIFICATION = None
ALLAUTH_PROVIDER_APPS = CallableSetting(allauth_provider_apps)
ALLAUTH_APPLICATIONS_CONFIG = AutocreateFile("{LOCAL_PATH}/social_auth.ini", mode=0o600)
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "{SERVER_PROTOCOL}"
ACCOUNT_ADAPTER = "df_config.apps.allauth.AccountAdapter"
# Django-Debug-Toolbar
DEBUG_TOOLBAR_CONFIG = {"JQUERY_URL": "{STATIC_URL}vendor/jquery/dist/jquery.min.js"}
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = CallableSetting(guess_djt_panels)
INTERNAL_IPS = ("127.0.0.1", "::1", "localhost")
# django-auth-ldap
AUTH_LDAP_SERVER_URI = None
AUTH_LDAP_BIND_DN = ""
AUTH_LDAP_BIND_PASSWORD = ""
AUTH_LDAP_USER_SEARCH_BASE = "ou=users,dc=example,dc=com"
AUTH_LDAP_FILTER = "(uid=%(user)s)"
AUTH_LDAP_USER_SEARCH = CallableSetting(ldap_user_search)
AUTH_LDAP_USER_DN_TEMPLATE = None
AUTH_LDAP_START_TLS = False
AUTH_LDAP_USER_ATTR_MAP = CallableSetting(ldap_attribute_map)
AUTH_LDAP_USER_FLAGS_BY_GROUP = CallableSetting(ldap_boolean_attribute_map)
AUTH_LDAP_MIRROR_GROUPS = False
AUTH_LDAP_USER_IS_ACTIVE = None
AUTH_LDAP_USER_IS_STAFF = None
AUTH_LDAP_USER_IS_SUPERUSER = None
AUTH_LDAP_USER_FIRST_NAME = None
AUTH_LDAP_USER_LAST_NAME = None
AUTH_LDAP_USER_EMAIL = None
AUTH_LDAP_GROUP_TYPE = CallableSetting(ldap_group_class)
AUTH_LDAP_GROUP_NAME = "posix"
AUTH_LDAP_ALWAYS_UPDATE_USER = True
AUTH_LDAP_REQUIRE_GROUP = None
AUTH_LDAP_DENY_GROUP = None
# Cache group memberships for an hour to minimize LDAP traffic
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600
# Use LDAP group membership to calculate group permissions.
AUTH_LDAP_FIND_GROUP_PERMS = False
AUTH_LDAP_GROUP_SEARCH = CallableSetting(ldap_group_search)
AUTH_LDAP_GROUP_SEARCH_BASE = "ou=groups,dc=example,dc=com"
AUTH_LDAP_AUTHORIZE_ALL_USERS = True
# django-cors-headers
CORS_ORIGIN_WHITELIST = ("{SERVER_NAME}", "{SERVER_NAME}:{SERVER_PORT}")
CORS_REPLACE_HTTPS_REFERER = False
# django-hosts
DEFAULT_HOST = "{SERVER_NAME}"
HOST_SCHEME = "{SERVER_PROTOCOL}://"
HOST_PORT = "{SERVER_PORT}"
# django-pam
USE_PAM_AUTHENTICATION = False
# django-radius
RADIUS_SERVER = None
RADIUS_PORT = None
RADIUS_SECRET = None
# django-redis-sessions
SESSION_REDIS = CallableSetting(session_redis_dict)
# django-smart-selects
USE_DJANGO_JQUERY = True
JQUERY_URL = False
# django-csp
CSP_CONNECT_SRC = CallableSetting(csp_connect)
CSP_BLOCK_ALL_MIXED_CONTENT = True
# ######################################################################################################################
#
# settings that should be customized for each project
# of course, you can redefine or override any setting
#
# ######################################################################################################################
# df_config
DF_CSS = []
DF_JS = []
DF_INDEX_VIEW = None
DF_SITE_SEARCH_VIEW = None # 'djangofloor.views.search.UserSearchView'
DF_PROJECT_NAME = CallableSetting(project_name)
DF_URL_CONF = "{DF_MODULE_NAME}.urls.urlpatterns"
DF_ADMIN_SITE = "django.contrib.admin.site"
DF_JS_CATALOG_VIEWS = ["django.contrib.admin"]
# noinspection PyUnresolvedReferences
DF_INSTALLED_APPS = ["{DF_MODULE_NAME}"] # your django app!
DF_MIDDLEWARE = []
DF_TEMPLATE_CONTEXT_PROCESSORS = []
DF_PIP_NAME = "{DF_MODULE_NAME}" # anything such that "python -m pip install {DF_PIP_NAME}" installs your project
# only used in docs
DF_REMOTE_USER_HEADER = None # HTTP_REMOTE_USER
DF_DEFAULT_GROUPS = [_("Users")]
NPM_FILE_PATTERNS = {
"bootstrap-notify": ["*.js"],
"font-awesome": ["css/*", "fonts/*"],
"html5shiv": ["dist/*"],
"jquery": ["dist/*"],
"jquery-file-upload": ["css/*", "js/*"],
"respond.js": ["dest/*"],
}
# used by the "npm" command: downloads these packages and copies the files matching any pattern in the list
LOG_REMOTE_ACCESS = True
LOG_DIRECTORY = Directory("{LOCAL_PATH}/log")
LOG_EXCLUDED_COMMANDS = {
"clearsessions",
"check",
"compilemessages",
"collectstatic",
"config",
"createcachetable",
"changepassword",
"createsuperuser",
"dumpdb",
"dbshell",
"dumpdata",
"flush",
"loaddata",
"inspectdb",
"makemessages",
"makemigrations",
"migrate",
"npm",
"packaging",
"ping_google",
"remove_stale_contenttypes",
"sendtestemail",
"shell",
"showmigrations",
"sqlflush",
"sqlmigrate",
"sqlsequencereset",
"squashmigrations",
"startapp",
"test",
"testserver",
}
# ######################################################################################################################
#
# settings that should be customized for each deployment
# {DF_MODULE_NAME}.iniconf:INI_MAPPING should be a list of ConfigField, allowing to define these settings in a .ini file
#
# ######################################################################################################################
ADMIN_EMAIL = "admin@{SERVER_NAME}" # aliased in settings.ini as "[global]admin_email"
DATABASE_ENGINE = "sqlite3" # aliased in settings.ini as "[database]engine"
DATABASE_NAME = Path(
"{LOCAL_PATH}/database.sqlite3"
) # aliased in settings.ini as "[database]name"
DATABASE_USER = "" # aliased in settings.ini as "[database]user"
DATABASE_PASSWORD = "" # aliased in settings.ini as "[database]password"
DATABASE_HOST = "" # aliased in settings.ini as "[database]host"
DATABASE_PORT = "" # aliased in settings.ini as "[database]port"
DATABASE_OPTIONS = {}
EMAIL_HOST = "localhost" # aliased in settings.ini as "[email]host"
EMAIL_HOST_PASSWORD = "" # aliased in settings.ini as "[email]password"
EMAIL_HOST_USER = "" # aliased in settings.ini as "[email]user"
EMAIL_FROM = "{ADMIN_EMAIL}" # aliased in settings.ini as "[email]from"
EMAIL_PORT = 25 # aliased in settings.ini as "[email]port"
EMAIL_SUBJECT_PREFIX = "[{SERVER_NAME}] "
EMAIL_USE_TLS = False # aliased in settings.ini as "[email]use_tls"
EMAIL_USE_SSL = False # aliased in settings.ini as "[email]use_ssl"
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
LANGUAGE_CODE = "en" # aliased in settings.ini as "[global]language_code"
TIME_ZONE = "Europe/Paris" # aliased in settings.ini as "[global]time_zone"
LOG_REMOTE_URL = None # aliased in settings.ini as "[global]log_remote_url"
LOG_LEVEL = None
SERVER_BASE_URL = CallableSetting(
smart_hostname
) # aliased in settings.ini as "[global]server_url"
# df_config
LISTEN_ADDRESS = DefaultListenAddress(
9000
) # aliased in settings.ini as "[global]listen_address"
LOCAL_PATH = "./django_data" # aliased in settings.ini as "[global]data"
__split_path = __file__.split(os.path.sep)
if "lib" in __split_path:
prefix = os.path.join(*__split_path[: __split_path.index("lib")])
LOCAL_PATH = Directory("/%s/var/{DF_MODULE_NAME}" % prefix)
# django-redis-sessions
SESSION_REDIS_PROTOCOL = "redis"
SESSION_REDIS_HOST = "localhost" # aliased in settings.ini as "[session]host"
SESSION_REDIS_PORT = 6379  # standard Redis port; aliased in settings.ini as "[session]port"
# --------------------------------------------------------------
# asfgrid.py, from the cosmicoder/isoclassify repository
# --------------------------------------------------------------
#! /usr/bin/env python
# --------------------------------------------------------------
# The asfgrid is a python module to compute asteroseismic
# parameters for a star with given stellar parameters and vice versa.
# Copyright (C) 2015 <NAME>, <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------
"""
A module to compute asteroseismic parameters for
a star with given stellar parameters and vice versa.
Author <NAME> <bugsanjib at gmail com>
Copyright (c) 2015 <NAME>, <NAME>
License: AGPL see <http://www.gnu.org/licenses/>.
Data files should be in current directory.
To run as a script
$./asfgrid.py --help
To use as module
::
>>> import asfgrid
>>> evstate=[1,1]
>>> logz=[-1.97,-1.98]
>>> teff=[4659.8,4903.2]
>>> dnu=[8.81,13.1]
>>> numax=[92.36,157.3]
>>> s=asfgrid.Seism()
>>> mass,radius=s.get_mass_radius(evstate,logz,teff,dnu,numax)
>>> print(mass, radius)
>>> logg=s.mr2logg(mass,radius)
>>> dnu,numax,fdnu=s.get_dnu_numax(evstate,logz,teff,mass,mass,logg)
>>> print(dnu, numax)
"""
import sys
import ebf
import numpy as np
import scipy.interpolate
__version__ = "0.0.5"
def _tocsv(filename,data,basekey=None,keylist=None,delimiter=', '):
"""
Write a dict or npstruct to a csv file
"""
if type(data) == dict:
with open(filename,'w') as fp:
            if keylist is None:
                keylist = list(data.keys())
            if basekey is None:
nsize=data[keylist[0]].size
else:
nsize=data[basekey].size
keylist=[key for key in keylist if data[key].size==nsize]
# s=str(keylist)
# s=s[1:-1].replace("'",'')+'\n'
s=delimiter.join([str(key) for key in keylist])+'\n'
fp.write(s)
for i in range(data[keylist[0]].size):
s=', '.join([str(data[key][i]) for key in keylist])+'\n'
fp.write(s)
else:
with open(filename,'w') as fp:
            if keylist is None:
s=str(data.dtype.names)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
for temp in data:
s=str(temp)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
else:
s=str(keylist)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
for i in range(data[keylist[0]].size):
s=', '.join([str(data[key][i]) for key in keylist])+'\n'
fp.write(s)
    print('Written file:', filename)
class _IGrid():
def __init__(self,data1,keys):
data=np.resize(data1,data1.size)
data.sort(order=keys)
self.keys=keys
self.vnames=[temp for temp in data.dtype.names if temp not in self.keys]
self.points=[np.unique(data[key]) for key in self.keys]
self.values={}
for vname in self.vnames:
self.values[vname]=data[vname].reshape([point.size for point in self.points])
self.points1=tuple([data[key] for key in self.keys])
self.values1={}
for vname in self.vnames:
self.values1[vname]=data[vname]
def homogenize_arrays(self,xi):
xj=[np.asarray(t) for t in xi]
temp=xj[0]
for t in xj:
temp=temp+t
xj=[np.zeros_like(temp)+t for t in xj]
return xj
def get_values(self,vname,xi,fill_value='nearest'):
fill_value1=np.nan
if type(xi) == list:
xi=np.array(self.homogenize_arrays(xi)).transpose()
t1=scipy.interpolate.interpn(self.points,self.values[vname],xi,bounds_error=False,fill_value=fill_value1)
if fill_value == 'nearest':
ind=np.where(np.isfinite(t1)==False)[0]
if ind.size>0:
                print('outside interp range', ind.size, ' out of ', t1.size)
if (xi.ndim == 1)&(len(self.keys)>1):
xi=xi.reshape([1,xi.size])
t1[ind]=scipy.interpolate.griddata(self.points1,self.values1[vname],xi[ind],method='nearest')
return t1
class Seism():
def __init__(self,datadir=None,z_solar=0.019):
# Change this to appropriate path
if datadir is None:
self.datadir=''
# self.datadir='/work1/sharma/Projects/kepler/data/dnu_grid6/'
else:
self.datadir=datadir
# set solar reference values
# self.radius= 6.958e8
# self.mass=1.99e30
# sun logg=np.log10(100.0*6.67259e-11*1.989e30/(6.958e8*6.958e8))
self.logg_solar=4.43796037457 # cgs unit
self.teff_solar=5777.0 # kelvin
self.numax_solar=3090.0 # micro Hz 3090+-30
# cannot change this
self.dnu_solar=135.1 # micro Hz 135.1
self.z_solar=z_solar # solar metallicity value
data1=ebf.read(self.datadir+'grid_interp1.ebf','/data')
data2=ebf.read(self.datadir+'grid_interp2.ebf','/data')
self.igrid1=_IGrid(data1,['evstate','logz','mass','logg_teff'])
self.igrid2=_IGrid(data2,['evstate','logz','mass_nu','logg_teff'])
def logg2r(self,logg,mass):
"""
From logg and mass compute radius
"""
return np.power(10.0,((self.logg_solar-logg)*0.5))*np.sqrt(mass)
def logg2m(self,logg,radius):
"""
From logg and radius compute mass
"""
return np.power(10.0,logg-self.logg_solar)*radius*radius
def logg2numax(self,logg,teff):
"""
From logg and teff compute numax with numax_solar=3090.0 microHz
"""
return (self.numax_solar)*np.power(10.0,logg-self.logg_solar)/(np.power(teff/self.teff_solar,0.5))
def numax2logg(self,numax,teff):
"""
        From numax and teff compute logg, using numax_solar=3090.0 microHz
"""
return np.log10((numax/self.numax_solar)*np.sqrt(teff/self.teff_solar))+self.logg_solar
def mr2rho(self,mass,sradius):
"""
From mass and radius compute rho_rho_solar
"""
return mass/np.power(sradius,3)
def mr2logg(self,mass,radius):
"""
From mass and radius compute logg
"""
return self.logg_solar+np.log10(mass/(radius*radius))
def kappa_m(self,dnu,numax):
"""
From dnu and numax compute kappa_m
"""
return np.power(numax/3090.0,3.0)*np.power(dnu/135.1,-4.0)
def kappa_r(self,dnu,numax):
"""
Not in original
From dnu and numax compute kappa_r
"""
return (numax/3090.0)*np.power(dnu/135.1,-2.0)
def mass_sc(self,dnu,numax,teff):
"""
From dnu, numax and teff compute mass according to scaling relation
Assumes dnu_solar=135.1 microHz and numax_solar=3090.0 microHz
"""
return np.power(numax/3090.0,3.0)*np.power(dnu/135.1,-4.0)*np.power(teff/self.teff_solar,1.5)
def _mass_dnu(self,dnu,logg):
"""
From dnu, logg compute mass according to scaling relation
Assumes dnu_solar=135.1 microHz
"""
return np.power(10.0,3*(logg-self.logg_solar))*np.power(dnu/(135.1),-4.0)
def _quantf(self,logg,teff):
"""
From logg and teff compute a quantity for interpolation that
is almost monotonic with age
"""
return np.log10(teff)+0.5*(np.tanh((logg-4.5)/0.25)+1)*logg*0.1
def get_dnu_numax(self,evstate,logz,teff,mini,mass,logg,fill_value='nearest',isfeh=False):
"""
Get average seismic parameters (dnu, numax) by interpolation on
a grid for a given (evstate, logz, teff, dnu, numax).
Assumption numax_solar=3090.0 microHz.
Args:
evstate (array): 1) Pre RGB 2) Post RGB
            logz or feh (array): log(Z) metallicity, or [Fe/H]=log(Z/Z_solar) when isfeh is True
teff (array): temperature
mini (array): initial mass
mass (array): actual mass with mass loss (mass <= mini).
logg (array): log(gravity)
            fill_value : Default is 'nearest', to use nearest grid points
in case of input values being out of grid range.
Alternatively, one can use None
Returns:
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
fdnu (array): the correction factor for Delta nu
"""
evstate=np.asarray(evstate)
logz=np.asarray(logz)
if isfeh is True:
logz=logz+np.log10(self.z_solar)
teff=np.asarray(teff)
mini=np.asarray(mini)
mass=np.asarray(mass)
logg=np.asarray(logg)
numax=self.logg2numax(logg,teff)
sradius=self.logg2r(logg,mass)
logg1=self.mr2logg(mini,sradius)
factor=self._get_fdnu(evstate,logz,teff,mini,logg1,fill_value= fill_value)
dnu=135.1*factor*np.power(mass,0.5)/np.power(sradius,1.5)
if (factor.size == 1)&(numax.ndim == 0):
return dnu[0],numax,factor[0]
else:
return dnu,numax,factor
def _get_fdnu(self,evstate,logz,teff,mass,logg,fill_value='nearest'):
evstate=np.asarray(evstate)
logz=np.asarray(logz)
teff=np.asarray(teff)
mass=np.asarray(mass)
logg=np.asarray(logg)
return self._from_mlogg('fdnu',evstate,logz,teff,mass,logg,fill_value= fill_value)
def _from_mlogg(self,quant,evstate,logz,teff,mini,logg,fill_value='nearest'):
"""
The driver function to perform interpolation on the grid
for a given (evstate, logz, teff, mini, logg)
Args:
quant (str): name of quantity for which answer is needed.
For example 'fdnu', 'age', etc
evstate (array): 1) Pre RGB 2) Post RGB
            logz (array): log(Z) metallicity ([Fe/H]=log(Z/Z_solar))
teff (array): temperature
mini (array): initial mass
logg (array): log(gravity)
"""
logz=np.asarray(logz)
logg_teff=self._quantf(logg,teff)
return self.igrid1.get_values(quant,[evstate,logz,mini,logg_teff],fill_value= fill_value)
def _from_freq(self,quant,evstate,logz,teff,dnu,numax,fill_value='nearest'):
"""
The driver function to perform interpolation on the grid
for a given (evstate, logz, teff, dnu, numax).
Args:
quant (str): name of quantity for which answer is needed.
For example 'fdnu', 'age', etc
evstate (array): 1) Pre RGB 2) Post RGB
            logz (array): log(Z) metallicity ([Fe/H]=log(Z/Z_solar))
teff (array): temperature
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
"""
logz=np.asarray(logz)
logg=self.numax2logg(numax,teff)
mass_dnu=self._mass_dnu(dnu,logg)
logg_teff=self._quantf(logg,teff)
return self.igrid2.get_values(quant,[evstate,logz,mass_dnu,logg_teff],fill_value= fill_value)
def get_mass_radius(self,evstate,logz,teff,dnu,numax,fill_value='nearest',isfeh=False):
"""
Get mass and radius of stars by interpolation on a grid
for a given (evstate, logz, teff, dnu, numax).
Assumption numax_solar=3090.0 microHz.
Args:
evstate (array): 1) Pre RGB 2) Post RGB
            logz or feh (array): log(Z) metallicity or [Fe/H]=log(Z/Z_solar)
teff (array): temperature
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
            fill_value : Default is 'nearest', to use nearest grid points
in case of input values being out of grid range.
Alternatively, one can use None
isfeh : A flag with default value being False. If set to
True, the second argument is considered to be
[Fe/H]
"""
evstate=np.asarray(evstate)
logz=np.asarray(logz)
if isfeh is True:
logz=logz+np.log10(self.z_solar)
teff=np.asarray(teff)
dnu=np.asarray(dnu)
numax=np.asarray(numax)
mass=self._from_freq('mass',evstate,logz,teff,dnu,numax,fill_value= fill_value)
logg=self.numax2logg(numax,teff)
sradius=self.logg2r(logg,mass)
if (mass.size == 1)&(evstate.ndim == 0):
return mass[0],sradius[0]
else:
return mass,sradius
def _usage():
print "NAME:"
print "\t asfgrid 0.0.4 - computes asteroseismic freuqncies or masses"
print "\t Copyright (c) 2015 <NAME> and <NAME> "
print "\t License: AGPL see <http://www.gnu.org/licenses/>."
print "\t Reference: Sharma et al. 2016, ApJ,822,15 \n"
print "USAGE:"
print "\t asfgrid inputfile \n"
print "DESCRIPTION:"
print "\t Outfile name is constructed from filename with suffix .out "
print "\t Input file should be ascii as follows"
print "\t evstate logz teff dnu numax"
print "\t 1 -1.97 4659.8 8.81 92.36"
print "\t 1 -1.98 4903.2 13.1 157.3 \n"
print "\t First line must contain column names"
print "\t Column names can be in any order but need to follow names"
print "\t given below"
print "OPTIONS:"
print "\t Possible input outputs are"
print "\t 1) (evstate, logz, teff, dnu, numax) ->(mass,radius)"
print "\t 2) (evstate, logz, teff, mass, logg) ->(dnu,numax,fdnu)"
print "\t 3) (evstate, logz, teff, mass, logg, mini)->(dnu,numax,fdnu)"
print "\t 4) (evstate, feh, teff, dnu, numax) ->(mass,radius)"
print "\t 5) (evstate, feh, teff, mass, logg) ->(dnu,numax,fdnu)"
print "\t 6) (evstate, feh, teff, mass, logg, mini) ->(dnu,numax,fdnu)"
print "\t Third and sixth option allows for mass loss if mass<mini \n"
print "VARIABLES:"
print "\t evstate (array): 1=Pre RGB tip, 2=Post | |
0.45098039],
[0.59607843, 0.32156863, 0.4745098 ],
[0.57254902, 0.21960784, 0.42352941],
[0.49803922, 0.15686275, 0.36862745],
[0.36470588, 0.07058824, 0.3254902 ],
[0.31372549, 0.12156863, 0.34901961],
[0.36078431, 0.16470588, 0.44313725],
[0.32156863, 0.07058824, 0.37254902],
[0.6627451 , 0.44313725, 0.6745098 ],
[0.42352941, 0.2 , 0.41568627],
[0.29411765, 0.06666667, 0.28627451],
[0.30196078, 0.12156863, 0.39215686],
[0.12156863, 0.01960784, 0.30980392],
[0.15686275, 0.08235294, 0.42352941],
[0.12941176, 0.03529412, 0.35686275],
[0.08235294, 0. , 0.29019608],
[0.1372549 , 0.05882353, 0.3372549 ],
[0.41568627, 0.30196078, 0.54901961],
[0.19607843, 0.03921569, 0.2745098 ],
[0.19607843, 0.03529412, 0.33333333],
[0.18039216, 0.02745098, 0.31372549],
[0.32156863, 0.16862745, 0.41176471],
[0.19607843, 0.03137255, 0.27843137],
[0.18431373, 0.01176471, 0.32156863],
[0.19215686, 0.01176471, 0.34117647],
[0.21568627, 0.02352941, 0.34509804],
[0.22352941, 0.04705882, 0.34901961],
[0.13333333, 0.01568627, 0.29019608],
[0.36470588, 0.19607843, 0.41568627],
[0.14509804, 0.01176471, 0.24313725],
[0.05490196, 0.02352941, 0.31764706],
[0.05490196, 0.01568627, 0.29019608],
[0.15294118, 0.01960784, 0.21176471],
[0.30196078, 0.11372549, 0.28627451],
[0.07058824, 0.03529412, 0.29019608],
[0.17254902, 0.05882353, 0.33333333],
[0.15686275, 0.01176471, 0.29019608],
[0.21176471, 0.02745098, 0.3254902 ],
[0.25098039, 0.01568627, 0.32941176],
[0.24705882, 0.00784314, 0.3254902 ],
[0.20392157, 0.00392157, 0.30588235],
[0.50196078, 0.33333333, 0.62352941],
[0.14901961, 0.02352941, 0.31764706],
[0.1254902 , 0.01568627, 0.2745098 ],
[0.17647059, 0.01960784, 0.30588235],
[0.05882353, 0.03529412, 0.30588235],
[0.07058824, 0.00392157, 0.30980392],
[0.04313725, 0.02352941, 0.31372549],
[0.12941176, 0.05098039, 0.38431373],
[0.03137255, 0.01176471, 0.34117647],
[0.00392157, 0.00392157, 0.31764706]],
kinds='Lch',
grad_npts=3,
grad_funcs='x',
extent='mirror'
)
cmap_citrus = fscolors.Fractal_colormap(
colors=[[0.96078431, 0.83137255, 0.77254902],
[0.34901961, 0.01568627, 0.02745098],
[0.27843137, 0. , 0.02745098],
[0.41176471, 0.00392157, 0.04705882],
[0.44705882, 0. , 0.05882353],
[0.71372549, 0.14509804, 0.20392157],
[0.79607843, 0.26666667, 0.21176471],
[0.92941176, 0.54117647, 0.47843137],
[0.90588235, 0.5372549 , 0.47058824],
[0.89803922, 0.49411765, 0.43529412],
[0.91372549, 0.51764706, 0.50196078],
[0.79215686, 0.41176471, 0.40392157],
[0.84705882, 0.45098039, 0.44705882],
[0.86666667, 0.44705882, 0.41960784],
[0.89803922, 0.54509804, 0.56862745],
[0.98431373, 0.68627451, 0.72941176],
[0.78039216, 0.30980392, 0.30588235],
[0.90196078, 0.4745098 , 0.4745098 ],
[0.76862745, 0.38823529, 0.43529412],
[0.95294118, 0.74117647, 0.75294118],
[0.80392157, 0.4 , 0.39607843],
[0.84313725, 0.38823529, 0.36078431],
[0.80392157, 0.26666667, 0.26666667],
[0.90980392, 0.51764706, 0.52156863],
[0.89019608, 0.58823529, 0.58431373],
[0.81176471, 0.3372549 , 0.29803922],
[0.71372549, 0.13333333, 0.10588235],
[0.72156863, 0.18823529, 0.18431373],
[0.81176471, 0.35294118, 0.35686275],
[0.82352941, 0.40784314, 0.41960784],
[0.84313725, 0.30980392, 0.28235294],
[0.8745098 , 0.49411765, 0.48627451],
[0.99607843, 0.69411765, 0.73333333],
[0.70980392, 0.16470588, 0.14117647],
[0.85490196, 0.27843137, 0.2627451 ],
[0.89019608, 0.45882353, 0.43137255],
[0.84313725, 0.30196078, 0.25490196],
[0.81568627, 0.27843137, 0.22745098],
[0.92941176, 0.40392157, 0.37254902],
[0.76078431, 0.22352941, 0.21176471],
[0.94509804, 0.41960784, 0.41568627],
[0.75294118, 0.20392157, 0.2 ],
[0.85490196, 0.36470588, 0.34901961],
[0.79607843, 0.20784314, 0.18823529],
[0.85098039, 0.27843137, 0.2745098 ],
[0.83529412, 0.18431373, 0.2 ],
[0.90196078, 0.34117647, 0.3254902 ],
[0.81176471, 0.41960784, 0.41960784],
[0.90980392, 0.56470588, 0.6 ],
[0.87058824, 0.43529412, 0.48235294],
[0.96862745, 0.76078431, 0.68627451],
[0.94117647, 0.8745098 , 0.76862745],
[0.99607843, 0.97647059, 0.89019608],
[0.97647059, 0.9254902 , 0.85882353],
[0.96862745, 0.90196078, 0.80784314],
[0.96862745, 0.85882353, 0.74509804],
[0.98039216, 0.89411765, 0.76078431],
[0.97647059, 0.85490196, 0.62745098],
[0.9254902 , 0.72941176, 0.23921569],
[0.83137255, 0.6745098 , 0.4 ],
[0.82352941, 0.74901961, 0.52941176],
[0.82745098, 0.75294118, 0.56078431],
[0.8627451 , 0.79215686, 0.65490196],
[0.69803922, 0.63921569, 0.30196078],
[0.9254902 , 0.9254902 , 0.56862745],
[0.79215686, 0.80784314, 0.21176471],
[0.78823529, 0.81568627, 0.20784314],
[0.83529412, 0.8627451 , 0.4 ],
[0.8745098 , 0.90980392, 0.41176471],
[0.80392157, 0.82745098, 0.24705882],
[0.78431373, 0.81176471, 0.20392157],
[0.78431373, 0.81960784, 0.20784314],
[0.79215686, 0.82745098, 0.23529412],
[0.75686275, 0.79607843, 0.18039216],
[0.77254902, 0.81568627, 0.19607843],
[0.75686275, 0.80392157, 0.18431373],
[0.72156863, 0.78039216, 0.18431373],
[0.79607843, 0.85098039, 0.28627451],
[0.76078431, 0.8 , 0.19607843],
[0.81568627, 0.8627451 , 0.29411765],
[0.77254902, 0.82352941, 0.20392157],
[0.78431373, 0.83921569, 0.21960784],
[0.77647059, 0.83529412, 0.20392157],
[0.75294118, 0.81568627, 0.18039216],
[0.76078431, 0.82352941, 0.19215686],
[0.76470588, 0.82745098, 0.19607843],
[0.78823529, 0.84705882, 0.2 ],
[0.82745098, 0.89411765, 0.28235294],
[0.77647059, 0.83137255, 0.21176471],
[0.79607843, 0.8627451 , 0.23529412],
[0.75686275, 0.82352941, 0.17254902],
[0.82352941, 0.88235294, 0.30196078],
[0.76470588, 0.82352941, 0.20392157],
[0.81176471, 0.8627451 , 0.22352941],
[0.75686275, 0.80784314, 0.16078431],
[0.78039216, 0.82745098, 0.18039216],
[0.79215686, 0.83921569, 0.25882353],
[0.80784314, 0.84313725, 0.21176471],
[0.81568627, 0.85490196, 0.24313725],
[0.85490196, 0.89803922, 0.41176471],
[0.88627451, 0.91764706, 0.45490196],
[0.81568627, 0.83921569, 0.21960784],
[0.92941176, 0.92156863, 0.7372549 ],
[0.9254902 , 0.91764706, 0.74901961],
[0.94509804, 0.92941176, 0.78039216],
[0.94901961, 0.91764706, 0.76470588],
[0.9254902 , 0.89803922, 0.69019608],
[0.91764706, 0.89019608, 0.70196078],
[0.92156863, 0.89803922, 0.71372549],
[0.9254902 , 0.89803922, 0.70980392],
[0.95686275, 0.90980392, 0.70196078],
[0.93333333, 0.85882353, 0.60392157],
[0.9372549 , 0.83921569, 0.49019608],
[0.91764706, 0.81568627, 0.36470588],
[0.89803922, 0.78823529, 0.25882353],
[0.89411765, 0.79215686, 0.21568627],
[0.91764706, 0.80784314, 0.23921569],
[0.93333333, 0.82352941, 0.29411765],
[0.9372549 , 0.81960784, 0.3372549 ],
[0.92156863, 0.82745098, 0.34509804],
[0.91372549, 0.81176471, 0.32941176],
[0.92156863, 0.81176471, 0.30980392],
[0.93333333, 0.84705882, 0.4 ],
[0.91372549, 0.80784314, 0.27843137],
[0.95686275, 0.85098039, 0.32941176],
[0.9254902 , 0.83529412, 0.3254902 ],
[0.9254902 , 0.83137255, 0.33333333],
[0.91764706, 0.81960784, 0.27843137],
[0.9254902 , 0.82745098, 0.27058824],
[0.93333333, 0.83921569, 0.3372549 ],
[0.96078431, 0.86666667, 0.39215686],
[0.94509804, 0.8627451 , 0.41568627],
[0.94509804, 0.85882353, 0.43529412],
[0.94509804, 0.8627451 , 0.41960784],
[0.92156863, 0.83137255, 0.34117647],
[0.90980392, 0.82352941, 0.32941176],
[0.92941176, 0.83921569, 0.35686275],
[0.92941176, 0.8627451 , 0.38823529],
[0.96078431, 0.90196078, 0.49019608],
[0.98823529, 0.98823529, 0.83529412],
[0.98431373, 0.99607843, 0.8745098 ],
[0.98039216, 0.99607843, 0.85098039],
[0.98039216, 0.99607843, 0.78823529],
[0.98039216, 0.99607843, 0.83921569],
[0.91764706, 0.9372549 , 0.65490196],
[0.89803922, 0.85490196, 0.31764706],
[0.66666667, 0.61176471, 0.19215686],
[0.04313725, 0.0745098 , 0.05490196],
[0.03921569, 0.05882353, 0.03529412],
[0.03137255, 0.05882353, 0.03137255],
[0.05098039, 0.0745098 , 0.02352941],
[0.10588235, 0.12156863, 0.03137255],
[0.11764706, 0.15294118, 0.03137255],
[0.14901961, 0.17647059, 0.03529412],
[0.18039216, 0.23921569, 0.05882353],
[0.23137255, 0.31372549, 0.09019608],
[0.19607843, 0.27058824, 0.05098039],
[0.23137255, 0.3372549 , 0.08235294],
[0.25098039, 0.36078431, 0.08627451],
[0.24313725, 0.34901961, 0.08627451],
[0.23921569, 0.35294118, 0.09019608],
[0.20392157, 0.32156863, 0.05882353],
[0.23529412, 0.35686275, 0.08627451],
[0.25882353, 0.38823529, 0.09019608],
[0.24705882, 0.39215686, 0.08235294],
[0.27058824, 0.41568627, 0.10588235],
[0.26666667, 0.41176471, 0.09803922],
[0.26666667, 0.41176471, 0.09803922],
[0.25490196, 0.40784314, 0.09019608],
[0.25098039, 0.41960784, 0.09411765],
[0.27058824, 0.41960784, 0.10196078],
[0.29411765, 0.42745098, 0.11372549],
[0.27058824, 0.40392157, 0.09019608],
[0.28627451, 0.43529412, 0.09803922],
[0.27843137, 0.42745098, 0.09803922],
[0.27843137, 0.42352941, 0.10980392],
[0.29411765, 0.40784314, 0.11372549],
[0.24705882, 0.33333333, 0.06666667],
[0.28235294, 0.31372549, 0.04705882],
[0.42745098, 0.43921569, 0.15294118],
[0.24705882, 0.26666667, 0.06666667],
[0.27058824, 0.31764706, 0.09019608],
[0.2 , 0.27058824, 0.02745098],
[0.2745098 , 0.3254902 , 0.07843137],
[0.3254902 , 0.34117647, 0.10980392],
[0.30588235, 0.32941176, 0.09019608],
[0.27058824, 0.30980392, 0.08235294],
[0.23921569, 0.29019608, 0.07058824],
[0.23137255, 0.28235294, 0.07058824],
[0.21176471, 0.24313725, 0.05098039],
[0.21960784, 0.24313725, 0.06666667],
[0.18431373, 0.21176471, 0.06666667],
[0.75294118, 0.79607843, 0.33333333],
[0.90588235, 0.94509804, 0.63921569],
[0.89411765, 0.91764706, 0.46666667],
[0.81960784, 0.85098039, 0.24705882],
[0.77647059, 0.81568627, 0.30196078],
[0.80392157, 0.81960784, 0.20784314],
[0.77254902, 0.81176471, 0.28235294],
[0.85882353, 0.88627451, 0.49019608],
[0.82745098, 0.85882353, 0.39607843],
[0.81568627, 0.85098039, 0.28235294],
[0.81960784, 0.84705882, 0.3372549 ],
[0.82745098, 0.85882353, 0.39607843],
[0.80392157, 0.83529412, 0.28235294],
[0.78431373, 0.81960784, 0.20392157],
[0.80392157, 0.83921569, 0.25490196],
[0.78823529, 0.83921569, 0.23529412],
[0.81176471, 0.85098039, 0.29411765],
[0.76862745, 0.80392157, 0.23137255],
[0.73333333, 0.78823529, 0.25882353],
[0.8745098 , 0.90196078, 0.4627451 ],
[0.9372549 , 0.95686275, 0.58431373],
[0.77647059, 0.8 , 0.18039216],
[0.78823529, 0.81960784, 0.23137255],
[0.76078431, 0.79607843, 0.18823529],
[0.78431373, 0.83529412, 0.25882353],
[0.79607843, 0.82352941, 0.23137255],
[0.78823529, 0.83529412, 0.30588235],
[0.76470588, 0.80392157, 0.23529412],
[0.81960784, 0.85098039, 0.4 ],
[0.77254902, 0.80392157, 0.25882353],
[0.80392157, 0.85098039, 0.31372549],
[0.8627451 , 0.90196078, 0.41960784],
[0.77254902, 0.81568627, 0.22352941],
[0.83529412, 0.87843137, 0.37254902],
[0.8 , 0.84313725, 0.36862745],
[0.88235294, 0.92941176, 0.58431373],
[0.84313725, 0.89803922, 0.48627451],
[0.78431373, 0.82745098, 0.29019608],
[0.81960784, 0.85490196, 0.30588235],
[0.85490196, 0.89411765, 0.50196078],
[0.82745098, 0.86666667, 0.3372549 ],
[0.84705882, 0.88235294, 0.38039216],
[0.82352941, 0.86666667, 0.40784314],
[0.9254902 , 0.98039216, 0.61568627],
[0.69411765, 0.78823529, 0.18823529],
[0.64313725, 0.7254902 , 0.17647059],
[0.61176471, 0.64313725, 0.13333333],
[0.62352941, 0.56862745, 0.14509804],
[0.60392157, 0.11372549, 0.03921569],
[0.84313725, 0.28235294, 0.29803922],
[0.6745098 , 0.02352941, 0.07058824],
[0.66666667, 0. , 0.05490196],
[0.68627451, 0.01568627, 0.07058824],
[0.69411765, 0.07058824, 0.10980392],
[0.69019608, 0.00392157, 0.0745098 ],
[0.71764706, 0.04705882, 0.10980392],
[0.65490196, 0.00784314, 0.0627451 ],
[0.81568627, 0.28627451, 0.30588235],
[0.78039216, 0.23137255, 0.23921569],
[0.69803922, 0.12941176, 0.12156863],
[0.72156863, 0.12941176, 0.13333333],
[0.71764706, 0.10980392, 0.11372549],
[0.79215686, 0.24313725, 0.18823529],
[0.74901961, 0.20392157, 0.16470588]],
kinds='Lch',
grad_npts=3,
grad_funcs='x',
extent='repeat'
)
cmap_argon = fscolors.Fractal_colormap(
colors=[
[0.01568627, 0. , 0.12156863],
[0.14509804, 0.00392157, 0.31372549],
[0.27843137, 0.00784314, 0.50196078],
[0.40784314, 0.01176471, 0.69411765],
[0.54901961, 0.05490196, 0.74901961],
[0.69019608, 0.11764706, 0.74509804],
[0.83137255, 0.17647059, 0.74117647],
[0.93333333, 0.29019608, 0.76078431],
[0.95294118, 0.50588235, 0.83137255],
[0.97254902, 0.72156863, 0.90588235],
[0.98039216, 0.82745098, 0.94117647]],
kinds='Lch',
grad_npts=32,
grad_funcs='x',
extent='mirror'
)
cmap_peacock = fscolors.Fractal_colormap(
colors=[
[0.02745098, 0.05882353, 0.01568627],
[0.05098039, 0.05882353, 0.04705882],
[0.08235294, 0.11372549, 0.03529412],
[0.07058824, 0.10588235, 0.03529412],
[0.01176471, 0.04313725, 0. ],
[0.00392157, 0.03137255, 0. ],
[0.01960784, 0.04705882, 0.01568627],
[0. , 0.04313725, 0. ],
[0.0627451 , 0.10980392, 0.03921569],
[0.0745098 , 0.10980392, 0.04705882],
[0.01960784, 0.04313725, 0.00392157],
[0.00392157, 0.01960784, 0.01568627],
[0. , 0. , 0. ],
[0. , 0. , 0.01568627],
[0. , 0.00392157, 0.01176471],
[0.01960784, 0.06666667, 0.02352941],
[0.05882353, 0.15686275, 0.05882353],
[0.10588235, 0.23921569, 0.11372549],
[0.01568627, 0.07843137, 0.01960784],
[0.01568627, 0. , 0.02352941],
[0.01960784, 0.08235294, 0.01568627],
[0.02352941, 0.1254902 , 0.00784314],
[0.16470588, 0.31764706, 0.10196078],
[0.40784314, 0.56862745, 0.30980392],
[0.19215686, 0.35294118, 0.1254902 ],
[0.27843137, 0.45098039, 0.24705882],
[0.41568627, 0.58039216, 0.41176471],
[0.25098039, 0.37647059, 0.14901961],
[0.10980392, 0.19215686, 0.03137255],
[0.00392157, 0.03921569, 0.01176471],
[0.07843137, 0.15294118, 0.06666667],
[0.13333333, 0.30196078, 0.08627451],
[0.09019608, 0.23921569, 0.03137255],
[0.05882353, 0.19607843, 0.05098039],
[0.05490196, 0.14901961, 0.08235294],
[0.04313725, 0.07058824, 0.03137255],
[0.09411765, 0.27843137, 0.09411765],
[0.24705882, 0.60784314, 0.27843137],
[0.01568627, 0.2627451 , 0.01960784],
[0.14509804, 0.36470588, 0.1372549 ],
[0.18431373, 0.38039216, 0.14901961],
[0.15294118, 0.31764706, 0.09803922],
[0.05882353, 0.22745098, 0.05098039],
[0. , 0.04313725, 0. ],
[0.00784314, 0.07843137, 0.02352941],
[0.10196078, 0.34901961, 0.16078431],
[0.37647059, 0.72941176, 0.37647059],
[0.48627451, 0.8 , 0.36862745],
[0.05882353, 0.26666667, 0.01176471],
[0.02352941, 0.17647059, 0.01960784],
[0.41960784, 0.65882353, 0.41568627],
[0.34117647, 0.70980392, 0.3372549 ],
[0.14901961, 0.57647059, 0.20392157],
[0.08627451, 0.4745098 , 0.24705882],
[0.02352941, 0.30196078, 0.16862745],
[0.27058824, 0.70980392, 0.4745098 ],
[0.36862745, 0.93333333, 0.50588235],
[0.54901961, 0.99215686, 0.4627451 ],
[0.43137255, 0.69411765, 0.14117647],
[0.16078431, 0.48627451, 0.03529412],
[0.28235294, 0.57647059, 0.28627451],
[0.03921569, 0.2 , 0.04705882],
[0.09411765, 0.47843137, 0.25098039],
[0.49411765, 0.90980392, 0.49411765],
[0.75686275, 0.99607843, 0.43137255],
[0.59607843, 0.97647059, 0.30196078],
[0.37647059, | |
We see here that Euler's method does not give a stable orbit with, for example, $\Delta t =0.01$. It
means that we cannot trust Euler's method. Euler's method does not conserve energy. It is an
example of an integrator which is not
[symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator).
Here we present thus two methods, which with simple changes allow us
to avoid these pitfalls. The simplest possible extension is the
so-called Euler-Cromer method. The changes we need to make to our
code are indeed marginal here. We need simply to replace
r[i+1] = r[i] + DeltaT*v[i]
in the above code with the velocity at the new time $t_{i+1}$
r[i+1] = r[i] + DeltaT*v[i+1]
By this simple change we get stable orbits. Below we derive the
Euler-Cromer method as well as one of the most utilized algorithms for
solving the above type of problems, the so-called Velocity-Verlet
method.
Let us repeat Euler's method.
We have a differential equation
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
y'(t_i)=f(t_i,y_i)
\label{_auto7} \tag{14}
\end{equation}
$$
and if we truncate at the first derivative, we have from the Taylor expansion
$$
y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2),
$$
which when complemented with $t_{i+1}=t_i+\Delta t$ forms
the algorithm for the well-known Euler method.
Note that at every step we make an approximation error
of the order of $O(\Delta t^2)$, however the total error is the sum over all
steps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like
$NO(\Delta t^2)\approx O(\Delta t)$.
To make Euler's method more precise we can obviously
decrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision.
Euler's method is not recommended for precision calculation,
although it is handy to use in order to get a first
view on how a solution may look like.
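To make the global $O(\Delta t)$ error concrete, here is a minimal sketch (not part of the original notes) that applies Euler's method to the test problem $y'=-y$, $y(0)=1$, and measures the error at $t=1$ against the exact solution $e^{-1}$; halving $\Delta t$ roughly halves the error, as expected for a first-order method.

import numpy as np

def euler_error(dt, tfinal=1.0):
    # integrate y' = -y with y(0) = 1 using forward Euler and return |error| at tfinal
    n = int(round(tfinal/dt))
    y = 1.0
    for _ in range(n):
        y = y + dt*(-y)
    return abs(y - np.exp(-tfinal))

for dt in (0.1, 0.05, 0.025):
    print(f"Delta t = {dt:6.3f}   error at t=1: {euler_error(dt):.3e}")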
Euler's method is asymmetric in time, since it uses information about the derivative at the beginning
of the time interval. This means that we evaluate the position $y_1$ using the velocity
$v_0$. A simple variation is to determine $y_{n+1}$ using the velocity
$v_{n+1}$, that is (in a slightly more generalized form)
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
y_{n+1}=y_{n}+(\Delta t)v_{n+1}+O(\Delta t^2)
\label{_auto8} \tag{15}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).
\label{_auto9} \tag{16}
\end{equation}
$$
The acceleration $a_n$ is a function of the position, velocity and time, $a_n=a(y_n, v_n, t_n)$, and needs to be evaluated
as well. This is the Euler-Cromer method. It is easy to change the above code and see that with the same
time step we get stable results, as sketched below.
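As a minimal sketch (assuming the same imports, time step DeltaT and arrays n, t, r, v set up as in the cells below), the Euler-Cromer loop reads:

# Euler-Cromer: update the velocity first, then use the *new* velocity for the position
for i in range(n-1):
    rabs = sqrt(sum(r[i]*r[i]))
    a = -4*(pi**2)*r[i]/(rabs**3)
    v[i+1] = v[i] + DeltaT*a
    r[i+1] = r[i] + DeltaT*v[i+1]   # note v[i+1] here, not v[i]
    t[i+1] = t[i] + DeltaT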
Let us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in.
We have the Taylor expansion for the position given by
$$
x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).
$$
The corresponding expansion for the velocity is
$$
v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).
$$
Via Newton's second law we have normally an analytical expression for the derivative of the velocity, namely
$$
a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.
$$
If we add to this the corresponding expansion for the derivative of the velocity
$$
v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),
$$
and retain only terms up to the second derivative of the velocity, since our error goes as $O((\Delta t)^3)$, we have
$$
(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.
$$
We can then rewrite the Taylor expansion for the velocity as
$$
v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).
$$
Our final equations for the position and the velocity become then
$$
x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),
$$
and
$$
v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).
$$
Note well that the term $a_{i+1}$ depends on the position $x_{i+1}$. This means that you need to calculate
the position at the updated time $t_{i+1}$ before computing the next velocity. Note also that the derivative of the velocity at the time
$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well.
We can now easily add the Verlet method to our original code as
DeltaT = 0.01
#set up arrays
tfinal = 10
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the gravitational force; note that we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
You can easily generalize the calculation of the forces by defining a function
which takes the various variables as input. We leave this as a challenge to you; one possible sketch is shown below.
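A possible sketch (just one way to do it, reusing the names and units from the cells above):

def acceleration(r):
    # gravitational acceleration from the sun in AU/yr^2, using GM_sun = 4*pi^2
    rabs = sqrt(sum(r*r))
    return -4*(pi**2)*r/(rabs**3)

# inside the Velocity-Verlet loop one would then simply write
#   a      = acceleration(r[i])
#   r[i+1] = r[i] + DeltaT*v[i] + 0.5*(DeltaT**2)*a
#   anew   = acceleration(r[i+1])
#   v[i+1] = v[i] + 0.5*DeltaT*(a + anew)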
Running the above code for various time steps we see that the Velocity-Verlet method remains fully stable.
We can also play around with different initial conditions in order to find the escape velocity from an orbit around the sun at a distance of one astronomical unit, 1 AU. The theoretical value for the escape velocity is given by
$$
v = \sqrt{\frac{8\pi^2}{r}},
$$
and with $r=1$ AU, this means that the escape velocity is $2\pi\sqrt{2}$ AU/yr. To obtain this we required that the kinetic energy of Earth equals the potential energy given by the gravitational force.
Setting
$$
\frac{1}{2}M_{\mathrm{Earth}}v^2=\frac{GM_{\odot}M_{\mathrm{Earth}}}{r},
$$
and with $GM_{\odot}=4\pi^2$ (in units of AU$^3$/yr$^2$) we obtain the above relation for the velocity. Setting an initial velocity equal to, say, $9$ AU/yr in the code below yields a planet (Earth) which escapes its stable orbit around the sun, as seen by running the code here.
DeltaT = 0.01
#set up arrays
tfinal = 100
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
# setting initial velocity larger than escape velocity
v0 = np.array([0.0,9.0])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the gravitational force; note that we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EscapeEarthSunVV")
plt.show()
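As a quick numerical sanity check (a sketch, not part of the original notes): the theoretical escape speed at $r=1$ AU is $2\pi\sqrt{2}\approx 8.89$ AU/yr, so the initial speed of $9$ AU/yr used above is just beyond the threshold.

from math import pi, sqrt

v_escape = 2*pi*sqrt(2)   # = sqrt(8*pi**2/r) with r = 1 AU, in AU/yr
print(f"escape speed at 1 AU: {v_escape:.4f} AU/yr")
print(f"initial speed of 9 AU/yr exceeds it: {9.0 > v_escape}")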
### Exercise Conservative forces
Which of the following forces are conservative? All three forces depend only on $\boldsymbol{r}$ and therefore satisfy the first condition for being conservative.
* $\boldsymbol{F}=k(x\boldsymbol{i}+2y\boldsymbol{j}+3z\boldsymbol{k})$ where $k$ is a constant.
The **curl** is zero and the force is conservative. The potential energy, obtained by integration, is $V(x,y,z)=-k\left(\frac{1}{2}x^2+y^2+\frac{3}{2}z^2\right)$. Taking the negative gradient indeed gives back the force.
* $\boldsymbol{F}=y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k}$.
This force is also conservative since it depends only on the coordinates and its curl is zero. To find the potential energy, since the integral is path independent, we can integrate along any path. The simplest is to start from the origin, follow a path along the $x$-axis (which gives zero) and then a path parallel to the $y$-axis, which results in $V(x,y) = -xy$. Taking the derivatives with respect to $x$ and $y$ gives us back the expression for the force.
* $\boldsymbol{F}=k(-y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k})$ where $k$ is a constant.
Here the **curl** is $(0,0,2k)$ and the force is not conservative.
* 2d For those forces which are conservative, find the corresponding potential energy $V$ and verify by direct differentiation that $\boldsymbol{F}=-\boldsymbol{\nabla} V$.
See the answers to each exercise above.
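For completeness, here is a short symbolic check of the three curls (a sketch only; it assumes sympy, which is not used elsewhere in these notes):

import sympy as sp

x, y, z, k = sp.symbols('x y z k')
forces = {
    "F1 = k(x, 2y, 3z)": (k*x, 2*k*y, 3*k*z),
    "F2 = (y, x, 0)":    (y, x, sp.Integer(0)),
    "F3 = k(-y, x, 0)":  (-k*y, k*x, sp.Integer(0)),
}
for name, (Fx, Fy, Fz) in forces.items():
    curl = (sp.diff(Fz, y) - sp.diff(Fy, z),
            sp.diff(Fx, z) - sp.diff(Fz, x),
            sp.diff(Fy, x) - sp.diff(Fx, y))
    print(name, "-> curl =", curl)
# only the third force has a nonzero curl, (0, 0, 2k), and is therefore not conservative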
### Exercise: The Lennard-Jones potential
[The Lennard-Jones potential](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is often used to describe
the interaction between two atoms, ions or molecules. If you end up doing materials science and molecular dynamics calculations, it is very likely that you will encounter this potential model.
The expression for the potential energy is
$$
V(r) = V_0\left(\left(\frac{a}{r}\right)^{12}-\left(\frac{b}{r}\right)^{6}\right),
$$
where $V_0$, $a$ and $b$ are constants and the potential depends only on the relative distance between two objects
$i$ and $j$, that is $r=\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert=\sqrt{(x_i-x_j)^2+(y_i-y_j)^2+(z_i-z_j)^2}$.
* Sketch/plot the potential (choose some values for the constants in doing so).
The following Python code plots the potential
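The plotting code itself appears to have been cut off here; a minimal sketch is given below, with arbitrarily chosen constants $V_0=1$ and $a=b=1$ (an assumption purely for illustration).

import numpy as np
import matplotlib.pyplot as plt

V0, a, b = 1.0, 1.0, 1.0           # illustrative constants only
r = np.linspace(0.9, 3.0, 500)     # stay away from r -> 0 where the potential diverges
V = V0*((a/r)**12 - (b/r)**6)

fig, ax = plt.subplots()
ax.plot(r, V)
ax.axhline(0.0, color='gray', lw=0.5)
ax.set_xlabel('r')
ax.set_ylabel('V(r)')
fig.tight_layout()
plt.show()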
"""
Tests for the daemon.
Pyro - Python Remote Objects. Copyright by <NAME> (<EMAIL>).
"""
import time
import socket
import uuid
import pytest
import Pyro5.core
import Pyro5.client
import Pyro5.server
import Pyro5.nameserver
import Pyro5.protocol
import Pyro5.socketutil
import Pyro5.serializers
from Pyro5.errors import DaemonError, PyroError
from Pyro5 import config
from Pyro5.callcontext import current_context
from support import *
class MyObj(object):
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other.arg
__hash__ = object.__hash__
class CustomDaemonInterface(Pyro5.server.DaemonObject):
def __init__(self, daemon):
super(CustomDaemonInterface, self).__init__(daemon)
def custom_daemon_method(self):
return 42
class TestDaemon:
# We create a daemon, but notice that we are not actually running the requestloop.
# 'on-line' tests are all taking place in another test, to keep this one simple.
def setUp(self):
config.POLLTIMEOUT = 0.1
def sendHandshakeMessage(self, conn, correlation_id=None):
ser = Pyro5.serializers.serializers_by_id[Pyro5.serializers.MarshalSerializer.serializer_id]
data = ser.dumps({"handshake": "hello", "object": Pyro5.core.DAEMON_NAME})
current_context.correlation_id = correlation_id
msg = Pyro5.protocol.SendingMessage(Pyro5.protocol.MSG_CONNECT, 0, 99, Pyro5.serializers.MarshalSerializer.serializer_id, data)
conn.send(msg.data)
def testSerializerAccepted(self):
with Pyro5.server.Daemon(port=0) as d:
msg = Pyro5.protocol.SendingMessage(Pyro5.protocol.MSG_INVOKE, 0, 0, Pyro5.serializers.MarshalSerializer.serializer_id, b"")
cm = ConnectionMock(msg)
d.handleRequest(cm)
msg = Pyro5.protocol.SendingMessage(Pyro5.protocol.MSG_INVOKE, 0, 0, Pyro5.serializers.JsonSerializer.serializer_id, b"")
cm = ConnectionMock(msg)
d.handleRequest(cm)
msg = Pyro5.protocol.SendingMessage(Pyro5.protocol.MSG_INVOKE, 0, 0, Pyro5.serializers.SerpentSerializer.serializer_id, b"")
cm = ConnectionMock(msg)
d.handleRequest(cm)
if "msgpack" in Pyro5.serializers.serializers:
msg = Pyro5.protocol.SendingMessage(Pyro5.protocol.MSG_INVOKE, 0, 0, Pyro5.serializers.MsgpackSerializer.serializer_id, b"")
cm = ConnectionMock(msg)
d.handleRequest(cm)
def testDaemon(self):
with Pyro5.server.Daemon(port=0) as d:
hostname, port = d.locationStr.split(":")
port = int(port)
assert Pyro5.core.DAEMON_NAME in d.objectsById
assert str(d.uriFor(Pyro5.core.DAEMON_NAME)) == "PYRO:" + Pyro5.core.DAEMON_NAME + "@" + d.locationStr
# check the string representations
expected = "<Pyro5.server.Daemon at 0x%x; %s - %s; 1 objects>" % (id(d), d.locationStr, Pyro5.socketutil.family_str(d.sock))
assert str(d) == expected
assert repr(d) == expected
sockname = d.sock.getsockname()
assert sockname[1] == port
daemonobj = d.objectsById[Pyro5.core.DAEMON_NAME]
daemonobj.ping()
daemonobj.registered()
def testDaemonCustomInterface(self):
with Pyro5.server.Daemon(port=0, interface=CustomDaemonInterface) as d:
obj = d.objectsById[Pyro5.core.DAEMON_NAME]
assert obj.custom_daemon_method() == 42
def testDaemonConnectedSocket(self):
try:
Pyro5.config.SERVERTYPE = "thread"
with Pyro5.server.Daemon() as d:
assert "Thread" in d.transportServer.__class__.__name__
s1, s2 = socket.socketpair()
with Pyro5.server.Daemon(connected_socket=s1) as d:
assert d.locationStr=="./u:<<not-bound>>" or d.locationStr.startswith("127.0.")
assert not("Thread" in d.transportServer.__class__.__name__)
assert "Existing" in d.transportServer.__class__.__name__
Pyro5.config.SERVERTYPE = "multiplex"
with Pyro5.server.Daemon() as d:
assert "Multiplex" in d.transportServer.__class__.__name__
s1, s2 = socket.socketpair()
with Pyro5.server.Daemon(connected_socket=s1) as d:
assert d.locationStr=="./u:<<not-bound>>" or d.locationStr.startswith("127.0.")
assert not("Multiplex" in d.transportServer.__class__.__name__)
assert "Existing" in d.transportServer.__class__.__name__
finally:
Pyro5.config.SERVERTYPE = "thread"
def testDaemonUnixSocket(self):
if hasattr(socket, "AF_UNIX"):
SOCKNAME = "test_unixsocket"
with Pyro5.server.Daemon(unixsocket=SOCKNAME) as d:
locationstr = "./u:" + SOCKNAME
assert d.locationStr == locationstr
assert str(d.uriFor(Pyro5.core.DAEMON_NAME)) == "PYRO:" + Pyro5.core.DAEMON_NAME + "@" + locationstr
# check the string representations
expected = "<Pyro5.server.Daemon at 0x%x; %s - Unix; 1 objects>" % (id(d), locationstr)
assert str(d) == expected
assert d.sock.getsockname() == SOCKNAME
assert d.sock.family == socket.AF_UNIX
def testDaemonUnixSocketAbstractNS(self):
if hasattr(socket, "AF_UNIX"):
SOCKNAME = "\0test_unixsocket" # mind the \0 at the start, for a Linux abstract namespace socket
with Pyro5.server.Daemon(unixsocket=SOCKNAME) as d:
locationstr = "./u:" + SOCKNAME
assert d.locationStr == locationstr
assert str(d.uriFor(Pyro5.core.DAEMON_NAME)) == "PYRO:" + Pyro5.core.DAEMON_NAME + "@" + locationstr
# check the string representations
expected = "<Pyro5.server.Daemon at 0x%x; %s - Unix; 1 objects>" % (id(d), locationstr)
assert str(d) == expected
sn_bytes = bytes(SOCKNAME, "ascii")
assert d.sock.getsockname() == sn_bytes
assert d.sock.family == socket.AF_UNIX
def testServertypeThread(self):
old_servertype = config.SERVERTYPE
config.SERVERTYPE = "thread"
with Pyro5.server.Daemon(port=0) as d:
assert d.sock in d.sockets, "daemon's socketlist should contain the server socket"
assert len(d.sockets) == 1, "daemon without connections should have just 1 socket"
config.SERVERTYPE = old_servertype
def testServertypeMultiplex(self):
old_servertype = config.SERVERTYPE
config.SERVERTYPE = "multiplex"
with Pyro5.server.Daemon(port=0) as d:
assert d.sock in d.sockets, "daemon's socketlist should contain the server socket"
assert len(d.sockets) == 1, "daemon without connections should have just 1 socket"
config.SERVERTYPE = old_servertype
def testServertypeFoobar(self):
old_servertype = config.SERVERTYPE
config.SERVERTYPE = "foobar"
try:
with pytest.raises(PyroError):
Pyro5.server.Daemon()
finally:
config.SERVERTYPE = old_servertype
def testRegisterTwice(self):
with Pyro5.server.Daemon(port=0) as d:
o1 = MyObj("object1")
d.register(o1)
with pytest.raises(DaemonError) as x:
d.register(o1)
assert str(x.value) == "object or class already has a Pyro id"
d.unregister(o1)
d.register(o1, "samename")
o2 = MyObj("object2")
with pytest.raises(DaemonError) as x:
d.register(o2, "samename")
assert str(x.value) == "an object or class is already registered with that id"
assert hasattr(o1, "_pyroId")
assert hasattr(o1, "_pyroDaemon")
d.unregister(o1)
assert not(hasattr(o1, "_pyroId"))
assert not(hasattr(o1, "_pyroDaemon"))
o1._pyroId = "FOOBAR"
with pytest.raises(DaemonError) as x:
d.register(o1)
assert str(x.value) == "object or class already has a Pyro id"
o1._pyroId = ""
d.register(o1) # with empty-string _pyroId register should work
def testRegisterTwiceForced(self):
with Pyro5.server.Daemon(port=0) as d:
o1 = MyObj("object1")
d.register(o1, "name1")
d.register(o1, "name2", force=True)
d.register(o1, "name1", force=True)
assert d.objectsById["name1"] is d.objectsById["name2"]
d.unregister(o1)
o1._pyroId = "FOOBAR_ID"
d.register(o1, "newname", force=True)
assert o1._pyroId == "newname"
assert "newname" in d.objectsById
def testRegisterEtc(self):
with Pyro5.server.Daemon(port=0) as d:
assert len(d.objectsById) == 1
o1 = MyObj("object1")
o2 = MyObj("object2")
d.register(o1)
with pytest.raises(DaemonError):
d.register(o2, Pyro5.core.DAEMON_NAME) # cannot use daemon name
d.register(o2, "obj2a")
assert len(d.objectsById) == 3
assert d.objectsById[o1._pyroId] == o1
assert d.objectsById["obj2a"] == o2
assert o2._pyroId == "obj2a"
assert o2._pyroDaemon == d
# test unregister
d.unregister("unexisting_thingie")
with pytest.raises(ValueError):
d.unregister(None)
d.unregister("obj2a")
d.unregister(o1._pyroId)
assert len(d.objectsById) == 1
assert o1._pyroId not in d.objectsById
assert o2._pyroId not in d.objectsById
# test unregister objects
del o2._pyroId
d.register(o2)
objectid = o2._pyroId
assert objectid in d.objectsById
assert len(d.objectsById) == 2
d.unregister(o2)
# no more _pyro attributes must remain after unregistering
for attr in vars(o2):
assert not(attr.startswith("_pyro"))
assert len(d.objectsById) == 1
assert objectid not in d.objectsById
with pytest.raises(DaemonError):
d.unregister([1,2,3])
# test unregister daemon name
d.unregister(Pyro5.core.DAEMON_NAME)
assert Pyro5.core.DAEMON_NAME in d.objectsById
# weird args
w = MyObj("weird")
with pytest.raises(AttributeError):
d.register(None)
with pytest.raises(AttributeError):
d.register(4444)
with pytest.raises(TypeError):
d.register(w, 666)
# uri return value from register
uri = d.register(MyObj("xyz"))
assert isinstance(uri, Pyro5.core.URI)
uri = d.register(MyObj("xyz"), "test.register")
assert uri.object == "test.register"
def testRegisterClass(self):
with Pyro5.server.Daemon(port=0) as d:
assert len(d.objectsById) == 1
d.register(MyObj)
with pytest.raises(DaemonError):
d.register(MyObj)
assert len(d.objectsById) == 2
d.uriFor(MyObj)
# unregister:
d.unregister(MyObj)
assert len(d.objectsById) == 1
def testRegisterUnicode(self):
with Pyro5.server.Daemon(port=0) as d:
myobj1 = MyObj("hello1")
myobj3 = MyObj("hello3")
uri1 = d.register(myobj1, "str_name")
uri3 = d.register(myobj3, "unicode_" + chr(0x20ac))
assert len(d.objectsById) == 3
uri = d.uriFor(myobj1)
assert uri == uri1
_ = Pyro5.client.Proxy(uri)
uri = d.uriFor(myobj3)
assert uri == uri3
_ = Pyro5.client.Proxy(uri)
uri = d.uriFor("str_name")
assert uri == uri1
_ = Pyro5.client.Proxy(uri)
_ = Pyro5.client.Proxy(uri)
uri = d.uriFor("unicode_" + chr(0x20ac))
assert uri == uri3
_ = Pyro5.client.Proxy(uri)
def testDaemonObject(self):
with Pyro5.server.Daemon(port=0) as d:
daemon = Pyro5.server.DaemonObject(d)
obj1 = MyObj("object1")
obj2 = MyObj("object2")
obj3 = MyObj("object2")
d.register(obj1, "obj1")
d.register(obj2, "obj2")
d.register(obj3)
daemon.ping()
registered = daemon.registered()
assert type(registered) is list
assert len(registered) == 4
assert "obj1" in registered
assert "obj2" in registered
assert obj3._pyroId in registered
d.shutdown()
def testUriFor(self):
d = Pyro5.server.Daemon(port=0)
try:
o1 = MyObj("object1")
o2 = MyObj("object2")
with pytest.raises(DaemonError):
d.uriFor(o1)
with pytest.raises(DaemonError):
d.uriFor(o2)
d.register(o1, None)
d.register(o2, "object_two")
o3 = MyObj("object3")
with pytest.raises(DaemonError):
d.uriFor(o3) # can't get an uri for an unregistered object (note: unregistered name is ok)
u1 = d.uriFor(o1)
u2 = d.uriFor(o2._pyroId)
u3 = d.uriFor("unexisting_thingie") # unregistered name is no problem, it's just an uri we're requesting
u4 = d.uriFor(o2)
assert type(u1) == Pyro5.core.URI
assert u1.protocol == "PYRO"
assert u2.protocol == "PYRO"
assert u3.protocol == "PYRO"
assert u4.protocol == "PYRO"
assert u4.object == "object_two"
assert u3 == Pyro5.core.URI("PYRO:unexisting_thingie@" + d.locationStr)
finally:
d.close()
def testDaemonWithStmt(self):
d = Pyro5.server.Daemon()
assert d.transportServer
d.close() # closes the transportserver and sets it to None
assert d.transportServer is None
with Pyro5.server.Daemon() as d:
assert d.transportServer
pass
assert d.transportServer is None
with pytest.raises(ZeroDivisionError):
with Pyro5.server.Daemon() as d:
print(1 // 0) # cause an error
assert d.transportServer is None
d = Pyro5.server.Daemon()
with d:
pass
with pytest.raises(Pyro5.errors.PyroError):
with d:
pass
d.close()
def testRequestloopCondition(self):
with Pyro5.server.Daemon(port=0) as d:
condition = lambda: False
start = time.time()
d.requestLoop(loopCondition=condition) # this should return almost immediately
duration = time.time() - start
assert duration < 0.4
def testSimpleHandshake(self):
conn = ConnectionMock()
with Pyro5.server.Daemon(port=0) as d:
self.sendHandshakeMessage(conn)
success = d._handshake(conn)
assert success
msg = Pyro5.protocol.recv_stub(conn)
assert msg.type == Pyro5.protocol.MSG_CONNECTOK
assert msg.seq == 99
def testHandshakeDenied(self):
class HandshakeFailDaemon(Pyro5.server.Daemon):
def validateHandshake(self, conn, data):
raise ValueError("handshake fail validation error")
conn = ConnectionMock()
with HandshakeFailDaemon(port=0) as d:
self.sendHandshakeMessage(conn)
success = d._handshake(conn)
assert not(success)
msg = Pyro5.protocol.recv_stub(conn)
assert msg.type == Pyro5.protocol.MSG_CONNECTFAIL
assert msg.seq == 99
assert b"handshake fail validation error" in msg.data
with Pyro5.server.Daemon(port=0) as d:
self.sendHandshakeMessage(conn)
success = d._handshake(conn, denied_reason="no way, handshake denied")
assert not(success)
msg = Pyro5.protocol.recv_stub(conn)
assert msg.type == Pyro5.protocol.MSG_CONNECTFAIL
assert msg.seq == 99
assert b"no | |
# File: microbepy/plot/mutation_plot.py
"""Provides plots of mutations for Isolates and Lines."""
from microbepy.common import constants as cn
from microbepy.common.dataframe_sorter import DataframeSorter
from microbepy.common.isolate import Isolate
from microbepy.common import util
from microbepy.correlation import genome_correlation
from microbepy.data.model_data_provider import ModelDataProvider
from microbepy.data import util_data
from microbepy.plot.mutation_cofraction import MutationCofraction
from microbepy.plot.util_plot import PlotParms
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
COLORS = ['red', 'green', 'blue']
SPECIES = {cn.SPECIES_MIX_DVH: "DVH",
cn.SPECIES_MIX_MMP: "MMP",
None: "both"}
FONTSIZE_TITLE = 16
FONTSIZE_LABEL = 8
MAX_LINES = 9
MIN_FRACTION = 0.25
THRESHOLD_FRAC = 0.2
MAX_SIGLVL = 0.01
COLORBAR_MIN = 1.0
COLORBAR_MAX = 4.0
class MutationLinePlot(object):
"""
Plot mutations by occurrences within Lines.
"""
def __init__(self, mutation_column=cn.GGENE_ID, species=None,
is_plot=True):
"""
:param str mutation_column:
:param bool is_plot:
"""
self._mutation_column = mutation_column
self._is_plot = is_plot
self._species = species
self.cofraction = MutationCofraction(species=self._species,
mutation_column=mutation_column)
def plotTransfers(self,
parms=PlotParms(is_initialize=False),
is_unit_fraction = False,
is_cluster_mutations=True):
"""
Does a stacked bar plot of mutation frequency for all transfers.
:param bool is_unit_fraction: round fraction to 1
:param bool is_cluster_mutations: Group similar mutations together
:return pd.DataFrame: row=mutation, col=line + transfer, value is fraction
"""
permitted_mutations = self.cofraction.ordered_mutations
transfers = self.cofraction.transfers
num_transfers = len(transfers)
fig, axes = plt.subplots(nrows=num_transfers, ncols=1)
dfs = []
for idx, transfer in enumerate(transfers):
parms[cn.PLT_YTICKLABELS] = True
if self._species is None:
parms[cn.PLT_TITLE] = "%d" % transfer
else:
parms[cn.PLT_TITLE] = "%s, %d" % (self._species, transfer)
if idx == 0:
parms[cn.PLT_YLABEL] = True
else:
parms[cn.PLT_YLABEL] = False
if idx < num_transfers - 1:
parms[cn.PLT_LEGEND] = False
parms[cn.PLT_XLABEL] = False
parms[cn.PLT_XTICKLABELS] = False
else:
parms[cn.PLT_LEGEND] = True
parms[cn.PLT_XLABEL] = True
parms[cn.PLT_XTICKLABELS] = True
df = self.plotLine(transfer,
parms=parms, is_plot=False,
ax=axes[idx], permitted_mutations=permitted_mutations,
is_unit_fraction=is_unit_fraction)
df[cn.TRANSFER] = transfer
dfs.append(df)
if self._is_plot:
plt.show()
return pd.concat(dfs)
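  # Minimal usage sketch (hypothetical calls; assumes the microbepy data tables
  # are installed and reachable through MutationCofraction):
  #
  #   plotter = MutationLinePlot(species=cn.SPECIES_MIX_DVH, is_plot=False)
  #   df_transfers = plotter.plotTransfers(is_unit_fraction=True)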
def plotLine(self, transfer,
parms=PlotParms(is_initialize=False),
is_unit_fraction=False,
is_plot=None, ax=None, permitted_mutations=None):
"""
Does a stacked bar plot of mutation frequency by line
with colors
:params int transfer:
:params PlotParms parms:
:params Axis ax: axis to use in plot
:param list-str permitted_mutations: to use and how they
are ordered if None, then use alphabetical order
:param bool is_unit_fraction: round non-zero fraction to 1
:return pd.DataFrame: row=mutation, col=line, value is fraction
"""
if is_plot is None:
is_plot = self._is_plot
parms.setTrueIfAbsent(cn.PLT_XLABEL)
parms.setTrueIfAbsent(cn.PLT_XTICKLABELS)
#
df_plot = self.cofraction.makeLineDF(
permitted_mutations=permitted_mutations,
transfer=transfer)
if is_unit_fraction:
df_plot = df_plot.applymap(
lambda v: 1 if v> MIN_FRACTION else v)
# Do the plot
if not cn.PLT_FIGSIZE in parms:
parms[cn.PLT_FIGSIZE] = (12, 8)
if ax is None:
ax = df_plot.plot(kind='bar', stacked=True,
figsize=parms[cn.PLT_FIGSIZE], legend=None)
else:
df_plot.plot(kind='bar', stacked=True,
legend=None, ax=ax, figsize=parms[cn.PLT_FIGSIZE])
ax.set_xlabel("", fontsize=FONTSIZE_LABEL) # Eliminate implicit label
if parms.isFalse(cn.PLT_XTICKLABELS):
labels = ax.get_xticklabels()
new_labels = np.repeat("", len(labels))
ax.set_xticklabels(new_labels)
if parms.isFalse(cn.PLT_YTICKLABELS):
labels = ax.get_yticklabels()
new_labels = np.repeat("", len(labels))
ax.set_yticklabels(new_labels)
if cn.PLT_TITLE in parms:
title = parms[cn.PLT_TITLE]
else:
title = "%s Mutations" % SPECIES[self._species]
xpos = int(len(df_plot)*0.5)
ypos = MAX_LINES - 3
ax.text(xpos, ypos, title, fontsize=FONTSIZE_TITLE)
ax.set_ylim([0, MAX_LINES])
if parms.isTrue(cn.PLT_YLABEL):
if is_unit_fraction:
label = "No. Lines"
else:
label = "Fraction"
ax.set_ylabel(label , fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_XLABEL):
ax.set_xlabel(self._mutation_column, fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_LEGEND):
ax.legend(loc=(1,2))
#ax.legend()
if is_plot:
plt.show()
return df_plot
def _makeMutationSiglvlMatrix(self,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None, min_fraction=MIN_FRACTION):
"""
Creates a significance level matrix for mutations.
:param int transfer: transfer time for row mutations
:param int other_transfer: transfer time for column mutations
:param float min_fraction: minimum fractional occurrence of
a mutation within a line for it to be considered
:return pd.DataFrame: row index and columns are mutations
"""
    def makeDF(transfer):
      df_line = self.cofraction.makeLineDF(transfer=transfer)
      # Treat missing fractions as 0, then binarize on the minimum fraction
      df_binary = df_line.applymap(
          lambda v: 0 if np.isnan(v) else v)
      df_binary = df_binary.applymap(
          lambda v: 1.0 if v > min_fraction else 0)
      return df_binary.transpose()
#
if other_transfer is None:
other_transfer = transfer
#
df_binary_rows = makeDF(transfer)
df_binary_columns = makeDF(other_transfer)
df_matrix = genome_correlation.makeSiglvlDF(df_binary_rows,
df_other=df_binary_columns)
return df_matrix
def _plotSiglvlDF(self, transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
max_siglvl=MAX_SIGLVL):
"""
    Constructs the dataframe used for the heatmap.
    :param int transfer: transfer time for row mutations
    :param int other_transfer: transfer time for column mutations
    :param float max_siglvl: maximum significance level to display
:return pd.DataFrame: mutations, mutations,
values are -log10 significance level
"""
df_matrix = self._makeMutationSiglvlMatrix(transfer=transfer,
other_transfer=other_transfer)
sorter = DataframeSorter(df_matrix)
df_sort = sorter.orderBoth()
#
df_transformed = df_sort.applymap(lambda v: np.log10(v))
df_transformed = df_transformed.applymap(lambda v: -v)
ubound = -np.log10(max_siglvl)
df_plot = df_transformed.applymap(
lambda v: np.nan if v < ubound else v)
sorter = DataframeSorter(df_plot)
df_plot = sorter.deleteNanRowsAndColumns()
return df_plot
def plotCofractions(self, is_time_lag=False,
threshold_frac=THRESHOLD_FRAC,
is_difference_frac=False,
is_differenced=False,
is_compress=False,
parms=PlotParms(), **kwargs):
"""
    Creates subplots of the fraction of lines in which mutations co-occur.
:param bool is_time_lag: construct time lag subplots
:param bool is_differenced: Computes the difference in
count fractions
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
transfer=transfer, other_transfer=other_transfer,
threshold_frac=threshold_frac)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
return df
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms, heat_range=[0, 1.0], **kwargs)
def plotSiglvls(self, is_time_lag=False, max_siglvl=MAX_SIGLVL,
parms=PlotParms(), **kwargs):
"""
    Creates subplots of mutation correlation significance levels.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
return self._plotSiglvlDF(transfer=transfer,
max_siglvl=max_siglvl,
other_transfer=other_transfer)
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
**kwargs)
def _plotTransfers(self, funcDF, is_time_lag,
parms=PlotParms(), **kwargs):
"""
    Creates subplots of mutation relationships across transfers.
:param Function funcDF: has kwargs transfer, other_transfer;
returns a dataframe of mutations as columns and index;
values are used in the heatmap.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
NCOLS = 3
plot_pos = {1:1, 2:3, 3:4, 4:6}
NPLOTS = 6
transfers = self.cofraction.transfers
if is_time_lag:
pairs = [p for p in zip(transfers[:-1], transfers[1:])]
else:
pairs = [p for p in zip(transfers[:-1], transfers[:-1])]
#
# Calculate the column order
df = funcDF(transfer=cn.TRANSFER_1000G,
other_transfer=cn.TRANSFER_1000G)
df = df.fillna(0)
# Set up for plots
nrows = 2 if (len(pairs) == 4) else 3
fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE])
result = {}
for idx, pair in enumerate(pairs):
idx += 1
ax = fig.add_subplot(nrows, NCOLS, plot_pos[idx])
if idx < len(pairs):
is_plot = False
else:
is_plot = True
if idx in [1, 2, 5]:
parms[cn.PLT_XAXISTICKTOP] = True
else:
parms[cn.PLT_XAXISTICKTOP] = False
if idx == 4:
parms[cn.PLT_COLORBAR] = True
else:
parms[cn.PLT_COLORBAR] = False
transfer = pair[0]
other_transfer = pair[1]
df = funcDF(transfer=transfer, other_transfer=other_transfer)
df = df.applymap(lambda v: np.nan if v == 0 else v)
self._plotTransferCompare(df,
transfer=transfer, other_transfer=other_transfer,
ordered_columns=self.cofraction.ordered_mutations,
is_center_colorbar=True,
fig=fig, ax=ax, parms=parms, is_plot=is_plot, **kwargs)
result[pair] = df
return result
def plotSiglvl(self, max_siglvl=MAX_SIGLVL,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_center_colorbar = True,
**kwargs):
"""
    Constructs a heatmap of the mutation co-occurrence significance
levels.
:param float max_siglvl: maximum significance level
:return pd.DataFrame: columns, rows are mutations
"""
df_plot = self._plotSiglvlDF(transfer=transfer,
other_transfer=other_transfer,
max_siglvl=max_siglvl)
self._plotTransferCompare(df_plot,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
ordered_mutations=self.cofraction.ordered_mutations,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
**kwargs)
return df_plot
def plotCofraction(self,
threshold_frac=THRESHOLD_FRAC,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_difference_frac=False,
is_differenced=False,
is_center_colorbar=True,
is_compress=False,
parms=PlotParms(),
**kwargs):
"""
    Constructs a heatmap of the mutation co-occurrence fractions.
:param int transfer: Transfer for which plot is done
:param bool is_differenced: Computes the difference in
count fractions
:param bool is_compress: Eliminate rows/columns
with 0 values
:return pd.DataFrame: columns, rows are mutations
"""
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
threshold_frac=threshold_frac,
transfer=transfer, other_transfer=other_transfer,
**kwargs)
df = df.applymap(lambda v: np.nan
if np.abs(v) < threshold_frac else v)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer, **kwargs)
df = df.applymap(lambda v: np.nan if v < threshold_frac else v)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
is_include_missing_mutations = False
else:
is_include_missing_mutations = True
ordered_columns = self.cofraction.ordered_mutations
self._plotTransferCompare(df,
heat_range=[0, 1.0],
ordered_columns=ordered_columns,
parms=parms,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
is_include_missing_mutations=is_include_missing_mutations,
**kwargs)
return df
def _plotTransferCompare(self,
df_plot,
heat_range,
ordered_columns=None,
is_center_colorbar=True,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
ax=None,
fig=None,
is_include_missing_mutations=True,
parms=PlotParms(),
is_plot=None):
"""
Constructs a heatmap comparing values for mutations from two transfers.
:param pd.DataFrame df_plot: index and columns are mutations;
values are plotted on the heatmap
:param list-str ordered_columns: order in which columns appear
:param bool is_center_colorbar: center the colorbar in the plot
    :param tuple heat_range: (min, max) range of values shown on the heatmap
:param int transfer:
:param int other_transfer: Allow comparisons across time
:param Matplotlib.Axes ax:
:param PlotParms parms: Parameters for the plot
:param bool is_plot: Overrides constructor plotting directive
:param bool is_include_missing_mutations:
"""
# Repo: haybarry/micropython-radio
# async_radio_pickle
# A protocol for exchanging arbitrary Python objects between a pair of nRF24L01+ radios
# Uses uasyncio to achieve nonblocking behaviour (at the expense of speed).
import pyb
import pickle
import gc
import uasyncio as asyncio
from micropython import const
from nrf24l01 import NRF24L01, POWER_3, SPEED_250K
def dolittle(*_):  # Null callback, equivalent to lambda *_: None
pass
COMMAND = const(0) # Byte 0 of message is command
BYTECOUNT = const(1) # Count of data bytes
MSGSTART = const(2)
PAYLOAD_SIZE = const(32)
MAXLEN = const(30) # Space left for data
OK = const(1) # Commands
RESEND = const(2)
BYE = const(3)
START_SLAVE = const(4)
TXDONE = const(0x20) # Bit set for last message
MASK = const(0xdf)
class RadioSetup(object): # Configuration for an nRF24L01 radio
channel = 99 # Necessarily shared by both instances
def __init__(self, *, spi_no, csn_pin, ce_pin):# May differ
self.spi_no = spi_no
self.ce_pin = ce_pin
self.csn_pin = csn_pin
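# A minimal configuration sketch (SPI bus and pin names are assumptions; use
# the wiring of your own board):
#
#   config_v1 = RadioSetup(spi_no=1, csn_pin='X5', ce_pin='X4')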
async def _garbage_collect():
while True:
await asyncio.sleep_ms(500)
gc.collect()
gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())
class TxQueue(): # Transmit queue returns the default
def __init__(self, size): # transmit object (None) if no data.
self.size = size
self.q =[]
def put(self, data):
if not self.txrdy():
return False
self.q.append(data)
return True
def get(self):
if len(self.q):
return self.q.pop(0)
def txrdy(self):
return len(self.q) < self.size
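# Usage sketch for TxQueue (illustrative): put() refuses new items once the
# queue is full, and get() returns the default object (None) when empty.
#
#   txq = TxQueue(4)
#   txq.put({'cmd': 'ping'})     # -> True, item queued
#   txq.get()                    # -> {'cmd': 'ping'}
#   txq.get()                    # -> None, nothing left to send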
class TxMessage(object):
def __init__(self):
self.outbuf = bytearray(PAYLOAD_SIZE) # Buffer with 2 data bytes followed by message
self.msgbytes = None # bytes object holding message to send
self.txleft = 0 # No of bytes still to send
self.offset = 0 # offset into msgbytes
self.bytes_tx = 0 # message length of current transmission
def initialise(self, objsend): # Init with an object for transmission
self.msgbytes = pickle.dumps(objsend).encode('utf8')
self.txleft = len(self.msgbytes)
self.offset = 0
self.bytes_tx = 0
def create_msg_block(self): # Populate buffer with a fragment
bytes_tx = min(self.txleft, MAXLEN) # No. of bytes to send this time
if bytes_tx: # If there are any, copy to output buffer
self.outbuf[MSGSTART : MSGSTART + bytes_tx] = self.msgbytes[self.offset : self.offset + bytes_tx]
self.bytes_tx = bytes_tx
def next_msg(self): # set up next message
self.offset += self.bytes_tx # add no of bytes sent
self.txleft -= self.bytes_tx
return self.txleft <= 0 # True if last packet
def set_cmd(self, cmd): # Prepare message for transmission
self.outbuf[COMMAND] = cmd # Bye and Resend request have no data
self.outbuf[BYTECOUNT] = 0 if cmd == RESEND or cmd == BYE else self.bytes_tx
if self.txleft <= MAXLEN:
self.outbuf[COMMAND] |= TXDONE
class TwoWayRadio(NRF24L01):
pipes = (b'\xf0\xf0\xf0\xf0\xe1', b'\xf0\xf0\xf0\xf0\xd2')
max_resend_requests = 1 # No. of times receiver requests retransmission
timeout = 200 # No. of mS tx and rx wait for each other
def __init__(self, config, master):
super().__init__(pyb.SPI(config.spi_no), pyb.Pin(config.csn_pin), pyb.Pin(config.ce_pin), config.channel, PAYLOAD_SIZE)
self._master = master
if master:
self.open_tx_pipe(TwoWayRadio.pipes[0])
self.open_rx_pipe(1, TwoWayRadio.pipes[1])
else:
self.open_tx_pipe(TwoWayRadio.pipes[1])
self.open_rx_pipe(1, TwoWayRadio.pipes[0])
self.set_power_speed(POWER_3, SPEED_250K) # Best range for point to point links
self.start_listening()
self.txmsg = TxMessage() # Data for transmission
self.inlist = [] # List of received bytes objects
self.failcount = 0 # DEBUG
# Asynchronous send. Raises no errors: returns status. Waits for completion subject to timeout.
# Return is immediate if result is success or failure.
async def as_send(self, timeout=None):
self.send_start(self.txmsg.outbuf) # Non blocking start TX
if timeout is None:
timeout = self.timeout
for _ in range(max(timeout // 10, 1)):
await asyncio.sleep_ms(10)
result = self.send_done() # 1 == success, 2 == fail (None == in progress)
if result is not None:
break
self.start_listening()
return result
# Asynchronously send a message block
async def send_msg_block(self, cmd):
self.stop_listening() # Flush buffers
self.txmsg.set_cmd(cmd) # Prepare message block for transmission
res = await self.as_send() # Attempt to send
return res == 1 # False on fail or timeout.
#start = utime.ticks_ms()
#while utime.ticks_diff(utime.ticks_ms(), start) < self.timeout:
## On timeout as_send() returns None. On fail (2) or success (1) returns immediately.
#res = await self.as_send() # loop repeatedly on fail
#if res == 1: # Success.
#self.start_listening()
#break
#if res == 2:
#await asyncio.sleep_ms(self.timeout // 5) # Possible RF interference?
#else: # Timeout
#self.start_listening()
#return False
#return True
def parse(self):
strpickle = ''.join(self.inlist)
if strpickle: # data was sent
return pickle.loads(strpickle) # Throws exception on fail
# Wait for a message. If forever (i.e. slave waiting for START_SLAVE) blocks until msg received.
# Returns command received or 0 on failure
async def await_message(self, allowed_cmds, forever=False, rxdone=False):
iterations = self.timeout // 10
while not self.any():
if not forever:
if iterations <= 0:
return 0
iterations -= 1
await asyncio.sleep_ms(10)
while self.any(): # Discard all but latest message
inbuf = self.recv()
await asyncio.sleep_ms(10)
if inbuf is None or len(inbuf) < MSGSTART:
return 0
cmd = inbuf[0] & MASK
if cmd not in allowed_cmds:
return 0 # Unexpected response
nbytes = inbuf[BYTECOUNT] # Received bytes
if nbytes and not rxdone: # Can receive zero length messages (responses to tx)
self.inlist.append(inbuf[MSGSTART: MSGSTART + nbytes].decode('utf8')) # List of received strings
return inbuf[0]
async def goodbye(self): # Send BYE. No exceptions raised. No RX expected.
self.stop_listening()
self.txmsg.set_cmd(BYE)
await self.as_send(timeout = 20) # return: don't care about failure
self.start_listening()
# Core protocol. Returns status, data.
# On error - timeout or parse failure - returns False, None
# Success returns True, received object which may be None.
async def run_protocol(self):
txdone = False
rxdone = False
if self._master:
self.inlist = [] # Master: initialise RX
send_cmd = START_SLAVE
        else:  # Slave waits for master, discarding messages. It
            started = False  # sends nothing until it gets START_SLAVE
while not started:
self.inlist = [] # Discard any previous bad message
cmd_raw = await self.await_message((START_SLAVE,), forever=True)
if cmd_raw:
started = True
rxdone = cmd_raw & TXDONE
send_cmd = OK # Always send OK before no data BYE command
# Symmetrical from here
while not (txdone and rxdone): # Continue while there are bytes to send or receive
self.txmsg.create_msg_block()
resend_rq_count = 0 # No.of resend requests sent: reset for each block
cmd = 0
cmd_raw = 0
sent = False
while not sent: # Send and receive a message block until success
if not await self.send_msg_block(send_cmd): # Timeout handled by caller
return False, None
while resend_rq_count <= self.max_resend_requests: # Send the output buffer until success
await asyncio.sleep_ms(10)
                    cmd_raw = await self.await_message((OK, RESEND, BYE), rxdone=rxdone)  # rxdone handles case where BYE missed
# print('Await with rxdone = ', rxdone, 'got', cmd_raw)
if not cmd_raw:
resend_rq_count += 1
send_cmd = RESEND # Request resend (with a zero length message)
continue
cmd = cmd_raw & MASK # Clear TXDONE bit
if cmd == BYE: # Normal end to protocol: target has sent BYE
# print('Success. BYE received. Inlist:', self.inlist)
try: # no response is required. Quit protocol.
return True, self.parse()
except: # Parse fail. Should never occur.
# print('Parse fail 1.')
self.failcount += 1 # DEBUG
return False, None
break # Got OK or RESEND
else: # Retransmissions have failed
return False, None
sent = cmd == OK # neither we nor the slave timed out
# If slave requested retransmission we loop again
send_cmd = OK # prepare for it in case we do: we repeat the data with OK
txdone = self.txmsg.next_msg()
rxdone = rxdone or cmd_raw & TXDONE
# print('Txdone: {} rxdone: {} inlist: {}'.format(txdone, rxdone, self.inlist))
try:
result = self.parse()
except:
#print('Parse fail 2.')
self.failcount += 1 # DEBUG
return False, None
await self.goodbye() # Over and out: no response expected.
# print('BYE sent.')
return True, result
async def exchange(self, objtx):
self.txmsg.initialise(objtx) # Set up TX message object
status, objrx = await self.run_protocol()
if not status:
if self._master:
self.stop_listening() # Flush buffers. Master doesn't listen.
return status, objrx
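    # Illustrative master-side driver (not part of the class; assumes a running
    # uasyncio loop and a picklable payload):
    #
    #   radio = TwoWayRadio(config, master=True)
    #   async def poll():
    #       while True:
    #           ok, reply = await radio.exchange({'uptime_ms': pyb.millis()})
    #           if ok:
    #               print('slave replied:', reply)
    #           await asyncio.sleep_ms(1000)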
# TODO callback args
class Channel():
# latency = 1000
def __init__(self, config, master, *, txqsize=20,
txcb = dolittle, rxcb=dolittle, statecb=dolittle):
self._radio = TwoWayRadio(config, master)
self._master = master
self._txq = TxQueue(txqsize)
self._txcb = txcb # User callbacks
self._rxcb = rxcb
self._statecb = statecb
self._link_is_up = False # Ensure callback occurs when link activates
loop = asyncio.get_event_loop()
loop.create_task(self._run())
loop.create_task(_garbage_collect())
@property
def link(self):
return self._link_is_up
@link.setter
def link(self, value):
if self._link_is_up != value:
self._statecb(value)
self._link_is_up = value
async def _run(self):
radio = self._radio
        msg_delay = 100
import pickle
import numpy as np
import networkx as nx
from sklearn.metrics import pairwise_distances
import sys
sys.path.append("...")
from models.circuit_generators import *
def read_pickle(x):
with (open(x, "rb")) as f:
data = pickle.load(f)
return data
X = np.load('al_synapses2.npy')
# al_res = np.load('res1_al.npy', allow_pickle=True)
n_data = np.load('al_references.npy')
n_data_dict = {}
for i in range(n_data.shape[0]):
n_data_dict[n_data[i,1]] = n_data[i,0]
ORN_PN_pos = read_pickle('ORN_PN_pos.pickle')
ORN_LN_pos = read_pickle('ORN_LN_pos.pickle')
LN_PN_pos = read_pickle('LN_PN_pos.pickle')
PN_LN_pos = read_pickle('PN_LN_pos.pickle')
LN_ORN_pos = read_pickle('LN_ORN_pos.pickle')
LN_LN_pos = read_pickle('LN_LN_pos.pickle')
uniques = list(np.unique(X[:,3]))
print('uniques',len(uniques))
ORN_uniques = [i for i in uniques if 'ORN' in i]
PN_uniques = [i for i in uniques if 'PN' in i]
LN_uniques = [i for i in uniques if 'LN' in i]
print('ORN uniques',len(ORN_uniques))
print('PN uniques',len(PN_uniques))
print('LN uniques',len(LN_uniques))
def get_LNs(glom = 'DL5'):
found_LNs = []
for i in [i for i in ORN_uniques if glom in i]:
for j in LN_uniques:
if j in ORN_LN_pos[i]:
found_LNs.append(j)
for i in [i for i in PN_uniques if glom in i]:
for j in LN_uniques:
if j in PN_LN_pos[i]:
found_LNs.append(j)
for j in LN_uniques:
for i in [i for i in ORN_uniques if glom in i]:
if i in LN_ORN_pos[j]:
found_LNs.append(j)
for j in LN_uniques:
for i in [i for i in PN_uniques if glom in i]:
if i in LN_PN_pos[j]:
found_LNs.append(j)
found_LNs = list(set(found_LNs))
return found_LNs
DL5_LNs = get_LNs('DL5')
DC1_LNs = get_LNs('DC1')
only_DL5_LNs = [i for i in DL5_LNs if i not in DC1_LNs]
only_DC1_LNs = [i for i in DC1_LNs if i not in DL5_LNs]
remnant_LNs = [i for i in LN_uniques if i not in only_DL5_LNs and i not in only_DC1_LNs]
def setup_spiking_SPU(ORN_PN_gain = 1., ORN_LN_gain = 1., LN_PN_gain = 1.):
"""Holds computational models and parameters for an antennal lobe model, and generates it using SPUs."""
def gen_ORN(G, x):
params = dict(
br=1.0,
dr=10.0,
gamma=0.138,
a1=45.0,
b1=0.8,
a2=199.574,
b2=51.887,
a3=2.539,
b3=0.9096,
kappa=9593.9,
p=1.0,
c=0.06546,
Imax=150.159,
)
G.add_node(x+'_OTP', **{"class": "OTP"}, **params)
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x+'_RN', **{"class": "NoisyConnorStevens"}, **params)
G.add_edge(x+'_OTP', x+'_RN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/10000.*ORN_PN_gain,
)
G.add_node(x+'_ARN', **{"class": "Alpha"}, **params)
G.add_edge(x+'_RN', x+'_ARN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/1000.*ORN_LN_gain,
)
G.add_node(x+'_ARN_LN', **{"class": "Alpha"}, **params)
G.add_edge(x+'_RN', x+'_ARN_LN')
def gen_PN(G, x):
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x+'_PN', **{"class": "NoisyConnorStevens"}, **params)
def gen_LN(G, x):
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x, **{"class": "NoisyConnorStevens"}, **params)
def ORN_PN_LN_ORN_interaction(G,x,y,z,i,j, dist_gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./100. * dist_gain,
)
G.add_node(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "Alpha"}, **params)
G.add_edge(x, x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j))
params = dict(
dummy=0.0,
)
G.add_node(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "PreLN"}, **params) # LN>(LN>ORN)
G.add_edge(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j))
#G.add_node(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
#G.add_edge(y+'_ARN', x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j))
#G.add_edge(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), z)
G.add_edge(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), y+'_'+z+'_AT'+'_'+str(j)) # (LN>ORN) to (ORN>PN)
return [x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), 'g']
def ORN_ORN_LN_interaction(G,x,y,z,i,j, dist_gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./100. * dist_gain,
)
G.add_node(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "Alpha"}, **params)
G.add_edge(x, x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j))
params = dict(
dummy=0.0,
)
G.add_node(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "PreLN"}, **params) # LN>(LN>ORN)
G.add_edge(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j))
#G.add_node(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
#G.add_edge(y+'_ARN', x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j))
#G.add_edge(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), z)
G.add_edge(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), y+'_'+z+'_AT'+'_'+str(j)) # (LN>ORN) to (ORN>PN)
return [y+'_'+z+'_AT'+'_'+str(j), 'g']
def gen_ORNPN_syn(G, x, y, i, gain=1.):
params = dict(
bias=1.0,
gain=1.0,
)
G.add_node(x+'_'+y+'_AT'+'_'+str(i), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
G.add_edge(x+'_ARN', x+'_'+y+'_AT'+'_'+str(i))
G.add_edge(x+'_'+y+'_AT'+'_'+str(i), y+'_PN')
return [x+'_'+y+'_AT'+'_'+str(i), 'I']
def gen_ORNLN_syn(G, x, y, i, gain=1.):
params = dict(
bias=1.0,
gain=1.0,
)
G.add_node(x+'_'+y+'_AT'+'_'+str(i), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
G.add_edge(x+'_ARN_LN', x+'_'+y+'_AT'+'_'+str(i))
G.add_edge(x+'_'+y+'_AT'+'_'+str(i), y)
return [x+'_'+y+'_AT'+'_'+str(i), 'I']
def gen_regsyn(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./10000.*gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x+'_PN', x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y)
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
def gen_regsyn_PN(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./10000.*gain*LN_PN_gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x, x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y+'_PN')
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
def gen_regsyn_LN(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./1000.*gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x+'_RN', x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y)
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
neuron_models = {'ORNs': gen_ORN, 'PNs': gen_PN, 'LNs': gen_LN}
synapse_models = {'ORNs-LNs': gen_ORNLN_syn, 'LNs-PNs': gen_regsyn_PN, 'PNs-LNs': gen_regsyn, 'ORNs-PNs': gen_ORNPN_syn}
interaction_models = {'LNs-ORNs-PNs': ORN_PN_LN_ORN_interaction, 'LNs-ORNs-LNs': ORN_PN_LN_ORN_interaction}
AL_SPU = SPU()
glomerulus_SCC = SCC({'ORNs-LNs': gen_ORNLN_syn, 'ORNs-PNs': gen_ORNPN_syn}, {'LNs-ORNs-PNs': ORN_PN_LN_ORN_interaction, 'LNs-ORNs-LNs': ORN_PN_LN_ORN_interaction})
LN_PN_SCC = SCC({'LNs-PNs': gen_regsyn_PN, 'PNs-LNs': gen_regsyn}, {})
AL_SPU.add(glomerulus_SCC)
AL_SPU.add(LN_PN_SCC)
AL_SPU.add_neuron_models({'ORNs': gen_ORN, 'PNs': gen_PN, 'LNs': gen_LN})
return AL_SPU
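# Construction sketch (gain values are illustrative; the returned SPU object is
# consumed by the downstream circuit-generator tooling):
#
#   al_spu = setup_spiking_SPU(ORN_PN_gain=1.0, ORN_LN_gain=1.0, LN_PN_gain=1.0)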
def setup_spiking_beta(ORN_PN_gain = 1., ORN_LN_gain = 1., LN_PN_gain = 1., PN_LN_gain = 1., interaction_gain = 1., LN_LN_gain=1.):
"""Holds computational models and parameters for an antennal lobe model
(for the beta release of the package)."""
def gen_ORN(G, x):
params = dict(
br=1.0,
dr=10.0,
gamma=0.138,
a1=45.0,
b1=0.8,
a2=199.574,
b2=51.887,
a3=2.539,
b3=0.9096,
kappa=9593.9,
p=1.0,
c=0.06546,
Imax=150.159,
)
G.add_node(x+'_OTP', **{"class": "OTP"}, **params)
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x+'_RN', **{"class": "NoisyConnorStevens"}, **params)
G.add_edge(x+'_OTP', x+'_RN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/10000.*ORN_PN_gain,
)
G.add_node(x+'_ARN', **{"class": "Alpha"}, **params)
G.add_edge(x+'_RN', x+'_ARN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/1000.*ORN_LN_gain,
)
G.add_node(x+'_ARN_LN', **{"class": "Alpha"}, **params)
G.add_edge(x+'_RN', x+'_ARN_LN')
def gen_PN(G, x):
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x+'_PN', **{"class": "NoisyConnorStevens"}, **params)
def gen_LN(G, x):
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x, **{"class": "NoisyConnorStevens"}, **params)
def ORN_PN_LN_ORN_interaction(G,x,y,z,i,j, dist_gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./100. * dist_gain * interaction_gain,
)
G.add_node(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "Alpha"}, **params)
G.add_edge(x, x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j))
params = dict(
dummy=0.0,
)
G.add_node(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "PreLN"}, **params) # LN>(LN>ORN)
G.add_edge(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j))
#G.add_node(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
#G.add_edge(y+'_ARN', x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j))
#G.add_edge(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), z)
G.add_edge(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), y+'_'+z+'_AT'+'_'+str(j)) # (LN>ORN) to (ORN>PN)
return [x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), 'g']
def ORN_ORN_LN_interaction(G,x,y,z,i,j, dist_gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./100. * dist_gain * interaction_gain,
)
G.add_node(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "Alpha"}, **params)
G.add_edge(x, x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j))
params = dict(
dummy=0.0,
)
G.add_node(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), **{"class": "PreLN"}, **params) # LN>(LN>ORN)
G.add_edge(x+'_to_'+y+'_PreLNAlpha'+'_'+str(z)+'_'+str(i)+'_'+str(j), x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j))
#G.add_node(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
#G.add_edge(y+'_ARN', x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j))
#G.add_edge(x+'_'+y+'_to_'+z+'_AT'+'_'+str(i)+'_'+str(j), z)
G.add_edge(x+'_to_'+y+'_PreLN'+'_'+str(z)+'_'+str(i)+'_'+str(j), y+'_'+z+'_AT'+'_'+str(j)) # (LN>ORN) to (ORN>PN)
return [y+'_'+z+'_AT'+'_'+str(j), 'g']
def gen_ORNPN_syn(G, x, y, i, gain=1.):
params = dict(
bias=1.0,
gain=1. * gain,
)
G.add_node(x+'_'+y+'_AT'+'_'+str(i), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
G.add_edge(x+'_ARN', x+'_'+y+'_AT'+'_'+str(i))
G.add_edge(x+'_'+y+'_AT'+'_'+str(i), y+'_PN')
return [x+'_'+y+'_AT'+'_'+str(i), 'I']
def gen_ORNLN_syn(G, x, y, i, gain=1.):
params = dict(
bias=1.0,
gain=1. * gain,
)
G.add_node(x+'_'+y+'_AT'+'_'+str(i), **{"class": "OSNAxt2"}, **params) # ORN>(ORN>PN)
G.add_edge(x+'_ARN_LN', x+'_'+y+'_AT'+'_'+str(i))
G.add_edge(x+'_'+y+'_AT'+'_'+str(i), y)
return [x+'_'+y+'_AT'+'_'+str(i), 'I']
def gen_regsyn(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./10000.*gain*PN_LN_gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x+'_PN', x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y)
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
def gen_regsyn_PN(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./10000.*gain*LN_PN_gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x, x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y+'_PN')
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
def gen_regsyn_LN(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./1000.*gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x+'_RN', x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y)
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
def gen_regsyn_LN2(G, x, y, i, gain=1.):
params = dict(
ar=12.5,
ad=12.19,
gmax=1./10000.*gain*LN_LN_gain,
)
G.add_node(x+'_to_'+y+'_Alpha_'+str(i), **{"class": "Alpha"}, **params)
params = dict(
bias=1.0,
gain=-1.,
)
G.add_node(x+'_to_'+y+'_Converter_'+str(i), **{"class": "OSNAxt2"}, **params)
G.add_edge(x, x+'_to_'+y+'_Alpha_'+str(i))
G.add_edge(x+'_to_'+y+'_Alpha_'+str(i), x+'_to_'+y+'_Converter_'+str(i))
G.add_edge(x+'_to_'+y+'_Converter_'+str(i), y)
return [x+'_to_'+y+'_Alpha_'+str(i), 'g']
neuron_models = {'ORNs': gen_ORN, 'PNs': gen_PN, 'LNs': gen_LN}
synapse_models = {'ORNs-LNs': gen_ORNLN_syn,
'LNs-PNs': gen_regsyn_PN, 'PNs-LNs': gen_regsyn, # Synaptic Feedback Loop
'ORNs-PNs': gen_ORNPN_syn,
'LNs-LNs': gen_regsyn_LN2}
interaction_models = {'LNs-ORNs-PNs': ORN_PN_LN_ORN_interaction,
'LNs-ORNs-LNs': ORN_PN_LN_ORN_interaction} # Feedback Loop with Interactions
return neuron_models, synapse_models, interaction_models
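# Sketch of wiring a tiny circuit by hand from the returned generator
# dictionaries (node names are made up for illustration; the full antennal
# lobe network is normally assembled by the circuit-generator utilities):
#
#   neuron_models, synapse_models, interaction_models = setup_spiking_beta()
#   G = nx.DiGraph()
#   neuron_models['ORNs'](G, 'ORN_DL5_0')
#   neuron_models['PNs'](G, 'PN_DL5_0')
#   synapse_models['ORNs-PNs'](G, 'ORN_DL5_0', 'PN_DL5_0', 0)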
def setup_spiking_default(ORN_PN_gain = 1., ORN_LN_gain = 1., LN_PN_gain = 1., PN_LN_gain = 1., interaction_gain = 1., LN_LN_gain=1., exLN_LN_gain=1., LN_exLN_gain=1.):
# Default setup for the ORN-PN-LN-ORN network
def gen_ORN(G, x):
params = dict(
br=1.0,
dr=10.0,
gamma=0.138,
a1=45.0,
b1=0.8,
a2=199.574,
b2=51.887,
a3=2.539,
b3=0.9096,
kappa=9593.9,
p=1.0,
c=0.06546,
Imax=150.159,
)
G.add_node(x+'_OTP', **{"class": "OTP"}, **params)
params = dict(
ms=-5.3,
ns=-4.3,
hs=-12.0,
gNa=120.0,
gK=20.0,
gL=0.3,
ga=47.7,
ENa=55.0,
EK=-72.0,
EL=-17.0,
Ea=-75.0,
sigma=0.00,
refperiod=0.0,
)
G.add_node(x+'_RN', **{"class": "NoisyConnorStevens"}, **params)
G.add_edge(x+'_OTP', x+'_RN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/10000.*ORN_PN_gain,
)
G.add_node(x+'_ARN', **{"class": "Alpha"}, **params)
G.add_edge(x+'_RN', x+'_ARN')
params = dict(
ar=12.5,
ad=12.19,
gmax=0.6/1000.*ORN_LN_gain,
)
G.add_node(x+'_ARN_LN', **{"class": | |
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import cv2
from os import listdir
from os.path import isfile, join
import PIL.Image
def listfiles(pathname):
return [f for f in listdir(pathname) if isfile(join(pathname, f))]
def bytesread(filename):
with open(filename, 'rb') as file:
return file.read()
def imread(filename, target_shape=None, interpolation=cv2.INTER_AREA):
"""
Loads an image from disk
:param filename: the path to the image file to load
:param target_shape: optional resizing to the specified shape
:param interpolation: interpolation method. Defaults to cv2.INTER_AREA which is recommended for downsizing.
:return: the loaded image in RGB format
"""
im = cv2.imread(filename)
    if im is None:
        raise IOError('Error loading image. Check path: {}'.format(filename))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # OpenCV loads images in BGR format
if target_shape is not None:
assert len(target_shape) == 2, 'Parameter target_shape must be 2-dimensional'
im = cv2.resize(im, target_shape[::-1], interpolation=interpolation)
return im
def imwrite(filename, im):
"""
Saves an image to disk
    :param filename: the path to the image file to save
    :param im: the image to save, in RGB format
    :return: None
"""
bgr = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, bgr)
def bgr2rgb(im):
"""
Converts a BGR image to RGB.
:param im: the BGR image to transform
:return: the image in RGB mode
"""
    return np.ascontiguousarray(im[..., ::-1])  # reverse the channel order
def rgb2bgr(im):
"""
    Converts an RGB image to BGR.
:param im: the RGB image to transform
:return: the image in BGR mode
"""
    return np.ascontiguousarray(im[..., ::-1])  # reverse the channel order
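# Quick sanity check for the channel-order helpers (uses a small random RGB
# array; reversing the last axis twice recovers the original image):
#
#   rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
#   assert np.array_equal(bgr2rgb(rgb2bgr(rgb)), rgb)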
def imhist(im):
"""
Returns a color histogram of the given image.
:param im: a numpy array with shape (rows, columns, 3)
:return a list of colors and a list of pixel counters for each color
"""
return np.unique(im.reshape(-1, im.shape[2]), axis=0, return_counts=True)
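# Example of imhist on a tiny image (values chosen arbitrarily): two pixels
# share the same color, so that color gets a count of 2.
#
#   im = np.array([[[255, 0, 0], [255, 0, 0]],
#                  [[0, 255, 0], [0, 0, 255]]], dtype=np.uint8)
#   colors, counts = imhist(im)   # 3 unique colors with counts [1, 1, 2]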
def subtract_mean(im):
"""
    Subtracts the RGB mean from each pixel. The image must be in RGB format.
Returns a copy, the input image remains unchanged.
:param im: the image to transform
:return: the image with the mean removed
"""
x = im.astype(np.float32)
x[:, :] -= np.asarray([103.939, 116.779, 123.68]).astype(np.float32)
return x
def pad(im, target_shape, center=False, cval=0):
"""
Pads an image to the specified shape. The image must be smaller than the target shape.
Returns a copy, the input image remains unchanged.
:param im: the image to pad
:param target_shape: the shape of the image after padding
:param center: center the image or append rows and columns to the image
:param cval: constant value for the padded pixels
:return:
"""
h_pad, w_pad = np.asarray(target_shape) - im.shape[:2]
assert h_pad >= 0, 'Height padding must be non-negative'
assert w_pad >= 0, 'Width padding must be non-negative'
if center:
padding = ((h_pad//2, h_pad-h_pad//2), (w_pad//2, w_pad-w_pad//2))
if len(im.shape) == 3:
padding += ((0, 0),)
im_padded = np.pad(im, padding, mode='constant', constant_values=cval)
else:
padding = ((0, h_pad), (0, w_pad))
if len(im.shape) == 3:
padding += ((0, 0),)
im_padded = np.pad(im, padding, mode='constant', constant_values=cval)
return im_padded
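# Padding sketch (illustrative): grow a 2x2 array to 4x4, either appending
# rows/columns at the bottom-right or centering the original content.
#
#   a = np.ones((2, 2), dtype=np.uint8)
#   pad(a, (4, 4)).shape            # -> (4, 4), original in the top-left corner
#   pad(a, (4, 4), center=True)     # -> original centered, zeros all around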
def flip_axis(im, axis):
"""
Flips a numpy array along the given axis.
Returns a copy, the input image remains unchanged.
:param im: numpy array
:param axis: the axis along which to flip the data
:return: the flipped array
"""
x = np.asarray(im).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_flip_axis(im, gt, axis):
"""
Randomly flips the input image and its segmentation labels along the given axis.
:param im: the image to transform
:param gt: the ground truth pixels' labels (ie. semantic class) in sparse format
:param axis: the axis along which to flip the data
:return: the original input or a flipped copy
"""
if np.random.random() < 0.5:
return flip_axis(im, axis), flip_axis(gt, axis)
return im, gt
def random_blur(im, ksize_max, sigma_max):
"""
Randomly blurs an image using Gaussian filtering.
:param im: the image to blur
:param ksize_max: a tuple with the maximum kernel size along the X and Y axes
:param sigma_max: a tuple with the maximum kernel sigma along the X and Y axes
:return: the blurred image
"""
# The kernel size must be odd
ksize = [np.random.randint(ksize_max[0]), np.random.randint(ksize_max[1])]
if (ksize[0] % 2) != 1:
ksize[0] += 1
if (ksize[1] % 2) != 1:
ksize[1] += 1
sigmaX = sigma_max[0]*np.random.random()
sigmaY = sigma_max[1]*np.random.random()
im_blur = cv2.GaussianBlur(im, tuple(ksize), sigmaX=sigmaX, sigmaY=sigmaY)
return im_blur
def zoom(im, scale, interpolation):
"""
Zooms an input image to the specified zoom factor.
:param im: the image to zoom
:param scale: the zoom factor, 1.0 means no zoom
:param interpolation: the interpolation method:
- cv2.INTER_LINEAR for an image,
- cv2.INTER_NEAREST for its ground truth pixels' labels
:return: the resized image
"""
return cv2.resize(im, dsize=(0, 0), fx=scale, fy=scale, interpolation=interpolation)
def random_zoom(im, gt, zoom_range):
"""
Randomly zooms in/out of an image and its ground truth segmentation labels.
:param im: the image
:param gt: the segmentation labels
:param zoom_range: a tuple made of the min & max zoom factors, for example (0.8, 1.2)
:return: the resized image and ground truth labels
"""
scale = np.random.uniform(*zoom_range)
x = zoom(im, scale, cv2.INTER_LINEAR)
y = zoom(gt, scale, cv2.INTER_NEAREST)
return x, y
def adjust_saturation_and_value(im, saturation=0, value=0):
"""
Adjusts the saturation and value of the input image by the specified integer amounts.
Pixels are clipped to maintain their HSV values in [0, 255].
Returns a copy, the input image remains unchanged.
:param im: the image to transform
:param saturation: the absolute 'saturation' amount to apply
:param value: the absolute 'value' amount to apply
:return: the transformed image
"""
if (saturation == 0) & (value == 0):
return im
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
if saturation != 0:
if saturation > 0:
s = np.where(s <= 255-saturation, s+saturation, 255).astype('uint8')
else:
s = np.where(s <= -saturation, 0, s+saturation).astype('uint8')
if value != 0:
if value > 0:
v = np.where(v <= 255-value, v+value, 255).astype('uint8')
else:
v = np.where(v <= -value, 0, v+value).astype('uint8')
hsv = cv2.merge((h, s, v))
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def adjust_brightness_and_contrast(im, brightness=0, contrast=0):
"""
Adjusts the brightness and contrast of the input image by the specified integer amounts.
Pixels are clipped to maintain their RGB values in [0, 255].
Returns a copy, the input image remains unchanged.
:param im: the image to transform
:param brightness: the absolute 'brightness' amount to apply
:param contrast: the absolute 'contrast' amount to apply
:return: the transformed image
"""
if (brightness == 0) & (contrast == 0):
return im
buf = im.copy()
if brightness != 0:
if brightness > 0:
shadow = brightness
highlight = 255
else:
shadow = 0
highlight = 255+brightness
alpha_b = (highlight-shadow)/255
gamma_b = shadow
buf = cv2.addWeighted(buf, alpha_b, buf, 0, gamma_b)
if contrast != 0:
f = 131*(contrast+127) / (127*(131-contrast))
alpha_c = f
gamma_c = 127*(1-f)
buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)
return buf
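# A small photometric-jitter sketch combining the two adjustment helpers
# (amounts are arbitrary; inputs are expected to be uint8 images):
#
#   def random_photometric_jitter(im, max_shift=25):
#       b = np.random.randint(-max_shift, max_shift + 1)
#       c = np.random.randint(-max_shift, max_shift + 1)
#       return adjust_brightness_and_contrast(im, brightness=b, contrast=c)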
def center_crop(im, target_shape):
    """Crops the central region of the image to the target (height, width) shape."""
    h, w = target_shape
    y, x = im.shape[:2]
    start_y = max(0, y//2-(h//2))
    start_x = max(0, x//2-(w//2))
    return im[start_y:start_y+h, start_x:start_x+w]
def random_crop(im, gt_im, target_shape):
    """Randomly crops an image and its segmentation labels to the target shape."""
    h, w = target_shape
    h_crop, w_crop = im.shape[:2] - np.asarray(target_shape)
    # randint's upper bound is exclusive; +1 also covers images that already match the target size
    start_y = np.random.randint(0, h_crop + 1)
    start_x = np.random.randint(0, w_crop + 1)
    return im[start_y:start_y+h, start_x:start_x+w], gt_im[start_y:start_y+h, start_x:start_x+w]
def pad_or_crop(im, target_shape, cval=0):
    """Center-pads or center-crops each axis of the image to the target shape."""
    h, w = target_shape
y, x = im.shape[:2]
h_pad, w_pad = h-y, w-x
# Vertical center padding
if h_pad > 0:
padding = ((h_pad//2, h_pad-h_pad//2), (0, 0))
if len(im.shape) == 3:
padding += ((0, 0),)
im_padded = np.pad(im, padding, mode='constant', constant_values=cval)
# Vertical center cropping
else:
start_y = max(0, (y//2)-(h//2))
im_padded = im[start_y:start_y+h, :]
# Horizontal center padding
if w_pad > 0:
padding = ((0, 0), (w_pad//2, w_pad-w_pad//2))
if len(im.shape) == 3:
padding += ((0, 0),)
im_padded = np.pad(im_padded, padding, mode='constant', constant_values=cval)
# Horizontal center cropping
else:
start_x = max(0, x//2-(w//2))
im_padded = im_padded[:, start_x:start_x+w]
return im_padded
def rotate(im, angle, scale, interpolation, cval=0):
"""
    Rotates an image.
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# valgrind_test.py
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import common
import drmemory_analyze
import memcheck_analyze
import tsan_analyze
import logging_utils
class BaseTool(object):
"""Abstract class for running Valgrind-, PIN-based and other dynamic
error detector tools.
Always subclass this and implement ToolCommand with framework- and
tool-specific stuff.
"""
def __init__(self):
self.temp_dir = tempfile.mkdtemp()
self.option_parser_hooks = []
def ToolName(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Analyze(self, check_sanity=False):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def RegisterOptionParserHook(self, hook):
# Frameworks and tools can add their own flags to the parser.
self.option_parser_hooks.append(hook)
def CreateOptionParser(self):
# Defines Chromium-specific flags.
self._parser = optparse.OptionParser("usage: %prog [options] <program to "
"test>")
self._parser.add_option("-t", "--timeout",
dest="timeout", metavar="TIMEOUT", default=10000,
help="timeout in seconds for the run (default 10000)")
self._parser.add_option("", "--source_dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
self._parser.add_option("", "--gtest_filter", default="",
help="which test case to run")
self._parser.add_option("", "--gtest_repeat",
help="how many times to run each test")
self._parser.add_option("", "--gtest_print_time", action="store_true",
default=False,
help="show how long each test takes")
self._parser.add_option("", "--ignore_exit_code", action="store_true",
default=False,
help="ignore exit code of the test "
"(e.g. test failures)")
self._parser.add_option("", "--nocleanup_on_exit", action="store_true",
default=False,
help="don't delete directory with logs on exit")
# To add framework- or tool-specific flags, please add a hook using
# RegisterOptionParserHook in the corresponding subclass.
# See ValgrindTool and ThreadSanitizerBase for examples.
for hook in self.option_parser_hooks:
hook(self, self._parser)
def ParseArgv(self, args):
self.CreateOptionParser()
# self._tool_flags will store those tool flags which we don't parse
# manually in this script.
self._tool_flags = []
known_args = []
""" We assume that the first argument not starting with "-" is a program
name and all the following flags should be passed to the program.
TODO(timurrrr): customize optparse instead
"""
while len(args) > 0 and args[0][:1] == "-":
arg = args[0]
if (arg == "--"):
break
if self._parser.has_option(arg.split("=")[0]):
known_args += [arg]
else:
self._tool_flags += [arg]
args = args[1:]
if len(args) > 0:
known_args += args
self._options, self._args = self._parser.parse_args(known_args)
self._timeout = int(self._options.timeout)
self._source_dir = self._options.source_dir
self._nocleanup_on_exit = self._options.nocleanup_on_exit
self._ignore_exit_code = self._options.ignore_exit_code
if self._options.gtest_filter != "":
self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
if self._options.gtest_repeat:
self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_print_time:
self._args.append("--gtest_print_time")
return True
def Setup(self, args):
return self.ParseArgv(args)
def ToolCommand(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Cleanup(self):
# You may override it in the tool-specific subclass
pass
def Execute(self):
""" Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc."""
logging.info("starting execution...")
proc = self.ToolCommand()
add_env = {
"G_SLICE" : "always-malloc",
"NSS_DISABLE_ARENA_FREE_LIST" : "1",
"GTEST_DEATH_TEST_USE_FORK" : "1",
}
if common.IsWine():
# TODO(timurrrr): Maybe we need it for TSan/Win too?
add_env["CHROME_ALLOCATOR"] = "winheap"
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
return common.RunSubprocess(proc, self._timeout)
def RunTestsAndAnalyze(self, check_sanity):
exec_retcode = self.Execute()
analyze_retcode = self.Analyze(check_sanity)
if analyze_retcode:
logging.error("Analyze failed.")
logging.info("Search the log for '[ERROR]' to see the error reports.")
return analyze_retcode
if exec_retcode:
if self._ignore_exit_code:
logging.info("Test execution failed, but the exit code is ignored.")
else:
logging.error("Test execution failed.")
return exec_retcode
else:
logging.info("Test execution completed successfully.")
if not analyze_retcode:
logging.info("Analysis completed successfully.")
return 0
def Main(self, args, check_sanity):
"""Call this to run through the whole process: Setup, Execute, Analyze"""
start = datetime.datetime.now()
retcode = -1
if self.Setup(args):
retcode = self.RunTestsAndAnalyze(check_sanity)
if not self._nocleanup_on_exit:
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.Cleanup()
else:
logging.error("Setup failed")
end = datetime.datetime.now()
seconds = (end - start).seconds
hours = seconds / 3600
seconds = seconds % 3600
minutes = seconds / 60
seconds = seconds % 60
logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
return retcode
def Run(self, args, module):
MODULES_TO_SANITY_CHECK = ["base"]
# TODO(timurrrr): this is a temporary workaround for http://crbug.com/47844
if self.ToolName() == "tsan" and common.IsMac():
MODULES_TO_SANITY_CHECK = []
check_sanity = module in MODULES_TO_SANITY_CHECK
return self.Main(args, check_sanity)
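# Hedged sketch (not part of the original file): the minimum a concrete tool
# has to provide is ToolName() and ToolCommand(); the names below are purely
# hypothetical.
#
#   class TimeTool(BaseTool):
#     def ToolName(self):
#       return "time"
#     def ToolCommand(self):
#       # self._args already starts with the test executable.
#       return ["/usr/bin/time", "--verbose"] + self._args
#
#   # TimeTool().Run(sys.argv[1:], module="base")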
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def __init__(self):
super(ValgrindTool, self).__init__()
self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
def UseXML(self):
# Override if tool prefers nonxml output
return True
def SelfContained(self):
# Returns true iff the tool is distributed as a self-contained
# .sh script (e.g. ThreadSanitizer)
return False
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a valgrind suppression file")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running valgrind directly")
parser.add_option("", "--trace_children", action="store_true",
default=False,
help="also trace child processes")
parser.add_option("", "--num-callers",
dest="num_callers", default=30,
help="number of callers to show in stack traces")
parser.add_option("", "--generate_dsym", action="store_true",
default=False,
help="Generate .dSYM file on Mac if needed. Slow!")
def Setup(self, args):
if not BaseTool.Setup(self, args):
return False
if common.IsWine():
self.PrepareForTestWine()
elif common.IsMac():
self.PrepareForTestMac()
return True
def PrepareForTestMac(self):
"""Runs dsymutil if needed.
Valgrind for Mac OS X requires that debugging information be in a .dSYM
bundle generated by dsymutil. It is not currently able to chase DWARF
data into .o files like gdb does, so executables without .dSYM bundles or
with the Chromium-specific "fake_dsym" bundles generated by
build/mac/strip_save_dsym won't give source file and line number
information in valgrind.
This function will run dsymutil if the .dSYM bundle is missing or if
it looks like a fake_dsym. A non-fake dsym that already exists is assumed
to be up-to-date.
"""
test_command = self._args[0]
dsym_bundle = self._args[0] + '.dSYM'
dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
os.path.basename(test_command))
dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
needs_dsymutil = True
saved_test_command = None
if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
# Look for the special fake_dsym tag in dsym_info_plist.
dsym_info_plist_contents = open(dsym_info_plist).read()
if not re.search(r'^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
re.MULTILINE):
# fake_dsym is not set, this is a real .dSYM bundle produced by
# dsymutil. dsymutil does not need to be run again.
needs_dsymutil = False
else:
# fake_dsym is set. dsym_file is a copy of the original test_command
# before it was stripped. Copy it back to test_command so that
# dsymutil has unstripped input to work with. Move the stripped
# test_command out of the way, it will be restored when this is
# done.
saved_test_command = test_command + '.stripped'
os.rename(test_command, saved_test_command)
shutil.copyfile(dsym_file, test_command)
shutil.copymode(saved_test_command, test_command)
if needs_dsymutil:
if self._options.generate_dsym:
# Remove the .dSYM bundle if it exists.
shutil.rmtree(dsym_bundle, True)
dsymutil_command = ['dsymutil', test_command]
# dsymutil is crazy slow. Ideally we'd have a timeout here,
# but common.RunSubprocess' timeout is only checked
# after each line of output; dsymutil is silent
# until the end, and is then killed, which is silly.
common.RunSubprocess(dsymutil_command)
if saved_test_command:
os.rename(saved_test_command, test_command)
else:
logging.info("No real .dSYM for test_command. Line numbers will "
"not be shown. Either tell xcode to generate .dSYM "
"file, or use --generate_dsym option to this tool.")
def PrepareForTestWine(self):
"""Set up the Wine environment.
We need to run some sanity checks, set up a Wine prefix, and make sure
wineserver is running by starting a dummy win32 program.
"""
if not os.path.exists('/usr/share/ca-certificates/root_ca_cert.crt'):
logging.warning('WARNING: SSL certificate missing! SSL tests will fail.')
logging.warning('You need to run:')
logging.warning('sudo cp src/net/data/ssl/certificates/root_ca_cert.crt '
'/usr/share/ca-certificates/')
logging.warning('sudo vi /etc/ca-certificates.conf')
logging.warning(' (and add the line root_ca_cert.crt)')
logging.warning('sudo update-ca-certificates')
# Shutdown the Wine server in case the last run got interrupted.
common.RunSubprocess([os.environ.get('WINESERVER'), '-k'])
# Yes, this can be dangerous if $WINEPREFIX is set incorrectly.
shutil.rmtree(os.environ.get('WINEPREFIX'), ignore_errors=True)
winetricks = os.path.join(self._source_dir, 'tools', 'valgrind',
'wine_memcheck', 'winetricks')
common.RunSubprocess(['sh', winetricks,
'nocrashdialog', 'corefonts', 'gecko'])
time.sleep(1)
# Start a dummy program like winemine so Valgrind won't run memcheck on
# the wineserver startup routine when it launches the test binary, which
# is slow and not interesting to us.
common.RunSubprocessInBackground([os.environ.get('WINE'), 'winemine'])
return
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if self.SelfContained():
proc = ["valgrind-%s.sh" % tool_name]
else:
proc = ["valgrind", "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % | |
returned by the device
The str_ argument should be the device response to the <K221?>
command, for example '<K221,10,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
number_before_output, decodes_before_output_mode = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
number_before_output=int(number_before_output),
decodes_before_output_mode=DecodesBeforeOutputMode(
decodes_before_output_mode)
)
# === Scan Speed setting and corresponding enums ===
class ScanSpeed(KSetting):
"""See page 4-17 of Microscan MS3 manual for reference
Note that the user manual groups the "Scan Speed" setting under the
"Scanner Setup" heading. This library treats it as separate setting because
it is stored with a distinct K-code `K500` while all other Scanner Setup
settings are stored with a K-code of `K504`.
"""
K_CODE = b'K500'
K_PATTERN = rb'^<%s,([\d]{2,3})?>$' % K_CODE
def __init__(self, scan_speed=350):
self.scan_speed = scan_speed
def is_valid(self):
return all([
isinstance(self.scan_speed, int),
self.scan_speed >= 30,
self.scan_speed <= 100,
])
def to_config_string(self):
return super().to_config_string([
self.scan_speed,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScanSpeed object from str returned by the device
The str_ argument should be the device response to the <K500?>
command, for example '<K500,350>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
scan_speed, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
scan_speed=int(scan_speed),
)
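# Round-trip sketch (values are illustrative, not taken from the MS3 manual):
#   speed = ScanSpeed.from_config_string(b'<K500,350>')
#   speed.scan_speed          # -> 350
#   speed.to_config_string()  # -> a b'<K500,...>' string (exact formatting
#                             #    depends on the KSetting base class)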
# === Scanner Setup setting and corresponding enums ===
class AGCSamplingMode(Enum):
Disabled = b'0'
LeadingEdge = b'1'
Continuous = b'2'
class ScannerSetup(KSetting):
"""See page 4-17 of Microscan MS3 manual for reference
Note that the user manual groups the "Scan Speed" setting under the
"Scanner Setup" heading. This library treats it as separate setting because
it is stored with a distinct K-code `K500` while all other Scanner Setup
settings are stored with a K-code of `K504`.
"""
K_CODE = b'K504'
K_PATTERN = (
rb'^<%s,([\d]{2,3})?,([0-2])?,([\d]{2,3})?,([\d]{2,3})?>$' % K_CODE)
def __init__(
self, gain_level=350,
agc_sampling_mode=AGCSamplingMode.Continuous, agc_min=70,
agc_max=245):
self.gain_level = gain_level
self.agc_sampling_mode = agc_sampling_mode
self.agc_min = agc_min
self.agc_max = agc_max
def is_valid(self):
return all([
isinstance(self.gain_level, int),
self.gain_level >= 40,
self.gain_level <= 255,
isinstance(self.agc_sampling_mode, AGCSamplingMode),
isinstance(self.agc_min, int),
self.agc_min >= 40,
self.agc_min <= 250,
isinstance(self.agc_max, int),
self.agc_max >= 60,
self.agc_max <= 255,
])
def to_config_string(self):
return super().to_config_string([
self.gain_level,
self.agc_sampling_mode.value,
self.agc_min,
self.agc_max,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScannerSetup object from str returned by the device
The str_ argument should be the device response to the <K504?>
command, for example '<K504,50,2,60,230>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
gain_level, agc_sampling_mode, agc_min, agc_max = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
gain_level=int(gain_level),
agc_sampling_mode=AGCSamplingMode(agc_sampling_mode),
agc_min=int(agc_min),
agc_max=int(agc_max),
)
# === Symbol Detect Status setting and corresponding enums ===
class SymbolDetectStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SymbolDetect(KSetting):
"""See page 4-19 of Microscan MS3 manual for reference
Note that the user manual groups the "Symbol Detect Status" setting under
the "Scanner Setup" heading. This library treats it as separate setting
because it is stored with a distinct K-code `K505` while all other Scanner
Setup settings are stored with a K-code of `K504`.
"""
K_CODE = b'K505'
K_PATTERN = rb'^<%s,([0-1])?,([\d]{1,3})?>$' % K_CODE
def __init__(
self, status=SymbolDetectStatus.Disabled, transition_counter=14):
self.status = status
self.transition_counter = transition_counter
def is_valid(self):
return all([
isinstance(self.status, SymbolDetectStatus),
isinstance(self.transition_counter, int),
self.transition_counter >= 0,
self.transition_counter <= 255,
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.transition_counter,
])
@classmethod
def from_config_string(cls, str_):
"""Create SymbolDetect object from string returned by the device
The str_ argument should be the device response to the <K505?>
command, for example '<K505,1,14>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, transition_counter = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=SymbolDetectStatus(status),
transition_counter=int(transition_counter)
)
# === Maximum Element setting and corresponding enums ===
class MaximumElement(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
"""
K_CODE = b'K502'
K_PATTERN = rb'^<%s,([\d]{1,5})?>$' % K_CODE
def __init__(self, maximum_element=0):
self.maximum_element = maximum_element
def is_valid(self):
return all([
isinstance(self.maximum_element, int),
self.maximum_element >= 0,
self.maximum_element <= 65535,
])
def to_config_string(self):
return super().to_config_string([
self.maximum_element,
])
@classmethod
def from_config_string(cls, str_):
"""Create MaximumElement object from string returned by the device
The str_ argument should be the device response to the <K502?>
command, for example '<K502,123>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
maximum_element, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
maximum_element=int(maximum_element),
)
# === Scan Width Enhance setting and corresponding enums ===
class ScanWidthEnhanceStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class ScanWidthEnhance(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
Note that the user manual groups the "Scan Width Enhance" setting under
the "Scanner Setup" heading. This library treats it as separate setting
because it is stored with a distinct K-code `K511` while all other Scanner
Setup settings are stored with a K-code of `K504`.
"""
K_CODE = b'K511'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(
self, status=ScanWidthEnhanceStatus.Disabled):
self.status = status
def is_valid(self):
return all([
isinstance(self.status, ScanWidthEnhanceStatus),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScanWidthEnhance object from string returned by the device
The str_ argument should be the device response to the <K511?>
command, for example '<K511,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=ScanWidthEnhanceStatus(status),
)
# === Laser Setup setting and corresponding enums ===
class LaserOnOffStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LaserFramingStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LaserPower(Enum):
Low = b'0'
Medium = b'1'
High = b'2'
class LaserSetup(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
Note that the "Laser Power" subsetting of the Laser Setup is mentioned
twice in the MS3 user manual, once under "Laser Setup" and once under
"Scanner Setup".
"""
K_CODE = b'K700'
K_PATTERN = (
rb'^<%s,([0-1])?,([0-1])?,([\d]{2})?,([\d]{2})?,([0-2])?>$' % K_CODE)
def __init__(
self, laser_on_off_status=LaserOnOffStatus.Enabled,
laser_framing_status=LaserFramingStatus.Enabled,
laser_on_position=10,
laser_off_position=95,
laser_power=LaserPower.High):
self.laser_on_off_status = laser_on_off_status
self.laser_framing_status = laser_framing_status
self.laser_on_position = laser_on_position
self.laser_off_position = laser_off_position
self.laser_power = laser_power
def is_valid(self):
return all([
isinstance(self.laser_on_off_status, LaserOnOffStatus),
isinstance(self.laser_framing_status, LaserFramingStatus),
isinstance(self.laser_on_position, int),
self.laser_on_position >= 10,
self.laser_on_position <= 80,
isinstance(self.laser_off_position, int),
self.laser_off_position >= 20,
self.laser_off_position <= 95,
isinstance(self.laser_power, LaserPower)
])
def to_config_string(self):
return super().to_config_string([
self.laser_on_off_status.value,
self.laser_framing_status.value,
self.laser_on_position,
self.laser_off_position,
self.laser_power.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create LaserSetup object from string returned by the device
The str_ argument should be the device response to the <K700?>
command, for example '<K700,1,1,10,95,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
on_off_status, framing_status, on_position, off_position, power
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
laser_on_off_status=LaserOnOffStatus(on_off_status),
laser_framing_status=LaserFramingStatus(framing_status),
laser_on_position=int(on_position),
laser_off_position=int(off_position),
laser_power=LaserPower(power)
)
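# Usage sketch (hypothetical values, not taken from the MS3 manual):
#   laser = LaserSetup.from_config_string(b'<K700,1,1,10,95,1>')
#   laser.laser_power   # -> LaserPower.Medium
#   laser.is_valid()    # -> True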
# === Code 39 setting and corresponding enums ===
class Code39Status(Enum):
Disabled = b'0'
Enabled = b'1'
class CheckDigitStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class CheckDigitOutputStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LargeInterCharacterStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class FixedSymbolLengthStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class FullASCIISetStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Code39(KSetting):
"""See page 5-3 of Microscan MS3 manual for reference
"""
K_CODE = b'K470'
K_PATTERN = (
rb'^<%s,([0-1])?,([0-1])?,([0-1])?,([0-1])?,([0-1])?,([\d]{1,2})?,'
b'([0-1])?>$' % K_CODE)
def __init__(
self,
status=Code39Status.Enabled,
check_digit_status=CheckDigitStatus.Disabled,
check_digit_output=CheckDigitOutputStatus.Disabled,
large_intercharacter_gap=LargeInterCharacterStatus.Disabled,
fixed_symbol_length=FixedSymbolLengthStatus.Disabled,
symbol_length=10,
full_ascii_set=FullASCIISetStatus.Disabled):
self.status = status
self.check_digit_status = check_digit_status
self.check_digit_output = check_digit_output
self.large_intercharacter_gap = large_intercharacter_gap
self.fixed_symbol_length = fixed_symbol_length
self.symbol_length = symbol_length
self.full_ascii_set = full_ascii_set
def is_valid(self):
return all([
isinstance(self.status, Code39Status),
isinstance(self.check_digit_status, CheckDigitStatus),
isinstance(self.check_digit_output, CheckDigitOutputStatus),
isinstance(
self.large_intercharacter_gap, LargeInterCharacterStatus),
isinstance(self.fixed_symbol_length, FixedSymbolLengthStatus),
isinstance(self.symbol_length, int),
self.symbol_length >= 1,
self.symbol_length <= 64,
isinstance(self.full_ascii_set, FullASCIISetStatus),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.check_digit_status.value,
self.check_digit_output.value,
self.large_intercharacter_gap.value,
self.fixed_symbol_length.value,
self.symbol_length,
self.full_ascii_set.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code39 object from string returned by the device
The str_ argument should be the device response to the <K470?>
command, for example '<K470,1,0,0,1,1,32,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
status, check_digit_status, check_digit_output,
large_intercharacter_gap, fixed_symbol_length, symbol_length,
full_ascii_set
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code39Status(status),
check_digit_status=CheckDigitStatus(check_digit_status),
check_digit_output=CheckDigitOutputStatus(check_digit_output),
large_intercharacter_gap=LargeInterCharacterStatus(
large_intercharacter_gap),
fixed_symbol_length=FixedSymbolLengthStatus(fixed_symbol_length),
symbol_length=int(symbol_length),
full_ascii_set=FullASCIISetStatus(full_ascii_set),
)
# === Code 128 setting and corresponding enums ===
class Code128Status(Enum):
"""Enables/disables the Code 128 symbologies
See page | |
0x233639: (0x8C94, 0), # East Asian ideograph
0x21363A: (0x54E5, 0), # East Asian ideograph
0x225E5F: (0x75D0, 0), # East Asian ideograph
0x21363C: (0x54F2, 0), # East Asian ideograph
0x21363D: (0x54E8, 0), # East Asian ideograph
0x21363E: (0x54E1, 0), # East Asian ideograph
0x22363F: (0x6555, 0), # East Asian ideograph
0x213640: (0x54ED, 0), # East Asian ideograph
0x233641: (0x8C9B, 0), # East Asian ideograph
0x213642: (0x5509, 0), # East Asian ideograph
0x213643: (0x54E6, 0), # East Asian ideograph
0x233644: (0x8CA4, 0), # East Asian ideograph
0x223645: (0x6567, 0), # East Asian ideograph
0x213646: (0x5546, 0), # East Asian ideograph
0x223647: (0x6561, 0), # East Asian ideograph
0x213648: (0x554F, 0), # East Asian ideograph
0x273649: (0x54D1, 0), # East Asian ideograph
0x21364A: (0x5566, 0), # East Asian ideograph
0x21364B: (0x556A, 0), # East Asian ideograph
0x21364C: (0x554A, 0), # East Asian ideograph
0x21364D: (0x5544, 0), # East Asian ideograph
0x21364E: (0x555C, 0), # East Asian ideograph
0x22364F: (0x656D, 0), # East Asian ideograph
0x213650: (0x5543, 0), # East Asian ideograph
0x213651: (0x552C, 0), # East Asian ideograph
0x213652: (0x5561, 0), # East Asian ideograph
0x233653: (0x8CB9, 0), # East Asian ideograph
0x223654: (0x657A, 0), # East Asian ideograph
0x213655: (0x5555, 0), # East Asian ideograph
0x213656: (0x552F, 0), # East Asian ideograph
0x233657: (0x8CCD, 0), # East Asian ideograph
0x213658: (0x5564, 0), # East Asian ideograph
0x213659: (0x5538, 0), # East Asian ideograph
0x21365A: (0x55A7, 0), # East Asian ideograph
0x21365B: (0x5580, 0), # East Asian ideograph
0x21365C: (0x557B, 0), # East Asian ideograph
0x21365D: (0x557C, 0), # East Asian ideograph
0x21365E: (0x5527, 0), # East Asian ideograph
0x21365F: (0x5594, 0), # East Asian ideograph
0x213660: (0x5587, 0), # East Asian ideograph
0x213661: (0x559C, 0), # East Asian ideograph
0x213662: (0x558B, 0), # East Asian ideograph
0x273663: (0x4E27, 0), # East Asian ideograph
0x213664: (0x55B3, 0), # East Asian ideograph
0x225E66: (0x75E1, 0), # East Asian ideograph
0x213666: (0x5583, 0), # East Asian ideograph
0x213667: (0x55B1, 0), # East Asian ideograph
0x273668: (0x5355, 0), # East Asian ideograph
0x213669: (0x5582, 0), # East Asian ideograph
0x21366A: (0x559F, 0), # East Asian ideograph
0x225E67: (0x75E6, 0), # East Asian ideograph
0x21366C: (0x5598, 0), # East Asian ideograph
0x21366D: (0x559A, 0), # East Asian ideograph
0x22366E: (0x658C, 0), # East Asian ideograph
0x27366F: (0x4E54, 0), # East Asian ideograph
0x223670: (0x6592, 0), # East Asian ideograph
0x213671: (0x55B2, 0), # East Asian ideograph
0x233672: (0x8CDD, 0), # East Asian ideograph
0x213673: (0x55E8, 0), # East Asian ideograph
0x233674: (0x8CD9, 0), # East Asian ideograph
0x223675: (0x659B, 0), # East Asian ideograph
0x213676: (0x55DC, 0), # East Asian ideograph
0x223677: (0x659D, 0), # East Asian ideograph
0x213678: (0x55C7, 0), # East Asian ideograph
0x213679: (0x55D3, 0), # East Asian ideograph
0x21367A: (0x55CE, 0), # East Asian ideograph
0x21367B: (0x55E3, 0), # East Asian ideograph
0x23367C: (0x8CF5, 0), # East Asian ideograph
0x21367D: (0x55E4, 0), # East Asian ideograph
0x23367E: (0x8CFB, 0), # East Asian ideograph
0x232760: (0x8600, 0), # East Asian ideograph
0x275E6B: (0x8F9F, 0), # East Asian ideograph (duplicate simplified)
0x285424: (0x70E8, 0), # East Asian ideograph
0x27375C: (0x556D, 0), # East Asian ideograph
0x4B5E6C: (0x961D, 0), # East Asian ideograph (duplicate simplified)
0x293F5A: (0x90E7, 0), # East Asian ideograph
0x235E6F: (0x9EF6, 0), # East Asian ideograph
0x4B5422: (0x81D3, 0), # East Asian ideograph
0x6F583F: (0xC9EC, 0), # Korean hangul
0x27375D: (0x55EB, 0), # East Asian ideograph
0x225E71: (0x75E4, 0), # East Asian ideograph
0x225E72: (0x75E0, 0), # East Asian ideograph
0x4B4759: (0x6D99, 0), # East Asian ideograph
0x6F4B66: (0xB0C7, 0), # Korean hangul
0x225E73: (0x75D7, 0), # East Asian ideograph
0x6F5755: (0xC88D, 0), # Korean hangul
0x235E74: (0x9EF9, 0), # East Asian ideograph
0x275977: (0x8D3A, 0), # East Asian ideograph
0x27375E: (0x56A3, 0), # East Asian ideograph
0x235E76: (0x9EFB, 0), # East Asian ideograph
0x274921: (0x6CFD, 0), # East Asian ideograph
0x293F5C: (0x90AC, 0), # East Asian ideograph
0x2D616A: (0x6B1D, 0), # East Asian ideograph
0x215E77: (0x964C, 0), # East Asian ideograph
0x274922: (0x6D4A, 0), # East Asian ideograph
0x6F5756: (0xC890, 0), # Korean hangul
0x6F4924: (0xAC74, 0), # Korean hangul
0x6F4B76: (0xB118, 0), # Korean hangul
0x215E7A: (0x9662, 0), # East Asian ideograph
0x224925: (0x6D6D, 0), # East Asian ideograph
0x275978: (0x8D35, 0), # East Asian ideograph
0x235E7B: (0x9EFE, 0), # East Asian ideograph (not in Unicode)
0x274926: (0x6D4E, 0), # East Asian ideograph
0x215E7C: (0x965B, 0), # East Asian ideograph
0x274927: (0x6CDE, 0), # East Asian ideograph
0x235E7D: (0x9F02, 0), # East Asian ideograph
0x274928: (0x6EE8, 0), # East Asian ideograph
0x6F5757: (0xC894, 0), # Korean hangul
0x215E7E: (0x965D, 0), # East Asian ideograph
0x224929: (0x6D91, 0), # East Asian ideograph
0x287065: (0x7EC2, 0), # East Asian ideograph
0x2F4231: (0x8019, 0), # Unrelated variant of EACC 215266 which maps to 8019
0x6F492A: (0xAC81, 0), # Korean hangul
0x33492E: (0x6F81, 0), # East Asian ideograph
0x27492B: (0x6EE5, 0), # East Asian ideograph
0x33337B: (0x52E6, 0), # East Asian ideograph
0x233871: (0x8DC2, 0), # East Asian ideograph
0x22492C: (0x6D81, 0), # East Asian ideograph
0x235647: (0x9B51, 0), # East Asian ideograph
0x33463C: (0x6BBB, 0), # East Asian ideograph
0x27492D: (0x6D9B, 0), # East Asian ideograph
0x6F553A: (0xC587, 0), # Korean hangul
0x27492E: (0x6DA9, 0), # East Asian ideograph
0x22413C: (0x6A5A, 0), # East Asian ideograph
0x2E492F: (0x6CD9, 0), # East Asian ideograph
0x27597A: (0x4E70, 0), # East Asian ideograph
0x213331: (0x518D, 0), # East Asian ideograph
0x273761: (0x7F57, 0), # East Asian ideograph (duplicate simplified)
0x213721: (0x55DA, 0), # East Asian ideograph
0x223722: (0x65A8, 0), # East Asian ideograph
0x223723: (0x65A6, 0), # East Asian ideograph
0x213724: (0x5600, 0), # East Asian ideograph
0x233725: (0x8D04, 0), # East Asian ideograph
0x213726: (0x55FE, 0), # East Asian ideograph
0x273727: (0x5567, 0), # East Asian ideograph
0x213728: (0x55F7, 0), # East Asian ideograph
0x213729: (0x5608, 0), # East Asian ideograph
0x22372A: (0x65B6, 0), # East Asian ideograph
0x21372B: (0x55FD, 0), # East Asian ideograph
0x22372C: (0x65B8, 0), # East Asian ideograph
0x23372D: (0x8D09, 0), # East Asian ideograph
0x21372E: (0x5614, 0), # East Asian ideograph
0x22372F: (0x65BF, 0), # East Asian ideograph
0x273730: (0x5C1D, 0), # East Asian ideograph
0x273731: (0x55BD, 0), # East Asian ideograph
0x273732: (0x5520, 0), # East Asian ideograph
0x213733: (0x562F, 0), # East Asian ideograph
0x223734: (0x65C2, 0), # East Asian ideograph
0x213735: (0x5636, 0), # East Asian ideograph
0x213736: (0x5632, 0), # East Asian ideograph
0x213737: (0x563B, 0), # East Asian ideograph
0x213738: (0x5639, 0), # East Asian ideograph
0x274934: (0x6E85, 0), # East Asian ideograph
0x23373A: (0x8D10, 0), # East Asian ideograph
0x22373B: (0x65D0, 0), # East Asian ideograph
0x22373C: (0x65D2, 0), # East Asian ideograph
0x21373D: (0x5634, 0), # East Asian ideograph
0x23373E: (0x8D18, 0), # East Asian ideograph
0x224935: (0x6DEF, 0), # East Asian ideograph
0x213740: (0x5630, 0), # East Asian ideograph
0x213741: (0x566B, 0), # East Asian ideograph
0x213742: (0x5664, 0), # East Asian ideograph
0x213743: (0x5669, 0), # East Asian ideograph
0x223744: (0x65DB, 0), # East Asian ideograph
0x213745: (0x5674, 0), # East Asian ideograph
0x273746: (0x5F53, 0), # East Asian ideograph (duplicate simplified)
0x213747: (0x5665, 0), # East Asian ideograph
0x213748: (0x566A, 0), # East Asian ideograph
0x213749: (0x5668, 0), # East Asian ideograph
0x22374A: (0x65E1, 0), # East Asian ideograph
0x27374B: (0x55F3, 0), # East Asian ideograph
0x225A23: (0x7429, 0), # East Asian ideograph
0x21374D: (0x566C, 0), # East Asian ideograph
0x21374E: (0x5680, 0), # East Asian ideograph
0x21374F: (0x568E, 0), # East Asian ideograph
0x213750: (0x5685, 0), # East Asian ideograph
0x214938: (0x701B, 0), # East Asian ideograph
0x233752: (0x8D78, 0), # East Asian ideograph
0x213753: (0x568F, 0), # East Asian ideograph
0x223754: (0x65F4, 0), # East Asian ideograph
0x213755: (0x56AE, 0),  # East Asian ideograph (variant of 453755
worksheet name check; additional checks will be made as required.
4. If the batching information is passed to the object at instantiation, then merge this into a dictionary.
Error checking will be completed later.
5.
:param input_fp: The file path to the spreadsheet containing the data
:param data_worksheet: The name of the worksheet containing the data to be formatted.
:param structure_worksheet: The name of the worksheet containing the output document's structure.
:param batch_worksheet: The name of the worksheet containing the batch data.
:param header_row:
:param drop_empty_columns: An explicit tag to drop empty rows from the worksheet if they contain two or more
empty cells. If this is left as None it will be automatically set to True for the data worksheet.
:param template_file:
:param filter_rows:
:param output_file:
:param verbose:
:param template_generate:
"""
if template_generate:
# Generate the template spreadsheet and exit the app.
self.generate_template_document()
self.output_verbose: bool = verbose
# Step 1: Basic data checking.
t_sheets_expected = remove_from_iterable([data_worksheet, structure_worksheet, batch_worksheet], None)
print_verbose('Check: Worksheets are present:', verbose=self.output_verbose, **OUTPUT_TITLE)
if len(t_sheets_expected) == 0:
raise ValueError(f'Either the "data" and "structure" worksheets, or the "batch" worksheet must be '
f'provided.')
sheet = enumerate(t_sheets_expected, 1)
for item, sht in sheet:
print_verbose(f' Sheet {item}:\t{sht}', verbose=self.output_verbose, **OUTPUT_TEXT)
# Step 2: Confirm the input file exists.
self._input_fp: (Path, str) = ''
try:
print_verbose(f'Check: Resolving spreadsheet filepath: ', verbose=self.output_verbose, **OUTPUT_TITLE)
self._input_fp = resolve_file_path(input_fp)
print_verbose(f' {self._input_fp}', verbose=self.output_verbose, **OUTPUT_TEXT)
except Exception as e:
print_verbose(f'\t{e}: File {input_fp} does not exist.', True, **EXCEPTION_TEXT)
# Load the Excel file into memory.
self._washing_basket = pd.ExcelFile(self._input_fp)
# Gather the worksheet names
self._sheets_actual: list = self._washing_basket.sheet_names
# Set up the lists to contain the dictionaries containing: 1. data, 2. output structure, and 3. batch docs
self._data: List[dict] = []
self._structure: List[dict] = []
self._batch: List[dict] = []
# Step 3: Check that the data, structure and batch worksheet names passed exist within the file.
try:
print_verbose(f'Check Worksheets exist in spreadsheet: ', verbose=self.output_verbose, **OUTPUT_TITLE)
self.compare_lists(t_sheets_expected, self._sheets_actual)
t_sht_actual = enumerate(self._sheets_actual, 1)
for item, sht in t_sht_actual:
print_verbose(f' Sheet {item}:\t {sht}', verbose=self.output_verbose, **OUTPUT_TEXT)
except Exception as e:
print_verbose(f'{e}', True, **EXCEPTION_TEXT)
# Step 4. If the batching information is passed to the object at instantiation, then merge this into a
# dictionary. Error checking will be completed later.
# Step 4.1. If all the expected command line parameters are set to the defaults, assume that a batch
# approach has been used. We do _not_ test for batch since this will be tested for later.
input_arg = [data_worksheet, structure_worksheet, header_row, drop_empty_columns, template_file, filter_rows,
output_file]
self.batch_df: pd.DataFrame = pd.DataFrame(columns=['data_worksheet', 'structure_worksheet', 'header_row',
'drop_empty_columns', 'template_file', 'output_file'])
# Step 4.2. Since the default values for the input args are all 'None' or 0, remove these values from the
# list; if the list's length is greater than 0 then there is a chance that a single wash is required. We don't
# test for the input file path since this has already occurred.
if len(remove_from_iterable(input_arg, None, 0)) > 0:
# If drop_empty_columns is None set it to True. This will save the user problems.
if drop_empty_columns is None:
drop_empty_columns = True
# Step 4.3. Turn the command line arguments into a dict and store temporarily.
t_batch_dict = {'data_worksheet': [data_worksheet], 'structure_worksheet': [structure_worksheet],
'header_row': [header_row], 'drop_empty_columns': [drop_empty_columns],
'template_file': [template_file], 'filter_rows': [filter_rows],
'output_file': [output_file]}
self.batch_df = pd.DataFrame.from_dict(data=t_batch_dict)
# Step 5. If batch information passed as a worksheet clean and sort the batch data.
else:
self.batch_df = self.excel_to_dataframe(self._washing_basket, batch_worksheet, header_row=0,
clean_header=True)
# Step 6. Check the batch data.
try:
print_verbose(f'Batch worksheet data', verbose=self.output_verbose, **DATAFRAME_TITLE)
print_verbose(f'{self.batch_df}', verbose=self.output_verbose, **DATAFRAME_TEXT)
print_verbose(f'Check: Batch worksheet data', verbose=self.output_verbose, **OUTPUT_TITLE)
self.check_batch_worksheet_data()
print_verbose(f'Batch data checked', verbose=self.output_verbose, **OUTPUT_TITLE)
except Exception as e:
print_verbose(f'{e}', True, **EXCEPTION_TEXT)
exit_app()
# Step 7. Convert the batch DataFrame to a dict and store.
self._batch_dict = self.batch_df.to_dict('records')
# Step 8 - Every row of the batch DataFrame contains information regarding an output file. For each row in
# the DataFrame produce the associated output file.
for t_batch_row in self.batch_df.itertuples():
t_structure_worksheet = t_batch_row.structure_worksheet
t_data_worksheet = t_batch_row.data_worksheet
self.t_structure_df = self.excel_to_dataframe(self._washing_basket, t_structure_worksheet, header_row=0,
clean_header=True, drop_empty_rows=False)
self.t_structure_photo_path: Dict[str, Path] = {}
self.t_data_df = self.excel_to_dataframe(self._washing_basket, t_data_worksheet,
header_row=t_batch_row.header_row,
clean_header=True, drop_empty_rows=True)
# Filter the data DataFrame using the filters passed.
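# Note (assumption inferred from how row_filter is indexed below, not from
# upstream documentation): each entry of filter_rows is expected to look like
#   ('column_name', ['value_to_keep_1', 'value_to_keep_2'])
# i.e. row_filter[0] names a data column and row_filter[1] lists the values
# whose rows are retained.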
if str(t_batch_row.filter_rows).lower() not in invalid and t_batch_row.filter_rows is not None:
for row_filter in t_batch_row.filter_rows:
self.t_data_df = self.t_data_df.loc[self.t_data_df[row_filter[0]].isin(row_filter[1])]
# Step 9 - Check the structure data.
self.check_dataframe(f'Structure worksheet data', self.t_structure_df, f'Check: Structure worksheet data',
self.check_structure_worksheet_data, f'Structure dataframe checked',
f'{t_batch_row.structure_worksheet}', f'Structure dataframe failure: ')
# Step 10 - Check the data worksheet data.
self.check_dataframe(f'Data dataframe', self.t_data_df, f'Check: Data worksheet data',
self.check_data_worksheet_data, f'Data dataframe checked',
f'{t_batch_row.data_worksheet}', f'Data dataframe failure: ')
self.wash_load(t_batch_row.template_file, t_batch_row.output_file)
del self.t_structure_photo_path
def generate_template_document(self):
"""
Generate a blank template.
:return:
"""
df_batch = pd.DataFrame(columns=EXPECTED_BATCH_HEADERS)
series_section_type = {key: '' for key in EXPECTED_STRUCTURE_HEADERS}
series_section_type['section_type'] = EXPECTED_SECTION_TYPES
df_structure = pd.DataFrame.from_dict(series_section_type)
with pd.ExcelWriter('Laundry_template.xlsx') as writer:
df_batch.to_excel(writer, sheet_name='_batch', index=False)
df_structure.to_excel(writer, sheet_name='_structure', index=False)
print_verbose('Template file saved.', verbose=True, **OUTPUT_TEXT)
exit_app()
def wash_load(self, template_file: Path, output_file: Path):
"""
:param template_file:
:param output_file:
:return:
"""
SingleLoad(self.t_structure_df, self.t_data_df, template_file, output_file)
def check_batch_worksheet_data(self):
"""
Check the batch worksheet data is in the correct format. Data is checked as a DataFrame. The following checks
are made.
Check 1: Confirm expected batch headers exist in the batch worksheet.
Check 2: Confirm Structure and data worksheets referenced in batch worksheet exist.
Check 3: Confirm the template files exist and resolve the files.
Check 4: Confirm an output filename has been provided.
Check 5: Check if filter_rows are provided; if so, prepare the row filters.
Check 6: Check if drop_empty_columns is None; if so, set it to False.
Check 7: Check if header_row is None, set it to 0.
:return:
"""
# Extract the data and structure worksheet names from the batch worksheet for error checking.
t_batch_headers = list(self.batch_df)
t_batch_data_worksheets_expected = list(self.batch_df.loc[:, 'data_worksheet'])
t_batch_structure_worksheets_expected = list(self.batch_df.loc[:, 'structure_worksheet'])
# Check 1.
self.data_check(f'\tBatch work sheet headers.', f'Ok', [(EXPECTED_BATCH_HEADERS, t_batch_headers)],
compare='subset')
# Check 2.
self.data_check(f'\tData & structure worksheets referenced correctly.', f'Ok',
[(t_batch_structure_worksheets_expected, self._sheets_actual),
(t_batch_data_worksheets_expected, self._sheets_actual)], compare='subset')
# Check 3.
for row in self.batch_df.itertuples():
print_verbose(f' Row {row.Index}:', verbose=self.output_verbose, **OUTPUT_TITLE)
print_verbose(f'\tTemplate file {row.template_file}.', verbose=self.output_verbose, end='...',
**OUTPUT_TEXT)
try:
if row.template_file not in invalid:
fp_template = resolve_file_path(row.template_file)
self.batch_df.at[row.Index, 'template_file'] = fp_template
print_verbose(f"{self.batch_df.at[row.Index, 'template_file']}", verbose=self.output_verbose,
**OUTPUT_TEXT)
except ValueError as v:
print_verbose(f'{v}: Row {row.Index} - Template file {row.template_file} does not exist at the given '
f'location.', True, **EXCEPTION_TEXT)
except Exception as e:
print_verbose(f'{e}: Row {row.Index}\n{row}', True, **EXCEPTION_TEXT)
# Check 4.
print_verbose(f'\tCheck output filename {row.output_file}.', verbose=self.output_verbose, end='...',
**OUTPUT_TEXT)
if str(row.output_file) in invalid:
raise ValueError(f'The name of the output file has not been provided.')
try:
fp_name = str(Path(row.output_file).name)
fp_output = Path(resolve_file_path(Path(row.output_file).parent)).joinpath(fp_name)
self.batch_df.at[row.Index, 'output_file'] = fp_output
print_verbose(f"{self.batch_df.at[row.Index, 'output_file']}", verbose=self.output_verbose,
**OUTPUT_TEXT)
except FileNotFoundError as f:
print_verbose(f'{f}. Row {row.Index} - Output file {row.output_file} directory does not exist at the '
f'given location.', True, **EXCEPTION_TEXT)
except Exception as e:
print_verbose(f'{e}: Row {row.Index}\n{row}', True, **EXCEPTION_TEXT)
# Check 5.
print_verbose(f'\tCheck row filters:', end='...', verbose=self.output_verbose, **OUTPUT_TEXT)
if str(row.filter_rows).lower() not in invalid and row.filter_rows is not None:
self.batch_df.at[row.Index, 'filter_rows'] = self.prepare_row_filters(row.filter_rows)
print_verbose(f'Ok', verbose=self.output_verbose, **OUTPUT_TEXT)
else:
print_verbose(f'Ok. No filters exist.', verbose=self.output_verbose, **OUTPUT_TEXT)
# Check 6.
print_verbose(f'\tCheck drop empty columns', verbose=self.output_verbose, end='...', **OUTPUT_TEXT)
if row.drop_empty_columns is None:
self.batch_df.at[row.Index, 'drop_empty_columns'] = False
print_verbose(f'Ok', verbose=self.output_verbose, **OUTPUT_TEXT)
else:
print_verbose(f'Ok', verbose=self.output_verbose, **OUTPUT_TEXT)
# Check 7
print_verbose(f'\tCheck header row details', verbose=self.output_verbose, end='...', **OUTPUT_TEXT)
if row.header_row is None:
self.batch_df.at[row.Index, 'header_row'] = 0
print_verbose(f'Ok', verbose=self.output_verbose, **OUTPUT_TEXT)
def check_structure_worksheet_data(self):
"""
Check 1: Confirm expected batch headers exist in the batch worksheet.
Check 2: Confirm the expected section types exist.
Check 3: Confirm the section_contains values exist in the structure document.
Check 4: Check the photo file paths and resolve
Check 5: Check if section_break is None, set it to False.
Check 6: Check if page_break is None, set it to False.
:return:
"""
t_structure_headers = list(self.t_structure_df)
# Check 1
self.data_check(f' Check structure work sheet headers', f'Ok',
[(EXPECTED_STRUCTURE_HEADERS, t_structure_headers)], compare='subset')
t_structure_section_types = list(self.t_structure_df.loc[:, 'section_type'])
t_data_section_types = list(self.t_data_df)
# Check 2
t_structure_section_contains: List = []
for each in list(self.t_structure_df.loc[:, 'section_contains']):
| |
ckpt.get("train_config", None) or ckpt.get("config", None)
assert aux_config is not None, "input checkpoint has no sufficient data to recover a model"
model = ECG_UNET_CPSC2021(config=ckpt["model_config"])
model.load_state_dict(ckpt["model_state_dict"])
return model, aux_config
class ECG_SUBTRACT_UNET_CPSC2021(ECG_SUBTRACT_UNET):
"""
"""
__DEBUG__ = True
__name__ = "ECG_SUBTRACT_UNET_CPSC2021"
def __init__(self, config:ED, **kwargs:Any) -> NoReturn:
""" finished, checked,
Parameters
----------
config: dict,
other hyper-parameters, including kernel sizes, etc.
ref. the corresponding config file
Usage
-----
from cfg import ModelCfg
task = "qrs_detection" # or "main"
model_cfg = deepcopy(ModelCfg[task])
model_cfg.model_name = "unet"
model = ECG_SUBTRACT_UNET_CPSC2021(model_cfg)
"""
super().__init__(config.classes, config.n_leads, config[config.model_name], **kwargs)
self.task = config.task
@torch.no_grad()
def inference(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,
**kwargs:Any) -> Union[Tuple[np.ndarray, List[List[List[int]]]], Tuple[np.ndarray, List[np.ndarray]]]:
""" finished, checked,
Parameters
----------
input: array_like,
input tensor, of shape (..., channels, seq_len)
bin_pred_thr: float, default 0.5,
the threshold for making binary predictions from scalar predictions
kwargs: task specific key word arguments
"""
if self.task == "qrs_detection":
return self._inference_qrs_detection(input, bin_pred_thr, **kwargs)
elif self.task == "main":
return self._inference_main_task(input, bin_pred_thr, **kwargs)
@torch.no_grad()
def inference_CPSC2021(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,
**kwargs:Any,) -> Union[Tuple[np.ndarray, List[List[List[int]]]], Tuple[np.ndarray, List[np.ndarray]]]:
"""
alias for `self.inference`
"""
return self.inference(input, bin_pred_thr, **kwargs)
@torch.no_grad()
def _inference_qrs_detection(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,
duration_thr:int=4*16,
dist_thr:Union[int,Sequence[int]]=200,) -> Tuple[np.ndarray, List[np.ndarray]]:
""" finished, checked,
NOTE: each segment of input should preferably be filtered using `_remove_spikes_naive`,
and normalized to a suitable mean and std
Parameters
----------
input: array_like,
input tensor, of shape (..., channels, seq_len)
bin_pred_thr: float, default 0.5,
the threshold for making binary predictions from scalar predictions
duration_thr: int, default 4*16,
minimum duration for a "true" qrs complex, units in ms
dist_thr: int or sequence of int, default 200,
if is sequence of int,
(0-th element). minimum distance for two consecutive qrs complexes, units in ms;
(1st element).(optional) maximum distance for checking missing qrs complexes, units in ms,
e.g. [200, 1200]
if is int, then is the case of (0-th element).
Returns
-------
pred: ndarray,
the array of scalar predictions
rpeaks: list of ndarray,
list of rpeak indices for each batch element
"""
self.eval()
_device = next(self.parameters()).device
_dtype = next(self.parameters()).dtype
_input = torch.as_tensor(input, dtype=_dtype, device=_device)
if _input.ndim == 2:
_input = _input.unsqueeze(0) # add a batch dimension
# batch_size, channels, seq_len = _input.shape
pred = self.forward(_input)
pred = self.sigmoid(pred)
pred = pred.cpu().detach().numpy().squeeze(-1)
# prob --> qrs mask --> qrs intervals --> rpeaks
rpeaks = _qrs_detection_post_process(
pred=pred,
fs=self.config.fs,
reduction=1,
bin_pred_thr=bin_pred_thr,
duration_thr=duration_thr,
dist_thr=dist_thr
)
return pred, rpeaks
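# Usage sketch (hypothetical checkpoint path, lead count and signal length;
# assumes the checkpoint was trained for the "qrs_detection" task):
#   model, train_cfg = ECG_SUBTRACT_UNET_CPSC2021.from_checkpoint("ckpt.pth.tar")
#   sig = torch.zeros(1, 12, 4000)   # (batch, channels, seq_len)
#   prob, rpeak_lists = model.inference(sig, bin_pred_thr=0.5)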
@torch.no_grad()
def _inference_main_task(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,
rpeaks:Optional[Union[Sequence[int],Sequence[Sequence[int]]]]=None,
episode_len_thr:int=5,) -> Tuple[np.ndarray, List[List[List[int]]]]:
""" finished, checked,
Parameters
----------
input: array_like,
input tensor, of shape (..., channels, seq_len)
bin_pred_thr: float, default 0.5,
the threshold for making binary predictions from scalar predictions
rpeaks: sequence of sequence of int, optional,
sequences of r peak indices
episode_len_thr: int, default 5,
minimal length of (both af and normal) episodes,
with units in number of beats (rpeaks)
Returns
-------
pred: ndarray,
the array of scalar predictions
af_episodes: list of list of intervals,
af episodes, in the form of intervals of [start, end]
"""
self.eval()
_device = next(self.parameters()).device
_dtype = next(self.parameters()).dtype
_input = torch.as_tensor(input, dtype=_dtype, device=_device)
if _input.ndim == 2:
_input = _input.unsqueeze(0) # add a batch dimension
batch_size, n_leads, seq_len = _input.shape
pred = self.forward(_input)
pred = self.sigmoid(pred)
pred = pred.cpu().detach().numpy().squeeze(-1)
af_episodes = _main_task_post_process(
pred=pred,
fs=self.config.fs,
reduction=self.config.reduction,
bin_pred_thr=bin_pred_thr,
rpeaks=rpeaks,
siglens=list(repeat(seq_len, batch_size)),
episode_len_thr=episode_len_thr,
)
return pred, af_episodes
@staticmethod
def from_checkpoint(path:str, device:Optional[torch.device]=None) -> Tuple[torch.nn.Module, dict]:
""" finished, checked,
Parameters
----------
path: str,
path of the checkpoint
device: torch.device, optional,
map location of the model parameters,
defaults "cuda" if available, otherwise "cpu"
Returns
-------
model: Module,
the model loaded from a checkpoint
aux_config: dict,
auxiliary configs that are needed for data preprocessing, etc.
"""
_device = device or (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"))
ckpt = torch.load(path, map_location=_device)
aux_config = ckpt.get("train_config", None) or ckpt.get("config", None)
assert aux_config is not None, "input checkpoint has no sufficient data to recover a model"
model = ECG_SUBTRACT_UNET_CPSC2021(config=ckpt["model_config"])
model.load_state_dict(ckpt["model_state_dict"])
return model, aux_config
class RR_LSTM_CPSC2021(RR_LSTM):
"""
"""
__DEBUG__ = True
__name__ = "RR_LSTM_CPSC2021"
def __init__(self, config:ED, **kwargs:Any) -> NoReturn:
""" finished, checked,
Parameters
----------
config: dict,
other hyper-parameters, including kernel sizes, etc.
ref. the corresponding config file
Usage
-----
from cfg import ModelCfg
task = "rr_lstm"
model_cfg = deepcopy(ModelCfg[task])
model_cfg.model_name = "rr_lstm"
model = RR_LSTM_CPSC2021(model_cfg)
"""
super().__init__(config.classes, config[config.model_name], **kwargs)
@torch.no_grad()
def inference(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,
rpeaks:Optional[Union[Sequence[int],Sequence[Sequence[int]]]]=None,
episode_len_thr:int=5,) -> Tuple[np.ndarray, List[List[List[int]]]]:
""" finished, checked,
Parameters
----------
input: array_like,
input tensor, of shape (..., seq_len, ...)
bin_pred_thr: float, default 0.5,
the threshold for making binary predictions from scalar predictions
rpeaks: sequence of sequence of int, optional,
sequences of r peak indices
episode_len_thr: int, default 5,
minimal length of (both af and normal) episodes,
with units in number of beats (rpeaks)
Returns
-------
pred: ndarray,
the array of scalar predictions
af_episodes: list of list of intervals,
af episodes, in the form of intervals of [start, end], right inclusive
WARNING
-------
for AFf, further processing is needed to move the start and end
to the first and last indices of the signal,
rather than the indices of the first and the last rpeak
"""
self.eval()
_device = next(self.parameters()).device
_dtype = next(self.parameters()).dtype
_input = torch.as_tensor(input, dtype=_dtype, device=_device)
if _input.ndim == 2:
_input = _input.unsqueeze(0) # add a batch dimension
elif _input.ndim == 1:
_input = _input.unsqueeze(0).unsqueeze(-1) # add a batch dimension and a channel dimension
# (batch_size, seq_len, n_channels) -> (seq_len, batch_size, n_channels)
_input = _input.permute(1,0,2)
pred = self.forward(_input)
if self.config.clf.name != "crf":
pred = self.sigmoid(pred)
pred = pred.cpu().detach().numpy().squeeze(-1)
af_episodes = _main_task_post_process(
pred=pred,
fs=1/0.8,
reduction=1,
bin_pred_thr=bin_pred_thr,
rpeaks=None,
siglens=None,
episode_len_thr=episode_len_thr,
)
if rpeaks is not None:
if isinstance(rpeaks[0], Real):
_rpeaks = [rpeaks]
else:
_rpeaks = rpeaks
# WARNING: need further processing to move start and end for the case of AFf
# NOTE that the next rpeak to the interval (of rr sequences) ends are added
af_episodes = [[[r[itv[0]], r[itv[1]+1]] for itv in a] for a,r in zip(af_episodes, _rpeaks)]
return pred, af_episodes
@torch.no_grad()
def inference_CPSC2021(self,
input:Union[Sequence[float],np.ndarray,Tensor],
bin_pred_thr:float=0.5,) -> Any:
"""
alias for `self.inference`
"""
return self.inference(input, bin_pred_thr)
@staticmethod
def from_checkpoint(path:str, device:Optional[torch.device]=None) -> Tuple[torch.nn.Module, dict]:
""" finished, checked,
Parameters
----------
path: str,
path of the checkpoint
device: torch.device, optional,
map location of the model parameters,
defaults "cuda" if available, otherwise "cpu"
Returns
-------
model: Module,
the model loaded from a checkpoint
aux_config: dict,
auxiliary configs that are needed for data preprocessing, etc.
"""
_device = device or (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"))
ckpt = torch.load(path, map_location=_device)
aux_config = ckpt.get("train_config", None) or ckpt.get("config", None)
assert aux_config is not None, "input checkpoint has no sufficient data to recover a model"
model = RR_LSTM_CPSC2021(config=ckpt["model_config"])
model.load_state_dict(ckpt["model_state_dict"])
return model, aux_config
def _qrs_detection_post_process(pred:np.ndarray,
fs:Real,
reduction:int,
bin_pred_thr:float=0.5,
skip_dist:int=500,
duration_thr:int=4*16,
dist_thr:Union[int,Sequence[int]]=200,) -> List[np.ndarray]:
""" finished, checked,
prob --> qrs mask --> qrs intervals --> rpeaks
Parameters
----------
pred: ndarray,
array of predicted probability
fs: real number,
sampling frequency of the ECG
reduction: int,
reduction (granularity) of `pred` w.r.t. the ECG
bin_pred_thr: float, default 0.5,
the threshold for making binary predictions from scalar predictions
skip_dist: int, default 500,
detected rpeaks with distance (units in ms) shorter than `skip_dist`
to two ends of the ECG will be discarded
duration_thr: int, default 4*16,
minimum duration for a "true" qrs complex, units in ms
dist_thr: int or sequence of int, default 200,
if is sequence of int,
(0-th element). minimum distance for two consecutive qrs complexes, units in ms;
(1st element).(optional) maximum distance for checking missing qrs complexes, units in ms,
e.g. [200, 1200]
if is int, then is the case of (0-th element).
"""
batch_size, prob_arr_len = pred.shape
# print(batch_size, prob_arr_len)
model_spacing = 1000 / fs # units in ms
input_len = reduction * prob_arr_len
_skip_dist = skip_dist / model_spacing # number of samples
_duration_thr = duration_thr / model_spacing / reduction
_dist_thr = [dist_thr] if isinstance(dist_thr, int) else dist_thr
assert len(_dist_thr) <= 2
# mask = (pred > bin_pred_thr).astype(int)
rpeaks = []
for b_idx in range(batch_size):
b_prob = pred[b_idx,...]
b_mask = (b_prob >= bin_pred_thr).astype(int)
b_qrs_intervals = mask_to_intervals(b_mask, 1)
# print(b_qrs_intervals)
b_rpeaks = np.array([itv[0]+itv[1] for itv | |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# from tensorflow.bitwise import bitwise_xor
from itertools import product
import hashlib
import numpy as np
import importlib
import re
import pdb
import tensorflow as tf
import sonnet as snt
from ..argoLogging import get_logger
import copy
tf_logging = get_logger()
NUMTOL = 1e-7 # NB if you plan to change this, talk to me (****)
AC_REGULARIZATION = "activity_and_contractive_regularizers"
CUSTOM_REGULARIZATION = "custom_regularizers"
def create_panels_lists(list_of_vpanels_of_plots):
nodes_to_log = []
names_of_nodes_to_log = []
filenames_to_log_to = []
for vpanel in list_of_vpanels_of_plots:
nodes_vpanel = []
names_vpanel = []
files_vpanel = []
for plot in vpanel:
assert isinstance(plot["nodes"], list), "`nodes` in a plot dictionary must be a list"
assert isinstance(plot["names"], list), "`names` in a plot dictionary must be a list"
assert isinstance(plot["output"], dict), "`output` in a plot dictionary must be a dict"
nodes_vpanel.append(plot["nodes"])
names_vpanel.append(plot["names"])
files_vpanel.append(plot["output"])
nodes_to_log.append(nodes_vpanel)
names_of_nodes_to_log.append(names_vpanel)
filenames_to_log_to.append(files_vpanel)
return nodes_to_log, names_of_nodes_to_log, filenames_to_log_to
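# Sketch of the expected input structure (node and file names are illustrative,
# not taken from the calling code):
#   list_of_vpanels_of_plots = [
#       [   # first vertical panel
#           {"nodes": [loss_node], "names": ["loss"], "output": {"fileName": "loss"}},
#       ],
#   ]
#   nodes, names, files = create_panels_lists(list_of_vpanels_of_plots)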
def update_conf_with_defaults(opts, default_params):
# update the defaul values in opts
# use .update to modify the dict in place
# originally
#passed_opts = opts.copy()
#opts.update(self.default_params)
#opts.update(passed_opts)
# new
copy_opts = copy.deepcopy(opts)
copy_opts.update(default_params)
copy_opts.update(opts)
return copy_opts
def my_loss_full_logits(y, logits):
n = logits.get_shape().as_list()[1]
probabilities = tf.nn.softmax(logits)
loss = tf.reduce_sum(-tf.one_hot(y, depth=n) * tf.log(probabilities + NUMTOL), axis=1)
return loss
def make_list(l):
return l if isinstance(l, list) else [l]
def create_list_colors(max_colors):
r = max_colors / 10.0
return plt.cm.tab10((1 / r * np.arange(10 * r)).astype(int))
def load_sonnet_module(module_name, kwargs, instantiate=True):
tf_logging.info("Loading sonnet module " + str(module_name))
try:
my_path = '.'.join(__name__.split('.')[:-2])
# try first to get the module from argo
layer_module = importlib.import_module(my_path + ".network." + module_name)
sntmodule = eval_method_from_tuple(layer_module, (module_name, kwargs), instantiate)
except ImportError:
# otherwise get module from sonnet or sonnet.nets or raise exception
module = None
if hasattr(snt, module_name):
module = snt
elif hasattr(snt.nets, module_name):
module = snt.nets
else:
raise Exception("sonnet module " + module_name + " not recognized")
sntmodule = eval_method_from_tuple(module, (module_name, kwargs), instantiate)
except Exception as e:
raise Exception("problem loading module: %s, kwargs: %s, exception: %s" % (module_name, kwargs, e)) from e
return sntmodule
def get_ac_collection_name(additional_str=None):
ac_collection_name = AC_REGULARIZATION
if additional_str:
ac_collection_name += "_" + additional_str
return ac_collection_name
def compose_name(basename, dataset_str, separator="_"):
if basename[0] == "-":
basename = basename[1:]
return basename + separator + dataset_str
def hash_this(longstring, trunc=None):
hasher = hashlib.sha1(longstring.encode('utf-8'))
hexstr = hasher.hexdigest()
if trunc:
hexstr=hexstr[:trunc]
return hexstr
# from https://stackoverflow.com/questions/47709854/how-to-get-covariance-matrix-in-tensorflow?rq=1
# once it is compatible with Python3, we should move to
# https://www.tensorflow.org/tfx/transform/api_docs/python/tft/covariance
# NB I cannot get the value of n_points, since I concatenated the tensors, thus I need to
# return the matrix up to the multiplication by n_points. Annoying, but I cannot find a solution
def tf_cov_times_n_points(x):
mean_x = tf.reduce_mean(x, axis=0, keepdims=True)
sum_x = tf.reduce_sum(x, axis=0, keepdims=True)
mx = tf.matmul(tf.transpose(mean_x), sum_x)
# n_points = x.shape.as_list()[0]
vx = tf.matmul(tf.transpose(x), x) # /n_points
cov_xx = vx - mx
return cov_xx
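# Usage note (a sketch; n_points has to be supplied by the caller, since this
# function deliberately does not divide by it):
#   cov_xx = tf_cov_times_n_points(x) / float(n_points)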
def create_reset_metric(metric, scope, **metric_args):
"""create a metric inside a scope to control over variables reset operations.
suggestion by: shoeffner -> https://github.com/tensorflow/tensorflow/issues/4814
reimplemented and added check if scope is not empty, to avoid accidentally resetting some other variables.
Args:
metric (type): a tf.metric function, this typically returns a tuple: (metric_op, update_op).
scope (type): scope_name to use in the creation of the metric nodes.
(scope should be different from any other scope already containing variables)
**metric_args (type): arguments to pass to the metric function -> metric(**metric_args).
Returns:
(metric_op, update_op, reset_op)
Example usage:
```python
metric_op, update_op, reset_op = create_reset_metric(tf.metrics.mean,
scope="mean_reset_metric/"+tensor.name,
values=tensor)
```
"""
scope = scope.replace(":", "_")
with tf.compat.v1.variable_scope(scope) as scope:
local_vars = tf.contrib.framework.get_variables(scope,
collection=tf.GraphKeys.LOCAL_VARIABLES)
# this performs a check that the scope is currently empty,
# this is very important to ensure the reset_op will reset
# only the metric variables created in the present function
if local_vars:
raise Exception("local variables already present in scope: `%s`. " \
"I cannot safely initialize reset operation for the metric." % scope.name)
metric_op, update_op = metric(**metric_args)
local_vars = tf.contrib.framework.get_variables(scope,
collection=tf.GraphKeys.LOCAL_VARIABLES)
reset_op = tf.compat.v1.variables_initializer(local_vars)
return metric_op, update_op, reset_op
def create_concat_opts(scope, node):
# self.concat_ops[ds_key] = tf.contrib.framework.get_variables(scope,
# collection=tf.GraphKeys.LOCAL_VARIABLES)
# if self.concat_ops[ds_key]:
# raise Exception("variable already present in scope: `%s`. "\
# "I cannot safely initialize reset operation for the metric." % scope.name)
scope = scope.replace(":", "_")
with tf.variable_scope(scope) as scope:
# local_vars = tf.contrib.framework.get_variables(scope,
# collection=tf.GraphKeys.LOCAL_VARIABLES)
# if local_vars:
# raise Exception("local variables already present in scope: `%s`. "\
# "I cannot safely initialize reset operation for the metric." % scope.name)
# see https://github.com/tensorflow/tensorflow/issues/4432
# TODO must it be 1-D? Maybe yes (for PCA yes, for sure).
node_shape = node.shape.as_list()
if len(node_shape) != 2:
raise RuntimeError("the node passed for concatenation is not a 2D tensor, as expected...")
dim = node_shape[1]
accumulator = tf.get_variable("accumulator",
initializer=tf.zeros([0, dim]),
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES]
)
i = tf.get_variable("index",
initializer=tf.constant(0),
dtype=tf.int32,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES]
)
def assign():
# with tf.control_dependencies(tf.assign_add(i, 1)):
return tf.assign(accumulator, node, validate_shape=False), tf.assign_add(i, 1)
def concat():
return tf.assign(accumulator, tf.concat([accumulator, node], axis=0), validate_shape=False), tf.assign_add(i, 1)
concat_update_ops = tf.cond(tf.equal(i, 0),
assign,
concat)
concat_reset_ops = tf.variables_initializer([i])
return accumulator, concat_update_ops, concat_reset_ops
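# --- illustrative sketch (editorial addition) ---
# Intended call pattern for the triple returned above: run concat_update_ops once per
# batch to stack `node` activations into the accumulator, read the accumulator (e.g. to
# feed tf_cov_times_n_points), then run concat_reset_ops before the next evaluation pass.
# `sess`, `node` and `feed_dicts` are hypothetical caller-side names.
def _example_concat_loop(sess, node, feed_dicts):
    accumulator, update_ops, reset_ops = create_concat_opts("concat/" + node.name, node)
    sess.run(tf.local_variables_initializer())  # accumulator and index live in LOCAL_VARIABLES
    for fd in feed_dicts:
        sess.run(update_ops, feed_dict=fd)      # first call assigns, later calls concatenate
    stacked = sess.run(accumulator)             # shape: (total_points, dim)
    sess.run(reset_ops)                         # restart the assign/concat logic for the next pass
    return stacked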
'''
def sample_discrete_from_continuous(probabilities):
bernoulli = tf.distributions.Bernoulli(probs=probabilities)
return bernoulli.sample()
'''
'''
def rescale(data, eps, min_value=0., max_value=1.):
delta = max_value - min_value
return (delta - 2 * eps) * data + eps + min_value
'''
def tf_rescale(data, eps, min_value=-1.0, max_value=1.0):
delta = max_value - min_value
return (delta - 2*eps)*data + eps + min_value
'''
def clip(data, low=-1, high=1):
return np.clip(data, low, high)
'''
def tf_clip(data, low=-1.0, high=1.0):
#data = tf.cast(data, dtype=tf.float32)
return tf.clip_by_value(data, low, high)
def np_softplus(x, limit=30):
if x > limit:
return x
else:
return np.log(1.0 + np.exp(x))
dtype_short = {
'float16': 'f16',
'float32': 'f32',
'float64': 'f64',
'bfloat16': 'bf16',
'complex64': 'c64',
'complex128': 'c128'}
def get_short_dtype(dtypestr):
"""
return the dtype name (string) in short form, typically used for id construction
Args:
dtypestr (str) : dtype name in string format
Returns:
str : the short name
"""
if dtypestr not in dtype_short:
raise ValueError('the type specified %s is not supported.' % dtypestr)
return dtype_short[dtypestr]
# layer_short_names={'dense' : 'D',
# 'conv2d' : 'C',
# 'Linear' : 'D',
# 'Conv2D' : 'C',
# 'GaussianDiagonal' : 'GD',
# 'GaussianDiagonalZeroOne' : 'GDZO',
# 'LogitNormalDiagonal' : 'LND',
# 'Bernoulli' : 'B'}
#
# def get_short_layer_name(layer_str, layer_kwargs):
# """
# return the layer type name (string) in short form, typically used for id construction
#
# Args:
# layer_str (str) : layer type in string format
#
# Returns:
# str : the short name
# """
# if not layer_str in layer_short_names:
# raise ValueError('the type specified %s is not supported.'%layer_str)
# return layer_short_names[layer_str]
#
#
regularizers_short_names = {
'standard_contractive_regularizer': 'SCR',
'cos_contractive_regularizer': 'CCR',
'geometric_contractive_regularizer': 'GCR',
'wasserstein_contractive_regularizer': 'WCR',
'ring_loss_regularizer': 'RLR',
'ring_loss_variable_regularizer': 'RLVR',
'contractive_reg_list': ''}
def get_short_regularization_name(reg_name):
"""
return the regularization name (string) in short form, typically used for id construction
Args:
reg_name (str) : regularization name in string format
Returns:
str : the short name
"""
if reg_name not in regularizers_short_names:
raise ValueError('the regularizer specified %s is not supported.' % reg_name)
return regularizers_short_names[reg_name]
def regularization_info(layer_dict):
# "contractive_regularizer" : ("standard_contractive_regularizer",
# {"norm": 2, "scale_mean" : 0.1, "scale_covariance" : 0.1})
reg_info = ""
contr_reg = layer_dict.get("contractive_regularizer", None)
if contr_reg is not None:
reg_info = "r"
crname, crdict = contr_reg
if crname == 'contractive_reg_list':
list_regs = crdict['list_regs']
for reg_tuple in list_regs:
reg_info += regularization_info({"contractive_regularizer": reg_tuple})
else:
reg_info += get_short_regularization_name(crname)
if "norm" in crdict:
reg_info += "_n" + str(crdict["norm"])
if "scale_mean" in crdict:
reg_info += "_sm" + str(crdict["scale_mean"])
if "scale" in crdict:
reg_info += "_s" + str(crdict["scale"])
if "scale_covariance" in crdict:
reg_info += "_sc" + str(crdict["scale_covariance"])
# if not "refnode" in crdict:
# raise Exception("refnode field in contractive_regularizer kwargs must be: `inputs` or `net`")
# reg_info += "_rn" + crdict["refnode"][0].upper()
# TODO add your own regularizer type and extract relevant parameters!
return reg_info
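# --- illustrative worked example (editorial addition) ---
# For the layer_dict shape shown in the comment at the top of regularization_info:
#   layer_dict = {"contractive_regularizer": ("standard_contractive_regularizer",
#                                             {"norm": 2, "scale_mean": 0.1, "scale_covariance": 0.1})}
#   regularization_info(layer_dict)  ->  "rSCR_n2_sm0.1_sc0.1"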
method_name_short = {
# activation functions
'relu': 'R',
'elu': 'E',
'leaky_relu': 'LR',
'LeakyReLU': 'LR',
'sigmoid': 'S',
'tanh': 'T',
# layers
'dense': 'D',
'conv2d': 'C',
'max_pooling2d': 'P',
'flatten': '', # not shown
# snt modules
'Linear': 'D',
'LinearWN': 'D',
'Concatenate': 'CO',
'Conv2D': 'C',
'Conv2DTranspose': 'CT',
'Conv2DWN': 'C',
'ConvNet2D': 'CN',
'ConvNet2DTranspose': 'CNT',
'ConvDec': 'CDec',
'ResEnc': 'REnc',
'ResDec': 'RDec',
'BatchFlatten': '',
'Identity': '',
'BatchNorm': 'BN',
'LayerNorm': 'LN',
'BatchReshape': 'BR',
'ResUnit': 'RU',
'ResNet18': 'RN18',
'VGGBlock': 'V',
'Sigmoid': 'S',
'Tanh': 'T',
'MaxPooling2D': 'P',
'Dropout': 'DO',
'RandomUniform': 'RU',
'RandomGaussian': 'RG',
'AveragePooling2D': 'AP',
# stochastic_models
'GaussianDiagonal': 'GD',
'GaussianDiagonalZeroOne': 'GD01',
'GaussianDiagonalPlusMinusOne': 'GDPM1',
'Gaussian': 'G',
'vonMisesFisher': 'vMF',
'LogisticDiagonalZeroOne': 'LD01',
'LogisticDiagonalPlusMinusOne': 'LDPM1',
'LogitNormalDiagonal': 'LND',
'LogitNormalDiagonalPlusMinusOne': 'LND01',  # TODO confusing naming: the suffix should be "PM1" rather than "01".
t1 = time.clock()
self.scene.removeItem(self.map_point)
for path in paths:
self.scene.addPath(path)
t2 = time.clock()
if DEBUG:
self.logInteractive("Render time: %.4f s" % (t2 - t1))
self.scene.addItem(self.map_point)
if int(event.type()) == FILE_LOG_EVENT:
self.logF2F(event.msg)
if int(event.type()) == RETURN_CODE_EVENT:
self.onF2FReturnCode(event.rc)
def drawPoint(self, x, y, z, mlb_in):
if self.map_transformation.mlb_in != mlb_in:
self.map_transformation.Insert(mlb_in)
try:
x, y, z = self.map_transformation.TransformPoint(x, y, z)
except Exception, e:
self.map_point.setBrush(QtGui.QBrush(QColor(255, 10, 10, 90)))
self.logInteractive(
"Error in map transformation - failed to draw map point", "red")
else:
self.map_point.setBrush(QtGui.QBrush(QColor(255, 10, 10, 200)))
r = 2**(-self.map_zoom) * 10
self.map_point.setPos(x - r * 0.5, -y - r * 0.5)
self.map_coords = (x, y)
self.scene.update()
# TODO: consider enabling this redraw of map-canvas on transformation....
# if (self.chb_zoom_to_point.isChecked() and self.map_zoom>0):
# self.gv_map.centerOn(self.map_point)
@pyqtSignature('') # prevents actions being handled twice
def on_bt_zoom_in_clicked(self):
if self.map_zoom > 7:
return
self.map_zoom += 1
self.zoomMap()
@pyqtSignature('') # prevents actions being handled twice
def on_bt_zoom_out_clicked(self):
if self.map_zoom <= 0:
return
self.map_zoom -= 1
self.zoomMap()
def zoomMap(self):
self.gv_map.setMatrix(
QMatrix(2**(self.map_zoom), 0, 0, 2**(self.map_zoom), 0, 0))
r = 2**(-self.map_zoom)
x, y = self.map_coords
self.map_point.setPos(x - r * 5, -y - r * 5)
self.map_point.setScale(r)
if (self.chb_zoom_to_point.isChecked()):
self.gv_map.centerOn(self.map_point)
self.scene.update()
def initRegion(self):
self._handle_system_change = False
systems, init_coords, region_name = REGIONS[self.region]
self.lbl_region_value.setText(region_name)
self.cb_input_system.clear()
self.cb_output_system.clear()
self.cb_f2f_input_system.clear()
self.cb_f2f_output_system.clear()
self.cb_f2f_input_system.addItems(systems)
self.cb_f2f_output_system.addItems(systems)
self.cb_input_system.addItems(systems)
self.cb_output_system.addItems(systems)
self.setInteractiveInput(init_coords)
self.cb_input_system.setCurrentIndex(0) # TODO: don't emit signal!
self.cb_output_system.setCurrentIndex(0)
self._handle_system_change = True
self.transformInput() # this should trigger the redraw of the point
self.zoomMap()
for widget in self.getAdditionalWidgets():
if hasattr(widget, "handleRegionChange"):
widget.handleRegionChange(self.region)
def onSystemInChanged(self):
if not self._handle_system_change:
return
#Trigger a transformation#
self.transformInput(True, False)
def onSystemOutChanged(self):
if not self._handle_system_change:
return
#Trigger a transformation#
self.transformInput(False, True)
def setSystemInfo(self, do_input=True, do_output=False):
if do_input:
mlb_in = str(self.cb_input_system.currentText())
text = TrLib.DescribeLabel(mlb_in)
if self.affine_modifications.apply_interactive and self.affine_modifications.input.apply:
text += ",+affine modification"
self.lbl_input_info.setText("Input system info: %s" % text)
labels = Minilabel.getSystemLabels(mlb_in)
if labels is not None:
for i in range(3):
self.input_labels[i].setText(labels[i])
if do_output:
mlb_out = str(self.cb_output_system.currentText())
text = TrLib.DescribeLabel(mlb_out)
if self.affine_modifications.apply_interactive and self.affine_modifications.output.apply:
text += ",+affine modification"
self.lbl_output_info.setText("Output system info: %s" % text)
labels = Minilabel.getSystemLabels(mlb_out)
if labels is not None:
for i in range(3):
self.output_labels[i].setText(labels[i])
@pyqtSignature('') # prevents actions being handled twice
def on_bt_change_h_in_clicked(self):
mlb_in = str(self.cb_input_system.currentText())
mlb = Minilabel.changeHeightSystem(
mlb_in, H_SYSTEMS[self.region], DATUM_ALLOWED_H_SYSTEMS, False)
if mlb != mlb_in:
self.cb_input_system.setEditText(mlb)
self.transformInput(True, False)
@pyqtSignature('') # prevents actions being handled twice
def on_bt_change_h_out_clicked(self):
mlb_out = str(self.cb_output_system.currentText())
mlb = Minilabel.changeHeightSystem(
mlb_out, H_SYSTEMS[self.region], DATUM_ALLOWED_H_SYSTEMS)
if mlb != mlb_out:
self.cb_output_system.setEditText(mlb)
self.transformInput(False, True)
@pyqtSignature('') # prevents actions being handled twice
def on_bt_interactive_swap_clicked(self):
# Transform - then swap input/output
# TODO - consider how to handle affine modifications on a swap. Perhaps failing is ok...
# OR we can flip the affine modifications also...
self.transformInput()
if self.output_cache.is_valid:
self._handle_system_change = False
mlb_in = str(self.cb_input_system.currentText())
mlb_out = self.output_cache.mlb
self.setInteractiveInput(self.output_cache.coords, mlb_out)
self.cb_input_system.setEditText(mlb_out)
self.cb_output_system.setEditText(mlb_in)
self._handle_system_change = True
self.transformInput()
if self.affine_modifications.apply_interactive:
self.logInteractive(
"NOTE: Systems have been interchanged - but not the affine modifications.", "blue")
# Various setters below
def setForeignHook(self, hook):
# Set the foreign hook used to return control e.g. to a test,
# after a background process has finished.
assert(hasattr(hook, "proceed"))
self.foreign_hook = hook
def setRegionDK(self):
if self.region != REGION_DK:
self.region = REGION_DK
self.initRegion()
def setRegionFO(self):
if self.region != REGION_FO:
self.region = REGION_FO
self.initRegion()
def setRegionGR(self):
if self.region != REGION_GR:
self.region = REGION_GR
self.initRegion()
def setRegionWorld(self):
if self.region != REGION_WORLD:
self.region = REGION_WORLD
self.initRegion()
def setAngularUnitsDegrees(self):
if self.geo_unit != ANGULAR_UNIT_DEGREES:
self.geo_unit = ANGULAR_UNIT_DEGREES
self.translateGeoUnits()
self.action_angular_units[ANGULAR_UNIT_DEGREES].setChecked(True)
self.lbl_geo_coords_value.setText("%3s" % ANGULAR_UNIT_DEGREES)
def setAngularUnitsRadians(self):
if self.geo_unit != ANGULAR_UNIT_RADIANS:
self.geo_unit = ANGULAR_UNIT_RADIANS
self.translateGeoUnits()
self.action_angular_units[ANGULAR_UNIT_RADIANS].setChecked(True)
self.lbl_geo_coords_value.setText("%3s" % ANGULAR_UNIT_RADIANS)
def setAngularUnitsNt(self):
if self.geo_unit != ANGULAR_UNIT_NT:
self.geo_unit = ANGULAR_UNIT_NT
self.translateGeoUnits()
self.action_angular_units[ANGULAR_UNIT_NT].setChecked(True)
self.lbl_geo_coords_value.setText("%3s" % ANGULAR_UNIT_NT)
def setAngularUnitsSx(self):
if self.geo_unit != ANGULAR_UNIT_SX:
self.geo_unit = ANGULAR_UNIT_SX
self.translateGeoUnits()
self.action_angular_units[ANGULAR_UNIT_SX].setChecked(True)
self.lbl_geo_coords_value.setText("%3s" % ANGULAR_UNIT_SX)
def setDerivedAngularUnitsDegrees(self):
if self.geo_unit_derived != ANGULAR_UNIT_DEGREES:
self.geo_unit_derived = ANGULAR_UNIT_DEGREES
self.translateDerivedGeoUnits()
self.action_angular_units_derived[
ANGULAR_UNIT_DEGREES].setChecked(True)
def setDerivedAngularUnitsRadians(self):
if self.geo_unit_derived != ANGULAR_UNIT_RADIANS:
self.geo_unit_derived = ANGULAR_UNIT_RADIANS
self.translateDerivedGeoUnits()
self.action_angular_units_derived[
ANGULAR_UNIT_RADIANS].setChecked(True)
def setDerivedAngularUnitsNt(self):
if self.geo_unit_derived != ANGULAR_UNIT_NT:
self.geo_unit_derived = ANGULAR_UNIT_NT
self.translateDerivedGeoUnits()
self.action_angular_units_derived[ANGULAR_UNIT_NT].setChecked(True)
def setDerivedAngularUnitsSx(self):
if self.geo_unit_derived != ANGULAR_UNIT_SX:
self.geo_unit_derived = ANGULAR_UNIT_SX
self.translateDerivedGeoUnits()
self.action_angular_units_derived[ANGULAR_UNIT_SX].setChecked(True)
def translateGeoUnits(self):
if self.output_cache.is_valid and TrLib.IsGeographic(self.output_cache.mlb):
WidgetUtils.setOutput(self.output_cache.coords, self.output[
:2], True, angular_unit=self.geo_unit, precision=self.coord_precision)
if TrLib.IsGeographic(str(self.cb_input_system.currentText())):
for field in self.input[:2]:
WidgetUtils.translateAngularField(field, self.geo_unit)
for widget in self.getAdditionalWidgets():
if hasattr(widget, "handleGeoUnitChange"):
widget.handleGeoUnitChange(self.geo_unit)
for key in self.action_angular_units.keys():
self.action_angular_units[key].setChecked(self.geo_unit == key)
def translateDerivedGeoUnits(self):
# for field in self.derived_angular_output:
# WidgetUtils.translateAngularField(field,self.geo_unit_derived)
self.onShowScale()
for widget in self.getAdditionalWidgets():
if hasattr(widget, "handleAngularUnitChange"):
widget.handleAngularUnitChange(self.geo_unit_derived)
for key in self.action_angular_units_derived.keys():
self.action_angular_units_derived[
key].setChecked(self.geo_unit_derived == key)
#will be called both from event handler and programmatically to set/clear fields on success/error#
# Added numeric hack for s34, kk and os systems....
def onShowScale(self):
if (self.chb_show_scale.isChecked() and self.output_cache.is_valid):
if not self.output_cache.has_scale:
# cache scale and convergence....
if (self.output_cache.proj_weakly_defined):
if (self.output_cache.mlb != self.numeric_scale_transf.mlb_in):
self.numeric_scale_transf.Insert(
self.output_cache.mlb, True)
sc, m = getNumericScale(self.output_cache.coords[0], self.output_cache.coords[
1], self.numeric_scale_transf, self.fallback_ellipsoid[1], self.fallback_ellipsoid[2])
self.logInteractive(
"INFO: calculating scale and convergence numerically relative to ETRS89 datum", "blue")
else:
sc, m = self.coordinate_transformation.GetLocalGeometry(
self.output_cache.coords[0], self.output_cache.coords[1])
self.output_cache.scale = sc
self.output_cache.meridian_convergence = m
self.output_cache.has_scale = True
self.txt_scale.setText("%.7f" % self.output_cache.scale)
self.txt_meridian_convergence.setText(translateFromDegrees(
self.output_cache.meridian_convergence, self.geo_unit_derived, precision=0))
if DEBUG:
self.logInteractive(
repr(self.output_cache.coords) + "\n" + self.output_cache.mlb)
else:
self.txt_scale.setText("")
self.txt_meridian_convergence.setText("")
def getInteractiveInput(self, mlb_in=None):
if mlb_in is None:
mlb_in = str(self.cb_input_system.currentText())
is_angle = TrLib.IsGeographic(mlb_in)
coords, msg = WidgetUtils.getInput(
self.input, is_angle, angular_unit=self.geo_unit)
if len(msg) > 0:
self.logInteractive(msg)
return coords
def setInteractiveOutput(self, coords, mlb_out=None):
#if coords==[] we clear output fields#
if mlb_out is None:
mlb_out = str(self.cb_output_system.currentText())
is_angle = TrLib.IsGeographic(mlb_out)
WidgetUtils.setOutput(coords, self.output, is_angle, z_fields=[
2], angular_unit=self.geo_unit, precision=self.coord_precision)
def setInteractiveInput(self, coords, mlb_in=None):
if mlb_in is None:
mlb_in = str(self.cb_input_system.currentText())
is_angle = TrLib.IsGeographic(mlb_in)
WidgetUtils.setOutput(coords, self.input, is_angle, z_fields=[
2], angular_unit=self.geo_unit, precision=self.coord_precision)
def transformInput(self, input_index_changed=False, output_index_changed=False):
if self._clear_log:
self.logInteractive("", clear=True)
self.output_cache.is_valid = False
self.output_cache.has_scale = False
mlb_in = str(self.cb_input_system.currentText())
mlb_out = str(self.cb_output_system.currentText())
# self.logInteractive(repr(self.output_cache.coords))
# Check if we should update system info
update_in = (mlb_in != self.mlb_in)
update_out = (mlb_out != self.output_cache.mlb)
if (update_in or update_out):
self.setSystemInfo(update_in, update_out)
self.mlb_in = mlb_in
self.output_cache.mlb = mlb_out
self.output_cache.proj_weakly_defined = Minilabel.isProjWeaklyDefined(
mlb_out)
coords = self.getInteractiveInput(mlb_in)
if len(coords) != 3:
self.logInteractive(
"Input coordinate in field %d not OK!" % (len(coords) + 1), "red")
self.setInteractiveOutput([])
self.onShowScale()
self.input[len(coords)].setFocus()
return
x_in, y_in, z_in = coords
# Apply affine mod first.
if self.affine_modifications.apply_interactive and self.affine_modifications.input.apply:
x_in, y_in, z_in = self.affine_modifications.input.transform(
x_in, y_in, z_in)
self.logInteractive("Applying affine modification of input. Modified input: {0:.3f} {1:.3f} {2:.3f}".format(
x_in, y_in, z_in), "blue")
if mlb_in != self.coordinate_transformation.mlb_in:
try:
self.coordinate_transformation.Insert(mlb_in)
except Exception, msg:
# if call was from in_system_changed - remove item
if input_index_changed:
self._handle_system_change = False
self.cb_input_system.removeItem(
self.cb_input_system.currentIndex())
self.cb_input_system.setEditText(mlb_in)
self._handle_system_change = True
self.setInteractiveOutput([])
self.logInteractive("Input label not OK!\n%s" %
repr(msg), color="red")
return
# at this point mbl in and input coords are validated and we can
# attempt to draw the map point
self.drawPoint(x_in, y_in, z_in, mlb_in)
if mlb_out != self.coordinate_transformation.mlb_out:
try:
self.coordinate_transformation.Insert(mlb_out, False)
except Exception, msg:
if output_index_changed:
self._handle_system_change = False
self.cb_output_system.removeItem(
self.cb_output_system.currentIndex())
self.cb_output_system.setEditText(mlb_out)
self._handle_system_change = True
self.setInteractiveOutput([])
self.logInteractive("Output label not OK!\n%s" %
repr(msg), color="red")
return
try:
x, y, z, h = self.coordinate_transformation.TransformGH(
x_in, y_in, z_in)
except Exception, msg:
self.setInteractiveOutput([])
err = TrLib.GetLastError()
if err in ERRORS:
self.logInteractive("%s" % ERRORS[err], color="red")
else:
self.logInteractive("Error in transformation", color="red")
self.onShowScale()
return
#Cache output after successful transformation#
self.output_cache.is_valid = True
self.output_cache.coords = [x, y, z]
self.onShowScale() # here we cache scale and convergence also!
self.txt_geoid_height.setText("%.4f m" % h)
geoid_name = self.coordinate_transformation.GetGeoidName()
if DEBUG:
self.logInteractive("Geoid: %s" % geoid_name)
# the affine modification should not infect scale, caching etc. - only
# used in display.
if self.affine_modifications.apply_interactive and self.affine_modifications.output.apply:
# check when in the logical chain to apply this.....
x, y, z = self.affine_modifications.output.transform(x, y, z)
self.logInteractive(
"Applying affine modification of output.", "blue")
# self.logInteractive(repr(self.output_cache.coords))
# does nothing but display the coords
self.setInteractiveOutput([x, y, z])
self.txt_geoid_name.setText(geoid_name)
#TAB File2File#
def initF2FTab(self):
# Auto completion - don't do it for now as input might not be a *file*
# completer=QCompleter()
# completer.setModel(QDirModel(completer))
# completer.setCompletionMode(QCompleter.InlineCompletion)
# self.txt_f2f_input_file.setCompleter(completer)
if self.gdal_settings.load_mode == 1:
self.logF2F("Using included GDAL installation.")
elif self.gdal_settings.load_mode == 2:
self.logF2F("Using custom GDAL installation.")
else:
self.logF2F("Using system GDAL installation.")
frmts = LibTrui.getOGRFormats()
self.cb_f2f_ogr_driver.clear()
self.cb_f2f_ogr_driver.addItems(frmts)
File2File.setCommand(TROGR)
rc, msg = File2File.testCommand()
if (rc != 0):
self.message(
"Batch transformation program %s not availabe!" % TROGR)
self.tab_ogr.setEnabled(False)
self.logF2F(msg)
def loadLibtrui(self):
self.has_ogr, msg = LibTrui.initLibrary(BIN_PREFIX)
if not self.has_ogr:
dmsg | |
# Repository: kakkotetsu/IxNetwork
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class PceInitiateLSPParameters(Base):
"""The PceInitiateLSPParameters class encapsulates a required pceInitiateLSPParameters node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the PceInitiateLSPParameters property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'pceInitiateLSPParameters'
def __init__(self, parent):
super(PceInitiateLSPParameters, self).__init__(parent)
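# --- illustrative usage sketch (editorial addition, not part of the generated file) ---
# Per the class docstring, an instance is obtained from a parent node rather than
# constructed directly; `pcc` below is a hypothetical parent instance exposing this property:
#   params = pcc.PceInitiateLSPParameters
#   params.Name = 'pce-initiate-1'            # Name has a setter
#   params.NumberOfEroSubObjects = 2          # plain number attributes are set directly
#   ero_list = params.PcepEroSubObjectsList   # child nodes are accessed as properties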
@property
def PceInitiateXROobject(self):
"""An instance of the PceInitiateXROobject class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pceinitiatexroobject.PceInitiateXROobject)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pceinitiatexroobject import PceInitiateXROobject
return PceInitiateXROobject(self)
@property
def PcepEroSubObjectsList(self):
"""An instance of the PcepEroSubObjectsList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pceperosubobjectslist.PcepEroSubObjectsList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pceperosubobjectslist import PcepEroSubObjectsList
return PcepEroSubObjectsList(self)
@property
def PcepMetricSubObjectsList(self):
"""An instance of the PcepMetricSubObjectsList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcepmetricsubobjectslist.PcepMetricSubObjectsList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcepmetricsubobjectslist import PcepMetricSubObjectsList
return PcepMetricSubObjectsList(self)
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def AssociationId(self):
"""The Association ID of this LSP.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('associationId')
@property
def Bandwidth(self):
"""Bandwidth (bits/sec)
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bandwidth')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def DestEndPointIpv4(self):
"""Dest IPv4 address of the path for which a path computation is Initiated. Will be greyed out if IP Version is IPv6.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('destEndPointIpv4')
@property
def DestEndPointIpv6(self):
"""Dest IPv6 address of the path for which a path computation is Initiated. Will be greyed out if IP Version is IPv4.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('destEndPointIpv6')
@property
def EnableXro(self):
"""Include XRO
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableXro')
@property
def ExcludeAny(self):
"""This is a type of Resource Affinity Procedure that is used to validate a link. This control accepts a link only if the link carries all of the attributes in the set.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('excludeAny')
@property
def FailBit(self):
"""Fail Bit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('failBit')
@property
def HoldingPriority(self):
"""The priority of the LSP with respect to holding resources. The value 0 is the highest priority. Holding Priority is used in deciding whether this session can be preempted by another session.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('holdingPriority')
@property
def IncludeAll(self):
"""This is a type of Resource Affinity Procedure that is used to validate a link. This control excludes a link from consideration if the link carries any of the attributes in the set.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeAll')
@property
def IncludeAny(self):
"""This is a type of Resource Affinity Procedure that is used to validate a link. This control accepts a link if the link carries any of the attributes in the set.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeAny')
@property
def IncludeAssociation(self):
"""Indicates whether PPAG will be included in a PCInitiate message. All other attributes in sub-tab-PPAG would be editable only if this checkbox is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeAssociation')
@property
def IncludeBandwidth(self):
"""Indicates whether Bandwidth will be included in a PCInitiate message. All other attributes in sub-tab-Bandwidth would be editable only if this checkbox is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeBandwidth')
@property
def IncludeEndPoints(self):
"""Indicates whether END-POINTS object will be included in a PCInitiate message. All other attributes in sub-tab-End Points would be editable only if this checkbox is enabled
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeEndPoints')
@property
def IncludeEro(self):
"""Specifies whether ERO is active or inactive. All subsequent attributes of the sub-tab-ERO would be editable only if this is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeEro')
@property
def IncludeLsp(self):
"""Indicates whether LSP will be included in a PCInitiate message. All other attributes in sub-tab-LSP would be editable only if this checkbox is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeLsp')
@property
def IncludeLspa(self):
"""Indicates whether LSPA will be included in a PCInitiate message. All other attributes in sub-tab-LSPA would be editable only if this checkbox is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeLspa')
@property
def IncludeMetric(self):
"""Indicates whether the PCInitiate message will have the metric list that is configured. All subsequent attributes of the sub-tab-Metric would be editable only if this is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeMetric')
@property
def IncludeSrp(self):
"""Indicates whether SRP object will be included in a PCInitiate message. All other attributes in sub-tab-SRP would be editable only if this checkbox is enabled.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeSrp')
@property
def IncludeSymbolicPathNameTlv(self):
"""Indicates if Symbolic-Path-Name TLV is to be included in PCInitiate message.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includeSymbolicPathNameTlv')
@property
def IpVersion(self):
"""Drop down to select the IP Version with 2 choices : IPv4 / IPv6
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipVersion')
@property
def LocalProtection(self):
"""When set, this means that the path must include links protected with Fast Reroute
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('localProtection')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def NumberOfEroSubObjects(self):
"""Value that indicates the number of ERO Sub Objects to be configured.
Returns:
number
"""
return self._get_attribute('numberOfEroSubObjects')
@NumberOfEroSubObjects.setter
def NumberOfEroSubObjects(self, value):
self._set_attribute('numberOfEroSubObjects', value)
@property
def NumberOfMetricSubObject(self):
"""Value that indicates the number of Metric Objects to be configured.
Returns:
number
"""
return self._get_attribute('numberOfMetricSubObject')
@NumberOfMetricSubObject.setter
def NumberOfMetricSubObject(self, value):
self._set_attribute('numberOfMetricSubObject', value)
@property
def NumberOfXroSubObjects(self):
"""Number of XRO Sub Objects
Returns:
number
"""
return self._get_attribute('numberOfXroSubObjects')
@NumberOfXroSubObjects.setter
def NumberOfXroSubObjects(self, value):
self._set_attribute('numberOfXroSubObjects', value)
@property
def OverridePlspId(self):
"""Indicates if PLSP-ID will be set by the state machine or user. If disabled user wont have the control and state machine will set it.
Returns:
bool
"""
return self._get_attribute('overridePlspId')
@OverridePlspId.setter
def OverridePlspId(self, value):
self._set_attribute('overridePlspId', value)
@property
def OverrideSrpIdNumber(self):
"""Indicates whether SRP ID Number is overridable.
Returns:
bool
"""
return self._get_attribute('overrideSrpIdNumber')
@OverrideSrpIdNumber.setter
def OverrideSrpIdNumber(self, value):
self._set_attribute('overrideSrpIdNumber', value)
@property
def PathSetupType(self):
"""Indicates which type of LSP will be requested in the PCInitiated Request.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('pathSetupType')
@property
def PlspId(self):
"""An identifier for the LSP. A PCC creates a unique PLSP-ID for each LSP that is constant for the lifetime of a PCEP session. The PCC will advertise the same PLSP-ID on all PCEP sessions it maintains at a given time.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('plspId')
@property
def ProtectionLsp(self):
"""Indicates whether Protection LSP Bit is On.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('protectionLsp')
@property
def SessionInfo(self):
"""Logs additional information about the LSP state
Returns:
list(str[advertised|delegatedActive|delegatedDown|delegatedGoingUp|delegatedUp|init|none|notDelegatedActive|notDelegatedDown|notDelegatedGoingUp|notDelegatedUp|pcErrorReceived|removedByPCC|removedByPCE|returnDelegation])
"""
return self._get_attribute('sessionInfo')
@property
def SetupPriority(self):
"""The priority of the LSP with respect to taking resources.The value 0 is the highest priority.The Setup Priority is used in deciding whether this session can preempt another session.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('setupPriority')
@property
def SrcEndPointIpv4(self):
"""Source IPv4 address of the path for which a path computation is Initiated. Will be greyed out if IP Version is set to IPv6.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srcEndPointIpv4')
@property
def SrcEndPointIpv6(self):
"""Source IPv6 address of the path for which a path computation is Initiated. Will be greyed out if IP version is set to IPv4.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srcEndPointIpv6')
@property
def SrpIdNumber(self):
"""The SRP object is used to correlate between initiation requests sent by the PCE and the error reports and state reports sent by the PCC. This number is unique per PCEP session and is incremented per initiation.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srpIdNumber')
@property
def StandbyMode(self):
"""Indicates whether Standby LSP Bit is On.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('standbyMode')
@property
def SymbolicPathName(self):
"""Each LSP (path) must have a symbolic name that is unique in the PCC. It must remain constant throughout a path's lifetime, which may span across multiple consecutive PCEP sessions and/or PCC restarts.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('symbolicPathName')
def get_device_ids(self, PortNames=None, Active=None, AssociationId=None, Bandwidth=None, DestEndPointIpv4=None, DestEndPointIpv6=None, EnableXro=None, ExcludeAny=None, FailBit=None, HoldingPriority=None, IncludeAll=None, IncludeAny=None, IncludeAssociation=None, IncludeBandwidth=None, IncludeEndPoints=None, IncludeEro=None, IncludeLsp=None, IncludeLspa=None, IncludeMetric=None, IncludeSrp=None, IncludeSymbolicPathNameTlv=None, IpVersion=None, LocalProtection=None, PathSetupType=None, PlspId=None, ProtectionLsp=None, SetupPriority=None, SrcEndPointIpv4=None, SrcEndPointIpv6=None, SrpIdNumber=None, StandbyMode=None, SymbolicPathName=None):
"""Base class infrastructure that gets a list of pceInitiateLSPParameters device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
AssociationId (str): optional regex of associationId
Bandwidth (str): optional regex of bandwidth
DestEndPointIpv4 (str): optional regex of destEndPointIpv4
DestEndPointIpv6 (str): optional regex of destEndPointIpv6
EnableXro (str): optional regex of enableXro
ExcludeAny (str): optional regex of excludeAny
FailBit (str): optional regex of failBit
HoldingPriority (str): optional regex of holdingPriority
IncludeAll (str): optional regex of includeAll
IncludeAny (str): optional regex of includeAny
IncludeAssociation (str): optional regex of includeAssociation
IncludeBandwidth (str): optional regex of includeBandwidth
IncludeEndPoints (str): optional regex of includeEndPoints
IncludeEro (str): optional regex of includeEro
IncludeLsp (str): optional regex of includeLsp
IncludeLspa (str): optional regex of includeLspa
IncludeMetric (str): optional regex of includeMetric
IncludeSrp (str): optional regex of includeSrp
# Repository: telecombcn-dl/2017-dlai-team5
"""
PySC2_A3C_old.py
A script for training and running an A3C agent on the PySC2 environment, with reference to DeepMind's paper:
[1] Vinyals, Oriol, et al. "Starcraft II: A new challenge for reinforcement learning." arXiv preprint arXiv:1708.04782 (2017).
Advantage estimation uses generalized advantage estimation from:
[2] Schulman, John, et al. "High-dimensional continuous control using generalized advantage estimation." arXiv preprint arXiv:1506.02438 (2015).
Credit goes to <NAME> for providing for reference an implementation of A3C for the VizDoom environment
https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
https://github.com/awjuliani/DeepRL-Agents
Note:
Currently only works on the DefeatRoaches mini-game; work is in progress to generalize the script to run on all mini-games
"""
import threading
import multiprocessing
import numpy as np
import tensorflow as tf
import scipy.signal
from time import sleep
import os
from pysc2.env import sc2_env
from pysc2.env import environment
from pysc2.lib import actions
"""
Use the following command to launch Tensorboard:
tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
"""
## HELPER FUNCTIONS
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope,to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
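# --- illustrative sketch (editorial addition) ---
# Running the returned assign ops copies the global network's trainable variables into a
# worker's local copy, e.g. (with a live session `sess` and an existing 'worker_0' scope):
#   sync_ops = update_target_graph('global', 'worker_0')
#   sess.run(sync_ops)   # worker_0 now mirrors the global parameters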
# Processes PySC2 observations
def process_observation(observation):
nonspatial_size = 727
screen_channels = 7
multi_select_max = 100
# is episode over?
episode_end = observation.step_type == environment.StepType.LAST
# reward
reward = observation.reward
# features
features = observation.observation
# nonspatial features
# TimeStep.observation['control_groups'](10,2)
# TimeStep.observation['single_select'](1,7)
# TimeStep.observation['multi_select'](n,7)
nonspatial_stack = features['control_groups'].reshape(-1)
nonspatial_stack = np.concatenate((nonspatial_stack, features['single_select'].reshape(-1)))
multi_select = features['multi_select'].reshape(-1)
# if multi_select has less than multi_select_max units, pad with zeros
if len(multi_select) < multi_select_max * 7:
multi_select = np.concatenate((multi_select, np.zeros(multi_select_max * 7 - len(multi_select))))
nonspatial_stack = np.concatenate((nonspatial_stack, multi_select))
# spatial_minimap features
# not used for DefeatRoaches since no camera movement is required
minimap_stack = None
# spatial_screen features
# TimeStep.observation['screen'][5] (player_relative)
# TimeStep.observation['screen'][6] (unit_type)
# TimeStep.observation['screen'][7] (selected)
# TimeStep.observation['screen'][8] (unit_hit_points)
# TimeStep.observation['screen'][9] (unit_hit_points_ratio)
# TimeStep.observation['screen'][14] (unit_density)
# TimeStep.observation['screen'][15] (unit_density_aa)
screen_stack = np.stack((features['screen'][5], features['screen'][6], features['screen'][7], features['screen'][8], features['screen'][9], features['screen'][14], features['screen'][15]), axis=2)
return reward, nonspatial_stack.reshape([-1,nonspatial_size]), minimap_stack, screen_stack.reshape([-1,64,64,screen_channels]), episode_end
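# --- editorial note on the returned shapes ---
# reward: scalar; nonspatial: (1, 727) = 20 control_groups + 7 single_select + 100*7 multi_select;
# minimap: None (unused for DefeatRoaches); screen: (1, 64, 64, 7); episode_end: bool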
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
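# --- illustrative reference (editorial addition) ---
# discount() computes the reverse cumulative discounted sum, i.e. y[t] = x[t] + gamma * y[t+1].
# A plain-Python equivalent, for reference:
def _discount_reference(x, gamma):
    out, running = [], 0.0
    for v in reversed(x):
        running = v + gamma * running
        out.append(running)
    return out[::-1]
# e.g. _discount_reference([1.0, 1.0, 1.0], 0.99) -> [2.9701, 1.99, 1.0] (up to float rounding)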
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
# Sample from distribution of arguments
def sample_dist(dist):
sample = np.random.choice(dist[0],p=dist[0])
sample = np.argmax(dist == sample)
return sample
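# Editorial note: sample_dist draws a value from dist[0] with probabilities dist[0] and then
# recovers its index via the equality test above, so duplicate probability values all map to the
# first matching index. Assuming dist has shape (1, n), a more direct equivalent sketch would be:
#   np.random.choice(dist.shape[1], p=dist[0])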
## ACTOR-CRITIC NETWORK
class AC_Network():
def __init__(self,scope,trainer):
with tf.variable_scope(scope):
# Architecture here follows Atari-net Agent described in [1] Section 4.3
nonspatial_size = 727
screen_channels = 7
self.inputs_nonspatial = tf.placeholder(shape=[None,nonspatial_size], dtype=tf.float32)
self.inputs_spatial_screen_reshaped = tf.placeholder(shape=[None,64,64,screen_channels], dtype=tf.float32)
self.nonspatial_dense = tf.layers.dense(
inputs=self.inputs_nonspatial,
units=32,
activation=tf.tanh)
self.screen_conv1 = tf.layers.conv2d(
inputs=self.inputs_spatial_screen_reshaped,
filters=16,
kernel_size=[8,8],
strides=[4,4],
padding='valid',
activation=tf.nn.relu)
self.screen_conv2 = tf.layers.conv2d(
inputs=self.screen_conv1,
filters=32,
kernel_size=[4,4],
strides=[2,2],
padding='valid',
activation=tf.nn.relu)
# According to [1]: "The results are concatenated and sent through a linear layer with a ReLU activation."
self.latent_vector = tf.layers.dense(
inputs=tf.concat([self.nonspatial_dense, tf.reshape(self.screen_conv2,shape=[-1,6*6*32])], axis=1),
units=256,
activation=tf.nn.relu)
# Output layers for policy and value estimations
# 12 policy networks for base actions and arguments
# - All modeled independently
# - Spatial arguments have the x and y values modeled independently as well
# 1 value network
self.policy_base_actions = tf.layers.dense(
inputs=self.latent_vector,
units=17,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_add = tf.layers.dense(
inputs=self.latent_vector,
units=2,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(1.0))
self.policy_arg_queued = tf.layers.dense(
inputs=self.latent_vector,
units=2,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(1.0))
self.policy_arg_select_point_act = tf.layers.dense(
inputs=self.latent_vector,
units=4,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_unit_act = tf.layers.dense(
inputs=self.latent_vector,
units=4,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_control_group_act = tf.layers.dense(
inputs=self.latent_vector,
units=5,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_control_group_id = tf.layers.dense(
inputs=self.latent_vector,
units=10,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_unit_id = tf.layers.dense(
inputs=self.latent_vector,
units=500,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen_x = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen_y = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen2_x = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen2_y = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.value = tf.layers.dense(
inputs=self.latent_vector,
units=1,
kernel_initializer=normalized_columns_initializer(1.0))
# Only the worker network needs ops for loss functions and gradient updating.
# calculates the losses
# self.gradients - gradients of loss wrt local_vars
# applies the gradients to update the global network
if scope != 'global':
self.actions_base = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_base = tf.one_hot(self.actions_base,17,dtype=tf.float32)
self.actions_arg_screen_x = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen_x = tf.one_hot(self.actions_arg_screen_x,64,dtype=tf.float32)
self.actions_arg_screen_y = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen_y = tf.one_hot(self.actions_arg_screen_y,64,dtype=tf.float32)
self.actions_arg_screen2_x = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen2_x = tf.one_hot(self.actions_arg_screen2_x,64,dtype=tf.float32)
self.actions_arg_screen2_y = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen2_y = tf.one_hot(self.actions_arg_screen2_y,64,dtype=tf.float32)
self.actions_arg_select_point_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_point_act = tf.one_hot(self.actions_arg_select_point_act,4,dtype=tf.float32)
self.actions_arg_select_add = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_add = tf.one_hot(self.actions_arg_select_add,2,dtype=tf.float32)
self.actions_arg_control_group_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_control_group_act = tf.one_hot(self.actions_arg_control_group_act,5,dtype=tf.float32)
self.actions_arg_control_group_id = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_control_group_id = tf.one_hot(self.actions_arg_control_group_id,10,dtype=tf.float32)
self.actions_arg_select_unit_id = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_unit_id = tf.one_hot(self.actions_arg_select_unit_id,500,dtype=tf.float32)
self.actions_arg_select_unit_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_unit_act = tf.one_hot(self.actions_arg_select_unit_act,4,dtype=tf.float32)
self.actions_arg_queued = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_queued = tf.one_hot(self.actions_arg_queued,2,dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None],dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None],dtype=tf.float32)
self.responsible_outputs_base = tf.reduce_sum(self.policy_base_actions * self.actions_onehot_base, [1])
self.responsible_outputs_arg_screen_x = tf.reduce_sum(self.policy_arg_screen_x * self.actions_onehot_arg_screen_x, [1])
self.responsible_outputs_arg_screen_y = tf.reduce_sum(self.policy_arg_screen_y * self.actions_onehot_arg_screen_y, [1])
self.responsible_outputs_arg_screen2_x = tf.reduce_sum(self.policy_arg_screen2_x * self.actions_onehot_arg_screen2_x, [1])
self.responsible_outputs_arg_screen2_y = tf.reduce_sum(self.policy_arg_screen2_y * self.actions_onehot_arg_screen2_y, [1])
self.responsible_outputs_arg_select_point_act = tf.reduce_sum(self.policy_arg_select_point_act)
self.responsible_outputs_arg_select_add = tf.reduce_sum(self.policy_arg_select_add)
self.responsible_outputs_arg_control_group_act = tf.reduce_sum(self.policy_arg_control_group_act)
self.responsible_outputs_arg_control_group_id = tf.reduce_sum(self.policy_arg_control_group_id)
self.responsible_outputs_arg_select_unit_id = tf.reduce_sum(self.policy_arg_select_unit_id)
self.responsible_outputs_arg_select_unit_act = tf.reduce_sum(self.policy_arg_select_unit_act)
self.responsible_outputs_arg_queued = tf.reduce_sum(self.policy_arg_queued)
# Loss functions
self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
self.log_policy_base_actions = tf.log(tf.clip_by_value(self.policy_base_actions, 1e-20, 1.0)) # avoid NaN with clipping when value in policy becomes zero
self.entropy_base = - tf.reduce_sum(self.policy_base_actions * self.log_policy_base_actions)
self.entropy_arg_screen_x = - tf.reduce_sum(self.policy_arg_screen_x * tf.log(tf.clip_by_value(self.policy_arg_screen_x, 1e-20, 1.0)))
self.entropy_arg_screen_y = - tf.reduce_sum(self.policy_arg_screen_y * tf.log(tf.clip_by_value(self.policy_arg_screen_y, 1e-20, 1.0)))
self.entropy_arg_screen2_x = - tf.reduce_sum(self.policy_arg_screen2_x * tf.log(tf.clip_by_value(self.policy_arg_screen2_x, 1e-20, 1.0)))
self.entropy_arg_screen2_y = - tf.reduce_sum(self.policy_arg_screen2_y * tf.log(tf.clip_by_value(self.policy_arg_screen2_y, 1e-20, 1.0)))
self.entropy_arg_select_point_act = - tf.reduce_sum(self.policy_arg_select_point_act * tf.log(tf.clip_by_value(self.policy_arg_select_point_act, 1e-20, 1.0)))
self.entropy_arg_select_add = - tf.reduce_sum(self.policy_arg_select_add * tf.log(tf.clip_by_value(self.policy_arg_select_add, 1e-20, 1.0)))
self.entropy_arg_control_group_act = - tf.reduce_sum(self.policy_arg_control_group_act * tf.log(tf.clip_by_value(self.policy_arg_control_group_act, 1e-20, 1.0)))
self.entropy_arg_control_group_id = - tf.reduce_sum(self.policy_arg_control_group_id * tf.log(tf.clip_by_value(self.policy_arg_control_group_id, 1e-20, 1.0)))
self.entropy_arg_select_unit_id = - tf.reduce_sum(self.policy_arg_select_unit_id * tf.log(tf.clip_by_value(self.policy_arg_select_unit_id, 1e-20, 1.0)))
self.entropy_arg_select_unit_act = - tf.reduce_sum(self.policy_arg_select_unit_act * tf.log(tf.clip_by_value(self.policy_arg_select_unit_act, 1e-20, 1.0)))
self.entropy_arg_queued = - tf.reduce_sum(self.policy_arg_queued * tf.log(tf.clip_by_value(self.policy_arg_queued, 1e-20, 1.0)))
self.entropy = self.entropy_base + self.entropy_arg_screen_x + self.entropy_arg_screen_y + self.entropy_arg_screen2_x + self.entropy_arg_screen2_y + self.entropy_arg_select_point_act + self.entropy_arg_select_add + self.entropy_arg_control_group_act + self.entropy_arg_control_group_id + self.entropy_arg_select_unit_id + self.entropy_arg_select_unit_act + self.entropy_arg_queued
self.policy_loss_base = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_base, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen_x = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen_x, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen_y = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen_y, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen2_x = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen2_x, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen2_y = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen2_y, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_point_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_point_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_add = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_add, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_control_group_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_control_group_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_control_group_id = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_control_group_id, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_unit_id = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_unit_id, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_unit_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_unit_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_queued = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_queued, 1e-20, 1.0))*self.advantages)
self.policy_loss = self.policy_loss_base + self.policy_loss_arg_screen_x + self.policy_loss_arg_screen_y + self.policy_loss_arg_screen2_x + self.policy_loss_arg_screen2_y + self.policy_loss_arg_select_point_act + self.policy_loss_arg_select_add + self.policy_loss_arg_control_group_act + self.policy_loss_arg_control_group_id + self.policy_loss_arg_select_unit_id + self.policy_loss_arg_select_unit_act + self.policy_loss_arg_queued
self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss,local_vars)
self.var_norms = tf.global_norm(local_vars)
grads,self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.apply_grads = trainer.apply_gradients(zip(grads,global_vars))
## WORKER AGENT
class Worker():
def __init__(self,name,trainer,model_path,global_episodes):
self.name = "worker_" + str(name)
self.number = name
self.model_path = model_path
self.trainer = trainer
self.global_episodes = global_episodes
self.increment = self.global_episodes.assign_add(1)
self.episode_rewards = []
self.episode_lengths = []
self.episode_mean_values = []
self.summary_writer = tf.summary.FileWriter("train_"+str(self.number))
#Create the local copy of the network and the tensorflow op to copy global paramters to local network
self.local_AC = AC_Network(self.name,trainer)
self.update_local_ops = update_target_graph('global',self.name)
self.env = sc2_env.SC2Env(map_name="DefeatRoaches")
def train(self,rollout,sess,gamma,bootstrap_value):
rollout = np.array(rollout)
obs_screen = rollout[:,0]
obs_nonspatial = rollout[:,1]
actions_base = rollout[:,2]
actions_arg_screen_x = rollout[:,3]
actions_arg_screen_y = rollout[:,4]
actions_arg_screen2_x = rollout[:,5]
actions_arg_screen2_y = rollout[:,6]
actions_arg_select_point_act = rollout[:,7]
actions_arg_select_add = rollout[:,8]
actions_arg_control_group_act = rollout[:,9]
actions_arg_control_group_id = rollout[:,10]
actions_arg_select_unit_id = rollout[:,11]
actions_arg_select_unit_act = rollout[:,12]
actions_arg_queued = rollout[:,13]
rewards = rollout[:,14]
next_obs_screen = rollout[:,15]
next_obs_nonspatial = rollout[:,16]
values = rollout[:,18]
# Here we take the rewards and values from the rollout, and use them to calculate the advantage and discounted returns.
# The advantage function uses generalized advantage estimation from [2]
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages,gamma)
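# Editorial note: the two lines above are generalized advantage estimation from [2] with
# lambda = 1: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and
# advantages[t] = sum_l gamma**l * delta_{t+l}; discounted_rewards are the empirical
# returns used as targets for the value head.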
# Update the global network using gradients from loss
# Generate network statistics to periodically save
feed_dict = {self.local_AC.target_v:discounted_rewards,
self.local_AC.inputs_spatial_screen_reshaped:np.stack(obs_screen).reshape(-1,64,64,7),
self.local_AC.inputs_nonspatial:np.stack(obs_nonspatial).reshape(-1,727),
self.local_AC.actions_base:actions_base,
self.local_AC.actions_arg_screen_x:actions_arg_screen_x,
self.local_AC.actions_arg_screen_y:actions_arg_screen_y,
self.local_AC.actions_arg_screen2_x:actions_arg_screen2_x,
self.local_AC.actions_arg_screen2_y:actions_arg_screen2_y,
self.local_AC.actions_arg_select_point_act:actions_arg_select_point_act,
self.local_AC.actions_arg_select_add:actions_arg_select_add,
self.local_AC.actions_arg_control_group_act:actions_arg_control_group_act,
self.local_AC.actions_arg_control_group_id:actions_arg_control_group_id,
self.local_AC.actions_arg_select_unit_id:actions_arg_select_unit_id,
self.local_AC.actions_arg_select_unit_act:actions_arg_select_unit_act,
self.local_AC.actions_arg_queued:actions_arg_queued,
self.local_AC.advantages:advantages}
v_l,p_l,e_l,g_n,v_n, _ = sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.apply_grads],
feed_dict=feed_dict)
return v_l / len(rollout),p_l / len(rollout),e_l / len(rollout), g_n,v_n
def work(self,max_episode_length,gamma,sess,coord,saver):
episode_count = sess.run(self.global_episodes)
total_steps = 0
print ("Starting worker " + str(self.number))
with sess.as_default(), sess.graph.as_default(): | |
# SAVI = ((1 + L)(NIR - RED)) / (NIR + RED + L)
# Change type to float32
L = 0.5
b3f = b3.astype(np.float32)
b6f = b6.astype(np.float32)
# Perform the calculations of the formula (SAVI values from -1.0 to 1.0)
savi = np.divide(np.multiply(
(1 + L), np.subtract(b6f, b3f)), np.add(np.add(b6f, b3f), L))
# Normalized SAVI values from 0.0 to 1.0
saviNorm1 = np.add(np.multiply(savi, 0.5), 0.5)
# Normalized SAVI values from 0.0 to 255.0
saviNorm2 = np.multiply(saviNorm1, 255)
# Normalized SAVI values to export integer values
saviNorm3 = saviNorm2.astype(np.uint8)
# Colors for colormapping RGB
saviColor = np.zeros((339, 426, 3), np.uint8)
# Colors for color-mapping in RGB palette
# SAVI coloring for different materials
# Soil, vegetation and other materials respectively
# The colors are BGR not RGB
saviColor[savi >= 0.00] = [189, 233, 234]
saviColor[savi > 0.05] = [148, 211, 215]
saviColor[savi > 0.10] = [119, 189, 202]
saviColor[savi > 0.15] = [77, 175, 175]
saviColor[savi > 0.20] = [5, 169, 128]
saviColor[savi > 0.30] = [0, 127, 12]
saviColor[savi > 0.40] = [0, 94, 0]
saviColor[savi > 0.50] = [1, 59, 0]
saviColor[savi > 0.60] = [0, 9, 0]
saviColor[savi < 0.00] = [128, 128, 128]
saviColor[savi < -0.05] = [96, 96, 96]
saviColor[savi < -0.25] = [64, 64, 64]
saviColor[savi < -0.50] = [32, 32, 32]
return saviNorm3, saviColor
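# Minimal usage sketch (illustrative assumptions: `indices` is an instance of this
# class, and `b3` / `b6` are uint8 numpy arrays of shape (339, 426) holding the RED
# and NIR bands; these names are not part of this module):
#   saviGray, saviBGR = indices.saviCalculator(b3, b6)
#   cv2.imshow('SAVI (grayscale)', saviGray)
#   cv2.imshow('SAVI (custom BGR colormap)', saviBGR)
#   cv2.waitKey(0)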
# Calculate GSAVI values with custom colormap (green band 1 (560nm) is not accurate, needs to be approximately 510 nm)
def gsaviCalculator(self, b1, b6):
# Ignore warnings (comment the lines below when debugging)
np.seterr(divide='ignore', invalid='ignore')
# GSAVI = ((1 + L)(NIR - GREEN)) / (NIR + GREEN + L)
# GREEN (green band 1 (560nm) is not accurate needs to be approximately 510 nm for real green band)
# Change type to float32
L = 0.5
b1f = b1.astype(np.float32)
b6f = b6.astype(np.float32)
# Perform the calculations of the formula (GSAVI values from -1.0 to 1.0)
gsavi = np.divide(np.multiply(
(1 + L), np.subtract(b6f, b1f)), np.add(np.add(b6f, b1f), L))
# Normalized GSAVI values from 0.0 to 1.0
gsaviNorm1 = np.add(np.multiply(gsavi, 0.5), 0.5)
# Normalized GSAVI values from 0.0 to 255.0
gsaviNorm2 = np.multiply(gsaviNorm1, 255)
# Normalized GSAVI values to export integer values
gsaviNorm3 = gsaviNorm2.astype(np.uint8)
# Colors for colormapping RGB
gsaviColor = np.zeros((339, 426, 3), np.uint8)
# Colors for color-mapping in RGB palette
# GSAVI coloring for different materials
# Soil, vegetation and other materials respectively
# The colors are BGR not RGB
gsaviColor[gsavi >= 0.00] = [189, 233, 234]
gsaviColor[gsavi > 0.05] = [148, 211, 215]
gsaviColor[gsavi > 0.10] = [119, 189, 202]
gsaviColor[gsavi > 0.15] = [77, 175, 175]
gsaviColor[gsavi > 0.20] = [5, 169, 128]
gsaviColor[gsavi > 0.30] = [0, 127, 12]
gsaviColor[gsavi > 0.40] = [0, 94, 0]
gsaviColor[gsavi > 0.50] = [1, 59, 0]
gsaviColor[gsavi > 0.60] = [0, 9, 0]
gsaviColor[gsavi < 0.00] = [128, 128, 128]
gsaviColor[gsavi < -0.05] = [96, 96, 96]
gsaviColor[gsavi < -0.25] = [64, 64, 64]
gsaviColor[gsavi < -0.50] = [32, 32, 32]
return gsaviNorm3, gsaviColor
# MCARI (Modified Chlorophyll Absorption Ratio Index)
def mcariCalculator(self, b1, b4, b5):
# Ignore warnings (comment the lines below when debugging)
np.seterr(divide='ignore', invalid='ignore')
# MCARI = ((R700 - R670) - 0.2 * (R700 - R550 )) * (R700 / R670)
# Change type to float32
b1f = b1.astype(np.float32)
b4f = b4.astype(np.float32)
b5f = b5.astype(np.float32)
# Perform the calculations of the formula
mcari = np.multiply(np.subtract(np.subtract(b5f, b4f), np.multiply(
0.2, np.subtract(b5f, b1f))), np.divide(b5f, b4f))
return mcari
# MSR (Modified Simple Ratio)
def msrCalculator(self, b3, b6):
# Ignore warnings (comment the lines below when debugging)
np.seterr(divide='ignore', invalid='ignore')
# MSR = (NIR/RED - 1) / (sqrt(NIR/RED) + 1)
# Change type to float32
b3f = b3.astype(np.float32)
b6f = b6.astype(np.float32)
# Perform the calculations of the formula (MSR values from -1.0 to 1.0)
msr = np.divide(np.subtract(np.divide(b6f, b3f), 1),
np.add(np.sqrt(np.divide(b6f, b3f)), 1))
# Normalized MSR values from 0.0 to 1.0
msrNorm1 = np.add(np.multiply(msr, 0.5), 0.5)
# Normalized MSR values from 0.0 to 255.0
msrNorm2 = np.multiply(msrNorm1, 255)
# Normalized MSR values to export integer values
msrNorm3 = msrNorm2.astype(np.uint8)
# Colors for colormapping RGB
msrColor = np.zeros((339, 426, 3), np.uint8)
# Colors for color-mapping in RGB palette
# MSR coloring for different materials
# Soil, vegetation and other materials respectively
# The colors are BGR not RGB
msrColor[msr >= 0.00] = [189, 233, 234]
msrColor[msr > 0.05] = [148, 211, 215]
msrColor[msr > 0.10] = [119, 189, 202]
msrColor[msr > 0.15] = [77, 175, 175]
msrColor[msr > 0.20] = [5, 169, 128]
msrColor[msr > 0.30] = [0, 127, 12]
msrColor[msr > 0.40] = [0, 94, 0]
msrColor[msr > 0.50] = [1, 59, 0]
msrColor[msr > 0.60] = [0, 9, 0]
msrColor[msr < 0.00] = [128, 128, 128]
msrColor[msr < -0.05] = [96, 96, 96]
msrColor[msr < -0.25] = [64, 64, 64]
msrColor[msr < -0.50] = [32, 32, 32]
return msrNorm3, msrColor
# Calculate TVI (Triangular Vegetation Index)
# Calculate MTVI1 (Modified Triangular Vegetation Index 1)
# Calculate MTVI2 (Modified Triangular Vegetation Index 2)
def tviCalculator(self, b1, b3, b6, b4, b7):
# Ignore warnings (comment the lines below when debugging)
np.seterr(divide='ignore', invalid='ignore')
# Change type to float32
# GREEN (green band 1 (560nm) is not accurate needs to be approximately 510 nm for real green band)
b1f = b1.astype(np.float32)
# RED
b3f = b3.astype(np.float32)
# NIR
b6f = b6.astype(np.float32)
# Band 4 (670 nm)
b4f = b4.astype(np.float32)
# Band 7 (791 nm)
b7f = b7.astype(np.float32)
# TVI = 0.5 * (120 * (NIR - GREEN) - 200 * (RED - GREEN))
tvi = np.multiply(0.5, np.subtract(np.multiply(
120, np.subtract(b6f, b1f)), np.multiply(200, np.subtract(b3f, b1f))))
# MTVI1 = 1.2 * (1.2 * (R800 - R550) - 2.5 * (R670 - R550))
mtvi1 = np.multiply(1.2, np.subtract(np.multiply(
1.2, np.subtract(b7f, b1f)), np.multiply(2.5, np.subtract(b4f, b1f))))
# MTVI2 = (1.5 * (1.2 * (R800 - R550) - 2.5 * (R670 - R550)))/ (sqrt((2 * R800 + 1) ^ 2 - (6 * R800 - 5 * sqrt(R670)) - 0.5))
mtvi2 = np.divide(np.multiply(1.5, np.subtract(np.multiply(1.2, np.subtract(b7f, b1f)), np.multiply(2.5, np.subtract(b4f, b1f)))), np.sqrt(
np.subtract(np.subtract(np.square(np.add(np.multiply(2, b7f), 1)), np.subtract(np.multiply(6, b7f), np.multiply(5, np.sqrt(b4f)))), 0.5)))
# Normalized MTVI2 values from 0.0 to 1.0
mtvi2Norm1 = np.add(np.multiply(mtvi2, 0.5), 0.5)
# Normalized MTVI2 values from 0.0 to 255.0
mtvi2Norm2 = np.multiply(mtvi2Norm1, 255)
# Normalized MTVI2 values to export integer values
mtvi2Norm3 = mtvi2Norm2.astype(np.uint8)
return tvi, mtvi1, mtvi2Norm3
# Image segmentation
def segmentation(self, image):
# Thresholding with OTSU
_, segImage = cv2.threshold(
image, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Erosion removes noise
erosionSize = 2
erosionType = cv2.MORPH_ELLIPSE
el2 = cv2.getStructuringElement(
erosionType, (2*erosionSize + 1, 2*erosionSize+1), (erosionSize, erosionSize))
erodedImage = cv2.erode(segImage, el2)
# Dilation fills holes of the region of interest and expands it
dilatationSize = 3
dilatationType = cv2.MORPH_ELLIPSE
el1 = cv2.getStructuringElement(
dilatationType, (2*dilatationSize + 1, 2*dilatationSize+1), (dilatationSize, dilatationSize))
erdImage = cv2.dilate(erodedImage, el1)
# Return 2 segmented images
return erdImage, segImage
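# Minimal usage sketch (illustrative, reusing the normalized uint8 grayscale image
# returned by one of the index calculators above):
#   dilatedMask, otsuMask = indices.segmentation(saviGray)
#   # otsuMask is the raw Otsu threshold; dilatedMask has been eroded then dilated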
# Display coefficients
def printCrosstalkCoefficients(self):
for i in range(9):
for j in range(9):
print(str(self.wCoefCrossTalk[i][j]) + ", ", end='')
print("")
print("")
# Crop selected pixels and calculates the white reference coefficients
def whiteReferenceCalculator(self, rawImage):
# For white balance the following equation is used: [whiteBalance = 255 / average of the pixels in the selected area, for every band]
# Take the size of the selected area (rows)
sizeR = self.positions[3] - self.positions[1] + 1
# Take the starting point of the selected area (rows)
startR = self.positions[1] - 1
# Take the ending point of the selected area (rows)
endR = startR + sizeR
# Take the size of the selected area (columns)
sizeC = self.positions[2] - self.positions[0] + 1
# Take the starting point of the selected area (columns)
startC = self.positions[0] - 1
# Take the ending point of the selected area (columns)
endC = startC + sizeC
if (self.positions[4] == 1) and (sizeR > 2 and sizeC > 2):
self.positions[4] = -1 # Checks if the state of the area selection
| |
m.c773 = Constraint(expr= m.x650 - 0.984106*m.x1114 - m.x1290 - m.x1322 - m.x1354 == 0)
m.c774 = Constraint(expr= m.x651 - 0.984106*m.x1115 - m.x1291 - m.x1323 - m.x1355 == 0)
m.c775 = Constraint(expr= m.x652 - 0.984106*m.x1116 - m.x1292 - m.x1324 - m.x1356 == 0)
m.c776 = Constraint(expr= m.x653 - 0.984106*m.x1117 - m.x1293 - m.x1325 - m.x1357 == 0)
m.c777 = Constraint(expr= m.x654 - 0.984106*m.x1118 - m.x1294 - m.x1326 - m.x1358 == 0)
m.c778 = Constraint(expr= m.x655 - 0.984106*m.x1119 - m.x1295 - m.x1327 - m.x1359 == 0)
m.c779 = Constraint(expr= m.x656 - 0.984106*m.x1120 - m.x1296 - m.x1328 - m.x1360 == 0)
m.c780 = Constraint(expr= m.x657 - 0.984106*m.x1121 - m.x1297 - m.x1329 - m.x1361 == 0)
m.c781 = Constraint(expr= m.x658 - 0.984106*m.x1122 - m.x1298 - m.x1330 - m.x1362 == 0)
m.c782 = Constraint(expr= m.x659 - 0.984106*m.x1123 - m.x1299 - m.x1331 - m.x1363 == 0)
m.c783 = Constraint(expr= m.x660 - 0.984106*m.x1124 - m.x1300 - m.x1332 - m.x1364 == 0)
m.c784 = Constraint(expr= m.x661 - 0.984106*m.x1125 - m.x1301 - m.x1333 - m.x1365 == 0)
m.c785 = Constraint(expr= m.x662 - 0.984106*m.x1126 - m.x1302 - m.x1334 - m.x1366 == 0)
m.c786 = Constraint(expr= m.x663 - 0.015894*m.x1127 - m.x1367 - m.x1399 - m.x1431 == 0)
m.c787 = Constraint(expr= m.x664 - 0.015894*m.x1128 - m.x1368 - m.x1400 - m.x1432 == 0)
m.c788 = Constraint(expr= m.x665 - 0.015894*m.x1129 - m.x1369 - m.x1401 - m.x1433 == 0)
m.c789 = Constraint(expr= m.x666 - 0.015894*m.x1130 - m.x1370 - m.x1402 - m.x1434 == 0)
m.c790 = Constraint(expr= m.x667 - 0.015894*m.x1131 - m.x1371 - m.x1403 - m.x1435 == 0)
m.c791 = Constraint(expr= m.x668 - 0.015894*m.x1132 - m.x1372 - m.x1404 - m.x1436 == 0)
m.c792 = Constraint(expr= m.x669 - 0.015894*m.x1133 - m.x1373 - m.x1405 - m.x1437 == 0)
m.c793 = Constraint(expr= m.x670 - 0.015894*m.x1134 - m.x1374 - m.x1406 - m.x1438 == 0)
m.c794 = Constraint(expr= m.x671 - 0.015894*m.x1135 - m.x1375 - m.x1407 - m.x1439 == 0)
m.c795 = Constraint(expr= m.x672 - 0.015894*m.x1136 - m.x1376 - m.x1408 - m.x1440 == 0)
m.c796 = Constraint(expr= m.x673 - 0.015894*m.x1137 - m.x1377 - m.x1409 - m.x1441 == 0)
m.c797 = Constraint(expr= m.x674 - 0.015894*m.x1138 - m.x1378 - m.x1410 - m.x1442 == 0)
m.c798 = Constraint(expr= m.x675 - 0.015894*m.x1139 - m.x1379 - m.x1411 - m.x1443 == 0)
m.c799 = Constraint(expr= m.x676 - 0.015894*m.x1140 - m.x1380 - m.x1412 - m.x1444 == 0)
m.c800 = Constraint(expr= m.x677 - 0.015894*m.x1141 - m.x1381 - m.x1413 - m.x1445 == 0)
m.c801 = Constraint(expr= m.x678 - 0.015894*m.x1142 - m.x1382 - m.x1414 - m.x1446 == 0)
m.c802 = Constraint(expr= m.x679 - 0.984106*m.x1127 - m.x1383 - m.x1415 - m.x1447 == 0)
m.c803 = Constraint(expr= m.x680 - 0.984106*m.x1128 - m.x1384 - m.x1416 - m.x1448 == 0)
m.c804 = Constraint(expr= m.x681 - 0.984106*m.x1129 - m.x1385 - m.x1417 - m.x1449 == 0)
m.c805 = Constraint(expr= m.x682 - 0.984106*m.x1130 - m.x1386 - m.x1418 - m.x1450 == 0)
m.c806 = Constraint(expr= m.x683 - 0.984106*m.x1131 - m.x1387 - m.x1419 - m.x1451 == 0)
m.c807 = Constraint(expr= m.x684 - 0.984106*m.x1132 - m.x1388 - m.x1420 - m.x1452 == 0)
m.c808 = Constraint(expr= m.x685 - 0.984106*m.x1133 - m.x1389 - m.x1421 - m.x1453 == 0)
m.c809 = Constraint(expr= m.x686 - 0.984106*m.x1134 - m.x1390 - m.x1422 - m.x1454 == 0)
m.c810 = Constraint(expr= m.x687 - 0.984106*m.x1135 - m.x1391 - m.x1423 - m.x1455 == 0)
m.c811 = Constraint(expr= m.x688 - 0.984106*m.x1136 - m.x1392 - m.x1424 - m.x1456 == 0)
m.c812 = Constraint(expr= m.x689 - 0.984106*m.x1137 - m.x1393 - m.x1425 - m.x1457 == 0)
m.c813 = Constraint(expr= m.x690 - 0.984106*m.x1138 - m.x1394 - m.x1426 - m.x1458 == 0)
m.c814 = Constraint(expr= m.x691 - 0.984106*m.x1139 - m.x1395 - m.x1427 - m.x1459 == 0)
m.c815 = Constraint(expr= m.x692 - 0.984106*m.x1140 - m.x1396 - m.x1428 - m.x1460 == 0)
m.c816 = Constraint(expr= m.x693 - 0.984106*m.x1141 - m.x1397 - m.x1429 - m.x1461 == 0)
m.c817 = Constraint(expr= m.x694 - 0.984106*m.x1142 - m.x1398 - m.x1430 - m.x1462 == 0)
m.c818 = Constraint(expr=-m.x855*m.x263 + m.x983 == 0)
m.c819 = Constraint(expr=-m.x856*m.x264 + m.x984 == 0)
m.c820 = Constraint(expr=-m.x857*m.x265 + m.x985 == 0)
m.c821 = Constraint(expr=-m.x858*m.x266 + m.x986 == 0)
m.c822 = Constraint(expr=-m.x859*m.x267 + m.x987 == 0)
m.c823 = Constraint(expr=-m.x860*m.x268 + m.x988 == 0)
m.c824 = Constraint(expr=-m.x861*m.x269 + m.x989 == 0)
m.c825 = Constraint(expr=-m.x862*m.x270 + m.x990 == 0)
m.c826 = Constraint(expr=-m.x863*m.x271 + m.x991 == 0)
m.c827 = Constraint(expr=-m.x864*m.x272 + m.x992 == 0)
m.c828 = Constraint(expr=-m.x865*m.x273 + m.x993 == 0)
m.c829 = Constraint(expr=-m.x866*m.x274 + m.x994 == 0)
m.c830 = Constraint(expr=-m.x867*m.x275 + m.x995 == 0)
m.c831 = Constraint(expr=-m.x868*m.x276 + m.x996 == 0)
m.c832 = Constraint(expr=-m.x869*m.x277 + m.x997 == 0)
m.c833 = Constraint(expr=-m.x870*m.x278 + m.x998 == 0)
m.c834 = Constraint(expr=-m.x855*m.x279 + m.x999 == 0)
m.c835 = Constraint(expr=-m.x856*m.x280 + m.x1000 == 0)
m.c836 = Constraint(expr=-m.x857*m.x281 + m.x1001 == 0)
m.c837 = Constraint(expr=-m.x858*m.x282 + m.x1002 == 0)
m.c838 = Constraint(expr=-m.x859*m.x283 + m.x1003 == 0)
m.c839 = Constraint(expr=-m.x860*m.x284 + m.x1004 == 0)
m.c840 = Constraint(expr=-m.x861*m.x285 + m.x1005 == 0)
m.c841 = Constraint(expr=-m.x862*m.x286 + m.x1006 == 0)
m.c842 = Constraint(expr=-m.x863*m.x287 + m.x1007 == 0)
m.c843 = Constraint(expr=-m.x864*m.x288 + m.x1008 == 0)
m.c844 = Constraint(expr=-m.x865*m.x289 + m.x1009 == 0)
m.c845 = Constraint(expr=-m.x866*m.x290 + m.x1010 == 0)
m.c846 = Constraint(expr=-m.x867*m.x291 + m.x1011 == 0)
m.c847 = Constraint(expr=-m.x868*m.x292 + m.x1012 == 0)
m.c848 = Constraint(expr=-m.x869*m.x293 + m.x1013 == 0)
m.c849 = Constraint(expr=-m.x870*m.x294 + m.x1014 == 0)
m.c850 = Constraint(expr=-m.x871*m.x263 + m.x1015 == 0)
m.c851 = Constraint(expr=-m.x872*m.x264 + m.x1016 == 0)
m.c852 = Constraint(expr=-m.x873*m.x265 + m.x1017 == 0)
m.c853 = Constraint(expr=-m.x874*m.x266 + m.x1018 == 0)
m.c854 = Constraint(expr=-m.x875*m.x267 + m.x1019 == 0)
m.c855 = Constraint(expr=-m.x876*m.x268 + m.x1020 == 0)
m.c856 = Constraint(expr=-m.x877*m.x269 + m.x1021 == 0)
m.c857 = Constraint(expr=-m.x878*m.x270 + m.x1022 == 0)
m.c858 = Constraint(expr=-m.x879*m.x271 + m.x1023 == 0)
m.c859 = Constraint(expr=-m.x880*m.x272 + m.x1024 == 0)
m.c860 = Constraint(expr=-m.x881*m.x273 + m.x1025 == 0)
m.c861 = Constraint(expr=-m.x882*m.x274 + m.x1026 == 0)
m.c862 = Constraint(expr=-m.x883*m.x275 + m.x1027 == 0)
m.c863 = Constraint(expr=-m.x884*m.x276 + m.x1028 == 0)
m.c864 = Constraint(expr=-m.x885*m.x277 + m.x1029 == 0)
m.c865 = Constraint(expr=-m.x886*m.x278 + m.x1030 == 0)
m.c866 = Constraint(expr=-m.x871*m.x279 + m.x1031 == 0)
m.c867 = Constraint(expr=-m.x872*m.x280 + m.x1032 == 0)
m.c868 = Constraint(expr=-m.x873*m.x281 + m.x1033 == 0)
m.c869 = Constraint(expr=-m.x874*m.x282 + m.x1034 == 0)
m.c870 = Constraint(expr=-m.x875*m.x283 + m.x1035 == 0)
m.c871 = Constraint(expr=-m.x876*m.x284 + m.x1036 == 0)
m.c872 = Constraint(expr=-m.x877*m.x285 + m.x1037 == 0)
m.c873 = Constraint(expr=-m.x878*m.x286 + m.x1038 == 0)
m.c874 = Constraint(expr=-m.x879*m.x287 + m.x1039 == 0)
m.c875 = Constraint(expr=-m.x880*m.x288 + m.x1040 == 0)
m.c876 = Constraint(expr=-m.x881*m.x289 + m.x1041 == 0)
m.c877 = Constraint(expr=-m.x882*m.x290 + m.x1042 == 0)
m.c878 = Constraint(expr=-m.x883*m.x291 + m.x1043 == 0)
m.c879 = Constraint(expr=-m.x884*m.x292 + m.x1044 == 0)
m.c880 = Constraint(expr=-m.x885*m.x293 + m.x1045 == 0)
m.c881 = Constraint(expr=-m.x886*m.x294 + m.x1046 == 0)
m.c882 = Constraint(expr=-m.x887*m.x103 + m.x1047 == 0)
m.c883 = Constraint(expr=-m.x888*m.x104 + m.x1048 == 0)
m.c884 = Constraint(expr=-m.x889*m.x105 + m.x1049 == 0)
m.c885 = Constraint(expr=-m.x890*m.x106 + m.x1050 == 0)
m.c886 = Constraint(expr=-m.x891*m.x107 + m.x1051 == 0)
m.c887 = Constraint(expr=-m.x892*m.x108 + m.x1052 == 0)
m.c888 = Constraint(expr=-m.x893*m.x109 + m.x1053 == 0)
m.c889 = Constraint(expr=-m.x894*m.x110 + m.x1054 == 0)
m.c890 = Constraint(expr=-m.x895*m.x111 + m.x1055 == 0)
m.c891 = Constraint(expr=-m.x896*m.x112 + m.x1056 == 0)
m.c892 = Constraint(expr=-m.x897*m.x113 + m.x1057 == 0)
m.c893 = Constraint(expr=-m.x898*m.x114 + m.x1058 == 0)
m.c894 = Constraint(expr=-m.x899*m.x115 + m.x1059 == 0)
m.c895 = Constraint(expr=-m.x900*m.x116 + m.x1060 == 0)
m.c896 = Constraint(expr=-m.x901*m.x117 + m.x1061 == 0)
m.c897 = Constraint(expr=-m.x902*m.x118 + m.x1062 == 0)
m.c898 = Constraint(expr=-m.x887*m.x151 + m.x1063 == 0)
m.c899 = Constraint(expr=-m.x888*m.x152 + m.x1064 == 0)
m.c900 = Constraint(expr=-m.x889*m.x153 + m.x1065 == 0)
m.c901 = Constraint(expr=-m.x890*m.x154 + m.x1066 == 0)
m.c902 = Constraint(expr=-m.x891*m.x155 + m.x1067 == 0)
m.c903 = Constraint(expr=-m.x892*m.x156 + m.x1068 == 0)
m.c904 = Constraint(expr=-m.x893*m.x157 + m.x1069 == 0)
m.c905 = Constraint(expr=-m.x894*m.x158 + m.x1070 == 0)
m.c906 = Constraint(expr=-m.x895*m.x159 + m.x1071 == 0)
m.c907 = Constraint(expr=-m.x896*m.x160 + m.x1072 == 0)
m.c908 = Constraint(expr=-m.x897*m.x161 + m.x1073 == 0)
m.c909 = Constraint(expr=-m.x898*m.x162 + m.x1074 == 0)
m.c910 = Constraint(expr=-m.x899*m.x163 + m.x1075 == 0)
m.c911 = Constraint(expr=-m.x900*m.x164 + m.x1076 == 0)
m.c912 = Constraint(expr=-m.x901*m.x165 + m.x1077 == 0)
m.c913 = Constraint(expr=-m.x902*m.x166 + m.x1078 == 0)
m.c914 = Constraint(expr=-m.x903*m.x103 + m.x1079 == 0)
m.c915 = Constraint(expr=-m.x904*m.x104 + m.x1080 == 0)
m.c916 = Constraint(expr=-m.x905*m.x105 + m.x1081 == 0)
m.c917 = Constraint(expr=-m.x906*m.x106 + m.x1082 == 0)
m.c918 = Constraint(expr=-m.x907*m.x107 + m.x1083 == 0)
m.c919 = Constraint(expr=-m.x908*m.x108 + m.x1084 == 0)
m.c920 = Constraint(expr=-m.x909*m.x109 + m.x1085 == 0)
m.c921 = Constraint(expr=-m.x910*m.x110 + m.x1086 == 0)
m.c922 = Constraint(expr=-m.x911*m.x111 + m.x1087 == 0)
m.c923 = Constraint(expr=-m.x912*m.x112 + m.x1088 == 0)
m.c924 = Constraint(expr=-m.x913*m.x113 + m.x1089 == 0)
m.c925 = Constraint(expr=-m.x914*m.x114 + m.x1090 == 0)
m.c926 = Constraint(expr=-m.x915*m.x115 + m.x1091 == 0)
m.c927 = Constraint(expr=-m.x916*m.x116 + m.x1092 == 0)
m.c928 = Constraint(expr=-m.x917*m.x117 + m.x1093 == 0)
m.c929 = Constraint(expr=-m.x918*m.x118 + m.x1094 == 0)
m.c930 = Constraint(expr=-m.x903*m.x151 + m.x1095 == 0)
m.c931 = Constraint(expr=-m.x904*m.x152 + m.x1096 == 0)
m.c932 = Constraint(expr=-m.x905*m.x153 + m.x1097 == 0)
m.c933 = Constraint(expr=-m.x906*m.x154 + m.x1098 == 0)
m.c934 = Constraint(expr=-m.x907*m.x155 + m.x1099 == 0)
m.c935 = Constraint(expr=-m.x908*m.x156 + | |
'''
Created on 31 Jul 2009
@author: charanpal
'''
from __future__ import print_function
import sys
import os
import numpy
from contextlib import contextmanager
import numpy.random as rand
import logging
import scipy.linalg
import scipy.sparse as sparse
import scipy.special
import pickle
from apgl.util.Parameter import Parameter
class Util(object):
'''
A class with some generally useful functions that don't fit anywhere else. Not very OO, unfortunately.
'''
def __init__(self):
'''
Constructor
'''
pass
@staticmethod
def histogram(v):
"""
Compute a histogram based on all unique elements in vector v
"""
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
numElements = uniqElements.shape[0]
hist = numpy.zeros(numElements)
for i in range(0, numElements):
hist[i] = sum(v == uniqElements[i])
return (hist, uniqElements)
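# Illustrative example with made-up data:
#   Util.histogram(numpy.array([1, 1, 2, 3, 3, 3]))
#   returns (array([2., 1., 3.]), array([1, 2, 3]))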
@staticmethod
def mode(v):
"""
Returns the mode of a 1D vector; if several values are equally frequent, the first one is returned.
"""
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
freqs = numpy.zeros(uniqElements.shape[0])
for i in range(uniqElements.shape[0]):
freqs[i] = numpy.sum(v == uniqElements[i])
return uniqElements[numpy.argmax(freqs)]
@staticmethod
def sampleWithoutReplacement(sampleSize, totalSize):
"""
Create a list of integers from 0 to totalSize, and take a random sample of size sampleSize. The
sample is returned in sorted order.
"""
perm = rand.permutation(totalSize)
perm = perm[0:sampleSize]
perm = numpy.sort(perm)
return perm
@staticmethod
def randNormalInt(mean, sd, min, max):
"""
Returns a normally distributed integer within a range (inclusive of min, max)
"""
i = round(rand.normal(mean, sd))
while i < min or i > max:
i = round(rand.normal(mean, sd))
return i
@staticmethod
def computeMeanVar(X):
mu = numpy.mean(X, 0)
X2 = X - mu
sigma = numpy.dot(X2.T, X2)/X.shape[0]
return (mu, sigma)
@staticmethod
def iterationStr(i, step, maxIter, preStr="Iteration: "):
outputStr = ""
if maxIter == 1:
outputStr = preStr + str(i) + " (1.0)"
elif i % step == 0:
#frm = inspect.stack()[1]
#mod = inspect.getmodule(frm[0])
#logging.info(mod.__name__ + ": " + str(i) + " (" + str(float(i)/maxIter) + ")")
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
elif i == maxIter-1:
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
else:
raise ValueError("Got invalid input: " + str((i, step, maxIter)))
return outputStr
@staticmethod
def printIteration(i, step, maxIter, preStr="Iteration: "):
if i % step == 0 or i==maxIter-1:
logging.debug(Util.iterationStr(i, step, maxIter, preStr))
@staticmethod
def printConciseIteration(i, step, maxIter, preStr="Iteration: "):
if i==0:
print(Util.iterationStr(i, step, maxIter, preStr), end="")
elif i!=maxIter-1:
print(Util.iterationStr(i, step, maxIter, " "), end="")
else:
print(Util.iterationStr(i, step, maxIter, " "))
@staticmethod
def abstract():
"""
This is a method to be put in abstract methods so that they are identified
as such when called.
"""
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError("Method " + caller + ' must be implemented in subclass')
@staticmethod
def rank(A, tol=1e-8):
"""
Kindly borrowed from the following forum thread:
http://mail.scipy.org/pipermail/numpy-discussion/2008-February/031218.html
"""
s = numpy.linalg.svd(A, compute_uv=False)
return numpy.sum(numpy.where(s>tol, 1, 0))
@staticmethod
def randomChoice(V, n=1):
"""
Make a random choice from a vector V of values which are unnormalised
probabilities. Return the corresponding index. For example if v = [1, 2, 4]
then the probabilities of the indices are respectively [1/7, 2/7, 4/7]. The
parameter n is the number of random choices to make. If V is a matrix,
then the rows are taken as probabilities, and a choice is made for each
row.
"""
Parameter.checkClass(V, numpy.ndarray)
if V.shape[0]==0:
return -1
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
return numpy.searchsorted(cumV, p)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
inds = numpy.zeros(P.shape, numpy.int)
for i in range(P.shape[0]):
inds[i, :] = numpy.searchsorted(cumV[i, :], P[i, :])
return inds
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def fitPowerLaw(x, xmin):
"""
Take a sample of data points which are drawn from a power law probability
distribution (p(x) = (x/xmin)**-alpha) and return the exponent. This works
best for continuous data.
"""
x = x[x >= xmin]
n = x.shape[0]
lnSum = n / numpy.sum(numpy.log(x/xmin))
#gamma = 1 + lnSum
gamma = lnSum
return gamma
@staticmethod
def fitDiscretePowerLaw(x, xmins = None):
"""
Take a sample of discrete data points which are drawn from a power law probability
distribution (p(x) = x-alpha / zeta(alpha, xmin)) and return the exponent.
If xmins is supplied then it searches through the set of xmins rather than
using all possible xmins. Most of the time it helps to keep xmins low.
Returns the goodness of fit, best alpha and xmin. If there is only 1 unique
value of x then -1, -1 min(x) is returned.
"""
xmax = numpy.max(x)
if xmins is None:
xmin = numpy.max(numpy.array([numpy.min(x), 1]))
xmins = numpy.arange(xmin, xmax)
#Note that x must have at least 2 unique elements
if xmins.shape[0] == 0:
return -1, -1, numpy.min(x)
alphas = numpy.arange(1.5, 3.5, 0.01)
ksAlpha = numpy.zeros((xmins.shape[0], 2))
for j in range(xmins.shape[0]):
xmin = xmins[j]
z = x[x >= xmin]
n = z.shape[0]
sumLogx = numpy.sum(numpy.log(z))
likelihoods = numpy.zeros(alphas.shape[0])
for i in range(alphas.shape[0]):
likelihoods[i] = -n*numpy.log(scipy.special.zeta(alphas[i], xmin)) - alphas[i]*sumLogx
k = numpy.argmax(likelihoods)
#Compute KS statistic
cdf = numpy.cumsum(numpy.bincount(z)[xmin:xmax]/float(n))
fit = numpy.arange(xmin, xmax)**-alphas[k] /scipy.special.zeta(alphas[k], xmin)
fit = numpy.cumsum(fit)
ksAlpha[j, 0] = numpy.max(numpy.abs(cdf - fit))
ksAlpha[j, 1] = alphas[k]
i = numpy.argmin(ksAlpha[:, 0])
return ksAlpha[i, 0], ksAlpha[i, 1], xmins[i]
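# Illustrative usage with made-up data: for a vector x of positive integers believed
# to follow a discrete power law,
#   ks, alpha, xmin = Util.fitDiscretePowerLaw(x, numpy.arange(1, 10))
# returns the Kolmogorov-Smirnov distance of the best fit, the fitted exponent
# (searched over 1.5 <= alpha < 3.5) and the selected xmin.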
@staticmethod
def entropy(v):
"""
Compute the information entropy of a vector of random vector observations
using the log to the base 2.
"""
items = numpy.unique(v)
infEnt = 0
for i in items:
prob = numpy.sum(v==i)/float(v.shape[0])
infEnt -= prob * numpy.log2(prob)
return infEnt
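# Illustrative example: Util.entropy(numpy.array([0, 0, 1, 1])) returns 1.0,
# since both values occur with probability 0.5 and -2 * 0.5 * log2(0.5) = 1.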
@staticmethod
def expandIntArray(v):
"""
Take a vector of integers and expand it into a vector with counts of the
corresponding integers. For example, with v = [1, 3, 2, 4], the expanded
vector is [0, 1, 1, 1, 2, 2, 3, 3, 3, 3].
"""
Parameter.checkClass(v, numpy.ndarray)
Parameter.checkList(v, Parameter.checkInt, [0, float('inf')])
w = numpy.zeros(numpy.sum(v), numpy.int)
currentInd = 0
for i in range(v.shape[0]):
w[currentInd:currentInd+v[i]] = i
currentInd += v[i]
return w
@staticmethod
def random2Choice(V, n=1):
"""
Make a random binary choice from a vector V of values which are unnormalised
probabilities. Return the corresponding index. For example if v = [1, 2]
then the probabilities of the indices are respectively [1/3, 2/3]. The
parameter n is the number of random choices to make. If V is a matrix,
then the rows are taken as probabilities, and a choice is made for each
row.
"""
Parameter.checkClass(V, numpy.ndarray)
if V.ndim == 1 and V.shape[0] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 2 and V.shape[1] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
cumV2 = numpy.ones(n)*cumV[0] - p
return numpy.array(cumV2 <= 0, numpy.int)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
cumV2 = numpy.outer(cumV[:, 0], numpy.ones(n)) - P
return numpy.array(cumV2 <= 0, numpy.int)
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def loadPickle(filename):
"""
Loads a pickled file with the given filename.
"""
file = open(filename, 'rb')
obj = pickle.load(file)
file.close()
#logging.debug("Loaded " + filename + " with object " + str(type(obj)))
return obj
@staticmethod
def savePickle(obj, filename, overwrite=True, debug=False):
if os.path.isfile(filename) and not overwrite:
raise IOError("File exists: " + filename)
file = open(filename, 'wb')
pickle.dump(obj, file)
file.close()
if debug:
logging.debug("Saved " + filename + " object type " + str(type(obj)))
@staticmethod
def incompleteCholesky(X, k):
"""
Compute the incomplete Cholesky decomposition of a positive semi-definite
square matrix X. Use an approximation of k rows.
"""
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
ell = X.shape[0]
R = numpy.zeros((k, ell))
d = numpy.diag(X)
aInd = numpy.argmax(d)
a = d[aInd]
nu = numpy.zeros(k)
for j in range(k):
nu[j] = numpy.sqrt(a)
for i in range(ell):
| |
import math
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#--------------------------GENERAL PROCESS CLASS-------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class Process():
def __init__(self, Name, UnitNumber, Parent= None, *args, **kwargs):
super().__init__()
# Lists
self.ParameterList =[]
# GENERAL ATTRIBUTES
# ------------------
# Non-indexed Attributes
self.Name = Name
self.Number = UnitNumber
self.Type = None
self.Group = None
self.Possible_Sources = []
# FLOW ATTRIBUTES
# ---------------
# Indexed Attributes
self.myu ={'myu': {}}
self.conc ={'conc': {self.Number: 0}}
self.kappa_1_lhs_conc = {'kappa_1_lhs_conc': {}}
self.kappa_2_lhs_conc = {'kappa_2_lhs_conc': {}}
self.kappa_1_rhs_conc = {'kappa_1_rhs_conc': {}}
self.kappa_2_rhs_conc = {'kappa_2_rhs_conc': {}}
self.FLH = {'flh': {self.Number: None}}
if Parent is not None:
Parent.add_UnitOperations(self)
def fill_unitOperationsList(self, superstructure):
superstructure.UnitsList.append(self)
superstructure.UnitsNumberList['U'].append(self.Number)
superstructure.UnitsNumberList2['UU'].append(self.Number)
for i in self.Possible_Sources:
if i is not self.Number:
superstructure.SourceSet['U_SU'].append((i,self.Number))
# GENERAL DATA SETTING
# --------------------
def set_generalData(self,
ProcessGroup,
lifetime = None,
emissions = 0,
full_load_hours = None
):
self.set_group(ProcessGroup)
self.set_full_load_hours(full_load_hours)
def set_name(self, Name):
self.Name = Name
def set_number(self, Number):
self.Number = Number
def set_group(self, processgroup):
self.Group = processgroup
def set_full_load_hours(self, full_load_hours = None):
self.FLH['flh'][self.Number] = full_load_hours
# FLOW DATA SETTING
# -----------------
def set_flowData(self,
RequiredConcentration = None,
RightHandSideReferenceFlow = None,
LeftHandSideReferenceFlow = None,
RightHandSideComponentList = [],
LeftHandSideComponentList = [],
SplitfactorDictionary = None,
):
self.__set_conc(RequiredConcentration)
self.__set_myuFactors(SplitfactorDictionary)
self.__set_kappa_1_lhs_conc(LeftHandSideComponentList)
self.__set_kappa_1_rhs_conc(RightHandSideComponentList)
self.__set_kappa_2_lhs_conc(LeftHandSideReferenceFlow)
self.__set_kappa_2_rhs_conc(RightHandSideReferenceFlow)
def __set_conc(self, concentration):
self.conc['conc'][self.Number] = concentration
def __set_myuFactors(self, myu_dic):
"""
Parameters
----------
myu_dic : Dictionary
Example: dict = {(u'1,i1):value1, (u'1,i2): value2}
"""
for i in myu_dic:
self.myu['myu'][self.Number,i] = myu_dic[i]
def __set_kappa_1_lhs_conc(self, kappa_1_lhs_conc_list):
"""
Parameters
----------
kappa_1_lhs_conc_list : List
Example: list = ['I1','I2',...]
"""
for i in kappa_1_lhs_conc_list:
if type(i) == list:
for j in i:
self.kappa_1_lhs_conc['kappa_1_lhs_conc'][self.Number,j] = 1
else:
self.kappa_1_lhs_conc['kappa_1_lhs_conc'][self.Number,i] = 1
def __set_kappa_1_rhs_conc(self, kappa_1_rhs_conc_list):
"""
Parameters
----------
kappa_1_rhs_conc_list : List
Example: list = ['I1','I2',...]
"""
for i in kappa_1_rhs_conc_list:
if type(i) == list:
for j in i:
self.kappa_1_rhs_conc['kappa_1_rhs_conc'][self.Number,j] = 1
else:
self.kappa_1_rhs_conc['kappa_1_rhs_conc'][self.Number,i] = 1
def __set_kappa_2_lhs_conc(self, kappa_2_lhs_conc_string):
"""
Parameters
----------
kappa_2_lhs_conc_string : String
Example: 'FIN' or 'FOUT'
"""
if kappa_2_lhs_conc_string == 'FIN':
self.kappa_2_lhs_conc['kappa_2_lhs_conc'][self.Number] = 1
elif kappa_2_lhs_conc_string == 'FOUT':
self.kappa_2_lhs_conc['kappa_2_lhs_conc'][self.Number] = 0
else:
self.kappa_2_lhs_conc['kappa_2_lhs_conc'][self.Number] = 3
def __set_kappa_2_rhs_conc(self, kappa_2_rhs_conc_string):
"""
Parameters
----------
kappa_2_rhs_conc_string : String
Example: 'FIN' or 'FOUT'
"""
if kappa_2_rhs_conc_string == 'FIN':
self.kappa_2_rhs_conc['kappa_2_rhs_conc'][self.Number] = 1
elif kappa_2_rhs_conc_string == 'FOUT':
self.kappa_2_rhs_conc['kappa_2_rhs_conc'][self.Number] = 0
else:
self.kappa_2_rhs_conc['kappa_2_rhs_conc'][self.Number] = 3
def set_possibleSources(self, SourceList):
if type(SourceList) == list:
for i in SourceList:
if i not in self.Possible_Sources:
self.Possible_Sources.append(i)
else:
if SourceList not in self.Possible_Sources:
self.Possible_Sources.append(SourceList)
# ADDITIONAL METHODS
# ------------------
def fill_parameterList(self):
"""
Fills the ParameterList of process unit u, which is used to fill the Data_File
in the Superstructure class.
"""
self.ParameterList.append(self.conc)
self.ParameterList.append(self.myu)
self.ParameterList.append(self.kappa_1_lhs_conc)
self.ParameterList.append(self.kappa_2_lhs_conc)
self.ParameterList.append(self.kappa_1_rhs_conc)
self.ParameterList.append(self.kappa_2_rhs_conc)
self.ParameterList.append(self.FLH)
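# Minimal configuration sketch (the unit name, number, sources, components and
# numeric values below are illustrative assumptions, not taken from any real model):
#   p = Process('Mixer', 100)
#   p.set_possibleSources([200, 300])
#   p.set_generalData(ProcessGroup=1, full_load_hours=8000)
#   p.set_flowData(RequiredConcentration=0.3, RightHandSideReferenceFlow='FIN',
#   LeftHandSideReferenceFlow='FOUT', RightHandSideComponentList=['I1'],
#   LeftHandSideComponentList=['I1', 'I2'], SplitfactorDictionary={'I1': 0.9, 'I2': 0.1})
#   p.fill_parameterList()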
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#--------------------------VIRTUAL PROCESSES-----------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class VirtualProcess(Process):
def __init__(self, Name, UnitNumber, Parent=None, *args, **kwargs):
super().__init__(Name, UnitNumber, Parent)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#--------------------------PHYSICAL / REAL PROCESSES---------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class PhysicalProcess(Process):
def __init__(self, Name, UnitNumber, Parent = None, *args, **kwargs):
super().__init__(Name, UnitNumber, Parent)
# Indexed Attributes
self.LT = {'LT': {self.Number: None}}
self.em_fac_unit = {'em_fac_unit': {self.Number: None}}
# ECONOMIC ATTRIBUTES
# --------------------
# Indexed Attributes
self.DC_factor = {'DC': {self.Number: None}}
self.IDC_factor = {'IDC': {self.Number: None}}
self.CAPEX_factors = dict()
self.CAPEX_factors_new = {'C_Ref': None, 'm_Ref': None,
'CECPI_ref': None}
self.ACC_Factor = {'ACC_Factor': {self.Number: None}}
self.lin_CAPEX_x = dict()
self.lin_CAPEX_y = dict()
self.kappa_1_capex = {'kappa_1_capex': {}}
self.kappa_2_capex = {'kappa_2_capex': {}}
self.K_M = {'K_M': {}}
self.turn_over_acc = {'to_acc': {self.Number: 0}}
self.turnover_factors = {'CostPercentage': None, 'TimeSpan': None, 'TimeMode': 'No Mode'}
# ENERGY ATTRIBUTES
# -----------------
# Indexed Attributes
self.tau = {'tau': {}}
self.tau_h = {'tau_h':{}}
self.tau_c = {'tau_c': {}}
self.kappa_1_ut = {'kappa_1_ut': {}}
self.kappa_2_ut = {'kappa_2_ut': {}}
self.beta = {'beta': {}}
self.HeatData = {'Heat': {}, 'Heat2': {}}
self.T_IN = {'Heat': {}, 'Heat2':{}}
self.T_OUT = {'Heat': {}, 'Heat2':{}}
self.CECPI_dic = {1994: 368.1, 1995: 381.1, 1996: 381.7, 1997: 386.5,
1998: 389.5, 1999: 390.6, 2000: 394.1, 2001: 394.3,
2002: 395.6, 2003: 402.0, 2004: 444.2, 2005: 468.2,
2006: 499.6, 2007: 525.4, 2008: 575.4, 2009: 521.9,
2010: 550.8, 2011: 585.7, 2012: 584.6, 2013: 567.1,
2014: 576.1, 2015: 556.8, 2016: 541.7, 2017: 566.1,
2018: 603.1}
def fill_unitOperationsList(self, superstructure):
super().fill_unitOperationsList(superstructure)
superstructure.CostUnitsList['U_C'].append(self.Number)
# ECONOMIC DATA SETTING
# ---------------------
def set_generalData(self,
ProcessGroup,
lifetime,
emissions = 0,
full_load_hours = None,
maintenancefactor = 0.044875,
CostPercentage = None,
TimeSpan = None,
TimeMode = None
):
super().set_generalData(ProcessGroup,lifetime,emissions,full_load_hours)
self.__set_lifeTime(lifetime)
self.__set_unitoperationEmissionsFactor(emissions)
self.__set_maintenanceFactor(maintenancefactor)
self.__set_turnoverFactors(CostPercentage,
TimeSpan,
TimeMode)
def __set_unitoperationEmissionsFactor(self, emissionfactor):
self.em_fac_unit['em_fac_unit'][self.Number] = emissionfactor
def __set_lifeTime(self, lifetime):
self.LT['LT'][self.Number] = lifetime
def __set_maintenanceFactor(self, factor=0.044875):
self.K_M['K_M'][self.Number] = factor
def set_economicData(self,
DirectCostFactor,
IndirectCostFactor,
ReferenceCosts,
ReferenceFlow,
CostExponent,
ReferenceYear,
ReferenceFlowType,
ReferenceFlowComponentList
):
self.__set_dcFactor(DirectCostFactor)
self.__set_idcFactor(IndirectCostFactor)
self.__set_capexFactors(ReferenceCosts,
ReferenceFlow,
CostExponent,
ReferenceYear)
self.__set_kappa_2_capex(ReferenceFlowType)
self.__set_kappa_1_capex(ReferenceFlowComponentList)
def __set_dcFactor(self, DC):
self.DC_factor['DC'][self.Number] = DC
def __set_idcFactor(self, IDC):
self.IDC_factor['IDC'][self.Number] = IDC
def __set_capexFactors(self, CREF, MREF, F, YEAR_REF):
self.CAPEX_factors['C_Ref'] = {self.Number: CREF}
self.CAPEX_factors['m_Ref'] = {self.Number: MREF}
self.CAPEX_factors['f'] = {self.Number: F}
self.CAPEX_factors['CECPI_ref'] = {self.Number: self.CECPI_dic[YEAR_REF]}
def __set_turnoverFactors(self, CostPercentage, TimeSpan = None, TimeMode = 'No Mode'):
self.turnover_factors['CostPercentage'] = CostPercentage
if TimeMode == 'Yearly':
self.turnover_factors['TimeSpan'] = TimeSpan
self.turnover_factors['TimeMode'] = 'Yearly'
elif TimeMode == 'Hourly':
self.turnover_factors['TimeSpan'] = TimeSpan
self.turnover_factors['TimeMode'] = 'Hourly'
else:
self.turnover_factors['TimeSpan'] = None
self.turnover_factors['TimeMode'] = 'No Mode'
def __set_kappa_1_capex(self, kappa_1_list):
"""
Parameters
----------
kappa_1_list : List
Example: list = ['I1','I2']
"""
for i in kappa_1_list:
if type(i) == list:
for j in i:
self.kappa_1_capex['kappa_1_capex'][self.Number,j] = 1
else:
self.kappa_1_capex['kappa_1_capex'][self.Number,i] = 1
def __set_kappa_2_capex(self, kappa_2_capex_string):
"""
Parameters
----------
kappa_2_capex_string : String
Example: 'FIN' or 'FOUT'
"""
if kappa_2_capex_string == 'FIN':
self.kappa_2_capex['kappa_2_capex'][self.Number] = 1
elif kappa_2_capex_string == 'FOUT':
self.kappa_2_capex['kappa_2_capex'][self.Number] = 0
elif kappa_2_capex_string == 'PEL':
self.kappa_2_capex['kappa_2_capex'][self.Number] = 2
elif kappa_2_capex_string == 'PHEAT':
self.kappa_2_capex['kappa_2_capex'][self.Number] = 3
elif kappa_2_capex_string == 'PEL_PROD':
self.kappa_2_capex['kappa_2_capex'][self.Number] = 4
else:
self.kappa_2_capex['kappa_2_capex'][self.Number] = 5
def calc_ACCFactor(self, IR):
IR = IR['IR']
lt = self.LT['LT'][self.Number]
fac= ((IR *(1 + IR)**lt)/((1 + IR)**lt -1))
"Public"
return fac
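# Illustrative example with assumed numbers: with IR = {'IR': 0.05} and a lifetime
# LT of 20 years, the annuity factor is
#   0.05 * 1.05**20 / (1.05**20 - 1) = 0.0802 (approximately),
# i.e. roughly 8 % of the investment is charged per year.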
def calc_turnoverACC(self, IR):
h = self.FLH['flh'][self.Number]
lt = self.LT['LT'][self.Number]
to_lt = self.turnover_factors['TimeSpan']
to_tm = self.turnover_factors['TimeMode']
to_c = self.turnover_factors['CostPercentage']
if to_tm == 'No Mode':
return 0
elif to_tm == 'Yearly':
number_of_turnovers = lt / to_lt
number_of_turnovers = math.ceil(number_of_turnovers)
number_of_turnovers = number_of_turnovers - 1
else:
h_tot = h * lt
number_of_turnovers = h_tot / to_lt
number_of_turnovers = math.ceil(number_of_turnovers)
number_of_turnovers = number_of_turnovers - 1
total_turnover_costs = number_of_turnovers * to_c
annual_factor = self.calc_ACCFactor(IR)
annual_turnover_costs = annual_factor * total_turnover_costs
"Public"
return annual_turnover_costs
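# Illustrative example with assumed numbers: a lifetime of 20 years, TimeMode 'Yearly',
# TimeSpan 5 and CostPercentage 0.5 give ceil(20 / 5) - 1 = 3 turnovers and total
# turnover costs of 1.5; with IR = 0.05 as above this is annualized to about
# 0.0802 * 1.5 = 0.12.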
# ENERGY DATA SETTING
# -------------------
def set_energyData(self,
Temperature1 = None,
Temperature2 = None,
ElectricityDemand = None,
HeatDemand = None,
Heat2Demand = None,
ElectricityReferenceFlow = None,
ElectricityReferenceComponentList = [],
HeatReferenceFlow = None,
HeatReferenceComponentList = [],
Heat2ReferenceFlow = None,
Heat2ReferenceComponentList = []
):
dic1 = {'Electricity': ElectricityDemand, 'Heat': HeatDemand,
'Heat2': Heat2Demand}
dic2= {'Electricity': ElectricityReferenceComponentList,
'Heat': HeatReferenceComponentList,
'Heat2': Heat2ReferenceComponentList}
dic3 = {'Electricity': ElectricityReferenceFlow,
'Heat': HeatReferenceFlow,
'Heat2': Heat2ReferenceFlow}
self.__set_tauFactors(dic1)
self.__set_kappa_1_ut(dic2)
self.__set_kappa_2_ut(dic3)
def __set_tauFactors(self, tau_dic):
"""
Parameters
----------
tau_dic : Dictionary
Example: dict= {'Utility1' : value1 , 'Utility2' : value2}
"""
for i in tau_dic:
self.tau['tau'][self.Number,i] = tau_dic[i]
def __set_kappa_1_ut(self, kappa_1_ut_dic):
"""
Parameters
----------
kappa_1_ut_dic : Dictionary
Example: dict = {'Utility1': ['I1','I2'], 'Utility2': [...]}
"""
for i in kappa_1_ut_dic:
for j in kappa_1_ut_dic[i]:
self.kappa_1_ut['kappa_1_ut'][self.Number,i,j] = 1
def __set_kappa_2_ut(self, kappa_2_ut_dic):
"""
Parameters
----------
kappa_2_ut_dic : Dictionary
Example: dict = {'Utility1': 'FIN', 'Utility2': 'FOUT'}
"""
for i in kappa_2_ut_dic:
if kappa_2_ut_dic[i] == 'FIN':
self.kappa_2_ut['kappa_2_ut'][self.Number,i] = 1
elif kappa_2_ut_dic[i] == 'FOUT':
self.kappa_2_ut['kappa_2_ut'][self.Number,i] = 0
elif kappa_2_ut_dic[i] == 'FIN_M':
self.kappa_2_ut['kappa_2_ut'][self.Number,i] = 2
elif kappa_2_ut_dic[i] == 'FOUT_M':
self.kappa_2_ut['kappa_2_ut'][self.Number,i] = 4
else:
self.kappa_2_ut['kappa_2_ut'][self.Number,i] = 3
def set_Temperatures(self,
T_IN_1 = None,
T_OUT_1 = None,
tau1 = None,
T_IN_2 = None,
T_OUT_2 = None,
tau2 = None):
self.HeatData['Heat']['TIN'] = T_IN_1
self.HeatData['Heat']['TOUT'] = T_OUT_1
self.HeatData['Heat2']['TIN'] = T_IN_2
self.HeatData['Heat2']['TOUT'] = T_OUT_2
self.HeatData['Heat']['tau'] = tau1
self.HeatData['Heat2']['tau'] = tau2
self.T_IN['Heat'] = T_IN_1
self.T_IN['Heat2'] = T_IN_2
self.T_OUT['Heat'] = T_OUT_1
self.T_OUT['Heat2'] = T_OUT_2
"Public"
def fill_parameterList(self):
super().fill_parameterList()
self.ParameterList.append(self.LT)
self.ParameterList.append(self.DC_factor)
self.ParameterList.append(self.IDC_factor)
self.ParameterList.append(self.tau)
self.ParameterList.append(self.kappa_1_ut)
self.ParameterList.append(self.kappa_2_ut)
self.ParameterList.append(self.lin_CAPEX_x)
self.ParameterList.append(self.lin_CAPEX_y)
self.ParameterList.append(self.tau_h)
self.ParameterList.append(self.tau_c)
self.ParameterList.append(self.kappa_1_capex)
self.ParameterList.append(self.kappa_2_capex)
self.ParameterList.append(self.ACC_Factor)
self.ParameterList.append(self.em_fac_unit)
self.ParameterList.append(self.K_M)
self.ParameterList.append(self.turn_over_acc)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#--------------------------SPLITTER PROCESS------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class Splitter(PhysicalProcess):
def __init__(self, | |
quantum_yield = np.array(a[1].data['YIELD'][0])
return(lba_um, quantum_yield)
def example_1(wave_micron, flux_W_m2_micron):
print('Beginning of example_1')
# Synthesize magnitudes
filterlist = ['WISE2','Johnson-U','MKO-J'] # keep as array, an array of names is expected
pathvega = '/Users/albert/NIRISS/SOSSpipeline/sandbox/'
pathfilter = '/Users/albert/filterSVO/'
filtermag = syntMag(wave_micron,flux_W_m2_micron,filterlist,
path_filter_transmission=pathfilter,
path_vega_spectrum=pathvega)
# take index 0 to get the magnitude of the first filter
WISE2 = filtermag[0]
print('WISE2 magnitude of input spectrum: {:6.2f}'.format(WISE2))
mag = 15.0
print('To be normalized at magnitude {:6.2f}'.format(mag))
# Set the WISE2 magnitude to 15.0
flux_normalized = flux_W_m2_micron * 10**(-0.4*(mag-WISE2))
filtermag = syntMag(wave_micron,flux_normalized,filterlist,
path_filter_transmission=pathfilter,
path_vega_spectrum=pathvega)
WISE2ok = filtermag[0]
print('WISE2 magnitude of normalized spectrum: {:6.2f}'.format(WISE2ok))
plt.plot(wave_micron,flux_normalized)
plt.ylabel('Flambda (W/m2/micron)')
plt.xlabel('Wavelength (micron)')
plt.show()
print('End of example_1')
return()
def example_2():
print('Beginning of example_2')
pathvega = '/Users/albert/NIRISS/SOSSpipeline/sandbox/'
pathfilter = '/Users/albert/filterSVO/'
# read some spectrum and wavelength in wave_micron, flux_W_m2_micron
#wave_micron, flux_W_m2_micron = read_some_spectrum_not_implemented()
# For the example, lets read Vega
wave_micron, flux_W_m2_micron = readVega(path_vega_spectrum=pathvega)
# change the flux by 5 magnitudes, scale by 100
flux_W_m2_micron = flux_W_m2_micron * 100
filterlist = ['Johnson-V', 'KIC-r','MKO-J','WIRCam-Ks']
calibrationmag_KICr = 8.5
# Get the uncalibrated magnitude through a filter
filtermag = syntMag(wave_micron,flux_W_m2_micron,filterlist,
path_filter_transmission=pathfilter,
path_vega_spectrum=pathvega)
print('KIC-r magnitude of uncalibrated spectrum: {:6.2f}'.format(filtermag[1]))
# Normalize spectrum to the desired magnitude
flux_W_m2_micron_normalized = flux_W_m2_micron * 10**(-0.4*(calibrationmag_KICr-filtermag[1]))
# Check that it worked
filtermag = syntMag(wave_micron,flux_W_m2_micron_normalized,filterlist,
path_filter_transmission=pathfilter,
path_vega_spectrum=pathvega)
print('KIC-r magnitude of calibrated spectrum: {:6.2f}'.format(filtermag[1]))
plt.figure(figsize=(12,6))
plt.loglog(wave_micron,flux_W_m2_micron_normalized)
plt.title('Normalized Vega spectrum to KIC-r = 8.5')
plt.xlabel('Wavelength (micron)')
plt.ylabel('Flambda (W/m2/micron)')
plt.show()
# Now, divide by the photon energy and multiply by the collecting area (25 m2 for JWST) and by
# the pixel dispersion (in microns per pixel: e.g. ~0.001 micron/pixel in SOSS m=1)
#Fnu_W_m2_Hz = FlambdaToFnu(wave_micron,flux_W_m2_micron_normalized)
h = 6.62607004e-34 # m2 kg / sec
c = 299792458.0 #m/sec
photon_energy = h*c/(wave_micron*1e-6) # Joule
dispersion_micron_pixel = 0.001 # microns/pixel, could be an array matching the wave_micron
area = 25.0
# number of electrons per pixel ***per second*** is:
nelectron = area * dispersion_micron_pixel * flux_W_m2_micron_normalized / photon_energy
plt.figure(figsize=(12,6))
plt.loglog(wave_micron,nelectron)
plt.title('Electron flux with JWST assuming constant dispersion of 1 nm/pixel')
plt.xlabel('Wavelength (micron)')
plt.ylabel('Electron/sec/pixel')
plt.ylim((1e-2,1e+7))
plt.show()
print('End of example_2')
return(nelectron)
def FlambdaToFnu(wave,Flambda):
#wave has to be in microns
#Flambda has to be in W/m2/micron
#Physical constants
c = 299792458.0 #m/sec
wave_m = 1.0e-6*wave
Flambda_W_m2_m = 1.0e+6*Flambda
#Convert Flambda to Fnu (W/M2/Hz)
Fnu_W_m2_Hz = np.power(wave_m,2.0) * Flambda_W_m2_m / c
#Convert to Jansky using 1 Jy = 10^26 W/m2/Hz
#Jy = 1.0e+26 * Fnu_W_m2_Hz
return(Fnu_W_m2_Hz)
def jansky_to_AB(jansky):
# Convert from Jansky to Fnu
#The AB Magnitude constant
#ABconstant = 48.594 # advocated by Arnouts
ABconstant = 48.600
# by definition:
Fnu_W_m2_Hz = 1.0e-26 * jansky
#Convert Fnu (metric) to Fnu (cgs) (erg/sec/cm2/Hz) using 1 erg = 1.0e-7 J and 1 m2 = 10^4 cm2
Fnu_erg_sec_cm2_Hz = 1.0e+3 * Fnu_W_m2_Hz
#Convert to AB magnitude using magnitude = -2.5*alog10(flux) - 48.60
ABmag = -2.5 * np.log10(Fnu_erg_sec_cm2_Hz) - ABconstant
return(ABmag)
def AB_to_jansky(magAB):
# Convert from AB magnitude to Jansky
#The AB Magnitude constant
#ABconstant = 48.594 # advocated by Arnouts
ABconstant = 48.600
# First convert from mag AB to Fnu in cgs units: erg/s/cm2/Hz
fnu_cgs = np.power(10,-(magAB+ABconstant)/2.5)
# Then convert cgs to Jy (1 Jy = 10-23 erg/s/cm2/Hz)
jansky = fnu_cgs * 1e+23
return(jansky)
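# Sanity check of the zero point (AB = 0 corresponds to 3631 Jy by definition):
#   jansky_to_AB(3631.0) returns approximately 0.0
#   AB_to_jansky(0.0) returns approximately 3631.0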
def ABmag_to_Vegamag(magAB, filterlist, path_filter_transmission=None,
path_vega_spectrum=None,verbose=None):
# Convert AB magnitude to Vega magnitudes. That requires a filter name.
#
# Handles 3 cases:
# 1) if magAB and filterlist are both scalars, then the function also
# returns a scalar.
# 2) if magAB is an array but the filterlist is a scalar, then the
# function returns an array of same length as magAB.
# 3) if both magAB and filterlist are arrays, then the function returns
# a matrix of shape nmag x nfilter, e.g. mat[:,0] is all mag thru
# one filter.
if verbose:
print('shapes of input parameters:')
print('magAB:', np.shape(magAB))
print('filterlist', np.shape(filterlist))
#Check if a path for the Vega spectrum was passed. If not, assume some
# local path
if path_vega_spectrum is None:
path_vega_spectrum = '/Users/albert/NIRISS/SOSSpipeline/syntMagCode/'
if path_filter_transmission is None:
path_filter_transmission = '/Users/albert/filterSVO/'
# Initialize the filters array
if np.size(filterlist) == 1:
filters = np.array(np.reshape(filterlist,-1))
else:
filters = np.array(filterlist)
# Initialize matrix of AB to Vega magnitude offsets
magoffset = np.empty((np.size(magAB),np.size(filters)), dtype=np.float)
# Initialize input magAB into a matrix spanning filters across axis=2
magABmatrix = np.empty((np.size(magAB),np.size(filters)), dtype=np.float)
for f in range(np.size(filters)):
magABmatrix[:,f] = magAB
# Read the Vega and AB spectra. Both share the same wavelength sampling
# (that of the Vega spectrum).
wave_Vega, Flambda_Vega = readVega(path_vega_spectrum=path_vega_spectrum)
lba = np.copy(wave_Vega)
wave_AB, Flambda_AB = readAB(wave_sampling=lba)
# Each wavelength sample has a width, determined here:
dlba = sample_width(lba)
for f in range(np.size(filters)):
#Get the filter transmission curve for that range at same sampling
filter_wave, filter_t, magsystem = readFilter(filters[f],wave_sampling=lba,
path_filter_transmission=path_filter_transmission)
Flux_Vega = np.sum(Flambda_Vega * filter_t * dlba) / np.sum(filter_t * dlba)
Flux_AB = np.sum(Flambda_AB * filter_t * dlba) / np.sum(filter_t * dlba)
#magVega[f] = magAB -2.5*np.log10(Flux_AB/Flux_Vega)
magoffset[:,f] = -2.5*np.log10(Flux_AB/Flux_Vega)
# Apply offset to input AB magnitudes
magVega = magABmatrix + magoffset
# Manage output because at this point, it is a matrix (nfilter x nmag)
if (np.size(filterlist) == 1) and (np.size(magAB) == 1):
# a single magnitude through a single filter was requested.
# return a scalar:
return(magVega[0,0])
elif (np.size(filterlist) == 1) and (np.size(magAB) > 1):
# an array of magnitudes was passed, through a single filter.
# return an array of magnitudes, not a matrix
return(np.reshape(magVega[:,0],-1))
elif (np.size(filterlist) > 1) and (np.size(magAB) == 1):
# magAB is a scalar but filterlist is an array as input.
# return an array of size nfilter.
return(np.reshape(magVega[0,:],-1))
else:
# magnitudes and filters were both arrays as input.
# return a matrix
return(magVega)
def Vegamag_to_ABmag(magVega, filterlist, path_filter_transmission=None,
path_vega_spectrum=None,verbose=None):
# Convert Vega magnitude to AB magnitude.
# refer to ABmag_to_Vegamag for explanations
# Determine the AB to Vega magnitude offset for each filter.
# Send zero thru the AB --> Vega converter (Vega mag will have lower values
# so offset will be less than zero for most filters)
offset = ABmag_to_Vegamag(0,filterlist,
path_filter_transmission=path_filter_transmission,
path_vega_spectrum=path_vega_spectrum,
verbose=verbose)
# Subtract (rather than add) the offsets to get ABmags
if (np.size(magVega) > 1) and (np.size(filterlist)) > 1:
magAB = np.zeros((np.size(magVega),np.size(filterlist)))
for f in range(np.size(filterlist)):
magAB[:,f] = magVega - offset[f]
else:
magAB = magVega - offset
return(magAB)
def sample_width(lba):
# Given an array of wavelengths, not necessarily equally spaced
# but necessarily sorted, return the wavelength width spanned by each
# sample.
# Find the indices of sorted array of wavelengths
indsort = np.argsort(lba)
if not np.array_equal(lba, lba[indsort]):
raise ValueError('The input wavelength array must be sorted before entering this function.')
# Devise the width of each wavelength sample
dlba = lba*0.0
dlba[0:-1] = lba[1:]-lba[0:-1]
# Make the last index the same as previous last
dlba[-1] = dlba[-2]*1.0
return(dlba)
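# Illustrative example: sample_width(np.array([1.0, 1.1, 1.3])) returns
# array([0.1, 0.2, 0.2]) -- the last sample is assigned the same width as the
# one before it.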
def syntMag(lba,Flba,filterlist,path_filter_transmission=None,
path_vega_spectrum=None,verbose=None):
# Computes the synthetic magnitude of a spectrum through an input list of filters
# Make sure that the input spectrum has its wavelengths sorted.
indsorted = np.argsort(lba)
if not np.array_equal(lba, lba[indsorted]):
raise ValueError('Input spectrum to syntMag must have its wavelengths sorted in increasing order.')
#Check if a path for the Vega spectrum was passed. If not, assume some
# local path
if path_vega_spectrum is None:
path_vega_spectrum = '/Users/albert/NIRISS/SOSSpipeline/syntMagCode/'
if path_filter_transmission is None:
path_filter_transmission = '/Users/albert/filterSVO/'
# Initialize array of output magnitudes
mag = np.arange(np.size(filterlist), dtype=np.float)
# Read the Vega and AB spectra first, so it is done only once
wave_Vega, Flambda_Vega = readVega(wave_sampling=lba, path_vega_spectrum=path_vega_spectrum)
wave_AB, Flambda_AB = readAB(wave_sampling=lba)
# Each wavelength sample has a width, determined here:
dlba = sample_width(lba)
for f in range(np.size(filterlist)):
#Get the filter transmission curve for that range at same sampling
filter_wave, filter_t, magsystem = readFilter(filterlist[f],wave_sampling=lba,
path_filter_transmission=path_filter_transmission)
if magsystem == 'Vega':
#Do the Vega spectrum
#Energy_Vega = np.sum(Flambda_Vega * filter_t) | |
score = Score(lives)
dispatcher.add('draw', score.draw)
return score
def start_game(self, click_event):
# if not yet started
if self.started is False:
# reset the lives
self.score.reset()
# hides the splash
self.splash.hide()
# starts the game
self.started = True
def check_game(self, game_event):
# if no more life
if game_event.score.get_lives() <= 0:
# resets the game
self.reset()
def reset(self):
# clears all existing rocks
self.rocks.clear()
# clears all missles
self.player.get_missles().clear()
# reset the player's position
self.player.reset()
# recreate the score board
self.score.reset()
# display the splash
self.splash.show()
# ends the game
self.started = False
def draw(self, canvas):
# calls parent method
Game.draw(self, canvas)
# continue drawing if started
if self.started is True:
# create a draw event
draw_event = DrawEvent(canvas, self.get_frame())
# draw rocks
if self.rocks.exists():
for rock in self.rocks.get_all():
rock.draw(draw_event)
# draw missles
if self.player.get_missles().exists():
for missle in self.player.get_missles().get_all():
# if missle is expired
if missle.is_expired():
# remove from the missle group
self.player.get_missles().remove(missle)
else:
missle.draw(draw_event)
# draw explosions
if self.explosions.exists():
for explosion in self.explosions.get_all():
explosion.draw(draw_event)
class SplashScreen:
def __init__(self, size, position):
self.hidden = False
# sets the size of the background
self.size = size
# sets the position of the splash screen
self.position = position
# loads the image
self.image = Image('https://www.dropbox.com/s/6qfewgjyf8k3gag/splash.png?dl=1', (400, 300))
# loads the sound
self.sound = Sound('https://www.dropbox.com/s/7jjgyyz16gubjl4/soundtrack.mp3?dl=1')
self.sound.play()
# registers the draw handler
dispatcher.add('draw', self.draw)
def set_size(self, size):
self.size = size
def get_size(self):
return self.size
def set_position(self, position):
self.position = position
def get_position(self):
return self.position
def show(self):
self.sound.rewind()
self.sound.play()
self.hidden = False
def hide(self):
self.hidden = True
def draw(self, draw_event):
# draw only if not hidden
if self.hidden is False:
# draws the background into the canvas
self.image.draw_at(draw_event.canvas, self.get_position(), self.get_size())
class Space:
def __init__(self, size):
"""
Creates the space background image
<tuple> size The size of the window
"""
# sets the size of the background
self.size = size
# loads the image
self.image = Image('https://www.dropbox.com/s/gkz1ng5b5f911tk/nebula_blue.f2014.png?dl=1', (800, 600))
# registers the draw handler
dispatcher.add('draw', self.draw)
def set_size(self, size):
self.size = size
def get_size(self):
return self.size
def draw(self, draw_event):
size = self.get_size()
# calculate the center destination
center_dest = (size[0] / 2, size[1] / 2)
# draws the background into the canvas
self.image.draw_at(draw_event.canvas, center_dest, self.size)
class Debris:
def __init__(self, size, timer):
"""
Creates the debris animation
<tuple> size The size of the window
<Timer> timer The timer instance
"""
self.size = size
self.timer = timer
# loads the image
self.image = Image('https://www.dropbox.com/s/xcygcu51maw8bam/debris2_blue.png?dl=1', (640, 480))
dispatcher.add('draw', self.draw)
def set_size(self, size):
self.size = size
def get_size(self):
return self.size
def draw(self, draw_event):
size = self.get_size()
        # horizontal scroll offset derived from the timer
        delta = (self.timer.get_time() / 50) % size[0]
        # two copies are drawn one screen-width apart so the texture wraps seamlessly
        center_dest1 = (delta - size[0] / 2, size[1] / 2)
        center_dest2 = (delta + size[0] / 2, size[1] / 2)
# draws the background into the canvas
self.image.draw_at(draw_event.canvas, center_dest1, size)
self.image.draw_at(draw_event.canvas, center_dest2, size)
class Sprite:
def __init__(self, size, position, velocity = (0, 0), rotation = 0, rotation_velocity = 0, lifetime = 0, animated = False):
"""
Creates a new movable object. Set the velocity vector
to make the object move by default.
<tuple> size The size of the sprite
<tuple> position The position of the sprite
<tuple> velocity The velocity of the sprite
        <float> rotation The rotation/angle of the sprite (in radians)
        <float> rotation_velocity The rotational velocity of the sprite (in radians per frame)
        <int> lifetime The number of frames the sprite stays alive (0 means it never expires)
        <bool> animated A boolean indicating if the sprite should be drawn with an animated image
"""
# sets the initial size
self.size = size
# sets the image's center position
self.set_center((self.size[0] / 2, self.size[1] / 2))
# sets the initial position
self.set_initial_position(position)
# sets the new position
self.set_position(position)
# sets the initial velocity
self.set_velocity(velocity)
        # set the rotation direction flag (None, "left", or "right")
self.set_rotation_dir(None)
# set the initial rotation (radians)
self.set_rotation(rotation)
# set the initial rotation velocity (radians)
self.set_rotation_velocity(rotation_velocity)
# sets the initial acceleration
self.set_acceleration(0.1)
        # sets the initial friction (small constant)
self.set_friction(0.02)
# sets the age/lifetime
self.age = 0
self.lifetime = lifetime
self.animated = animated
# sets the initial accelerating flag
self.accelerating = False
def set_size(self, size):
self.size = size
def get_size(self):
return self.size
def set_center(self, center):
self.center = center
def get_center(self):
return self.center
def get_radius(self):
return self.size[0] / 2
def set_initial_position(self, initial_position):
self.initial_position = initial_position
def get_initial_position(self):
return self.initial_position
def set_position(self, position):
self.position = position
def get_position(self):
return self.position
def set_velocity(self, velocity):
self.velocity = velocity
def get_velocity(self):
return self.velocity
def set_rotation_dir(self, rotation_dir):
self.rotation_dir = rotation_dir
def get_rotation_dir(self):
return self.rotation_dir
def set_rotation(self, rotation):
self.rotation = rotation
def get_rotation(self):
return self.rotation
def set_rotation_velocity(self, rotation_velocity):
self.rotation_velocity = rotation_velocity
def get_rotation_velocity(self):
return self.rotation_velocity
def set_acceleration(self, acceleration):
self.acceleration = acceleration
def get_acceleration(self):
return self.acceleration
def set_friction(self, friction):
self.friction = friction
def get_friction(self):
return self.friction
def get_forward_vector(self):
"""
Calculates the forward vector based on the sprite's
rotation/angle (radians), acceleration, and friction
"""
rotation = self.get_rotation()
return (math.cos(rotation), math.sin(rotation))
def apply_rotation(self, rotation_velocity):
"""
Given the current rotation (in radians), applies the rotation velocity (in radians)
"""
new_rotation = self.get_rotation() + rotation_velocity
self.set_rotation(new_rotation)
def apply_velocity(self, velocity, screen_bounds):
"""
Given the current position, applies the specified velocity vector
<tuple> screen_bounds
"""
current_pos = self.get_position()
new_pos = ((current_pos[0] + velocity[0]) % screen_bounds[0], (current_pos[1] + velocity[1]) % screen_bounds[1])
# sets the new position
self.set_position(new_pos)
def apply_forward_vector(self, forward, acceleration):
"""
Given the current velocity, applies the specified forward vector, and acceleration
"""
current_vel = self.get_velocity()
new_vel = (
current_vel[0] + (forward[0] * acceleration),
current_vel[1] + (forward[1] * acceleration)
)
self.set_velocity(new_vel)
def apply_friction(self, friction):
"""
Given the current velocity, applies the specified friction constant
"""
if friction > 0:
current_vel = self.get_velocity()
new_vel = (current_vel[0] * (1 - friction), current_vel[1] * (1 - friction))
# sets the new velocity
self.set_velocity(new_vel)
def is_accelerating(self):
"""
Returns a boolean indicating if the object is accelerating
"""
return self.accelerating
def is_expired(self):
return self.lifetime > 0 and self.age > self.lifetime
def get_distance(self, a, b):
"""
Calculates the distance between 2 points
"""
dx = float(b[0]) - a[0]
dy = float(b[1]) - a[1]
return math.hypot(dx, dy)
def collide(self, object):
position = self.get_position()
radius = self.get_radius()
obj_position = object.get_position()
obj_radius = object.get_radius()
# calculate the distance between the 2 objects
dist = self.get_distance(position, obj_position)
radius_sum = radius + obj_radius
if dist <= radius_sum:
return True
return False
def collide_group(self, group):
# checks if the current object collides with the given group of objects
# returns the first object it has collided with
# otherwise, returns False
for object in group.get_all():
if self.collide(object):
return object
return False
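    # A hedged usage sketch of the circle-vs-group collision check (the missile and
    # rocks names are illustrative, referring to Sprite subclasses and a Group that
    # exist elsewhere in this program):
    #
    #   hit = missile.collide_group(rocks)   # first rock whose circle overlaps, or False
    #   if hit:
    #       rocks.remove(hit)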
def update(self, draw_event):
"""
        Updates the position of the current sprite based on the velocity and rotation.
"""
# applies the rotation velocity to the rotation
self.apply_rotation(self.get_rotation_velocity())
# applies the current velocity vector
self.apply_velocity(self.get_velocity(), draw_event.frame.get_size())
# if currently accelerating
if self.is_accelerating():
# retrieve the forward vector
forward_vector = self.get_forward_vector()
            # retrieve the acceleration
acceleration = self.get_acceleration()
# applies the forward vector, and acceleration to the current velocity
self.apply_forward_vector(forward_vector, acceleration)
# applies the friction constant
self.apply_friction(self.get_friction())
def draw(self, draw_event):
"""
        Draws the sprite onto the canvas
"""
# increment age
self.age += 1
# flag to draw or not
should_draw = True
# if expired, don't draw
if self.is_expired():
should_draw = False
if should_draw:
# updates the positions, and rotations
self.update(draw_event)
# draws the sprite into the canvas
if self.animated:
self.image.draw_animated_at(draw_event.canvas, self.get_position(), self.get_size(), self.get_rotation(), self.age)
else:
self.image.draw_at(draw_event.canvas, self.get_position(), self.get_size(), self.get_rotation())
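    # Note: Sprite never assigns self.image itself; concrete subclasses (the ship,
    # rocks, and missiles defined elsewhere in this program) are expected to set it
    # before draw() is called. A hedged sketch of such a subclass (the class name and
    # image URL are illustrative, not the original code):
    #
    #   class Rock(Sprite):
    #       def __init__(self, position, velocity):
    #           Sprite.__init__(self, (90, 90), position, velocity, rotation_velocity=0.05)
    #           self.image = Image('...asteroid image URL...', (90, 90))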
class Group:
    def __init__(self, objects=None, max_count=None):
        # avoid a shared mutable default argument: each Group gets its own set
        self.objects = objects if objects is not None else set()
self.set_max_count(max_count)
def set_max_count(self, max_count):
self.max_count = max_count
def add(self, object):
# add only if hasn't reached the max count
if self.max_count is not None:
if len(self.objects) < self.max_count:
self.objects.add(object)
else:
self.objects.add(object)
def remove(self, object):
# make a new set
new_set = set()
# iterate through current objects and add ones that are not removed
for o in self.objects:
if o != object:
new_set.add(o)
# override with the new set
self.objects = new_set
def clear(self):
self.objects = set()
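# The draw loop above also calls group.exists() and group.get_all(), which are not part
# of the Group code shown here. A minimal sketch of what they presumably look like (an
# assumption, not the original implementation), plus a usage example with illustrative
# names:
#
#   class Group:  # continued
#       def get_all(self):
#           return self.objects
#       def exists(self):
#           return len(self.objects) > 0
#
#   rocks = Group(max_count=12)          # at most 12 rocks alive at once
#   rocks.add(Rock(...))                 # silently ignored once the cap is reached
#   if rocks.exists():
#       for rock in rocks.get_all():
#           rock.draw(draw_event)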
# week_12_quantumChemistry1/utils2.py
import sys
version=sys.version_info.major
import os
import numpy as np
import scipy as sp
import scipy.linalg as spla
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib as mpl
try:
from colorama import Fore,Back,Style
from colorama import init
init(autoreset=True)
print_color=True
except:
print_color=False
np.set_printoptions(threshold=sys.maxsize)
titles={
1:'Particle in an infinite potential well',
2:'Particle in a finite well',
3:'Particle in a double finite well (equal depth)',
4:'Particle in a double finite well (unequal depth)',
5:'Particle in a harmonic well',
6:'Particle in a Morse well',
7:'Kronig-Penney finite well'}
##################################
#FUNCTIONS
##################################
########
# IO FUNCTIONS
########
def print_center_text(s):
print '{:^79}'.format(s)
def valid_input_error_message():
if print_color:
print Fore.RED+'\nPlease enter a valid input!\n'
else:
print '\nPlease enter a valid input!\n'
def print_startup():
print ""
print '*'*79
print_center_text('Welcome to the Schrodinger Solver!')
print_center_text('Created by: <NAME>, <NAME>, and <NAME>')
print '*'*79
def print_choices():
print '\tPlease enter the case number you would like to study.'
print '\tCases:'
for i,j in titles.items():
print('\t\t {}. {}'.format(i,j))
print '\t\t99. Quit\n'
def choices(Case=111):
if Case==111:
# First Print
print_startup()
print_choices()
elif Case==666:
# Invalid input
valid_input_error_message()
print_choices()
else:
print_choices()
try:
Case=input('Enter case number (1-7 or 99): ')
except:
Case=0
if(Case in set(titles.keys()+[99])):
return Case
else:
return choices(Case=666)
print_choices()
def infinite_well_input(W=None,n=None):
if W==None:
try:
W=float(raw_input('\nEnter the width of your infinite well in atomic units (a.u.).\n\tSelect a value between 0.5 and 15: '))
W,n=infinite_well_input(W=W)
except ValueError:
valid_input_error_message()
W,n=infinite_well_input()
else:
try:
n=int(raw_input('Enter the number of wavefunctions you would like to plot.\n\tThis value must be an integer: '))
except ValueError:
valid_input_error_message()
W,n=infinite_well_input(W=W)
return W,n
def finite_well_input(W=None,D=None):
if W==None:
try:
W=float(raw_input('\nEnter the width of your finite well in atomic units (a.u.).\n\tSelect a value between 1.0 and 15. '))
W,D=finite_well_input(W=W)
except ValueError:
valid_input_error_message()
W,D=finite_well_input()
else:
try:
D=-float(raw_input('Enter the depth of your finite well in atomic units (a.u.).\n\tSelect a value between 20 and 500. '))
except ValueError:
valid_input_error_message()
W,D=finite_well_input(W=W)
return W,D
def double_finite_well_equal_depth_input(W=None,B=None,D=None):
if W==None:
try:
print "\nThis case's plot is sensitive to the following user inputs. Be aware that too wide/deep a well may prevent the user from observing the wave-like nature of the wavefunctions. Users should experiment with inputs until the desired plot is generated."
W=float(raw_input('\nEnter the width of your finite wells in atomic units (a.u.). Select a value between 0.5 and 10. '))
W,B,D=double_finite_well_equal_depth_input(W=W)
except ValueError:
valid_input_error_message()
W,B,D=double_finite_well_equal_depth_input()
elif D==None:
try:
D=-float(raw_input('\nEnter the depth of your finite wells in atomic units (a.u.). Select an integer value between 30 and 500. '))
W,B,D=double_finite_well_equal_depth_input(W=W,D=D)
except ValueError:
valid_input_error_message()
W,B,D=double_finite_well_equal_depth_input(W=W)
else:
try:
B=float(raw_input('\nEnter the distance between potential wells in atomic units (a.u.). Select an integer value between 0.1 and 10. '))
except ValueError:
valid_input_error_message()
W,B,D=double_finite_well_equal_depth_input(W=W,D=D)
return W,B,D
def double_finite_well_unequal_depth_input(W1=None,W2=None,B=None,D1=None,D2=None):
if W1==None:
try:
print "\nThis case's plot is sensitive to the following user inputs. Be aware that too wide/deep a well may prevent the user from observing the wave-like nature of the wavefunctions. Users should experiment with inputs until the desired plot is generated."
W1=float(raw_input('\nEnter the width of finite well 1 in atomic units (a.u.). Select a value between 0.5 and 10. '))
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1)
except ValueError:
valid_input_error_message()
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input()
elif W2==None:
try:
W2=float(raw_input('\nEnter the width of finite well 2 in atomic units (a.u.). Select a value between 0.5 and 10. '))
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2)
except ValueError:
valid_input_error_message()
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1)
elif B==None:
try:
B=float(raw_input('\nEnter the distance between potential wells in atomic units (a.u.). Select an integer value between 0.1 and 10. '))
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2, B=B)
except ValueError:
valid_input_error_message()
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2)
elif D1==None:
try:
D1=-float(raw_input('\nEnter the depth of finite well 1 in atomic units (a.u.). Select an integer value between 30 and 500. '))
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2,B=B,D1=D1)
except ValueError:
valid_input_error_message()
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2,B=B)
else:
try:
D2=-float(raw_input('\nEnter the depth of finite well 2 in atomic units (a.u.). Select an integer value between 30 and 500. '))
except ValueError:
valid_input_error_message()
W1,W2,B,D1,D2=double_finite_well_unequal_depth_input(W1=W1,W2=W2,B=B,D1=D1)
return W1,W2,B,D1,D2
def harmonic_well_input(omega=None,D=None):
if omega==None:
try:
omega=float(raw_input('\nEnter the force constant of your harmonic well.\n\tSelect a value between 0.3 and 1.4. '))
omega,D=harmonic_well_input(omega=omega)
except ValueError:
valid_input_error_message()
omega,D=harmonic_well_input()
else:
try:
D=-float(raw_input('Enter the depth of your harmonic well in atomic units (a.u.).\n\tSelect a value between 2 and 15. '))
except ValueError:
valid_input_error_message()
omega,D=harmonic_well_input(omega=omega)
return omega,D
def morse_well_input(omega=None,D=None):
if omega==None:
try:
omega=float(raw_input('\nEnter the force constant of your morse well.\n\tSelect a value between 0.05 and 1.4. '))
omega,D=morse_well_input(omega=omega)
except ValueError:
valid_input_error_message()
omega,D=morse_well_input()
else:
try:
D=-float(raw_input('Enter the depth of your morse well in atomic units (a.u.).\n\tSelect a value between 2 and 15. '))
except ValueError:
valid_input_error_message()
omega,D=morse_well_input(omega=omega)
return omega,np.abs(D)
def Kronig_Penney_input(A=None,D=None,B=None,num_wells=None):
if A==None:
try:
A=float(raw_input('\nEnter the width of the repeating finite wells in atomic units (a.u.).\n\tSelect a value between 1.0 and 15. '))
A,D,B,num_wells=Kronig_Penney_input(A=A)
except ValueError:
valid_input_error_message()
A,D,B,num_wells = Kronig_Penney_input()
elif D==None:
try:
D=-float(raw_input('Enter the depth of the repeating finite wells in atomic units (a.u.).\n\tSelect a value between 20 and 500. '))
A,D,B,num_wells=Kronig_Penney_input(A=A,D=D)
except ValueError:
valid_input_error_message()
A,D,B,num_wells=Kronig_Penney_input(A=A)
elif B==None:
try:
B=float(raw_input('Enter the separation distance of the repeating finite wells in atomic units (a.u.).\n\tSelect a value between 1.0 and 15. '))
A,D,B,num_wells=Kronig_Penney_input(A=A,D=D,B=B)
except ValueError:
valid_input_error_message()
A,D,B,num_wells=Kronig_Penney_input(A=A,D=D)
elif num_wells==None:
try:
num_wells=int(raw_input('Enter the number of repeating wells to use.\n\tSelect an odd integer between 3 and 7. '))
except ValueError:
valid_input_error_message()
A,D,B,num_wells=Kronig_Penney_input(A=A,D=D,B=B)
return A,D,B,num_wells
def ask_to_save_plot(error=False):
if error==True:
valid_input_error_message()
try:
image=raw_input('Would you like to save a .png image of your plot? Type yes or no. ')
except:
image=ask_to_save_plot(error=True)
image=image.strip().lower()
if image=='yes':
print 'Your image will be saved in your current working directory.'
if image not in {'yes','no'}:
image=ask_to_save_plot(error=True)
return image
def ask_to_plot_squared(error=False):
if error==True:
valid_input_error_message()
try:
sq=raw_input('Would you like to plot the probability density (psi squared) instead of the probability amplitude (psi)? Type yes or no. ')
except:
sq=ask_to_plot_squared(error=True)
sq=sq.strip().lower()
if sq not in {'yes','no'}:
sq=ask_to_plot_squared(error=True)
return sq
def print_number_of_wavefunctions(n):
if print_color:
print Fore.RED+'\nMaximum number of wavefunctions for plotting is', Fore.RED + str(n), "\n"
else:
print '\nMaximum number of wavefunctions for plotting is', n
def output(Case,input_fields,input_values,E,n):
print ""
print '*'*79
print_center_text('Schrodinger Solver Output')
print_center_text('<NAME> and <NAME>')
print '*'*79
print_center_text(titles[Case])
print ""
print "\t\tInput:"
for i,j in zip(input_fields,input_values):
print_center_text(str(i)+' : '+str(j))
print ""
print "\t\t{} lowest Bound States:".format(n)
count=0
for i in range(n):
print_center_text('E({})='.format(i) + str(E[i]))
print '*'*79
print ""
########
# SHARED FUNCTIONS
########
def step_func(x):
    # Heaviside step function: 0 for x < 0, 1 for x > 0 (0.5 exactly at x = 0)
    return 0.5*(1+np.sign(x))
def harmonic_potential(x,omega,D):
pot=(0.5*(omega**2)*(x**2))+D
for i in range(len(pot)):
if pot[i]>0:
pot[i]=0
return pot
def morse_function(a,D,x):
    # Morse form V(x) = D*(exp(-2*a*x) - 2*exp(-a*x)); minimum value of -D at x = 0
    return D*(np.exp(-2*a*x)-2*np.exp(-a*x))
def morse_potential(omega,D,steps):
    D=np.abs(D)
    # width parameter from the force constant: a = sqrt(k/(2*D))
    a=np.sqrt(omega/(2.0*D))
start=0.0
stop=0.0
while morse_function(a,D,start)<0.5*np.abs(D):
start-=0.01
while morse_function(a,D,stop)<-0.1:
stop+=0.01
# create x-vector
xvec=np.linspace(2.0*start,2.0*stop,steps,dtype=np.float_)
# get step size
h=xvec[1]-xvec[0]
pot=morse_function(a,D,xvec)
for i in range(len(pot)):
if pot[i]>0:
pot[i]=0
return xvec,h,pot
def diagonalize_hamiltonian(Hamiltonian):
return spla.eigh(Hamiltonian)
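# The Hamiltonian itself is assembled elsewhere in this project before being passed to
# diagonalize_hamiltonian(); that construction is not shown in this file. The helper
# below is a hedged sketch only (the function name and the 3-point finite-difference
# discretization are assumptions, not necessarily what the original solver uses),
# working in atomic units with hbar = m = 1.
def example_finite_difference_hamiltonian(xvec, h, pot):
    # kinetic energy: -(1/2) d^2/dx^2 discretized on the uniform grid with spacing h
    n = len(xvec)
    laplacian = (np.diag(-2.0*np.ones(n))
                 + np.diag(np.ones(n-1), 1)
                 + np.diag(np.ones(n-1), -1)) / (h**2)
    # potential energy is diagonal in the position basis
    return -0.5*laplacian + np.diag(pot)
# Usage sketch (input values chosen arbitrarily):
#   xvec, h, pot = morse_potential(0.5, 10.0, 500)
#   E, V = diagonalize_hamiltonian(example_finite_difference_hamiltonian(xvec, h, pot))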
########
# PLOTTING
########
def infinite_well_plot(E,V,xvec,W,steps,n,Case,ask_to_save=False,ask_squared=False):
if ask_squared:
sq=ask_to_plot_squared()
if(sq=='yes'):
V = np.multiply(np.conj(V),V)
V_new,ScaleFactor=infinite_well_plot_scaling(E,V,xvec,W)
# create the figure
f=plt.figure()
# add plot to the figure
ax=f.add_subplot(111)
# set x limit
plt.xlim(-W,W)
# determine how much to buffer the axes
buff=(np.max(V_new[0:steps,n-1])-np.min(V_new[0:steps,n-1]))
#set y limit
plt.ylim(0,np.max(V_new[0:steps,n-1])+buff)
#plot wave functions
for i in np.arange(n-1,-1,-1):
color=mpl.cm.jet_r((i)/(float)(n-1),1)
wavefunc=ax.plot(xvec,V_new[0:steps,i],c=color,label='E(a.u.)={}'.format(np.round(E[i]*1000)/1000.0))
ax.axhline(y=V_new[0,i],xmin=-20*W,xmax=20*W,c=color,ls='--')
# set plot title
ax.set_title('{}'.format(titles[Case]))
# set x label
plt.xlabel('Width of Well / (a.u.)')
# set y label
plt.ylabel('Energy / (a.u.)')
# modify tick marks
ax.set_yticklabels(np.round(ax.yaxis.get_ticklocs()*ScaleFactor))
# add plot legend
L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)
box=ax.get_position()
ax.set_position([box.x0,box.y0,0.7*box.width,box.height])
if ask_to_save:
image=ask_to_save_plot()
if(image=='yes'):
f.savefig('Case{}.png'.format(Case),bbox_extra_artists=(L,),dpi=200,bbox_inches='tight')
plt.show()
def finite_well_plot(E,V,xvec,steps,n,Case,U,ask_to_save=False,ask_squared=False):
if ask_squared:
sq=ask_to_plot_squared()
if(sq=='yes'):
V = np.multiply(np.conj(V),V)
V_new,ScaleFactor,U_new,n=finite_well_plot_scaling(E,V,xvec,U,n,steps)
# create the figure
f=plt.figure()
# add plot to the figure
ax=f.add_subplot(111)
# plot potential
ax.plot(xvec,U_new,c='lightslategray')
# find appropriate x limits and set x limit
MinX=0
MaxX=len(xvec)-1
while U_new[MinX]==0:
MinX=MinX+1
while U_new[MaxX]==0:
MaxX=MaxX-1
for m in range(n):
V_old=V_new[MinX+1,m]
while(np.abs(V_old - V_new[MinX,m])>1e-6 and MinX>0):
V_old=V_new[MinX,m]
MinX=MinX-1
V_old=V_new[MaxX-1,m]
while(np.abs(V_old - V_new[MaxX,m])>1e-6 and MaxX<len(xvec)-1):
V_old=V_new[MaxX,m]
MaxX=MaxX+1
plt.xlim(xvec[MinX],xvec[MaxX])
# find appropriate y limits and set y limit
if(np.max(V_new)>0):
if(np.min(V_new)>np.min(U_new)):
plt.ylim(1.05*np.min(U_new),np.max(V_new)+abs(0.05*np.min(U_new)))
else:
plt.ylim(1.05*np.min(V_new),np.max(V_new)+abs(0.05*np.min(U_new)))
else:
if(np.min(V_new)>np.min(U_new)):
plt.ylim(1.05*np.min(U_new),np.max(U_new)+abs(0.05*np.min(U_new)))
else:
plt.ylim(1.05*np.min(V_new),np.max(U_new)+abs(0.05*np.min(U_new)))
#plot wave functions
for i in np.arange(n-1,-1,-1):
color=mpl.cm.jet_r((i)/(float)(n),1)
wavefunc=ax.plot(xvec,V_new[0:steps,i],c=color,label='E(a.u.)={}'.format(np.round(E[i]*1000)/1000.0))
ax.axhline(y=V_new[0,i],xmin=-10,xmax=10,c=color,ls='--')
# set plot title
ax.set_title('{}'.format(titles[Case]))
# set x label
plt.xlabel('Width of Well / (a.u.)')
# set y label
plt.ylabel('Energy / (a.u.)')
# modify tick marks
ax.set_yticklabels(np.round(ax.yaxis.get_ticklocs()*ScaleFactor))
# add plot legend
L=plt.legend(bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)
box=ax.get_position()
ax.set_position([box.x0,box.y0,0.7*box.width,box.height])
if ask_to_save:
image=ask_to_save_plot()
if(image=='yes'):
f.savefig('Case{}.png'.format(Case),bbox_extra_artists=(L,),dpi=200,bbox_inches='tight')
plt.show()
def Kronig_Penney_Plot(E,V,xvec,steps,n,Case,U,ask_to_save=False,ask_squared=False):
if ask_squared:
sq=ask_to_plot_squared()
if(sq=='yes'):
V = np.multiply(np.conj(V),V)
V_new,ScaleFactor,U_new,n=finite_well_plot_scaling(E,V,xvec,U,n,steps)
# create the figure
f=plt.figure()
# add plot to the figure
ax=f.add_subplot(111)
# plot potential
ax.plot(xvec,U_new,c='lightslategray')
# find appropriate x limits and set x limit
MinX=0
MaxX=len(xvec)-1
while U_new[MinX]==0:
MinX=MinX+1
while U_new[MaxX]==0:
MaxX=MaxX-1
for m in range(n):
V_old=V_new[MinX+1,m]
while(np.abs(V_old - V_new[MinX,m])>1e-6 and MinX>0):
V_old=V_new[MinX,m]
MinX=MinX-1
V_old=V_new[MaxX-1,m]
while(np.abs(V_old - V_new[MaxX,m])>1e-6 and MaxX<len(xvec)-1):
V_old=V_new[MaxX,m]
MaxX=MaxX+1
plt.xlim(xvec[MinX],xvec[MaxX])
# find appropriate y limits | |
import collections
import os
from functools import partial
from itertools import product
from typing import Any, Callable, Iterable, Iterator, List, Mapping, MutableSequence, Optional, Sequence, Tuple, Union
import warnings
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.stats import scoreatpercentile
from skimage import exposure
from scipy.ndimage.filters import gaussian_filter
from slicedimage import Reader, Writer, TileSet, Tile
from slicedimage.io import resolve_path_or_url
from tqdm import tqdm
from starfish.constants import Coordinates, Indices
from starfish.errors import DataFormatWarning
from starfish.pipeline.features.spot_attributes import SpotAttributes
from starfish.intensity_table import IntensityTable
class ImageStack:
"""Container for a TileSet (field of view)
Methods
-------
get_slice retrieve a slice of the image tensor
set_slice set a slice of the image tensor
apply apply a 2d or 3d function across all Tiles in the image tensor
    max_proj return a max projection over one or more axes of the image tensor
show_stack show an interactive, pageable view of the image tensor, or a slice of the image tensor
write save the (potentially modified) image tensor to disk
Properties
----------
num_chs the number of channels stored in the image tensor
num_hybs the number of hybridization rounds stored in the image tensor
num_zlayers the number of z-layers stored in the image tensor
numpy_array the 5-d image tensor is stored in this array
raw_shape the shape of the image tensor (in integers)
shape the shape of the image tensor by categorical index (channels, hybridization rounds, z-layers)
"""
AXES_MAP = {
Indices.HYB: 0,
Indices.CH: 1,
Indices.Z: 2,
}
N_AXES = max(AXES_MAP.values()) + 1
def __init__(self, image_partition):
self._image_partition = image_partition
self._num_hybs = image_partition.get_dimension_shape(Indices.HYB)
self._num_chs = image_partition.get_dimension_shape(Indices.CH)
if Indices.Z in image_partition.dimensions:
self._num_zlayers = image_partition.get_dimension_shape(Indices.Z)
else:
self._num_zlayers = 1
self._tile_shape = image_partition.default_tile_shape
        # Examine the tiles to figure out the right kind (int, float, etc.) and size. We require that all the tiles
        # have the same kind of dtype, but not the same itemsize. The data array is allocated with the largest
        # itemsize we encounter.
kind = None
max_size = 0
for tile in self._image_partition.tiles():
dtype = tile.numpy_array.dtype
if kind is None:
kind = dtype.kind
else:
if kind != dtype.kind:
raise TypeError("All tiles should have the same kind of dtype")
if dtype.itemsize > max_size:
max_size = dtype.itemsize
if self._tile_shape is None:
self._tile_shape = tile.tile_shape
elif tile.tile_shape is not None and self._tile_shape != tile.tile_shape:
raise ValueError("Starfish does not support tiles that are not identical in shape")
# now that we know the tile data type (kind and size), we can allocate the data array.
self._data = np.zeros(
shape=(self._num_hybs, self._num_chs, self._num_zlayers) + self._tile_shape,
dtype=np.dtype(f"{kind}{max_size}")
)
# iterate through the tiles and set the data.
for tile in self._image_partition.tiles():
h = tile.indices[Indices.HYB]
c = tile.indices[Indices.CH]
zlayer = tile.indices.get(Indices.Z, 0)
data = tile.numpy_array
if max_size != data.dtype.itemsize:
if data.dtype.kind == "i" or data.dtype.kind == "u":
# fixed point can be done with a simple multiply.
src_range = np.iinfo(data.dtype).max - np.iinfo(data.dtype).min + 1
dst_range = np.iinfo(self._data.dtype).max - np.iinfo(self._data.dtype).min + 1
data = data * (dst_range / src_range)
warnings.warn(
f"Tile "
f"(H: {tile.indices[Indices.HYB]} C: {tile.indices[Indices.CH]} Z: {tile.indices[Indices.Z]}) has "
f"dtype {data.dtype}. One or more tiles is of a larger dtype {self._data.dtype}.",
DataFormatWarning)
self.set_slice(indices={Indices.HYB: h, Indices.CH: c, Indices.Z: zlayer}, data=data)
# set_slice will mark the data as needing writeback, so we need to unset that.
self._data_needs_writeback = False
@classmethod
def from_url(cls, url: str, baseurl: Optional[str]):
"""
Constructs an ImageStack object from a URL and a base URL.
The following examples will all load from the same location:
1. url: https://www.example.com/images/hybridization.json baseurl: None
2. url: https://www.example.com/images/hybridization.json baseurl: I_am_ignored
3. url: hybridization.json baseurl: https://www.example.com/images
4. url: images/hybridization.json baseurl: https://www.example.com
Parameters:
-----------
url : str
Either an absolute URL or a relative URL referring to the image to be read.
baseurl : Optional[str]
If url is a relative URL, then this must be provided. If url is an absolute URL, then this parameter is
ignored.
"""
image_partition = Reader.parse_doc(url, baseurl)
return cls(image_partition)
@classmethod
def from_path_or_url(cls, url_or_path: str) -> "ImageStack":
"""
Constructs an ImageStack object from an absolute URL or a filesystem path.
The following examples will all load from the same location:
1. url_or_path: file:///Users/starfish-user/images/hybridization.json
2. url_or_path: /Users/starfish-user/images/hybridization.json
Parameters:
-----------
url_or_path : str
Either an absolute URL or a filesystem path to an imagestack.
"""
_, relativeurl, baseurl = resolve_path_or_url(url_or_path)
return cls.from_url(relativeurl, baseurl)
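    # A hedged usage sketch (the path and URL are illustrative, following the docstrings above):
    #   stack = ImageStack.from_path_or_url("/Users/starfish-user/images/hybridization.json")
    #   stack = ImageStack.from_url("hybridization.json", baseurl="https://www.example.com/images")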
@property
def numpy_array(self):
"""Retrieves a view of the image data as a numpy array."""
result = self._data.view()
result.setflags(write=False)
return result
@numpy_array.setter
def numpy_array(self, data):
"""Sets the image's data from a numpy array. The numpy array is advised to be immutable afterwards."""
self._data = data.view()
self._data_needs_writeback = True
data.setflags(write=False)
def get_slice(
self,
indices: Mapping[Indices, Union[int, slice]]
) -> Tuple[np.ndarray, Sequence[Indices]]:
"""
Given a dictionary mapping the index name to either a value or a slice range, return a numpy array representing
the slice, and a list of the remaining axes beyond the normal x-y tile.
Example:
ImageStack axes: H, C, and Z with shape 3, 4, 5, respectively.
ImageStack Implicit axes: X, Y with shape 10, 20, respectively.
Called to slice with indices {Z: 5}.
Result: a 4-dimensional numpy array with shape (3, 4, 20, 10) and the remaining axes [H, C].
Example:
Original axes: H, C, and Z.
Implicit axes: X, Y.
Called to slice with indices {Z: 5, C: slice(2, 4)}.
Result: a 4-dimensional numpy array with shape (3, 2, 20, 10) and the remaining axes [H, C].
"""
slice_list, axes = self._build_slice_list(indices)
result = self._data[slice_list]
result.setflags(write=False)
return result, axes
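    # A hedged usage sketch following the docstring examples above (the indices are illustrative):
    #   plane, axes = stack.get_slice({Indices.Z: 5})                          # axes == [Indices.HYB, Indices.CH]
    #   sub, axes = stack.get_slice({Indices.Z: 5, Indices.CH: slice(2, 4)})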
def set_slice(
self,
indices: Mapping[Indices, Union[int, slice]],
data: np.ndarray,
axes: Sequence[Indices]=None):
"""
Given a dictionary mapping the index name to either a value or a slice range and a source numpy array, set the
slice of the array of this ImageStack to the values in the source numpy array. If the optional parameter axes
is provided, that represents the axes of the numpy array beyond the x-y tile.
Example:
ImageStack axes: H, C, and Z with shape 3, 4, 5, respectively.
ImageStack Implicit axes: X, Y with shape 10, 20, respectively.
Called to set a slice with indices {Z: 5}.
            Data: a 4-dimensional numpy array with shape (3, 4, 20, 10)
Result: Replace the data for Z=5.
Example:
ImageStack axes: H, C, and Z. (shape 3, 4, 5)
ImageStack Implicit axes: X, Y. (shape 10, 20)
Called to set a slice with indices {Z: 5, C: slice(2, 4)}.
            Data: a 4-dimensional numpy array with shape (3, 2, 20, 10)
Result: Replace the data for Z=5, C=2-3.
"""
slice_list, expected_axes = self._build_slice_list(indices)
if axes is not None:
if len(axes) != len(data.shape) - 2:
raise ValueError("data shape ({}) should be the axes ({}) and (x,y).".format(data.shape, axes))
move_src = list()
move_dst = list()
for src_idx, axis in enumerate(axes):
try:
dst_idx = expected_axes.index(axis)
except ValueError:
raise ValueError("Unexpected axis {}. Expecting only {}.".format(axis, expected_axes))
if src_idx != dst_idx:
move_src.append(src_idx)
move_dst.append(dst_idx)
if len(move_src) != 0:
                # np.moveaxis returns a rearranged view rather than modifying its input in place
                data = np.moveaxis(data, move_src, move_dst)
if self._data[slice_list].shape != data.shape:
raise ValueError("source shape {} mismatches destination shape {}".format(
data.shape, self._data[slice_list].shape))
self._data[slice_list] = data
self._data_needs_writeback = True
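    # A hedged usage sketch mirroring get_slice (shapes follow the docstring example above):
    #   data, axes = stack.get_slice({Indices.Z: 5})
    #   stack.set_slice({Indices.Z: 5}, data * 2, axes=axes)   # write back a modified plane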
# TODO ambrosejcarr: update to use IntensityTable instead of SpotAttributes
def show_stack(
self, indices: Mapping[Indices, Union[int, slice]],
color_map: str= 'gray', figure_size: Tuple[int, int]=(10, 10),
show_spots: Optional[SpotAttributes]=None,
rescale: bool=False, p_min: Optional[float]=None, p_max: Optional[float]=None, **kwargs):
"""Create an interactive visualization of an image stack
Produces a slider that flips through the selected volume tile-by-tile. Supports manual adjustment of dynamic
range.
Parameters
----------
indices : Mapping[Indices, Union[int, slice]],
Indices to select a volume to visualize. Passed to `Image.get_slice()`.
See `Image.get_slice()` for examples.
color_map : str (default = 'gray')
string id of a matplotlib colormap
figure_size : Tuple[int, int] (default = (10, 10))
size of the figure in inches
show_spots : Optional[SpotAttributes]
[Preliminary functionality] if provided, should be a SpotAttribute table that corresponds
to the volume being displayed. This will be paired automatically in the future.
rescale : bool (default = False)
            if True, rescale the data to exclude high and low-value outliers