import argparse
import httplib2
import os
from apiclient import discovery
from oauth2client import client, tools
from oauth2client.file import Storage
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
flags.noauth_local_webserver = True
class GoogleSheetsClient(object):
"""Class to handle communication with the Google Sheets API."""
    def __init__(self, config=None):
        """
        Class constructor.
        Args:
            config -- The API config (optional).
        """
        config = config or {}  # avoid sharing a mutable default argument
self.SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
self.CLIENT_SECRET_FILE = config.get(
'client_secret_file',
'client_secret.json'
)
self.APPLICATION_NAME = config.get(
'application_name',
'Python Twitter Followers'
)
self.DISCOVERY_URL = (
'https://sheets.googleapis.com/$discovery/rest?version=v4'
)
self.CREDENTIALS_DIR = '.googleapis-credentials'
self.CREDENTIALS_FILE = (
'sheets.googleapis.com-python-twitter-followers.json'
)
credentials = self._get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build(
'sheets',
'v4',
http=http,
discoveryServiceUrl=self.DISCOVERY_URL
)
def _get_credentials(self):
"""Get valid user credentials from storage."""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, self.CREDENTIALS_DIR)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, self.CREDENTIALS_FILE)
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
self.CLIENT_SECRET_FILE,
self.SCOPES
)
flow.user_agent = self.APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
            print('Storing credentials to ' + credential_path)
return credentials
def append_rows(self, spreadsheet_id, rows, range_name='',
value_input_option='RAW'):
"""
Append rows to a specific spreadsheet.
Args:
spreadsheet_id -- The id of the spreadsheet to update.
rows -- The values to append to the spreadsheet.
"""
body = {
'values': rows
}
result = self.service.spreadsheets().values().append(
spreadsheetId=spreadsheet_id,
range=range_name,
valueInputOption=value_input_option,
body=body
).execute()
        print('{} rows appended.'.format(
            result.get('updates').get('updatedRows')
        ))
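if __name__ == '__main__':
    # Illustrative usage (the spreadsheet id and range below are placeholders,
    # not values from the original project):
    sheets = GoogleSheetsClient()
    sheets.append_rows(
        'your-spreadsheet-id',
        [['2019-01-01', 12345]],
        range_name='Sheet1!A:B'
    )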
|
#!/usr/bin/env python
#
# Copyright 2019 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import pynodegl as ngl
from pynodegl_utils.misc import get_backend
_backend_str = os.environ.get('BACKEND')
_backend = get_backend(_backend_str) if _backend_str else ngl.BACKEND_AUTO
_vert = 'void main() { ngl_out_pos = ngl_projection_matrix * ngl_modelview_matrix * ngl_position; }'
_frag = 'void main() { ngl_out_color = color; }'
def _get_scene(geometry=None):
program = ngl.Program(vertex=_vert, fragment=_frag)
if geometry is None:
geometry = ngl.Quad()
scene = ngl.Render(geometry, program)
scene.update_frag_resources(color=ngl.UniformVec4(value=(1.0, 1.0, 1.0, 1.0)))
return scene
def api_backend():
viewer = ngl.Context()
assert viewer.configure(backend=0x1234) < 0
del viewer
def api_reconfigure():
viewer = ngl.Context()
assert viewer.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
scene = _get_scene()
assert viewer.set_scene(scene) == 0
assert viewer.draw(0) == 0
assert viewer.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
assert viewer.draw(1) == 0
del viewer
def api_reconfigure_clearcolor(width=16, height=16):
import zlib
viewer = ngl.Context()
capture_buffer = bytearray(width * height * 4)
viewer = ngl.Context()
assert viewer.configure(offscreen=1, width=width, height=height, backend=_backend, capture_buffer=capture_buffer) == 0
scene = _get_scene()
assert viewer.set_scene(scene) == 0
assert viewer.draw(0) == 0
assert zlib.crc32(capture_buffer) == 0xb4bd32fa
assert viewer.configure(offscreen=1, width=width, height=height, backend=_backend, capture_buffer=capture_buffer,
clear_color=(0.3, 0.3, 0.3, 1.0)) == 0
assert viewer.draw(0) == 0
assert zlib.crc32(capture_buffer) == 0xfeb0bb01
del capture_buffer
del viewer
def api_reconfigure_fail():
viewer = ngl.Context()
assert viewer.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
scene = _get_scene()
assert viewer.set_scene(scene) == 0
assert viewer.draw(0) == 0
assert viewer.configure(offscreen=0, backend=_backend) != 0
assert viewer.draw(1) != 0
del viewer
def api_ctx_ownership():
viewer = ngl.Context()
viewer2 = ngl.Context()
assert viewer.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
assert viewer2.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
scene = _get_scene()
assert viewer.set_scene(scene) == 0
assert viewer.draw(0) == 0
assert viewer2.set_scene(scene) != 0
assert viewer2.draw(0) == 0
del viewer
del viewer2
def api_ctx_ownership_subgraph():
for shared in (True, False):
viewer = ngl.Context()
viewer2 = ngl.Context()
assert viewer.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
assert viewer2.configure(offscreen=1, width=16, height=16, backend=_backend) == 0
quad = ngl.Quad()
render1 = _get_scene(quad)
if not shared:
quad = ngl.Quad()
render2 = _get_scene(quad)
scene = ngl.Group([render1, render2])
assert viewer.set_scene(render2) == 0
assert viewer.draw(0) == 0
assert viewer2.set_scene(scene) != 0
assert viewer2.draw(0) == 0 # XXX: drawing with no scene is allowed?
del viewer
del viewer2
def api_capture_buffer_lifetime(width=1024, height=1024):
capture_buffer = bytearray(width * height * 4)
viewer = ngl.Context()
assert viewer.configure(offscreen=1, width=width, height=height, backend=_backend, capture_buffer=capture_buffer) == 0
del capture_buffer
scene = _get_scene()
assert viewer.set_scene(scene) == 0
assert viewer.draw(0) == 0
del viewer
# Exercise the HUD rasterization. We can't really check the output, so this is
# just for blind coverage and similar code instrumentation.
def api_hud(width=234, height=123):
viewer = ngl.Context()
assert viewer.configure(offscreen=1, width=width, height=height, backend=_backend) == 0
render = _get_scene()
scene = ngl.HUD(render)
assert viewer.set_scene(scene) == 0
for i in range(60 * 3):
assert viewer.draw(i / 60.) == 0
del viewer
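if __name__ == '__main__':
    # Hypothetical manual runner for ad-hoc local checks; the suite is
    # normally driven by an external test harness.
    for _test in (api_backend, api_reconfigure, api_reconfigure_clearcolor,
                  api_reconfigure_fail, api_ctx_ownership,
                  api_ctx_ownership_subgraph, api_capture_buffer_lifetime,
                  api_hud):
        _test()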
|
import json, requests
class WVPoster:
def __init__(self, host="localhost", port=None):
        if port is None:
self.url = "http://%s/sioput/" % host
else:
self.url = "http://%s:%d/sioput/" % (host, port)
def postToSIO(self, name, obj):
        url = self.url + name
        print("posting", name, obj, url)
        r = requests.post(url, data=json.dumps({'name': name, 'obj': obj}))
        print(r.status_code, r.reason)
def test():
vp = WVPoster()
vp.postToSIO("chat", {'name': 'don'})
if __name__ == '__main__':
test()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sessions.models import Session
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djgozokia.constants import TYPE_CHAT
from djgozokia.managers import GozokiaChatManager
@python_2_unicode_compatible
class GozokiaChat(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
timestamp = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='chats',
blank=False, null=False
)
session = models.ForeignKey(Session, blank=False, null=False)
rule = models.CharField(
choices=TYPE_CHAT, max_length=70, blank=True, null=True
)
type_rule = models.CharField(
choices=TYPE_CHAT, max_length=2, blank=True, null=True
)
text = models.TextField(verbose_name=u'Texto', blank=True, null=True)
status = models.IntegerField(blank=True, null=True)
objects = GozokiaChatManager()
@classmethod
def set_chat(cls, *args, **kwargs):
kwargs['user'] = get_user_model().objects.get(id=kwargs['user'])
kwargs['session'] = Session.objects.get(session_key=kwargs['session'])
GozokiaChat.objects.create(**kwargs)
@classmethod
def get_chat(cls, *args, **kwargs):
        return list(GozokiaChat.objects.filter(
            user__id=kwargs['user'],
            session__session_key=kwargs['session']
        ))
class Meta:
verbose_name = _('Gozokia Chat')
verbose_name_plural = _('Gozokia Chats')
ordering = ('timestamp',)
def __str__(self):
return u"[%s][User: %s][Rule: %s] %s: %s" % (self.timestamp, self.user, self.rule, self.type_rule, self.text)
|
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

# App-local imports (module paths assumed from context).
from .models import Excursion
from .serializers import ExcursionSerializer

@api_view(['GET'])
@permission_classes([IsAuthenticated])
def ExcursionDetail(request):
excursion = Excursion.objects.all()
data = ExcursionSerializer(excursion, many=True).data
return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def ExcursionList(request, id):
    try:
        excursion = Excursion.objects.get(id=id)
    except Excursion.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = ExcursionSerializer(excursion)
return Response(serializer.data)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def CreateExcursion(request):
serializer = ExcursionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PATCH'])
@permission_classes([IsAuthenticated])
def EditExcursion(request, id):
    try:
        excursion = Excursion.objects.get(id=id)
    except Excursion.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = ExcursionSerializer(excursion)
return Response(serializer.data)
elif request.method == 'PATCH':
serializer = ExcursionSerializer(excursion, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
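# Hypothetical URL wiring for the function-based views above (module path and
# route names are assumptions, not the project's actual urls.py):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('excursions/', views.ExcursionDetail),
#       path('excursions/<int:id>/', views.ExcursionList),
#       path('excursions/create/', views.CreateExcursion),
#       path('excursions/<int:id>/edit/', views.EditExcursion),
#   ]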
|
"""This module provides code that allows one to pickle the state of a
Python object to a dictionary.
The motivation for this is simple. The standard Python
pickler/unpickler is best used to pickle simple objects and does not
work too well for complex code. Specifically, there are two major
problems (1) the pickle file format is not easy to edit with a text
editor and (2) when a pickle is unpickled, it creates all the
necessary objects and sets the state of these objects.
Issue (2) might not appear to be a problem. However, often, the
determination of the entire 'state' of an application requires the
knowledge of the state of many objects that are not really of
concern to the user. The user would ideally like to pickle just what
they think is relevant. Now, given that the user is not going to save the
entire state of the application, the use of pickle is insufficient
since the state is no longer completely known (or worth knowing). The
default `Unpickler` recreates the objects and the typical
implementation of `__setstate__` is usually to simply update the
object's `__dict__` attribute. This is inadequate because the pickled
information is taken out of the real context when it was saved.
The `StatePickler` basically pickles the 'state' of an object into a
large dictionary. This pickled data may be easily unpickled and
modified on the interpreter or edited with a text editor
(`pprint.saferepr` is a friend). The second problem is also
eliminated. When this state is unpickled using `StateUnpickler`, what
you get is a special dictionary (a `State` instance). This allows one
to navigate the state just like the original object. It's up to the
user to create any new objects and set their states using this
information. This allows for a lot of flexibility while allowing one
to save and set the state of (almost) any Python object.
The `StateSetter` class helps set the state of a known instance. When
setting the state of an instance it checks to see if there is a
`__set_pure_state__` method that in turn calls `StateSetter.set`
appropriately.
Additionally, there is support for versioning. The class's version is
obtained from the `__version__` class attribute. This version along
with the versions of the bases of a class is embedded into the
metadata of the state and stored. By using `version_registry.py` a
user may register a handler for a particular class and module. When
the state of an object is set using `StateSetter.set_state`, then
these handlers are called in reverse order of their MRO. This gives
the handler an opportunity to upgrade the state depending on its
version. Builtin classes are not scanned for versions. If a class
has no version, then by default it is assumed to be -1.
Example::
>>> class A:
... def __init__(self):
... self.a = 'a'
...
>>> a = A()
>>> a.a = 100
>>> import state_pickler
>>> s = state_pickler.dumps(a) # Dump the state of `a`.
>>> state = state_pickler.loads_state(s) # Get the state back.
>>> b = state_pickler.create_instance(state) # Create the object.
>>> state_pickler.set_state(b, state) # Set the object's state.
>>> assert b.a == 100
Features
--------
- The output is a plain old dictionary so is easy to parse, edit etc.
- Handles references to avoid duplication.
- Gzips Numeric arrays when dumping them.
- Support for versioning.
Caveats
-------
- Does not pickle a whole bunch of stuff including code objects and
functions.
- The output is a pure dictionary and does not contain instances. So
using this *as it is* in `__setstate__` will not work. Instead
define a `__set_pure_state__` and use the `StateSetter` class or
the `set_state` function provided by this module.
Notes
-----
Browsing the code from XMarshaL_ and pickle.py proved useful for
ideas. None of the code is taken from there though.
.. _XMarshaL: http://www.dezentral.de/soft/XMarshaL
"""
# Author: Prabhu Ramachandran <[email protected]>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import base64
import sys
import types
import pickle
import gzip
from io import BytesIO, StringIO
import numpy
# Local imports.
from . import version_registry
from .file_path import FilePath
PY_VER = sys.version_info[0]
NumpyArrayType = type(numpy.array([]))
def gzip_string(data):
"""Given a string (`data`) this gzips the string and returns it.
"""
s = BytesIO()
writer = gzip.GzipFile(mode='wb', fileobj=s)
writer.write(data)
writer.close()
s.seek(0)
return s.read()
def gunzip_string(data):
"""Given a gzipped string (`data`) this unzips the string and
returns it.
"""
    if PY_VER == 2 or (bytes is not str and type(data) is bytes):
s = BytesIO(data)
else:
s = StringIO(data)
writer = gzip.GzipFile(mode='rb', fileobj=s)
data = writer.read()
writer.close()
return data
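# Round-trip example for the two helpers above (illustrative):
#   gunzip_string(gzip_string(b'hello')) == b'hello'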
class StatePicklerError(Exception):
pass
class StateUnpicklerError(Exception):
pass
class StateSetterError(Exception):
pass
######################################################################
# `State` class
######################################################################
class State(dict):
"""Used to encapsulate the state of an instance in a very
convenient form. The '__metadata__' attribute/key is a dictionary
that has class specific details like the class name, module name
etc.
"""
def __init__(self, **kw):
dict.__init__(self, **kw)
self.__dict__ = self
######################################################################
# `StateDict` class
######################################################################
class StateDict(dict):
"""Used to encapsulate a dictionary stored in a `State` instance.
The has_instance attribute specifies if the dict has an instance
embedded in it.
"""
def __init__(self, **kw):
dict.__init__(self, **kw)
self.has_instance = False
######################################################################
# `StateList` class
######################################################################
class StateList(list):
"""Used to encapsulate a list stored in a `State` instance. The
has_instance attribute specifies if the list has an instance
embedded in it.
"""
def __init__(self, seq=None):
if seq:
list.__init__(self, seq)
else:
list.__init__(self)
self.has_instance = False
######################################################################
# `StateTuple` class
######################################################################
class StateTuple(tuple):
"""Used to encapsulate a tuple stored in a `State` instance. The
has_instance attribute specifies if the tuple has an instance
embedded in it.
"""
def __new__(cls, seq=None):
if seq:
obj = super(StateTuple, cls).__new__(cls, tuple(seq))
else:
obj = super(StateTuple, cls).__new__(cls)
obj.has_instance = False
return obj
######################################################################
# `StatePickler` class
######################################################################
class StatePickler:
"""Pickles the state of an object into a dictionary. The
dictionary is itself either saved as a pickled file (`dump`) or
pickled string (`dumps`). Alternatively, the `dump_state` method
will return the dictionary that is pickled.
The format of the state dict is quite straightforward. Basic types
(bool, int, long, float, complex, None, string and unicode) are
represented as they are. Everything else is stored as a
dictionary containing metadata information on the object's type
etc. and also the actual object in the 'data' key. For example::
>>> p = StatePickler()
>>> p.dump_state(1)
1
>>> l = [1,2.0, None, [1,2,3]]
>>> p.dump_state(l)
{'data': [1, 2.0, None, {'data': [1, 2, 3], 'type': 'list', 'id': 1}],
'id': 0,
'type': 'list'}
Classes are also represented similarly. The state in this case is
obtained from the `__getstate__` method or from the `__dict__`.
Here is an example::
>>> class A:
... __version__ = 1 # State version
... def __init__(self):
... self.attribute = 1
...
>>> a = A()
>>> p = StatePickler()
>>> p.dump_state(a)
{'class_name': 'A',
'data': {'data': {'attribute': 1}, 'type': 'dict', 'id': 2},
'id': 0,
'initargs': {'data': (), 'type': 'tuple', 'id': 1},
'module': '__main__',
'type': 'instance',
'version': [(('A', '__main__'), 1)]}
When pickling data, references are taken care of. Numeric arrays
can be pickled and are stored as a gzipped base64 encoded string.
"""
def __init__(self):
self._clear()
type_map = {bool: self._do_basic_type,
complex: self._do_basic_type,
float: self._do_basic_type,
int: self._do_basic_type,
type(None): self._do_basic_type,
str: self._do_basic_type,
bytes: self._do_basic_type,
tuple: self._do_tuple,
list: self._do_list,
dict: self._do_dict,
NumpyArrayType: self._do_numeric,
State: self._do_state,
}
if PY_VER == 2:
type_map[long] = self._do_basic_type
type_map[unicode] = self._do_basic_type
self.type_map = type_map
def dump(self, value, file):
"""Pickles the state of the object (`value`) into the passed
file.
"""
try:
# Store the file name we are writing to so we can munge
# file paths suitably.
self.file_name = file.name
except AttributeError:
pass
pickle.dump(self._do(value), file)
def dumps(self, value):
"""Pickles the state of the object (`value`) and returns a
string.
"""
return pickle.dumps(self._do(value))
def dump_state(self, value):
"""Returns a dictionary or a basic type representing the
complete state of the object (`value`).
This value is pickled by the `dump` and `dumps` methods.
"""
return self._do(value)
######################################################################
# Non-public methods
######################################################################
def _clear(self):
# Stores the file name of the file being used to dump the
# state. This is used to change any embedded paths relative
# to the saved file.
self.file_name = ''
# Caches id's to handle references.
self.obj_cache = {}
# Misc cache to cache things that are not persistent. For
# example, object.__getstate__()/__getinitargs__() usually
# returns a copy of a dict/tuple that could possibly be reused
# on another object's __getstate__. Caching these prevents
        # some weird problems with the `id` of the object.
self._misc_cache = []
def _flush_traits(self, obj):
"""Checks if the object has traits and ensures that the traits
are set in the `__dict__` so we can pickle it.
"""
# Not needed with Traits3.
return
def _do(self, obj):
obj_type = type(obj)
key = self._get_id(obj)
if key in self.obj_cache:
return self._do_reference(obj)
elif obj_type in self.type_map:
return self.type_map[obj_type](obj)
elif isinstance(obj, tuple):
# Takes care of StateTuples.
return self._do_tuple(obj)
elif isinstance(obj, list):
# Takes care of TraitListObjects.
return self._do_list(obj)
elif isinstance(obj, dict):
# Takes care of TraitDictObjects.
return self._do_dict(obj)
elif hasattr(obj, '__dict__'):
return self._do_instance(obj)
def _get_id(self, value):
try:
key = hash(value)
except TypeError:
key = id(value)
return key
def _register(self, value):
key = self._get_id(value)
cache = self.obj_cache
idx = len(cache)
cache[key] = idx
return idx
def _do_basic_type(self, value):
return value
def _do_reference(self, value):
key = self._get_id(value)
idx = self.obj_cache[key]
return dict(type='reference', id=idx, data=None)
def _do_instance(self, value):
# Flush out the traits.
self._flush_traits(value)
# Setup the relative paths of FilePaths before dumping.
if self.file_name and isinstance(value, FilePath):
value.set_relative(self.file_name)
# Get the initargs.
args = ()
if hasattr(value, '__getinitargs__') and value.__getinitargs__:
args = value.__getinitargs__()
# Get the object state.
if hasattr(value, '__get_pure_state__'):
state = value.__get_pure_state__()
elif hasattr(value, '__getstate__'):
state = value.__getstate__()
else:
state = value.__dict__
state.pop('__traits_version__', None)
# Cache the args and state since they are likely to be gc'd.
self._misc_cache.extend([args, state])
# Register and process.
idx = self._register(value)
args_data = self._do(args)
data = self._do(state)
# Get the version of the object.
version = version_registry.get_version(value)
module = value.__class__.__module__
class_name = value.__class__.__name__
return dict(type='instance',
module=module,
class_name=class_name,
version=version,
id=idx,
initargs=args_data,
data=data)
def _do_state(self, value):
metadata = value.__metadata__
args = metadata.get('initargs')
state = dict(value)
state.pop('__metadata__')
self._misc_cache.extend([args, state])
idx = self._register(value)
args_data = self._do(args)
data = self._do(state)
return dict(type='instance',
module=metadata['module'],
class_name=metadata['class_name'],
version=metadata['version'],
id=idx,
initargs=args_data,
data=data)
def _do_tuple(self, value):
idx = self._register(value)
data = tuple([self._do(x) for x in value])
return dict(type='tuple', id=idx, data=data)
def _do_list(self, value):
idx = self._register(value)
data = [self._do(x) for x in value]
return dict(type='list', id=idx, data=data)
def _do_dict(self, value):
idx = self._register(value)
vals = [self._do(x) for x in value.values()]
data = dict(zip(value.keys(), vals))
return dict(type='dict', id=idx, data=data)
def _do_numeric(self, value):
idx = self._register(value)
if PY_VER > 2:
data = base64.encodebytes(gzip_string(numpy.ndarray.dumps(value)))
else:
data = base64.encodestring(gzip_string(numpy.ndarray.dumps(value)))
return dict(type='numeric', id=idx, data=data)
######################################################################
# `StateUnpickler` class
######################################################################
class StateUnpickler:
"""Unpickles the state of an object saved using StatePickler.
Please note that unlike the standard Unpickler, no instances of
any user class are created. The data for the state is obtained
from the file or string, reference objects are setup to refer to
the same state value, and this state is returned, usually in the
form of a dictionary. For example::
>>> class A:
... def __init__(self):
... self.attribute = 1
...
>>> a = A()
>>> p = StatePickler()
>>> s = p.dumps(a)
>>> up = StateUnpickler()
>>> state = up.loads_state(s)
>>> state.__class__.__name__
'State'
>>> state.attribute
1
>>> state.__metadata__
{'class_name': 'A',
'has_instance': True,
'id': 0,
'initargs': (),
'module': '__main__',
'type': 'instance',
'version': [(('A', '__main__'), -1)]}
Note that the state is actually a `State` instance and is
navigable just like the original object. The details of the
instance are stored in the `__metadata__` attribute. This is
highly convenient since it is possible for someone to view and
modify the state very easily.
"""
def __init__(self):
self._clear()
self.type_map = {'reference': self._do_reference,
'instance': self._do_instance,
'tuple': self._do_tuple,
'list': self._do_list,
'dict': self._do_dict,
'numeric': self._do_numeric,
}
def load_state(self, file):
"""Returns the state of an object loaded from the pickled data
in the given file.
"""
try:
self.file_name = file.name
except AttributeError:
pass
data = pickle.load(file)
result = self._process(data)
return result
def loads_state(self, string):
"""Returns the state of an object loaded from the pickled data
in the given string.
"""
data = pickle.loads(string)
result = self._process(data)
return result
######################################################################
# Non-public methods
######################################################################
def _clear(self):
# The file from which we are being loaded.
self.file_name = ''
# Cache of the objects.
self._obj_cache = {}
# Paths to the instances.
self._instances = []
# Caches the references.
self._refs = {}
# Numeric arrays.
self._numeric = {}
def _set_has_instance(self, obj, value):
if isinstance(obj, State):
obj.__metadata__['has_instance'] = value
elif isinstance(obj, (StateDict, StateList, StateTuple)):
obj.has_instance = value
def _process(self, data):
result = self._do(data)
# Setup all the Numeric arrays. Do this first since
# references use this.
for key, (path, val) in self._numeric.items():
if isinstance(result, StateTuple):
result = list(result)
exec('result%s = val'%path)
result = StateTuple(result)
else:
exec('result%s = val'%path)
# Setup the references so they really are references.
for key, paths in self._refs.items():
for path in paths:
x = self._obj_cache[key]
if isinstance(result, StateTuple):
result = list(result)
exec('result%s = x'%path)
result = StateTuple(result)
else:
exec('result%s = x'%path)
# if the reference is to an instance append its path.
if isinstance(x, State):
self._instances.append(path)
# Now setup the 'has_instance' attribute. If 'has_instance'
# is True then the object contains an instance somewhere
# inside it.
for path in self._instances:
pth = path
while pth:
ns = {'result': result}
exec('val = result%s'%pth, ns, ns)
self._set_has_instance(ns['val'], True)
end = pth.rfind('[')
pth = pth[:end]
# Now make sure that the first element also has_instance.
self._set_has_instance(result, True)
return result
def _do(self, data, path=''):
if type(data) is dict:
return self.type_map[data['type']](data, path)
else:
return data
def _do_reference(self, value, path):
id = value['id']
if id in self._refs:
self._refs[id].append(path)
else:
self._refs[id] = [path]
return State(__metadata__=value)
def _handle_file_path(self, value):
if (value['class_name'] == 'FilePath') and \
('file_path' in value['module']) and \
self.file_name:
data = value['data']['data']
fp = FilePath(data['rel_pth'])
fp.set_absolute(self.file_name)
data['abs_pth'] = fp.abs_pth
def _do_instance(self, value, path):
self._instances.append(path)
initargs = self._do(value['initargs'],
path + '.__metadata__["initargs"]')
# Handle FilePaths.
self._handle_file_path(value)
d = self._do(value['data'], path)
md = dict(type='instance',
module=value['module'],
class_name=value['class_name'],
version=value['version'],
id=value['id'],
initargs=initargs,
has_instance=True)
result = State(**d)
result.__metadata__ = md
self._obj_cache[value['id']] = result
return result
def _do_tuple(self, value, path):
res = []
for i, x in enumerate(value['data']):
res.append(self._do(x, path + '[%d]'%i))
result = StateTuple(res)
self._obj_cache[value['id']] = result
return result
def _do_list(self, value, path):
result = StateList()
for i, x in enumerate(value['data']):
result.append(self._do(x, path + '[%d]'%i))
self._obj_cache[value['id']] = result
return result
def _do_dict(self, value, path):
result = StateDict()
for key, val in value['data'].items():
result[key] = self._do(val, path + '["%s"]'%key)
self._obj_cache[value['id']] = result
return result
def _do_numeric(self, value, path):
if PY_VER > 2:
data = value['data']
if isinstance(data, str):
data = value['data'].encode('utf-8')
junk = gunzip_string(base64.decodebytes(data))
result = pickle.loads(junk, encoding='bytes')
else:
junk = gunzip_string(value['data'].decode('base64'))
result = pickle.loads(junk)
self._numeric[value['id']] = (path, result)
self._obj_cache[value['id']] = result
return result
######################################################################
# `StateSetter` class
######################################################################
class StateSetter:
"""This is a convenience class that helps a user set the
attributes of an object given its saved state. For instances it
checks to see if a `__set_pure_state__` method exists and calls
that when it sets the state.
"""
def __init__(self):
# Stores the ids of instances already done.
self._instance_ids = []
self.type_map = {State: self._do_instance,
StateTuple: self._do_tuple,
StateList: self._do_list,
StateDict: self._do_dict,
}
def set(self, obj, state, ignore=None, first=None, last=None):
"""Sets the state of the object.
This is to be used as a means to simplify loading the state of
an object from its `__setstate__` method using the dictionary
describing its state. Note that before the state is set, the
registered handlers for the particular class are called in
order to upgrade the version of the state to the latest
version.
Parameters
----------
        - obj : `object`
          The object whose state is to be set.
- state : `dict`
The dictionary representing the state of the object.
- ignore : `list(str)`
The list of attributes specified in this list are ignored
and the state of these attributes are not set (this excludes
the ones specified in `first` and `last`). If one specifies
a '*' then all attributes are ignored except the ones
specified in `first` and `last`.
- first : `list(str)`
The list of attributes specified in this list are set first (in
order), before any other attributes are set.
- last : `list(str)`
The list of attributes specified in this list are set last (in
order), after all other attributes are set.
"""
if (not isinstance(state, State)) and \
state.__metadata__['type'] != 'instance':
raise StateSetterError(
'Can only set the attributes of an instance.'
)
# Upgrade the state to the latest using the registry.
self._update_and_check_state(obj, state)
self._register(obj)
        # This weirdness is needed since the state's own `keys` might
# be set to something else.
state_keys = list(dict.keys(state))
state_keys.remove('__metadata__')
if first is None:
first = []
if last is None:
last = []
# Remove all the ignored keys.
if ignore:
if '*' in ignore:
state_keys = first + last
else:
for name in ignore:
try:
state_keys.remove(name)
                    except ValueError:
                        # list.remove raises ValueError when absent.
                        pass
# Do the `first` attributes.
for key in first:
state_keys.remove(key)
self._do(obj, key, state[key])
# Remove the `last` attributes.
for key in last:
state_keys.remove(key)
# Set the remaining attributes.
for key in state_keys:
self._do(obj, key, state[key])
# Do the last ones in order.
for key in last:
self._do(obj, key, state[key])
######################################################################
# Non-public methods.
######################################################################
def _register(self, obj):
idx = id(obj)
if idx not in self._instance_ids:
self._instance_ids.append(idx)
def _is_registered(self, obj):
return (id(obj) in self._instance_ids)
def _has_instance(self, value):
"""Given something (`value`) that is part of the state this
returns if the value has an instance embedded in it or not.
"""
if isinstance(value, State):
return True
elif isinstance(value, (StateDict, StateList, StateTuple)):
return value.has_instance
return False
def _get_pure(self, value):
"""Returns the Python representation of the object (usually a
list, tuple or dict) that has no instances embedded within it.
"""
result = value
if self._has_instance(value):
raise StateSetterError(
'Value has an instance: %s'%value
)
if isinstance(value, (StateList, StateTuple)):
result = [self._get_pure(x) for x in value]
if isinstance(value, StateTuple):
result = tuple(result)
elif isinstance(value, StateDict):
result = {}
for k, v in value.items():
result[k] = self._get_pure(v)
return result
def _update_and_check_state(self, obj, state):
"""Updates the state from the registry and then checks if the
        object and state have the same class.
"""
# Upgrade this state object to the latest using the registry.
# This is done before testing because updating may change the
# class name/module.
version_registry.registry.update(state)
# Make sure object and state have the same class and module names.
metadata = state.__metadata__
cls = obj.__class__
if (metadata['class_name'] != cls.__name__):
raise StateSetterError(
'Instance (%s) and state (%s) do not have the same class'\
' name!'%(cls.__name__, metadata['class_name'])
)
if (metadata['module'] != cls.__module__):
raise StateSetterError(
'Instance (%s) and state (%s) do not have the same module'\
' name!'%(cls.__module__, metadata['module'])
)
def _do(self, obj, key, value):
try:
attr = getattr(obj, key)
except AttributeError:
raise StateSetterError(
'Object %s does not have an attribute called: %s'%(obj, key)
)
if isinstance(value, (State, StateDict, StateList, StateTuple)):
# Special handlers are needed.
if not self._has_instance(value):
result = self._get_pure(value)
setattr(obj, key, result)
elif isinstance(value, StateTuple):
setattr(obj, key, self._do_tuple(getattr(obj, key), value))
else:
self._do_object(getattr(obj, key), value)
else:
setattr(obj, key, value)
def _do_object(self, obj, state):
self.type_map[state.__class__](obj, state)
def _do_instance(self, obj, state):
if self._is_registered(obj):
return
else:
self._register(obj)
metadata = state.__metadata__
if hasattr(obj, '__set_pure_state__'):
self._update_and_check_state(obj, state)
obj.__set_pure_state__(state)
elif 'tvtk_classes' in metadata['module']:
self._update_and_check_state(obj, state)
tmp = self._get_pure(StateDict(**state))
del tmp['__metadata__']
obj.__setstate__(tmp)
else:
# No need to update or check since `set` does it for us.
self.set(obj, state)
def _do_tuple(self, obj, state):
if not self._has_instance(state):
return self._get_pure(state)
else:
result = list(obj)
self._do_list(result, state)
return tuple(result)
def _do_list(self, obj, state):
if len(obj) == len(state):
for i in range(len(obj)):
if not self._has_instance(state[i]):
obj[i] = self._get_pure(state[i])
elif isinstance(state[i], tuple):
                    obj[i] = self._do_tuple(obj[i], state[i])
else:
self._do_object(obj[i], state[i])
else:
raise StateSetterError(
'Cannot set state of list of incorrect size.'
)
def _do_dict(self, obj, state):
for key, value in state.items():
if not self._has_instance(value):
obj[key] = self._get_pure(value)
elif isinstance(value, tuple):
                obj[key] = self._do_tuple(obj[key], value)
else:
self._do_object(obj[key], value)
######################################################################
# Internal Utility functions.
######################################################################
def _get_file_read(f):
if hasattr(f, 'read'):
return f
else:
return open(f, 'rb')
def _get_file_write(f):
if hasattr(f, 'write'):
return f
else:
return open(f, 'wb')
######################################################################
# Utility functions.
######################################################################
def dump(value, file):
"""Pickles the state of the object (`value`) into the passed file
(or file name).
"""
f = _get_file_write(file)
try:
StatePickler().dump(value, f)
finally:
f.flush()
if f is not file:
f.close()
def dumps(value):
"""Pickles the state of the object (`value`) and returns a string.
"""
return StatePickler().dumps(value)
def load_state(file):
"""Returns the state of an object loaded from the pickled data in
the given file (or file name).
"""
f = _get_file_read(file)
try:
state = StateUnpickler().load_state(f)
finally:
if f is not file:
f.close()
return state
def loads_state(string):
"""Returns the state of an object loaded from the pickled data
in the given string.
"""
return StateUnpickler().loads_state(string)
def get_state(obj):
"""Returns the state of the object (usually as a dictionary). The
    returned state may be used directly to set the state of the object
via `set_state`.
"""
s = dumps(obj)
return loads_state(s)
def set_state(obj, state, ignore=None, first=None, last=None):
StateSetter().set(obj, state, ignore, first, last)
set_state.__doc__ = StateSetter.set.__doc__
def update_state(state):
"""Given the state of an object, this updates the state to the
latest version using the handlers given in the version registry.
The state is modified in-place.
"""
version_registry.registry.update(state)
def create_instance(state):
"""Create an instance from the state if possible.
"""
if (not isinstance(state, State)) and \
('class_name' not in state.__metadata__):
raise StateSetterError('No class information in state')
metadata = state.__metadata__
class_name = metadata.get('class_name')
mod_name = metadata.get('module')
if 'tvtk_classes' in mod_name:
# FIXME: This sort of special-case is probably indicative of something
        # that needs more thought, plus it makes it tough to decide whether
# this component depends on tvtk!
from tvtk.api import tvtk
return getattr(tvtk, class_name)()
initargs = metadata['initargs']
if initargs.has_instance:
raise StateUnpicklerError('Cannot unpickle non-trivial initargs')
__import__(mod_name, globals(), locals(), class_name)
mod = sys.modules[mod_name]
cls = getattr(mod, class_name)
return cls(*initargs)
|
from pathlib import Path
NER_PATH = Path(__file__).parent
MODEL_PATH = NER_PATH / "model.pth"
MODEL_CONFIG_PATH = NER_PATH / "model_conf.json"
DATA_PATH = NER_PATH / "data"
BERT_INPUT_SEQUENCE_LENGTH = 128
LABELS = {
"B_technology": 0,
"I_technology": 1,
"B_malware": 2,
"I_malware": 3,
"B_company": 4,
"I_company": 5,
"B_organization": 6,
"I_organization": 7,
"B_product": 8,
"I_product": 9,
"B_attack_vector": 10,
"I_attack_vector": 11,
"B_cybervulnerability": 12,
"I_cybervulnerability": 13,
"O": 14,
"X": 15,
}
# Inverse mapping for index-to-label lookup.
_IDX2LABEL = {index: label for label, index in LABELS.items()}

def idx2label(idx):
    return _IDX2LABEL.get(idx)
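if __name__ == "__main__":
    # Quick sanity check of the mapping (illustrative only).
    assert idx2label(LABELS["O"]) == "O"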
|
#!/usr/bin/env python3
"""
genSBoxesFlexAEADv11.py - module to generate FlexAE SBoxes.
Usage:
genSBoxesFlexAEADv11.py
Options:
no options
"""
__author__ = 'Eduardo Marsola do Nascimento'
__copyright__ = 'Copyright 2019-10-20'
__credits__ = ''
__license__ = 'MIT'
__version__ = '0.01'
__maintainer__ = ''
__email__ = ''
__status__ = 'Development'
def multiGF8( input1, input2, modP8):
"""
    multiGF8 - multiply two numbers in a Galois field defined by a polynomial.
    Args:
        input1: first number to multiply.
        input2: second number to multiply.
        modP8: polynomial defining the Galois field.
Returns:
the multiplication result.
"""
state1 = 0x0
for i in range(8):
#print( 'i: {:2x}, input1: {:4x}, input2: {:4x}, state1: {:4x}'
# ''.format(i,input1,input2,state1))
if input2&0x1:
state1 ^= input1
input2 >>= 1
input1 <<= 1
if input1&0x100:
input1 ^= modP8
return state1
def invMultiGF8( input1, modP8):
"""
invMultiGF8 - calculate the inverse multiplicative of a number
on a Galois Field defined by a polynomial.
Args:
input1: number to find the inverse multiplicative.
        modP8: polynomial defining the Galois field.
Returns:
the inverse multiplicative.
"""
    invmulti = 1
    while invmulti < 0x100:
        state1 = multiGF8(invmulti, input1, modP8)
        if state1 == 1:
            return invmulti
        invmulti += 1
    return invmulti
def affTransf( input1, addConst, multConst ):
"""
    affTransf - performs an affine transformation using an additive and
    a multiplicative constant.
Args:
input1: number to transform.
addConst: the additive constant to be used.
multConst: the multiplicative constant to be used.
Returns:
the transformed number.
"""
state1 = multiGF8(multConst, input1, 0x101)
return state1^addConst
def genSBox( IP, MC, AC ):
"""
    genSBox - generates an SBox using the same method as proposed in the
    AES definition, but can use other parameters.
Args:
IP: Irreducible Polynomial to define the Galois Field.
MC: Multiplicative Constant to be used on the affine transformation.
AC: Additive Constant to be used on the affine transformation.
Returns:
        an SBox defined by the parameters.
"""
SBox = [0]*0x100
SBox[0]=AC
for i in range(0x1,0x100):
SBox[i] = invMultiGF8( i, IP)
SBox[i] = affTransf(SBox[i], AC, MC )
return SBox
def parityOf( n ):
"""
    parityOf - determine whether the number of set bits is odd or even.
    Args:
        n: the number to be tested.
    Returns:
        0 - if the number has an even number of set bits.
        1 - if the number has an odd number of set bits.
"""
parity = 0
while( n ):
parity ^= (n&1)
n >>= 1
return parity
def genInvSBox( SBox ):
"""
genInvSBox - generates inverse of an SBox.
Args:
SBox: The SBox to generate the inverse.
Returns:
The inverse SBox.
"""
InvSBox = [0]*0x100
for i in range(0x100):
InvSBox[ SBox[i] ] = i
return InvSBox
def tweakSBox( SBox ):
"""
    tweakSBox - tweak the SBox to make it stronger for the FlexAE cipher.
    Args:
        SBox: the SBox to be tweaked.
Returns:
a tweaked SBox.
"""
TweakSBox = [0]*0x100
for i in range(0x100):
left = (SBox[i] & 0xF0) >> 4
right = (SBox[i] & 0x0F)
TweakSBox[i] = SBox[i] ^ \
((0x00 if parityOf(left) else 0x0F) + \
(0x00 if parityOf(right) else 0xF0))
return TweakSBox
def printSBox( name, SBox ):
"""
printSBox - print the SBox on screen.
Args:
name: the SBox name.
SBox: the SBox to be printed.
Returns:
nothing.
"""
print(' """')
print(' {}: '.format(name))
print(' - 0 1 2 3 4 5 6 7 8 9 A B C D E F')
for i in range(0x10):
s=' {:1X} -'.format(i)
for j in range(0x10):
s = s + ' {:02X}'.format(SBox[i*0x10+j])
print(s)
print(' """')
def printFlexAEADv11SBoxClass( SBoxDict ):
"""
printFlexAEADv11SBoxClass - print the FlexAEADv11SBox Class Code on screen.
Args:
        SBoxDict: dictionary mapping names to the SBoxes used in the class.
Returns:
nothing.
"""
print('class FlexAEADv11SBox:')
for name, SBox in SBoxDict.items():
printSBox( name, SBox)
s = ' {} = ['.format(name)
for i in range(0x100):
if (i%0x10)==0:
print(s)
s=' '
s = s + '0x{:02X}'.format(SBox[i])+','
print(s)
print(' ]')
if __name__ == "__main__":
"""
generate the FlexAE SBoxes
"""
# track execution time
from datetime import datetime
startTime=datetime.now()
#
    # Create an Empty Dictionary
SBoxDict={}
# SBox0
    IP0, MC0, AC0 = 0b100011011, 0x1F, 0x63
SBox0 = genSBox(IP0,MC0,AC0)
SBoxDict['dirSBox0' ] = SBox0
SBoxDict['invSBox0' ] = genInvSBox(SBox0)
# SBox1
    IP1, MC1, AC1 = 0b100011101, 0x3D, 0x95
SBox1 = genSBox(IP1,MC1,AC1)
SBoxDict['dirSBox1' ] = SBox1
SBoxDict['invSBox1' ] = genInvSBox(SBox1)
# SBox2
    IP2, MC2, AC2 = 0b100101011, 0x3B, 0xA6
SBox2 = genSBox(IP2,MC2,AC2)
SBoxDict['dirSBox2' ] = SBox2
SBoxDict['invSBox2' ] = genInvSBox(SBox2)
# SBox3
    IP3, MC3, AC3 = 0b100101101, 0x37, 0xD9
SBox3 = genSBox(IP3,MC3,AC3)
SBoxDict['dirSBox3' ] = SBox3
SBoxDict['invSBox3' ] = genInvSBox(SBox3)
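    # Illustrative sanity check (not in the original script): each inverse
    # SBox must undo its direct SBox.
    for _k in range(4):
        _dir = SBoxDict['dirSBox%d' % _k]
        _inv = SBoxDict['invSBox%d' % _k]
        assert all(_inv[_dir[_i]] == _i for _i in range(0x100))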
#
# Print the Class FlexAESbox
#
printFlexAEADv11SBoxClass(SBoxDict)
# track execution time
finishTime=datetime.now()
print( '\nStart: {}, Finish:{}, Running Time: {}'
''.format(startTime.replace(microsecond=0),
finishTime.replace(microsecond=0),
finishTime-startTime))
################### END #################
|
# Copyright 2020-2021 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
def get_name(pathname, output_file, partial_match=False):
pathname = str(pathname)
output_file = str(output_file)
try:
candidates = []
for file in os.listdir(pathname):
if not os.path.isfile(os.path.join(pathname, file)):
continue # Directory
file_components = file.split("-", 1)
if len(file_components) <= 1:
continue
step_index, name = file_components
step_index = int(step_index)
if partial_match:
if output_file not in name:
continue
else:
if output_file != name:
continue
candidates.append((step_index, name))
candidates.sort(key=lambda x: x[0], reverse=True)
file = f"{candidates[0][0]}-{candidates[0][1]}"
return candidates[0][0], os.path.join(pathname, file)
except Exception:
return "", os.path.join(pathname, output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Returns the output_file name with the highest index."
)
parser.add_argument("--path", "-p", required=True, help="Path")
parser.add_argument(
"--output_file",
"-o",
required=True,
help="File name to search for, i.e. 1.X 2.X 3.X, then the script will return <path>/3.X",
)
# This whole thing is a contrived way to say "partial match"
parser.add_argument(
"--include_only",
"-I",
action="store_true",
default=False,
help="If enabled the matching is done for inclusion, i.e. the passed output_file is a string that is included in the file name to be matched. -o exam will return matches like: exam.txt and example.txl.",
)
args = parser.parse_args()
path = args.path
output_file = args.output_file
include_only = args.include_only
print(get_name(path, output_file, include_only))
|
import tensorflow as tf
def apply_jitter(point_cloud_batch, label_cloud_batch):
# Jitter point and label clouds.
noise = tf.random.uniform(
tf.shape(label_cloud_batch), -0.005, 0.005, dtype=tf.float64
)
point_cloud_batch += noise[:, :, :3]
label_cloud_batch += tf.cast(noise, tf.float32)
return point_cloud_batch, label_cloud_batch
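if __name__ == "__main__":
    # Smoke test with random tensors (shapes and dtypes are assumptions:
    # xyz point clouds in float64, per-point label clouds in float32).
    points = tf.random.uniform((2, 1024, 3), dtype=tf.float64)
    labels = tf.zeros((2, 1024, 5), dtype=tf.float32)
    points, labels = apply_jitter(points, labels)
    print(points.shape, labels.shape)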
|
# NLP written by GAMS Convert at 04/21/18 13:54:24
#
# Equation counts
# Total E G L N X C B
# 14 1 2 11 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 11 11 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 116 106 10 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,100),initialize=0)
m.obj = Objective(expr=m.x1*m.x6 - m.x1 - m.x6 + m.x2*m.x7 - 2*m.x2 - 2*m.x7 + m.x3*m.x8 - 3*m.x3 - 3*m.x8 + m.x4*m.x9
- 4*m.x4 - 4*m.x9 + m.x5*m.x10 - 5*m.x5 - 5*m.x10, sense=minimize)
m.c1 = Constraint(expr= m.x1 + 7*m.x2 + 5*m.x3 + 5*m.x4 - 6*m.x6 - 3*m.x7 - 3*m.x8 + 5*m.x9 - 7*m.x10 <= 80)
m.c2 = Constraint(expr= - 3*m.x1 + 3*m.x2 + 8*m.x3 + 7*m.x4 - 9*m.x5 - 7*m.x6 - 9*m.x7 + 8*m.x9 - 7*m.x10 <= 57)
m.c3 = Constraint(expr= m.x1 + m.x3 + 3*m.x4 + 8*m.x5 + 9*m.x6 + 9*m.x8 - 7*m.x9 - 8*m.x10 <= 92)
m.c4 = Constraint(expr= - m.x1 - 2*m.x2 + 2*m.x3 + 9*m.x5 + 5*m.x6 - 3*m.x7 + m.x8 - m.x9 - 5*m.x10 <= 55)
m.c5 = Constraint(expr= - 5*m.x1 + 8*m.x2 - 8*m.x3 + 3*m.x5 + 4*m.x7 - 5*m.x8 - 2*m.x9 + 9*m.x10 <= 76)
m.c6 = Constraint(expr= 4*m.x1 - m.x2 + 6*m.x3 - 4*m.x4 - 7*m.x5 - 8*m.x6 - 7*m.x7 + 6*m.x8 - 2*m.x9 - 9*m.x10 <= 14)
m.c7 = Constraint(expr= 7*m.x2 + 4*m.x3 + 9*m.x5 - 6*m.x8 - 5*m.x9 - 5*m.x10 <= 47)
m.c8 = Constraint(expr= - 5*m.x1 - m.x2 + 7*m.x4 - m.x5 + 2*m.x6 + 5*m.x7 - 8*m.x8 - 5*m.x9 + 2*m.x10 <= 51)
m.c9 = Constraint(expr= - 4*m.x1 - 7*m.x2 - 9*m.x4 + 2*m.x5 + 6*m.x6 - 9*m.x7 + m.x8 - 5*m.x9 <= 36)
m.c10 = Constraint(expr= - 2*m.x1 + 6*m.x2 + 8*m.x4 - 6*m.x5 + 8*m.x6 + 8*m.x7 + 5*m.x8 + 2*m.x9 - 7*m.x10 <= 92)
m.c11 = Constraint(expr= m.x1 + m.x2 + m.x3 - 2*m.x4 + m.x5 + m.x6 + m.x7 + 4*m.x8 + m.x9 + 3*m.x10 <= 200)
m.c12 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 >= 1)
m.c13 = Constraint(expr= m.x6 + m.x7 + m.x8 + m.x9 + m.x10 >= 2)
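# Illustrative solve of the model above (assumes a local NLP solver such as
# Ipopt is installed; not part of the GAMS-converted file).
if __name__ == '__main__':
    results = SolverFactory('ipopt').solve(m, tee=True)
    m.obj.display()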
|
'''
Created on Aug 14, 2018
@author: Burkhard A. Meier
'''
import tkinter as tk
from PIL import Image, ImageTk
class CanvasAndCar():
def __init__(self, win):
self.canvas = tk.Canvas(win, width=700, height=500) # create a tkinter canvas
self.canvas.pack() # use the pack() manager
self.canvas.update() # call update() or winfo_ won't work
self.car_pos_x = self.canvas.winfo_width() // 2 # center of canvas
self.car_pos_y = self.canvas.winfo_height() // 2
self.img_file = 'car.png' # our .png image file located in the same folder as this .py file
self.place_car() # call the method to position the car
def place_car(self):
image = Image.open(self.img_file) # open the image
self.car_image = ImageTk.PhotoImage(image) # pass the image into PhotoImage. Use self.car_image or image might not show
self.car_canvas = self.canvas.create_image(self.car_pos_x, self.car_pos_y,
image=self.car_image)
win = tk.Tk() # create a tkinter window
car_game = CanvasAndCar(win) # create CanvasAndCar and save class instance in variable or image might not show
win.mainloop() # start the tkinter main gui event loop
|
# Directory configuration
model_save_dir = 'saved_models'
cache_dir = '/ext/A/cache'
# Data configuration
'''A single pinyin syllable lasts roughly 0.1 s at minimum;
with hop_s = 0.016 s, about 6.25 hops cover 0.1 s.'''
stft_fea = {
    'name': 'stft',
    'kwargs': {
        'fft_s': 0.128,      # fft_s: STFT window length, in seconds
        'hop_s': 0.016,      # hop_s: hop length between windows, in seconds
        'target_sr': 8000,   # target sample rate; audio is resampled to this rate
    }
}
mel_fea = {
    'name': 'mel',
    'kwargs': {
        'fft_s': 0.128,      # fft_s: STFT window length, in seconds
        'hop_s': 0.016,      # hop_s: hop length between windows, in seconds
        'target_sr': 8000,   # target sample rate; audio is resampled to this rate
        'n_mels': 128        # mel feature dimension
    }
}
feature = stft_fea
label_type = 'pinyin'
# Training configuration
epochs = 500
batch_size = 64
|
#!/usr/bin/env python3
# Error severity levels
MAJOR = 0
MINOR = 1
C_TYPES = [
"int",
"long",
"short",
"char",
"void",
"float",
"double",
"struct",
"bool",
"[a-zA-Z]*_t",
"FILE",
"DIR",
"WIN",
"static",
"unsigned",
"int8_t",
"int16_t",
"int32_t",
"int64_t",
"uint8_t",
"uint16_t",
"uint32_t",
"uint64_t",
"union",
"enum",
"typedef",
"register",
"auto",
"volatile",
"extern",
"const"
]
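# Illustrative use (an assumption about the checker, not taken from it): the
# entries are regex fragments, so they can be joined into one pattern:
#   import re
#   TYPE_RE = re.compile(r"^\s*(" + "|".join(C_TYPES) + r")\b")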
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Moodipy.UserSummary import Person
from screeninfo import get_monitors
class ChoosePlaylistPG(QMainWindow):
def __init__(self):
max_screen_width = 1536
min_screen_width = 1000
max_screen_height = 864
min_screen_height = 610
super().__init__()
self.title = "Choose Playlist"
self.desktop = QApplication.desktop()
self.left = 0
self.top = 0
temp_width = get_monitors()[0].width * .5
self.width = max(min(temp_width, max_screen_width), min_screen_width)
temp_height = get_monitors()[0].height * .5
self.height = max(min(temp_height, max_screen_height), min_screen_height)
self.initUI()
def initUI(self):
self.sw = (self.width / 1000)
self.sh = (self.height / 610)
self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)  # x, y, width, height
self.setStyleSheet("background-color: #96bef0")
self.currItem = None
self.mood_window()
self.show()
def mood_window(self):
title = QLabel("Choose Up To Three Playlists", self)
title.setGeometry(self.sw*20, self.sh*10, self.sw*690, self.sh*45)
title.setStyleSheet("background-color:#96bef0; font-weight: bold; color: white")
title.setFont(QFont('Arial Rounded MT Bold', self.sw*30))
Person.setLabel(self, "", False, 0, self.sh * 100, self.width, self.sh * 20, 0, "white", False, 'Segoe UI')
Person.moodLabel = Person.setMoodLabel(Person, Person.currentmood)
subtitle = QLabel("Base Your "+Person.moodLabel+" Playlist On The Songs In One Of Your Public Playlists", self)
subtitle.setGeometry(self.sw * 21, self.sh * 60, self.sw * 999, self.sh * 30)
subtitle.setStyleSheet("background-color:#96bef0; font-weight: bold; color: white")
subtitle.setFont(QFont('Arial Rounded MT Bold', self.sw * 13))
self.listWidget = QListWidget(self)
self.listWidget.setGeometry(0, self.sh * 121, self.width, self.sh * 440)
scrollBar = QScrollBar(self)
self.listWidget.setVerticalScrollBar(scrollBar)
self.listWidget.itemSelectionChanged.connect(self.check_selection)
self.listWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.listWidget.setStyleSheet("background-color: #96bef0;color: white")
self.listWidget.itemSelectionChanged.connect(self.on_change)
self.nextbtn = QPushButton("Create Playlist", self)
self.nextbtn.setStyleSheet("background-color: #c2dcfb; font-weight: bold; border: 5px solid; border-color: #ebf3fb; hover { background-color : white}")
self.nextbtn.setGeometry(self.sw * 620, self.sh * 565, self.sw * 180, self.sh * 40)
self.nextbtn.clicked.connect(self.on_click)
self.backbtn = QPushButton("Go Back", self)
self.backbtn.setStyleSheet("background-color: #c2dcfb; font-weight: bold; border: 5px solid; border-color: #ebf3fb; hover { background-color : white}")
self.backbtn.setGeometry(self.sw * 200, self.sh * 565, self.sw * 180, self.sh * 40)
self.backbtn.clicked.connect(self.on_click2)
self.newbtn = QPushButton("Discover Page", self)
self.newbtn.setStyleSheet("background-color: #c2dcfb; font-weight: bold; border: 5px solid; border-color: #ebf3fb; hover { background-color : white}")
self.newbtn.setGeometry(self.sw*800, self.sh*20, self.sw*180, self.sh*30)
self.newbtn.clicked.connect(self.on_click3)
self.playlists = Person.playlists
num = 1
for playlist in self.playlists:
            if num < 10:
                # Pad single-digit numbers with extra spaces so the prefix
                # stripping in on_change() lines up (5 vs. 3 characters).
                playlistTitle = QListWidgetItem(str(num) + "     " + playlist)
            else:
                playlistTitle = QListWidgetItem(str(num) + "   " + playlist)
playlistTitle.setFont(QFont('Arial Rounded MT Bold', self.sw * 20))
self.listWidget.addItem(playlistTitle)
num = num + 1
def check_selection(self):
items = self.listWidget.selectedItems()
if len(items) > 3:
items[0].setSelected(False)
def on_change(self):
self.currItem = self.listWidget.selectedItems()
rows = self.listWidget.selectedIndexes()
currItems = self.currItem
names = []
for i, item in enumerate(currItems):
curr = str(item.text())
num = rows[i].row() + 1
length = len(str(num))
if num < 10:
length = length + 5
else:
length = length + 3
curr = curr[length:]
names.append(curr)
Person.playlistNames = names
def on_click2(self):
from Moodipy.MoodAnalyzerGUI import MoodAnalyzerPg
self.nextbtn.setEnabled(False)
self.backbtn.setEnabled(False)
self.newbtn.setEnabled(False)
self.nextPg = MoodAnalyzerPg()
self.nextPg.show()
self.hide()
def on_click3(self):
from Moodipy.DiscoverPgGUI import DiscoverPG
self.nextbtn.setEnabled(False)
self.backbtn.setEnabled(False)
self.newbtn.setEnabled(False)
self.nextPg = DiscoverPG()
self.nextPg.show()
self.hide()
def on_click(self):
self.nextbtn.setEnabled(False)
self.backbtn.setEnabled(False)
self.newbtn.setEnabled(False)
        if self.currItem is None:
self.pop_up()
else:
from Moodipy.LoadChoiceGUI import LoadChoicePg
self.nextPg = LoadChoicePg()
self.nextPg.show()
self.hide()
def pop_up(self):
msg = QMessageBox.question(self, 'Error', 'Please select a playlist.', QMessageBox.Ok)
self.nextbtn.setEnabled(True)
self.backbtn.setEnabled(True)
self.newbtn.setEnabled(True)
|
from setuptools import setup, find_packages
package_name = 'svo'
setup(
name=package_name,
version='0.1.0',
packages=find_packages(),
install_requires=[
'setuptools',
'nltk',
],
author='Andreas Klintberg',
maintainer='Andreas Klintberg',
description='NLTK SVO',
license='Apache License, Version 2.0',
test_suite='pytest'
)
|
# Counts the cycles needed to serve a sequence of read (R) and write (W)
# requests, where up to `processosSimultaneos` reads can share one cycle
# but writes run alone.
while True:
try:
sequencia = input()
processosSimultaneos = int(input())
ciclos = 0
        # Full groups of `processosSimultaneos` reads take one cycle each.
        ciclos += sequencia.count('R' * processosSimultaneos)
        sequencia = sequencia.replace('R' * processosSimultaneos, '')
for pos, char in enumerate(sequencia):
if pos == len(sequencia) - 1:
ciclos += 1
else:
if char == 'R':
if sequencia[pos + 1] == 'W':
ciclos += 1
elif char == 'W':
ciclos += 1
print(ciclos)
except EOFError:
break
|
from typing import List
from base import version
class Solution:
@version("28ms, 16.4mb")
def nextGreatestLetter(self, letters: List[str], target: str) -> str:
l, r = 0, len(letters)
while l < r:
m = l + (r - l) // 2
if letters[m] > target:
r = m
else:
l = m + 1
return letters[l % len(letters)]
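# Usage sketch (illustrative, not part of the original solution): the binary
# search returns the smallest letter strictly greater than target, wrapping
# around when every letter is <= target.
#   Solution().nextGreatestLetter(["c", "f", "j"], "a")  # -> "c"
#   Solution().nextGreatestLetter(["c", "f", "j"], "j")  # -> "c" (wrap-around)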
|
import os
import numpy as np
import pandas as pd
from statsmodels.sandbox.stats.multicomp import multipletests
from tqdm import tqdm
from .processing_schemes import Processor
from .utilities import recursivedict, get_outdir_path
class Scorer(Processor):
def __init__(self, settings=None):
super().__init__(settings=settings)
self.annotations['type'] = 'Scorer'
self.df = pd.DataFrame()
self.y_dict = {}
    def get_y_dict(self, dataset):
        """Map each sample id to its integer target value."""
        ids = dataset.data[dataset.id_col].tolist()
        targets = [int(val) for val in dataset.data[dataset.target].tolist()]
        self.y_dict = dict(zip(ids, targets))
def get_score(self, gmt=None):
pass
class ScoreLPOCV(Scorer):
def __init__(self, settings=None):
        super().__init__(settings=settings)
def get_score(self, gmt=None):
infolder = get_outdir_path(self.settings)
if gmt:
self.annotations['gmt'] = gmt.suffix
infolder += 'hypothesis_predictions/'
file_names = [gmt.suffix + '.csv']
else:
infolder += 'background_predictions/'
file_names = os.listdir(infolder)
file_names = [x for x in file_names if '.csv' in x]
auc_dict_list = []
for fn in tqdm(file_names):
df = pd.read_csv(infolder + fn)
pairs = get_pair_auc_dict(df, self.y_dict)
out = pd.DataFrame(pairs)
cols = out.columns.tolist()
if not self.annotations.get('gmt'):
out = out.rename(columns={a: int(a) for a in cols})
cols = out.columns.tolist()
out = out[list(sorted(cols))]
out = dict(out.mean())
auc_dict_list.append(out)
auc_df = pd.DataFrame(auc_dict_list)
cols = auc_df.columns.tolist()
if not self.annotations.get('gmt'):
auc_df = auc_df.rename(columns={a: int(a) for a in cols})
cols = auc_df.columns.tolist()
auc_df = auc_df[list(sorted(cols))]
self.annotations['score_metric'] = 'AUC'
if self.annotations.get('gmt'):
self.annotations['score_type'] = 'hypothesis'
self.annotations['gmt'] = gmt.suffix
else:
self.annotations['score_type'] = 'background'
self.df = auc_df
def get_stats(self, gmt, dataset):
folder = get_outdir_path(self.settings) + 'score/'
scored_predictions = pd.read_csv(folder + gmt.suffix + '_auc.csv')
background = pd.read_csv(folder + 'background_auc.csv')
bcg_cols = background.columns.tolist()
bcg_cols = [int(x) for x in bcg_cols]
d_cols = dataset.data_cols
scores = []
self.annotations['gmt'] = gmt.suffix
for link, desc, g_list, m_list in gmt.generate(dataset_genes=d_cols):
gene_list = g_list + m_list
intersect = g_list
if len(intersect) < 1:
continue
s = {}
s['id'] = link
s['description'] = desc
s['AUC'] = scored_predictions[link].tolist()[0]
s['n_genes'] = len(gene_list)
s['intersect'] = len(intersect)
b_idx = (np.abs(np.array(bcg_cols) - len(intersect))).argmin()
b_col = str(bcg_cols[b_idx])
bcg_vals = background[b_col].tolist()
bcg_vals_t = [x for x in bcg_vals if x >= s['AUC']]
s['p_value'] = len(bcg_vals_t) / len(bcg_vals)
scores.append(s)
df_scores = pd.DataFrame(scores)
p_values = df_scores['p_value'].tolist()
mt = multipletests(p_values, alpha=0.05, method='fdr_bh')
df_scores['adjusted_p'] = mt[1]
df_scores = df_scores.sort_values(by=['adjusted_p', 'AUC'],
ascending=[True, False])
df_scores = df_scores[['id', 'description', 'n_genes', 'intersect',
'AUC', 'p_value', 'adjusted_p']]
folder = "/".join(folder.split('/')[:-2] + ['stats', ''])
filepath = folder + gmt.suffix + '_stats.csv'
self.df = df_scores
self.annotations['stats_metric'] = 'AUC'
if self.annotations.get('gmt'):
self.annotations['stats_type'] = 'hypothesis'
self.annotations['gmt'] = gmt.suffix
else:
self.annotations['stats_type'] = 'background'
def get_pair_auc_dict(df, y_dict):
pair_auc_dict = recursivedict()
predict_meta_cols = ['pair_index', 'ID', 'class']
predict_data_cols = [x for x in df.columns.tolist()
if x not in predict_meta_cols]
for col in predict_data_cols:
cols_f = predict_meta_cols + [col]
        df_t = df.loc[:, cols_f].copy()
df_t.loc[:, 'true'] = [y_dict[x] for x in df_t.loc[:, 'ID'].tolist()]
sort_cols = ['pair_index', 'true', col, 'class']
cols_ascending = [True, True, False, True]
df_t.sort_values(sort_cols,
ascending=cols_ascending,
inplace=True)
df_t.drop_duplicates(subset=['ID', 'pair_index'],
keep='first',
inplace=True)
pair_idx_list = list(set(df_t['pair_index'].tolist()))
for pair_idx in pair_idx_list:
df_p = df_t.loc[df_t['pair_index'] == pair_idx, :]
sample_class_list = df_p['class'].tolist()
lo = sample_class_list[0]
hi = sample_class_list[1]
if lo == hi:
probabilities_list = df_p[col].tolist()
if lo == 0:
probabilities_list = list(reversed(probabilities_list))
lo = probabilities_list[0]
hi = probabilities_list[1]
auc = calculate_pair_auc(lo, hi)
pair_auc_dict[col][pair_idx] = auc
return pair_auc_dict
def calculate_pair_auc(lo, hi):
if lo == hi:
auc = 0.5
elif lo < hi:
auc = 1
else:
auc = 0
return auc
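# Worked examples (illustrative values): a pair contributes 1 to the AUC when
# the higher-class sample receives the higher probability, 0 when it does not,
# and 0.5 on a tie.
#   calculate_pair_auc(0.2, 0.8)  # -> 1
#   calculate_pair_auc(0.8, 0.2)  # -> 0
#   calculate_pair_auc(0.5, 0.5)  # -> 0.5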
|
import torch
__all__ = [
"compute_ent",
"compute_kld",
]
@torch.no_grad()
def compute_ent(confidences: torch.Tensor, reduction="mean", eps=1e-12) -> torch.Tensor:
"""
Args:
confidences (Tensor): a tensor of shape [N, K] of predicted confidences.
reduction (str): specifies the reduction to apply to the output.
- none: no reduction will be applied,
- mean: the sum of the output will be divided by
the number of elements in the output.
eps (float): small value to avoid evaluation of log(0).
Returns:
ent (Tensor): entropies for given confidences.
- a tensor of shape [N,] when reduction is "none",
- a tensor of shape [,] when reduction is "mean".
"""
assert reduction in [
"none", "mean",
], f"Unknown reduction = \"{reduction}\""
ent = (confidences * torch.log(eps + confidences)).sum(1).neg() # [N,]
if reduction == "mean":
ent = ent.mean() # [,]
return ent
@torch.no_grad()
def compute_kld(confidences: torch.Tensor, reduction="mean") -> torch.Tensor:
"""
Args:
confidences (Tensor): a tensor of shape [N, M, K] of predicted confidences from ensembles.
reduction (str): specifies the reduction to apply to the output.
- none: no reduction will be applied,
- mean: the sum of the output will be divided by
the number of elements in the output.
Returns:
kld (Tensor): KL divergences for given confidences from ensembles.
- a tensor of shape [N,] when reduction is "none",
- a tensor of shape [,] when reduction is "mean".
"""
assert reduction in [
"none", "mean",
], f"Unknown reduction = \"{reduction}\""
kld = torch.zeros(confidences.size(0), device=confidences.device) # [N,]
ensemble_size = confidences.size(1)
if ensemble_size > 1:
pairs = []
for i in range(ensemble_size):
for j in range(ensemble_size):
pairs.append((i, j))
for (i, j) in pairs:
if i == j:
continue
kld += torch.nn.functional.kl_div(
confidences[:, i, :].log(),
confidences[:, j, :],
reduction="none", log_target=False,
).sum(1) # [N,]
kld = kld / (ensemble_size * (ensemble_size - 1))
if reduction == "mean":
kld = kld.mean() # [,]
return kld
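# Minimal usage sketch (not part of the original module): random softmax
# confidences for a batch of 4 samples, 3 ensemble members and 5 classes.
if __name__ == "__main__":
    confidences = torch.softmax(torch.randn(4, 3, 5), dim=-1)  # [N, M, K]
    print(compute_ent(confidences.mean(dim=1)))        # mean entropy, scalar
    print(compute_kld(confidences, reduction="none"))  # per-sample KLDs, [N,]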
|
# coding: utf-8
def hook(event):
print 'mail handle'
print event
|
import asyncio
from typing import Generator
import os
import pytest
from fastapi.testclient import TestClient
from tortoise.contrib.test import finalizer, initializer
from app.setup import create_app
from app.models import User, Content
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_FILE = BASE_DIR + "/test_data/Code_dPWe4YTTWI.png"
@pytest.fixture()
def client() -> Generator:
app = create_app()
initializer(["app.models"], "sqlite://:memory:")
with TestClient(app) as c:
yield c
finalizer()
@pytest.fixture()
def event_loop(client: TestClient) -> Generator:
yield client.task.get_loop()
def test_uploading_file(client: TestClient, event_loop: asyncio.AbstractEventLoop):
data = {"username": "michaelC", "password": "secret"}
response = client.post("/auth/sign_up", json=data)
data = {"api_key": response.json()["api_key"], "testing": True}
file = {"sharex": open(TEST_FILE, "rb")}
response = client.post("/sharex/", files=file, data=data)
assert response
assert response.ok
user = event_loop.run_until_complete(User.filter(api_key=data["api_key"]).first())
user_contents = event_loop.run_until_complete(user.contents)
assert user_contents
content = event_loop.run_until_complete(Content.filter(user=user).first())
assert content
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.DiDispStaMuonMonitoring_cfi import DiDispStaMuonMonitoring
hltDiDispStaMuonMonitoring = DiDispStaMuonMonitoring.clone()
hltDiDispStaMuonMonitoring.FolderName = cms.string('HLT/EXO/DiDispStaMuon/DoubleL2Mu23NoVtx_2Cha/')
hltDiDispStaMuonMonitoring.histoPSet.lsPSet = cms.PSet(
nbins = cms.uint32 ( 250 ),
xmin = cms.double( 0.),
xmax = cms.double( 2500.),
)
hltDiDispStaMuonMonitoring.histoPSet.muonPtPSet = cms.PSet(
nbins = cms.uint32(25),
xmin = cms.double(-0.5),
xmax = cms.double(99.5),
)
hltDiDispStaMuonMonitoring.histoPSet.muonEtaPSet = cms.PSet(
nbins = cms.uint32(24),
xmin = cms.double(-2.4),
xmax = cms.double(2.4),
)
hltDiDispStaMuonMonitoring.histoPSet.muonPhiPSet = cms.PSet(
nbins = cms.uint32(24),
xmin = cms.double(-3.2),
xmax = cms.double(3.2),
)
hltDiDispStaMuonMonitoring.histoPSet.muonDxyPSet = cms.PSet(
nbins = cms.uint32(25),
xmin = cms.double(-60.),
xmax = cms.double(60.),
)
hltDiDispStaMuonMonitoring.muons = cms.InputTag("displacedStandAloneMuons")
hltDiDispStaMuonMonitoring.nmuons = cms.uint32(2)
hltDiDispStaMuonMonitoring.muonSelection = cms.PSet(
general = cms.string("hitPattern.numberOfValidMuonHits > 16 && pt > 0 && normalizedChi2 < 10 "),
#general = cms.string("hitPattern.muonStationsWithValidHits > 1 && pt > 5 && normalizedChi2 < 10"),
pt = cms.string("pt > 2 "),
dxy = cms.string("dxy > 5 "),
)
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.andOr = cms.bool( False )
#hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.dbLabel = cms.string("ExoDQMTrigger") # it does not exist yet, we should consider the possibility of using the DB, but as it is now it will need a label per path !
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.andOrHlt = cms.bool(True)# True:=OR; False:=AND
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.hltInputTag = cms.InputTag( "TriggerResults::HLT" )
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_DoubleL2Mu23NoVtx_2Cha_v*") # HLT_ZeroBias_v*
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.errorReplyHlt = cms.bool( False )
hltDiDispStaMuonMonitoring.numGenericTriggerEventPSet.verbosityLevel = cms.uint32(0)
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.andOr = cms.bool( False )
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.andOrHlt = cms.bool(True)# True:=OR; False:=AND
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.dcsInputTag = cms.InputTag( "scalersRawToDigi" )
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.dcsPartitions = cms.vint32 ( 24, 25, 26, 27, 28, 29 ) # 24-27: strip, 28-29: pixel, we should add all other detectors !
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.andOrDcs = cms.bool( False )
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.errorReplyDcs = cms.bool( True )
hltDiDispStaMuonMonitoring.denGenericTriggerEventPSet.verbosityLevel = cms.uint32(1)
|
"""
simple example
Shows the basic steps for connecting to Iven Cloud:
1) Activate device
2) Send data
"""
import sys
import ivencloud
import Adafruit_DHT
# credentials
secret_key = "<your secret key>"
device_uid = "<your device uid>"
# server address
hostname = "staging.iven.io"
ivencloud.set_cloud_address(hostname) # defaults to staging.iven.io
# activate device
activate_request = ivencloud.activate_device(secret_key, device_uid)
if activate_request.error is None and activate_request.status == 200:
print "Activation Successful, api key: {0}".format(activate_request.api_key)
else:
if activate_request.error is not None:
print "Error on activation with code: {0}, message: {1}".format(activate_request.error.iven_code, activate_request.error.message)
print "Error on activation status: {0}, description: {1}".format(activate_request.status, activate_request.description)
# Prepare data to send
sensor = Adafruit_DHT.DHT11
pin = 4
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!')
sys.exit(1)
data = {
'temperature': temperature,
'humidity': humidity
}
print "Sending temp: {0}, hum: {1}".format(data['temp'], data['hum'])
# Send data to cloud
response = ivencloud.send_data(data)
if response.error is None and response.status == 200:
print "Data is sent successfully"
if response.task is not None:
print "There is a task, iven_code: {0}, task value: {1}".format(response.task.iven_code, response.task.value)
task_response = ivencloud.task_done(response.task.iven_code)
if task_response.error is None and task_response.status == 200:
print "Task done is sent successfully"
else:
if response.error is not None:
print "Error on send data with code: {0}, message: {1}".format(response.error.iven_code, response.error.message)
print "Error on send data status: {0}".format(response.status)
|
from __future__ import annotations
from collections import defaultdict
class Facade:
_systems = defaultdict(list)
def __init__(self, sys1: Subsys1, sys2: Subsys2) -> None:
self.sys1 = sys1
self.sys2 = sys2
@classmethod
    def get_sys_ops(cls, sys: Subsys1 | Subsys2) -> list | str:
        key = sys.__class__.__name__
        if key in cls._systems:
            return cls._systems[key]
        else:
            return f"System : {key} does not exist, add it using add_sys_ops"
@classmethod
    def add_sys_ops(cls, sys: Subsys1 | Subsys2, op) -> None:
        key = sys.__class__.__name__
        cls._systems[key].append(op)
@classmethod
def get_all(cls) -> dict:
return cls._systems
class Subsys1:
def op1(self) -> str:
return " Get ready to start Subsystem 1"
def op2(self) -> str:
return "Subsystem 1 started"
class Subsys2:
def op1(self) -> str:
return " Get ready to start Subsystem 2"
def op2(self) -> str:
return "Subsystem 2 started"
class Client:
def __init__(self, facade: Facade) -> None:
print(facade.get_all())
if __name__ == "__main__":
s1 = Subsys1()
s2 = Subsys2()
facade = Facade(s1, s2)
facade.add_sys_ops(s1, s1.op1())
facade.add_sys_ops(s1, s1.op2())
facade.add_sys_ops(s2, s2.op1())
facade.add_sys_ops(s2, s2.op2())
operations = facade.get_sys_ops(s1)
print(operations)
Client(facade) |
import numpy as np
import argparse
import Repo.Network as Network
import Repo.MnistHandler as mh
import Repo.GradientBasedOptimizers as gbo
# import Repo.CommonUtilityFunctions as cuf
parser = argparse.ArgumentParser()
parser.add_argument("--lr",help="initial learning rate for gradient descent based algorithms",type=float)
parser.add_argument("--momentum",help="momentum to be used by momentum based algorithms",type=float)
parser.add_argument("--num_hidden",
help="number of hidden layers - this does not include the input layer and the output layer",
type=int)
parser.add_argument("--sizes",
help="a comma separated list for the size of each hidden layer",
type=str)
parser.add_argument("--activation",
help="the choice of activation function - valid values are tanh/sigmoid",
type=str)
parser.add_argument("--loss",
help="possible choices are squared error[sq] or cross entropy loss[ce]",
type=str)
parser.add_argument("--opt",
help="possible choices are adam, nag, gd and momentum",
type=str)
parser.add_argument("--batch_size",
help="the batch size to be used",
type=int)
parser.add_argument("--anneal",
help="if true the algorithm should halve the learning rate if at any epoch the validation loss decreases and then restart that epoch",
type=bool)
parser.add_argument("--save_dir",
help="the directory in which the pickled model should be saved - by model we mean all the weights and biases of the network",
type=str)
parser.add_argument("--expt_dir",
help="the directory in which the log files will be saved",
type=str)
parser.add_argument("--mnist",
help="path to the mnist data in pickeled format 2",
type=str)
parser.add_argument("--epochs",
help="Maximum no of epochs on data ",
type=int)
parser.add_argument("--lamda",
help="Regularization rate. By default 0",
type=float)
#parser.add_argument("--momentum",help="momentum to be used by momentum based algorithms",type=float)
args=parser.parse_args()
if args.lr is None:
    print("learning rate not provided, using default value of 0.1")
    args.lr = 0.1
if args.momentum is None:
    print("momentum not provided, using default value of 0.1")
    args.momentum = 0.1
if args.num_hidden is None:
    print("Please provide number of hidden layers and try again. try using option -h for help")
    args.num_hidden = 3
if args.sizes is None:
    print("Please provide the size of each hidden layer and try again. try using option -h for help")
    args.sizes = '100,100,100'
args.sizes = [int(i) for i in args.sizes.split(',')]
if args.activation is None:
    print("Activations of hidden layers not provided. assuming sigmoid by default")
    args.activation = "sigmoid"
if args.activation == "sigmoid":
args.activation="LogSigmoid"
elif args.activation== "tanh":
args.activation="TanSigmoid"
elif args.activation == "relu":
args.activation = "ReLU"
else :
print("Invalid activations of hidden layers provided. assuming sigmoid by default")
args.activation = "LogSigmoid"
args.activation = [args.activation for i in args.sizes]
if args.loss == "sq":
args.loss= "SquaredError"
outAct="PureLin"
else:
args.loss= "CrossEntropy"
outAct="SoftMax"
if args.opt is None:
    args.opt = "adam"
elif args.opt not in ['adam', 'nag', 'gd', 'momentum']:
    print("Invalid optimizer provided. try using option -h for help")
if args.batch_size is None:
    print("batch size not provided. assuming 200 by default")
    args.batch_size = 200
if args.anneal is None:
    print("Assuming anneal to be False by default")
    args.anneal = False
if args.expt_dir is None:
    print("saving log to /tmp by default")
    args.expt_dir = "/tmp"
# if(args.size==None ):
# print("Please provide size of each hidden layers and try again. try using option -h for help")
# exit(0)
if args.epochs is None:
    print("Maximum no of epochs not provided. assuming 200 by default")
    args.epochs = 200
if args.lamda is None:
    print("L2 regularization rate not provided. assuming 0 by default (no regularization)")
    args.lamda = 0.0
# trainData,valData,testData=mh.readMNISTData('/home/hari/Desktop/mnist.pkl.gz')
trainData,valData,testData=mh.DataLoadMaster('/home/hari/Documents/CourseWork/DeepLearning/Assignment/DeepLearningAssignment1/DeepLearningV2/Data/train.csv',
'/home/hari/Documents/CourseWork/DeepLearning/Assignment/DeepLearningAssignment1/DeepLearningV2/Data/val.csv',
'/home/hari/Documents/CourseWork/DeepLearning/Assignment/DeepLearningAssignment1/DeepLearningV2/Data/val.csv')
trainLabels=trainData[1]
trainData=np.transpose(trainData[0])
trainTargets = np.transpose(np.eye(len(np.unique(trainLabels)))[trainLabels])
valLabels=valData[1]
valData=np.transpose(valData[0])
valTargets = np.transpose(np.eye(len(np.unique(valLabels)))[valLabels])
testLabels=testData[1]
testData=np.transpose(testData[0])
testTargets = np.transpose(np.eye(len(np.unique(testLabels)))[testLabels])
net = Network.Network(args.sizes,args.activation,outAct,args.loss,trainData.shape[0],trainTargets.shape[0],args.expt_dir)
if args.opt=="nag":
net=gbo.NestrovAccelaratedGradientDecent(net,trainData,trainTargets,
int(trainTargets.shape[1]/args.batch_size)*args.epochs,args.batch_size,
eta=args.lr,gamma=args.momentum,valData=valData,valTargets=valTargets,
testData=testData,testTargets=testTargets,annel=args.anneal,
regularization=True,lamda=args.lamda)
elif args.opt=="adam":
net=gbo.AdamOptimizer(net,trainData,trainTargets,
int(trainTargets.shape[1]/args.batch_size)*args.epochs,args.batch_size,
eta=args.lr,valData=valData,valTargets=valTargets,
testData=testData,testTargets=testTargets,annel=args.anneal,
regularization=True,lamda=args.lamda)
elif args.opt=="momentum":
net=gbo.MiniBatchGradientDecentWithMomentum(net,trainData,trainTargets,
int(trainTargets.shape[1]/args.batch_size)*args.epochs,args.batch_size,
eta=args.lr,gamma=args.momentum,valData=valData,valTargets=valTargets,
testData=testData,testTargets=testTargets,annel=args.anneal,
regularization=True,lamda=args.lamda)
else :
# net = gbo.MiniBatchGradientDecent(net, trainData, trainTargets,
# int(trainTargets.shape[1] / args.batch_size) * args.epochs,
# args.batch_size,
# eta=args.lr, valData=valData, valTargets=valTargets,
# testData=testData, testTargets=testTargets, annel=args.anneal,
# regularization=True, lamda=args.lamda)
net = gbo.BatchGradientDecent(net, trainData, trainTargets, args.lr, args.epochs,
valData=valData, valTargets=valTargets,
testData=testData, testTargets=testTargets, annel=args.anneal)
newTestData = mh.LoadDataFile('/home/hari/Documents/CourseWork/DeepLearning/Assignment/DeepLearningAssignment1/DeepLearningV2/Data/test.csv')
x,_ = net.FeedForward(valData)
testPred = np.argmax(x ,0)
print('Here I go')
print(testPred)
for x in testPred:
    print(x)
print('Here I stop')
# validPrediction,_=net.FeedForward(valData)
# validPrediction = np.argmax(validPrediction, 0)
# testPrediction,_=net.FeedForward(testData)
# print("Final Test ER:",cuf.accuracy(testPrediction,testTargets))
# testPrediction = np.argmax(testPrediction, 0)
# outfile = open(args.expt_dir+'/valid_prediction.txt', "w+")
# outfile.write("\n".join(map(str,validPrediction)))
# outfile.close()
# outfile = open(args.expt_dir+'/test_prediction.txt', "w+")
# outfile.write("\n".join(map(str,testPrediction)))
# outfile.close
#
# if(args.save_dir==None):
# print("No save dir mentioned. Program completed without saving")
# else:
# weights= [None] * (len(args.sizes)+1)
# biases = [None] * (len(args.sizes)+1)
# for i in range(0,len(net.weights)):
# weights[i],biases[i]=cuf.DisIntergrateBiasFromWeights(net.weights[i],biasRequired=True)
# with open(args.save_dir+"/FinalTrainedModel.pickle", "w") as output_file:
# pickle.dump([weights,biases], output_file) |
import json
import torch
from sklearn.model_selection import train_test_split
curr_path = '../'
def load_dictionary():
index2word = torch.load(curr_path + 'data/dict.bin')
word2index = {v: k for k, v in index2word.items()}
return index2word, word2index
def load_data_v2():
i2w, w2i = load_dictionary()
data_o = open(curr_path + 'data/classify_data.seg', 'r', encoding='utf-8').readlines()
data_t = open(curr_path + 'data/classify.tag', 'r', encoding='utf8').readlines()
data = []
for line, tag in zip(data_o, data_t):
line = line.rstrip('\n').split()
line = list(map(lambda token: w2i.get(token, 1), line))
tag = tag.strip().split()
data.append([line, tag])
    labels = [int(l.rstrip('\n')) for l in open(curr_path + 'data/classify_label.txt').readlines()]
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
json.dump({'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test},
open(curr_path + 'data/classify.tag.json', 'w'))
if __name__ == '__main__':
load_data_v2()
|
from .tprint import table_print, str_pad, str_len |
"""
File: quadratic_solver.py
Name:張文銓
-----------------------
This program should implement a console program
that asks 3 inputs (a, b, and c)
from users to compute the roots of equation:
ax^2 + bx + c = 0
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
import math
def main():
"""
This function helps user to check if an equation has one real root, two real roots or no real root
and also calculate the roots if it has real roots.
"""
print("stanCode Quadratic Solver!")
a = int(input(""))
b = int(input(""))
c = int(input(""))
    discriminant = b ** 2 - (4 * a * c)
    # the sign of the discriminant tells how many real roots the equation has
    if discriminant > 0:
        # the equation has two distinct real roots
        x = ((-b) + math.sqrt(discriminant)) / (2 * a)
        y = ((-b) - math.sqrt(discriminant)) / (2 * a)
        print("Two roots: " + str(x) + ", " + str(y))
    elif discriminant == 0:
        # the equation has exactly one real root
        x = (-b) / (2 * a)
        print("One root: " + str(x))
    elif discriminant < 0:
        # the equation has no real roots
        print("No real roots")
# DO NOT EDIT CODE BELOW THIS LINE #
if __name__ == "__main__":
main()
|
# Copyright 2019 The KRules Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RuleConst(object):
"""
Basic consts
"""
PROCESS_ID = "process_id"
ORIGIN_ID = "origin_id"
RULENAME = "name"
DESCRIPTION = "description"
SUBSCRIBE_TO = "subscribe_to"
RULEDATA = "data"
FILTERS = "filters"
PROCESSING = "processing"
FINALLY = "finally"
TYPE = "type"
SUBJECT = "subject"
SECTION = "section"
FUNC_NAME = "function"
PAYLOAD = "payload"
ARGS = "args"
KWARGS = "kwargs"
RETURNS = "returns"
PASSED = "passed"
EVENT_INFO = "event_info"
SOURCE = "source"
GOT_ERRORS = "got_errors"
EXCEPTION = "exception"
EXC_INFO = "exc_info"
EXC_EXTRA_INFO = "exc_extra_info"
PAYLOAD_DIFFS = "payload_diffs"
class ConfigKeyConst(object):
TYPE_TOPICS_PREFIX = "TYPE_TOPICS_PREFIX"
class ProcEventsLevel(object):
DISABLED = 0
LIGHT = 1
FULL = 2
|
#!/usr/bin/env python
"""
These are used during testing to check the results.
Hazen 02/17
"""
import numpy
import storm_analysis.sa_library.sa_h5py as saH5Py
def verifyDriftCorrection(actual_drift_fname, measured_drift_fname):
"""
Return maximum difference between drift correction files.
"""
actual_drift = numpy.loadtxt(actual_drift_fname)
measured_drift = numpy.loadtxt(measured_drift_fname)
diffs = []
for i in range(4):
diffs.append(numpy.max(numpy.abs(actual_drift[:,i] - measured_drift[:,i])))
return diffs
def verifyIsCloseEnough(number1, number2, margin = 0.05):
"""
    Return True if number1 is within margin of number2.
"""
max_diff = number2 * margin
return (abs(number1 - number2) < max_diff)
def verifyNumberLocalizations(h5_name):
"""
Return the number of localizations in a HDF5 file.
"""
n_locs = None
with saH5Py.SAH5Py(h5_name) as h5:
assert(h5.isAnalysisFinished())
n_locs = h5.getNLocalizations()
return n_locs
def verifyZWasCalculated(h5_name):
"""
    Return True if the Z values are not all identical.
"""
locs = None
with saH5Py.SAH5Py(h5_name) as h5:
locs = h5.getLocalizations(fields = ["z"])
return (numpy.std(locs["z"]) > 1.0e-6)
|
# -*- coding: utf-8 -*-
import pymysql
import xlwt
import logging
import os
from twisted.enterprise import adbapi
import traceback
from yzwspider.yzw.items import YzwItem
logger = logging.getLogger("YzwPipeline")
class YzwPipeline(object):
def __init__(self, pool, settings):
self.dbpool = pool
self.settings = settings
self.excelstyle = self.getExcelStyle()
excel_path = os.getcwd() if settings.get("EXCEL_FILE_PATH") == '.' else settings.get("EXCEL_FILE_PATH")
excel_file = settings.get("EXCEL_FILE_NAME") + '.xls'
self.excelFile = os.path.join(excel_path, excel_file)
@classmethod
def from_settings(cls, settings):
params = dict(
host=settings.get("HOST"),
port=settings.get("PORT"),
db=settings.get("DATABASE"),
user=settings.get("USER"),
passwd=settings.get("PASSWORD"),
charset=settings.get("CHARSET"),
cursorclass=pymysql.cursors.DictCursor
)
db_connect_pool = None
if settings.get("MYSQL"):
YzwPipeline.__test_mysql_settings(**params)
db_connect_pool = adbapi.ConnectionPool('pymysql', **params)
obj = cls(db_connect_pool, settings)
return obj
def _create_table(self, txn):
try:
sql = "DROP TABLE IF EXISTS `{0}`".format(self.settings.get("TABLE"))
re = txn.execute(sql)
sql = self.settings.get("CREATE_TEBLE_SQL").format(self.settings.get("TABLE"))
re = txn.execute(sql)
logger.info("创建表:'%s'成功." % self.settings.get('TABLE'))
except Exception as e:
logger.critical(traceback.format_exc())
def open_spider(self, spider):
if self.dbpool:
obj = self.dbpool.runInteraction(self._create_table)
else:
self.newExcelFile()
def close_spider(self, spider):
try:
if self.dbpool:
self.dbpool.close()
logger.info("数据已存储于数据库" + self.settings.get("DATABASE") + ", 表:" + self.settings.get("TABLE"))
else:
self.wbk.save(self.excelFile)
logger.info("excel文件已存储于 " + self.excelFile)
except Exception as e:
logger.error(traceback.format_exc())
def process_item(self, item, spider):
try:
if self.dbpool:
self.process_mysql(item)
else:
self.process_excel(item)
except Exception as e:
logger.critical(traceback.format_exc())
def process_mysql(self, item):
result = self.dbpool.runInteraction(self.insert, item)
        # attach an errback to result to capture insert error messages
result.addErrback(self.error, item)
def insert(self, cursor, item):
insert_sql = self.__make_sql(item)
cursor.execute(insert_sql)
def error(self, reason, item):
        # skip duplicate primary key errors (MySQL error code 1062)
if reason.value.args[0] != 1062:
logger.error(
"insert to database err: ---------\n" + reason.getErrorMessage() + f"sql=\n{self.__make_sql(item)}")
def process_excel(self, item):
        # alternate row styling: odd rows use the banded fill, even rows a plain centered style
        if self.row & 1:
            style = self.excelstyle
        else:
            style = xlwt.XFStyle()
            alignment = xlwt.Alignment()
            alignment.horz = xlwt.Alignment.HORZ_CENTER
            style.alignment = alignment
for i in range(0, YzwItem.fields.__len__()):
ret = self.sheet.write(self.row, i, item[self.list[i]], style)
self.row += 1
def getExcelStyle(self):
style = xlwt.XFStyle()
pattern = xlwt.Pattern()
pattern.pattern = xlwt.Pattern.SOLID_PATTERN
pattern.pattern_fore_colour = 42
style.pattern = pattern
borders = xlwt.Borders()
borders.top = 1
borders.bottom = 1
borders.top_colour = 17
borders.bottom_colour = 17
style.borders = borders
alignment = xlwt.Alignment()
alignment.horz = xlwt.Alignment.HORZ_CENTER
style.alignment = alignment
return style
def getExcelTitleStyle(self):
style = xlwt.XFStyle()
pattern = xlwt.Pattern()
pattern.pattern = xlwt.Pattern.SOLID_PATTERN
pattern.pattern_fore_colour = 17
style.pattern = pattern
fnt = xlwt.Font()
fnt.name = u'黑体'
fnt.height = 0X00D9
fnt.colour_index = 1
fnt.bold = True
style.font = fnt
alignment = xlwt.Alignment()
alignment.horz = xlwt.Alignment.HORZ_CENTER
style.alignment = alignment
return style
def newExcelFile(self):
self.wbk = xlwt.Workbook()
self.sheet = self.wbk.add_sheet('Sheet1')
self.row = 1
self.sheet.col(0).width = 3000
self.sheet.col(1).width = 8000
self.sheet.col(2).width = 3000
self.sheet.col(3).width = 10000
self.sheet.col(4).width = 7000
self.sheet.col(5).width = 10000
self.sheet.col(6).width = 7000
self.sheet.col(7).width = 3000
self.sheet.col(8).width = 5000
self.sheet.col(9).width = 11000
self.sheet.col(10).width = 4000
self.sheet.col(11).width = 7000
self.sheet.col(12).width = 2000
self.sheet.col(13).width = 2000
self.sheet.col(14).width = 2000
self.sheet.col(15).width = 2000
self.sheet.col(16).width = 6000
self.sheet.col(17).width = 10000
self.list = ['id', '招生单位', '院校特性', '院系所', '专业', '研究方向', '学习方式', '拟招生人数'
, '业务课一', '业务课二', '外语', '政治', '所在地', '专业代码', '指导老师', '门类', '一级学科', '备注']
style = self.getExcelTitleStyle()
for i in range(0, YzwItem.fields.__len__()):
self.sheet.write(0, i, self.list[i], style)
@staticmethod
def __test_mysql_settings(**params):
try:
db = pymysql.connect(**params)
db.close()
except Exception as e:
logger.critical(str(e))
os._exit(1)
def __make_sql(self, item):
sql = f"""insert into `{self.settings.get('TABLE')}`
(`id`, `招生单位`, `院校特性`, `院系所`, `专业`,`研究方向`,`学习方式`, `拟招生人数`, `备注`, `业务课一`, `业务课二`, `外语`, `政治`, `所在地`, `专业代码`,`指导老师`, `门类`, `一级学科` )
VALUES ('{item['id']}','{item['招生单位']}','{item['院校特性']}','{item['院系所']}','{item['专业']}','{item['研究方向']}',
'{item['学习方式']}','{item['拟招生人数']}','{item['备注']}','{item['业务课一']}','{item['业务课二']}','{item['外语']}',
'{item['政治']}','{item['所在地']}', '{item['专业代码']}', '{item['指导老师']}','{item['门类']}','{item['一级学科']}')"""
        # handle escaped quote characters
sql = sql.replace("\\'", "\\\\'")
return sql
|
import numpy as np
import pandas as ps
import math
import sys
import random
### Negative windows
# prediction window and labeled window length in seconds
directory = sys.argv[1]
xSize = int(sys.argv[2]) # xwindow size
ySize = int(sys.argv[3]) # ywindow size
uID = sys.argv[4] # participant ID
norm = sys.argv[5] # normalisation type: zscore/minmax
timecodes = ps.read_csv(directory + 'timestamps.csv')
startRecording = int(timecodes['start'][0])
endRecording = int(timecodes['end'][0])
listStart = timecodes['start'][1:].tolist()
listEnd = timecodes['end'][1:].tolist()
listHand = timecodes['hand'][1:].tolist()
listLabel = timecodes['label'][1:].tolist()
listStage = timecodes['stage'][1:].tolist()
for i in range(0,len(listStart)):
listStart[i] = int(startRecording + math.floor(listStart[i])*60*1000 + (listStart[i] - math.floor(listStart[i]))*100*1000)
listEnd[i] = int(startRecording + math.floor(listEnd[i])*60*1000 + (listEnd[i] - math.floor(listEnd[i]))*100*1000)
dfTimestamps = ps.DataFrame(list(zip(listStart,listEnd,listHand,listLabel,listStage)), columns=['start','end','hand','label','stage'])
dfTimestamps = dfTimestamps.replace(np.nan,'',regex=True)
dfTimestamps = dfTimestamps.loc[(dfTimestamps['label'] != '')]
def GenerateNegativeWindows():
sensorDataAcc = ps.read_csv(directory + f'acc{norm}.csv')
sensorDataGyr = ps.read_csv(directory + f'gyr{norm}.csv')
sensorDataHrm = ps.read_csv(directory + f'hrm{norm}.csv')
sensorDataPpg = ps.read_csv(directory + 'ppgLabeled.csv') # ppg processed separately
window = ps.DataFrame()
for i in range(0,len(dfTimestamps)):
check = True
wIndex = i + 1
while check:
                mark = random.randrange(startRecording // 1000, endRecording // 1000) * 1000  # randrange requires integer bounds
# print(mark)
if mark < startRecording + xSize * 1000:
continue
            for j in dfTimestamps.itertuples():
                if mark > j[1] and mark < j[2]:
                    # print('during behaviour period ' + str(wIndex) + ' ')
                    break
                elif mark + ySize * 1000 > j[1] and mark + ySize * 1000 < j[2]:
                    # print('behaviour overlap ' + str(wIndex) + ' ' + str(j[1]))
                    break
            else:
                # accept the mark only when it overlaps no labeled behaviour period
                check = False
window = sensorDataAcc.loc[(sensorDataAcc['timestamp'] >= mark - xSize * 1000) & (sensorDataAcc['timestamp'] <= mark + ySize * 1000)]
window.drop('hand',axis=1,inplace=True)
window.to_csv(f'{directory}windows/{uID}_acc_-_{wIndex}.csv', index=False)
# print('acc windows generated')
window = sensorDataGyr.loc[(sensorDataGyr['timestamp'] >= mark - xSize * 1000) & (sensorDataGyr['timestamp'] <= mark + ySize * 1000)]
window.drop('hand',axis=1,inplace=True)
window.to_csv(f'{directory}windows/{uID}_gyr_-_{wIndex}.csv', index=False)
# print('gyr windows generated')
window = sensorDataHrm.loc[(sensorDataHrm['timestamp'] >= mark - xSize * 1000) & (sensorDataHrm['timestamp'] <= mark + ySize * 1000)]
window.drop('hand',axis=1,inplace=True)
window.to_csv(f'{directory}windows/{uID}_hrm_-_{wIndex}.csv', index=False)
# print('hrm windows generated')
# window = sensorDataPpg.loc[(sensorDataPpg['timestamp'] >= mark - xSize * 1000) & (sensorDataPpg['timestamp'] <= mark + ySize * 1000)]
# window.drop('hand',axis=1,inplace=True)
# window.to_csv(f'{directory}windows/P{uID}_ppg_{wIndex}_-_.csv', index=False)
GenerateNegativeWindows()
# # generate positive and negative windows of length
# def GeneratePositiveWindows(sensorType):
# sensorData = ps.read_csv(directory + sensorType + 'Labeled.csv')
# window = ps.DataFrame()
# wIndex = 1
# lastTuple = (listStart[0],listEnd[0])
# for i in dfTimestamps.itertuples():
# if i[1] - xSize * 1000 < startRecording or i[4] == -1:
# continue
# # If behaviour not as long as y window
# # if i[2]-i[1] < ySize * 1000:
# # continue
# window = sensorData.loc[(sensorData['timestamp'] >= i[1] - xSize * 1000) & (sensorData['timestamp'] <= i[2] + ySize * 1000)]
# print(i)
# if i[1] - lastTuple[1] > xSize * 1000:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_clean' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_clean.csv', index=False)
# else:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_dirty' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_dirty.csv', index=False)
# wIndex += 1
# # f'Window_{wIndex}_{i[4]}_clean.csv'
# GeneratePositiveWindows('acc') |
"""Salesforce Event Log
Retrieve hourly Salesforce event log files from the API
"""
import csv
import io
import json
import shutil
import os
import tempfile
from simple_salesforce import Salesforce
from runners.helpers import db, log
from runners.helpers.dbconfig import ROLE as SA_ROLE
from connectors.utils import yaml_dump
CONNECTION_OPTIONS = [
{
'name': 'username',
'title': "Salesforce Username",
'prompt': "The username for API authentication",
'type': 'str',
'placeholder': "[email protected]",
'required': True,
},
{
'name': 'password',
'title': "Salesforce Password",
'prompt': "The password for API authentication",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'security_token',
'title': "Salesforce Security Token",
'prompt': "The Security Token for API authentication, associated with the user",
'type': 'str',
'secret': True,
'required': True,
},
{
'name': 'environment',
'title': "Salesforce Environment",
'prompt': "Choose between Test (Sandbox) environment and Production environment",
'type': 'select',
'options': [
{'value': 'prod', 'label': "Production"},
{'value': 'test', 'label': "Test"},
],
'secret': True,
'required': True,
},
]
LANDING_TABLE_COLUMNS = [('raw', 'VARIANT')]
def connect(connection_name, options):
table_name = f'salesforce_events_{connection_name}'
landing_log_table = f'data.{table_name}_connection'
comment = yaml_dump(module='salesforce_event_log', **options)
db.create_table(
name=landing_log_table,
cols=LANDING_TABLE_COLUMNS,
comment=comment,
stage_file_format='TYPE = JSON STRIP_OUTER_ARRAY = TRUE',
stage_copy_options='PURGE = TRUE',
)
db.execute(f'GRANT INSERT, SELECT ON {landing_log_table} TO ROLE {SA_ROLE}')
return {
'newStage': 'finalized',
'newMessage': "Salesforce Event Log ingestion table created!",
}
def ingest(table_name, options):
landing_table = f'data.{table_name}'
username = options['username']
password = options['password']
security_token = options['security_token']
environment_raw = options['environment']
environment = 'test' if environment_raw == 'test' else None
# We will fetch EventLogFiles where the LogDate is greater than the maximum
# timestamp seen in all previous EventLogFiles
start_time = db.fetch_latest(landing_table, col='raw:TIMESTAMP_DERIVED')
if start_time is None:
start_time = '1900-01-01T00:00:00.000Z'
# TODO: Support more auth methods, including client certificates.
sf = Salesforce(
username=username,
password=password,
security_token=security_token,
client_id='SnowAlert',
domain=environment,
)
event_log_soql_query = (
f'SELECT id, eventtype, logdate '
f'FROM eventlogfile '
f'WHERE interval=\'Hourly\' '
f' AND logdate > {start_time}'
)
log.info(f'Querying event logs: {event_log_soql_query}')
log_files = sf.query_all(event_log_soql_query)
# Create a temp directory only accessible by the current user, which we will delete after Snowflake upload
temp_dir = tempfile.mkdtemp('_sfevents')
# Salesforce will provide a bunch of files, an hourly extract of each of the different event types in CSV format
# There are around 50 different event types and they all have different fields. Rather than a table per event type,
# we'll convert them to JSON and do schema-on-read.
# We'll load from the table stage which has the 'STRIP_OUTER_ARRAY' option, so there will be one row per event.
total_files = log_files['totalSize']
log.info(f'Found {total_files} event files to load.')
if total_files > 0:
for record in log_files['records']:
url = record['attributes']['url']
id = record['Id']
log.info(f'Downloading event log file {id} from {url}.')
# The URL provided is relative, but includes part of the base URL which we have to trim out before combining
# E.g. it could look like /services/data/v38.0/sobjects/EventLogFile/0AT0o00000NSIv5GAB
# where the base URL will look like: https://ap8.salesforce.com/services/data/v38.0/
url_relative = 'sobjects/' + url.split('sobjects/')[1] + '/LogFile'
result = sf._call_salesforce(
'GET', sf.base_url + url_relative, name=url_relative
)
# TODO: Investigate streaming the result and converting to JSON in chunks.
# Current method has high memory requirements for large files, but unlikely to be
# multi-GB hourly unless it's a really busy Salesforce org.
reader = csv.DictReader(io.StringIO(result.text))
file_path = os.path.join(temp_dir, id + '.json')
with open(file_path, 'w') as f:
# This will create a single line JSON file containing an array of objects
json.dump(list(reader), f)
if shutil.disk_usage("/").free < 2**30:
# running out of disk space, next run will catch up
break
# Copy all the staged .json files into the landing table
log.info(f'Uploading all files to Snowflake stage: {table_name}.')
db.copy_file_to_table_stage(table_name, os.path.join(temp_dir, '*.json'))
log.info(f'Upload successful, deleting all local files.')
shutil.rmtree(temp_dir)
# The table is configured to purge upon load from its stage, so we don't need to clean up
log.info(f'Copying events into Snowflake table from staged files.')
db.load_from_table_stage(table_name)
log.info(f'Loaded {total_files} event files.')
else:
log.info(f'Skipping load as there are no new event files.')
return total_files
|
import grequests
import requests
import json
import pandas as pd
import numpy as np
from datetime import datetime
# from multiprocessing import Pool
import os
from functools import reduce,partial
import re
class Project:
def __init__(self, url, id_var, date_var, token, project="cin"):
self.url = url
self.date_var = date_var
self.id_var = id_var
self.token = token
self.project = project
project=Project(url="",id_var="id",date_var="date_today",
token="")
# gets data from redcap
def create_chunk_request_data(ids_,project,variables=None):
x = {}
for i, j in enumerate(ids_):
x["records[{}]".format(i)] = '{}'.format(j)
data = {
'token': project.token,
'content': 'record',
'format': 'json',
'type': 'flat',
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false',
'returnFormat': 'json'
}
for k, v in x.items():
data[k] = v
if variables is not None:
for i,v in enumerate(variables):
data[f'fields[{i}]'] = v
return data
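# Sketch of the payload built above (illustrative ids and field): calling
# create_chunk_request_data(ids_=["101", "102"], project=project,
# variables=["age"]) yields the fixed token/content/format entries plus
#   {"records[0]": "101", "records[1]": "102", "fields[0]": "age"}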
def get_data(project, start=None, stop=None,variables=None, max_chunk_size=500, parallel_calls=50):
"""
:param max_chunk_size: Maximum number of records in a chunk
:param parallel_calls: Number of request to make at a time
:param project: A project object
:param start: start date eg '2009-01-01'. leave None for beginning of study
:param stop: stop date eg '2009-01-02'. leave None for latest input
:param variables: List of variables to fetch. None for all
:return:
"""
data = {
'token': project.token,
'content': 'record',
'format': 'json',
'type': 'flat',
'fields[0]': project.id_var,
'fields[1]': project.date_var,
#'record[]': outputTwo(),
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false',
'returnFormat': 'json'
}
print("Fetching record ids and dates")
request = requests.post(project.url, data=data, verify=False)
data = json.loads(request.text)
data2 = pd.DataFrame(data)
data2[project.date_var] = pd.to_datetime(data2[project.date_var])
if start is not None:
data2 = data2.loc[data2[project.date_var] >= pd.to_datetime(start), :]
if stop is not None:
data2 = data2.loc[data2[project.date_var] <= pd.to_datetime(stop), :]
# print(data2)
if data2.shape[0] == 0:
return []
ids_len=data2.shape[0]
ids=[]
    for i in range(0, ids_len, max_chunk_size):
        ids.append(data2[project.id_var][i:i + max_chunk_size].values)
all_requests=[]
for id_chunk in ids:
chunk_request=create_chunk_request_data(ids_=id_chunk,project=project,variables=variables)
all_requests.append(grequests.post(project.url, data=chunk_request, verify=False))
all_responses=grequests.map(all_requests,size=parallel_calls)
data_lists=[]
for response in all_responses:
if response.status_code != 200:
raise Exception(f"Error fetching data from redcap, message: {response.text} ")
data_lists.append(json.loads(response.text))
# download_fun=partial(get_chunk,project=project,variables=variables)
# print("Fetching data in %d chunks in %d parallel processes" % (len(ids),parallel))
# with Pool(processes=parallel) as pool:
# data_lists=pool.map(download_fun,ids)
data_combined=reduce(lambda x,y:x+y,data_lists)
return data_combined
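# Usage sketch (hypothetical dates and variable names): fetch a date range in
# parallel chunks; the result is a list of dicts, one per record.
#   records = get_data(project, start='2020-01-01', stop='2020-06-30',
#                      variables=[project.id_var, project.date_var, 'age'])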
def get_metadata(project):
"""
:param project: project object
:returns: metadata
"""
data1 = {
'token': project.token,
'content': 'metadata',
'format': 'json',
'returnFormat': 'json'
}
request1 = requests.post(project.url, data=data1, verify=False)
data1 = json.loads(request1.text)
return data1
print(get_metadata(project))
class Metadata:
def __init__(self, metadata):
self.metadata = metadata
self.vars_expanded = []
self.vars_non_expanded = []
self.metadata_expanded = {}
self.metadata_non_expanded = {}
for v in metadata:
self.vars_non_expanded.append(v['field_name'])
self.metadata_non_expanded[v['field_name']] = v
if v['field_type'] == 'checkbox':
t = v['select_choices_or_calculations']
t2 = t.split("|")
t3 = list(map(lambda x: x.split(",")[0], t2))
t3b=[str.strip(i) for i in t3]
t4 = [v['field_name'] + "___" + i for i in t3b]
t5 = [i.replace("-", "_") for i in t4]
self.vars_expanded = self.vars_expanded+t5
for v2 in t5:
self.metadata_expanded[v2] = v
else:
self.vars_expanded.append(v['field_name'])
self.metadata_expanded[v['field_name']] = v
# self.variables={v['field_name']: v for v in self.metadata}
# self.vars_non_expanded=list(self.variables.keys())
def exists(self, variable):
"""
:param variable: variable
:return: True or False depending on whether the variable exists in the metadata
"""
result = variable in (self.vars_expanded + self.vars_non_expanded)
return result
def get_variables(self, expand_checkbox=True):
"""
:param expand_checkbox: if true the function returns expanded variables and vice versa
:return:
"""
if expand_checkbox:
return self.vars_expanded
else:
return self.vars_non_expanded
    def get_variables_without_description(self):
        """
        :return: variables whose field type is not 'descriptive'
        """
        variables = self.get_variables(expand_checkbox=True)
        return [variable for variable in variables
                if self.metadata_expanded[variable]['field_type'] != 'descriptive']
def get_label(self, variable):
"""
:param variable: variable
:return: the label of the variable
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
label=self.metadata_expanded[variable]['field_label']
return label
def get_type(self, variable):
"""
:param variable: variable
:return: the type of the data in the variable
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
field_type=self.metadata_expanded[variable]['field_type']
if field_type =="checkbox":
return "checkbox"
if field_type != "text":
return "categorical"
type_=self.metadata_expanded[variable]['text_validation_type_or_show_slider_number']
v_type='str'
if type_ == '':
v_type = 'str'
elif 'date' in type_:
v_type = 'date'
elif type_ == "number":
v_type = 'float'
elif type_ == 'integer':
v_type = 'int'
return v_type
def get_valid_range(self, variable):
"""
:param variable: variable
:return: the range of the given variable
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
min = self.metadata_expanded[variable]['text_validation_min']
if min == '':
min=None
else:
type_=self.get_type(variable)
if type_ == 'float':
min=float(min)
elif type_ == 'date':
min=datetime.strptime(min,'%Y-%m-%d')
elif type_ == 'int':
min = int(min)
max = self.metadata_expanded[variable]['text_validation_max']
if max == '':
max=None
else:
type_ = self.get_type(variable)
if type_ == 'float':
max = float(max)
elif type_ == 'date':
max = datetime.strptime(max, '%Y-%m-%d')
elif type_ == 'int':
max = int(max)
        range = None
        if (min is not None) or (max is not None):
            range = (min, max)
return range
def get_is_required(self,variable):
"""
:param variable: variable
:return: true or false depending on whether a variable is required or not
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
required = self.metadata_expanded[variable]['required_field']
if required == '': required = False
else: required = True
return required
def get_choices(self, variable):
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
if self.metadata_expanded[variable]['field_type'] in ["yesno",]:
return {'0':"No",'1':"Yes"}
if self.metadata_expanded[variable]['field_type'] in ["checkbox",]:
return {'0':"Unchecked",'1':"Checked"}
choice = self.metadata_expanded[variable]['select_choices_or_calculations']
if choice=="":
raise Exception("variable %s does not have choices" % variable)
choices = choice.split("|")
pattern_keys=re.compile(r'(\d+)\s?,')
keys=[pattern_keys.search(item).group(1) for item in choices]
pattern_values=re.compile(r'\d+\s?,(.*)')
values=[pattern_values.search(item).group(1) for item in choices]
choices={k:v.strip() for k,v in zip(keys,values)}
return choices
def get_branching_logic(self, variable):
"""
:param variable: variable
:return: the branching logic of the variable
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
logic = self.metadata_expanded[variable]['branching_logic']
if logic == '':
logic2 = None
else:
logic2 = logic
return logic2
def get_hidden(self, variable):
"""
:param variable: variable
:returns: true or false whether the variable is hidden or not
"""
if not self.exists(variable):
raise Exception("Variable {} does not exist".format(variable))
hidden = self.metadata_expanded[variable]['field_annotation']
if hidden == '':
return False
elif '@HIDDEN' in hidden:
return True
else:
return False
    def format_data(self, row=None, labels=False):
        """
        :param row: a dict mapping variable names to raw string values
        :return: the row with values converted to their respective types
        """
        # for key, value in row.items():
        #     if not self.exists(key):
        #         raise Exception("Variable {} does not exist".format(key))
new_row = {}
for variable, value in row.items():
if value == '':
new_row[variable] = None
continue
type_ = self.get_type(variable=variable)
if type_ in ["categorical","checkbox"]:
choices=self.get_choices(variable)
new_row[variable]=choices.get(value,value)
elif type_ == 'str':
new_row[variable] = value
elif type_ == 'float':
new_row[variable] = float(value)
elif type_ == 'int':
new_row[variable] = int(re.compile(r'(\d+)').search(value).group(1))
elif type_ == 'date':
                try:
                    new_row[variable] = datetime.strptime(value, '%Y-%m-%d')
                except ValueError:
                    new_row[variable] = datetime.strptime(value, '%Y/%m/%d')
return new_row
def format_column(self,var_name,column):
type_ = self.get_type(variable=var_name)
if type_ in ["categorical","checkbox"]:
choices=self.get_choices(var_name)
column=column.map(choices)
elif type_ == 'str':
column = column
elif type_ == 'float':
column = column.replace('',np.nan).astype(float)
elif type_ == 'int':
column=column.map(lambda x:re.compile(r'(\d+)').search(x).group(1) if x != '' else '')
column=pd.to_numeric(column,downcast='integer')
# column = column.replace('',np.nan).astype(float)
elif type_ == 'date':
            try:
                column = pd.to_datetime(column, format='%Y-%m-%d', errors='coerce')
            except ValueError:
                column = pd.to_datetime(column, format='%Y/%m/%d', errors='coerce')
return column
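# Usage sketch (assumes a REDCap text variable 'age' validated as integer):
#   meta = Metadata(get_metadata(project))
#   meta.get_type('age')                              # -> 'int'
#   df['age'] = meta.format_column('age', df['age'])  # metadata-driven typing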
|
from __future__ import absolute_import
from bokeh.core.properties import Int, String
from bokeh.util.options import Options
class DummyOpts(Options):
foo = String(default="thing")
bar = Int()
def test_empty():
empty = dict()
o = DummyOpts(empty)
assert o.foo == "thing"
    assert o.bar is None
assert empty == {}
def test_exact():
exact = dict(foo="stuff", bar=10)
o = DummyOpts(exact)
assert o.foo == "stuff"
assert o.bar == 10
assert exact == {}
def test_extra():
extra = dict(foo="stuff", bar=10, baz=22.2)
o = DummyOpts(extra)
assert o.foo == "stuff"
assert o.bar == 10
assert extra == {'baz': 22.2}
def test_mixed():
mixed = dict(foo="stuff", baz=22.2)
o = DummyOpts(mixed)
assert o.foo == "stuff"
    assert o.bar is None
assert mixed == {'baz': 22.2}
|
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import uuid
import numpy as np
import warnings
from graphlearn import pywrap_graphlearn as pywrap
from graphlearn.python.data.values import Nodes
from graphlearn.python.gsl.dag_edge import get_dag_edge, get_eid
from graphlearn.python.utils import strategy2op
class DagNode(object):
def __init__(self, dag, op_name="", params={}):
self._dag = dag
self._op_name = op_name
# Lazy init.
self._alias = None
self._params = {} # keep params for the node
self._output_field = None
self._shape = None
self._sparse = False
self._path = None # Upstream edge type
self._type = None # Edge type or node type of current DagNode
self._strategy = "random"
# Add by GSL.
    # All the DagEdges, except links to the sink node, are mapped to DagEdgeDef.
self._out_edges = [] # downstream DagEdges
self._in_edges = [] # upstream DagEdges
# Indicates setting which field to src_input of the downstream edges
self._graph = self._dag.graph
self._decoder = None
self._lookup_node = None # Each traverse node has a LookupDagNode
self._degree_nodes = [] # Traverse node may have several DegreeDagNodes
# Init until Dag is ready.
self._node_def = None
self._nid = -1
self._params.update(params)
# Downstream Traverse DagNodes
self._pos_downstreams = []
# Downstream Negative Sampled Traverse DagNodes
self._neg_downstreams = []
def _add_degree_node(self, edge_type, node_from):
edge = self._new_edge(dst_input=pywrap.kNodeIds)
# Add an edge from upstream node to degree node.
self._add_out_edge(edge)
node = DegreeDagNode(
"GetDegree", self, [edge],
{pywrap.kEdgeType: edge_type,
pywrap.kNodeFrom: int(node_from),
pywrap.kPartitionKey: pywrap.kNodeIds})
self._degree_nodes.append(node)
@property
def spec(self):
return self._decoder
@property
def nid(self):
return self._nid
@property
def op_name(self):
return self._op_name
@property
def in_edges(self):
return self._in_edges
@property
def out_edges(self):
return self._out_edges
@property
def pos_downstreams(self):
return self._pos_downstreams
@property
def neg_downstreams(self):
return self._neg_downstreams
def set_path(self, path, node_from):
assert isinstance(path, str)
assert isinstance(node_from, pywrap.NodeFrom)
topo = self._graph.get_topology()
path_to_type = {pywrap.NodeFrom.NODE: lambda x: x,
pywrap.NodeFrom.EDGE_SRC: topo.get_src_type,
pywrap.NodeFrom.EDGE_DST: topo.get_dst_type}
self._path = path
self._type = path_to_type.get(node_from)(path)
if self._type in self._graph.get_node_decoders().keys():
self._decoder = self._graph.get_node_decoder(self._type)
else:
self._decoder = self._graph.get_edge_decoder(self._type)
def _add_param(self, name, value):
self._params[name] = value
def _add_in_edge(self, edge):
self._in_edges.append(edge)
def _add_out_edge(self, edge):
self._out_edges.append(edge)
def set_output_field(self, field):
self._output_field = field
@property
def output_field(self):
return self._output_field
@property
def type(self):
return self._type
@property
def shape(self):
return self._shape
@property
def sparse(self):
return self._sparse
@property
def node_def(self):
return self._node_def
def get_alias(self):
return self._alias
def get_lookup_node(self):
return self._lookup_node
def get_degree_nodes(self):
return self._degree_nodes
""" GSL APIs """
def alias(self, alias):
self._set_alias(alias, temp=False)
return self
def batch(self, batch_size):
assert isinstance(batch_size, int) and batch_size > 0
self._shape = (batch_size,)
self._add_param(pywrap.kBatchSize, batch_size)
self._add_param(pywrap.kEpoch, sys.maxsize >> 32)
self._add_param(pywrap.kStrategy, "by_order")
return self
def shuffle(self, traverse=False):
strategy_map = {True: "shuffle", False: "random"}
    self._add_param(pywrap.kStrategy, strategy_map[bool(traverse)])
return self
def sample(self, count):
assert isinstance(count, int)
self._add_param(pywrap.kNeighborCount, count)
self._add_param(pywrap.kPartitionKey, pywrap.kSrcIds)
self._shape = (np.prod(self._shape), count)
return self
def by(self, strategy):
self._strategy = strategy
if self._op_name == "NegativeSampler":
assert strategy in ["random", "in_degree", "conditional", "node_weight"]
elif self._op_name == "Sampler":
assert strategy in \
["random", "topk", "in_degree", "edge_weight", "full"]
else:
      raise ValueError("`by(strategy)` can only be used after `sample(count)`")
self._sparse = (strategy == "full")
self._op_name = strategy2op(self._strategy, self._op_name)
self._add_param(pywrap.kStrategy, self._op_name)
return self
def filter(self, target):
"""Filter the samples that are not equal to target ids.
Args:
target (string): Alias of upstream TraverseVertexDagNode.
Raises:
ValueError: target upstream is not existed.
"""
if isinstance(target, str):
target = self._dag.get_node(target)
if not isinstance(target, TraverseVertexDagNode):
raise ValueError("filter only accepts upstream Nodes.")
edge = self._new_edge(src_output=target.output_field,
dst_input=pywrap.kFilterIds)
target._add_out_edge(edge)
self._add_in_edge(edge)
self._add_param(pywrap.kFilterType, 1)
return self
def where(self, target, condition={}):
""" Add condition for negative samlpler. Used after `by`.
Args:
target (string): Alias of upstream TraverseVertexDagNode which is the
postive sample that condition should match.
condition (dict, optional): Keys are as following.
"batch_share" (bool, optional): Whether sampled negative samples are
shared by this batch. Defaults to False.
"unique" (bool, optional): Whether sampled negtive samples are unique.
Defaults to False.
"int_cols" (int list, optional): int columns as condition.
Defaults to [].
"int_props" (float list, optional) : proportions of int columns.
Defaults to [].
"float_cols" (int list, optional): float columns as condition.
Defaults to [].
"float_props" (float list, optional): proportions of float columns.
Defaults to [].
"str_cols" (int list, optional): string columns as condition.
Defaults to [].
"str_props" (float list, optional): proportions of string columns.
Defaults to [].
Raises:
ValueError: target upstream is not existed.
"""
if isinstance(target, str):
target = self._dag.get_node(target)
if not isinstance(target, TraverseVertexDagNode):
raise ValueError("where only accepts upstream Nodes.")
edge = self._new_edge(src_output=target.output_field,
dst_input=pywrap.kDstIds)
target._add_out_edge(edge)
self._add_in_edge(edge)
default_kvs = {
"batch_share": (pywrap.kBatchShare, False),
"unique": (pywrap.kUnique, False),
"int_cols": (pywrap.kIntCols, None),
"int_props": (pywrap.kIntProps, None),
"float_cols": (pywrap.kFloatCols, None),
"float_props": (pywrap.kFloatProps, None),
"str_cols": (pywrap.kStrCols, None),
"str_props": (pywrap.kStrProps, None)
}
for k in condition.keys():
if k not in default_kvs.keys():
raise ValueError("condition {} is not supported.".format(k))
for k, v in default_kvs.items():
param_key, default_value = v
value = condition.get(k, default_value)
if value is not None:
self._add_param(param_key, value)
self._op_name = "ConditionalNegativeSampler"
self._add_param(pywrap.kStrategy, self._strategy)
self._add_param(pywrap.kDstType, target.type)
return self
def each(self, func):
func(self)
return self
def values(self, func=lambda x: x):
self._dag.set_ready(func)
return self._dag
""" GSL Apis """
def _set_alias(self, alias=None, temp=False):
if self._alias:
return
if not alias:
alias = str(uuid.uuid1())
self._alias = alias
self._dag.add_node(alias, self, temp=temp)
self._lookup_node = self._lookup()
self._link_to_sink()
def _get_shape_and_degrees(self, dag_values):
shape = self._shape
degrees = None
if self._sparse:
assert isinstance(shape, tuple) and len(shape) == 2
degrees = pywrap.get_dag_value(dag_values, self._nid, pywrap.kDegreeKey)
shape = (shape[0], shape[1] if shape[1] else max(degrees))
return shape, degrees
def feed_values(self, dag_values):
pass
def _reverse_edge(self, edge_type, force=True):
reverse_mask = "_reverse"
if edge_type.endswith(reverse_mask):
return edge_type[: -len(reverse_mask)]
elif force:
return edge_type + reverse_mask
return edge_type
def _new_edge(self, src_output=None, dst_input=None):
# add an edge for cur node
eid = get_eid()
cur_edge = get_dag_edge(eid)
default_field = "fake"
cur_edge.src_output = src_output or self._output_field or default_field
cur_edge.dst_input = dst_input or default_field
return cur_edge
def _new_edge_node(self, op_name, edge_type, in_edge):
assert edge_type is not None and isinstance(edge_type, str)
self._add_out_edge(in_edge)
shape = self._shape
next_node = TraverseEdgeDagNode(self._dag, op_name=op_name,
upstream=self)
next_node._shape = shape
next_node._add_param(pywrap.kEdgeType, edge_type)
next_node._add_in_edge(in_edge)
next_node.set_path(edge_type, pywrap.NodeFrom.NODE)
next_node.set_output_field(pywrap.kEdgeIds)
return next_node
def _new_vertex_node(self, op_name, edge_type, in_edge,
node_from=pywrap.NodeFrom.EDGE_DST):
assert edge_type is not None and isinstance(edge_type, str)
self._add_out_edge(in_edge)
shape = self._shape
next_node = TraverseVertexDagNode(self._dag, op_name=op_name)
next_node._shape = shape
next_node._add_param(pywrap.kEdgeType, edge_type)
next_node._add_in_edge(in_edge)
next_node.set_path(edge_type, node_from)
next_node.set_output_field(pywrap.kNodeIds)
return next_node
def set_ready(self, node_id):
""" Set dag_node ready and format the DagNodeDef proto.
"""
    add_param_map = {
      int: pywrap.add_dag_node_int_params,
      str: pywrap.add_dag_node_string_params}
    if sys.version_info[0] == 2:
      # Python 2 compatibility: unicode strings also map to string params.
      add_param_map[unicode] = pywrap.add_dag_node_string_params
add_vector_param_map = {
int: pywrap.add_dag_node_int_vector_params,
float: pywrap.add_dag_node_float_vector_params}
self._nid = node_id
node_def = pywrap.new_dag_node()
pywrap.set_dag_node_id(node_def, node_id)
pywrap.set_dag_node_op_name(node_def, self._op_name)
for in_edge in self._in_edges:
in_edge.dst = self
pywrap.add_dag_node_in_edge(
node_def, in_edge.dag_edge_def)
for out_edge in self._out_edges:
out_edge.src = self
pywrap.add_dag_node_out_edge(
node_def, out_edge.dag_edge_def)
for k, v in self._params.items():
if isinstance(v, bool):
v = int(v)
if not isinstance(v, list):
add_param_map[type(v)](node_def, k, v)
continue
if len(v) == 0:
continue
add_vector_param_map[type(v[0])](node_def, k, v)
self._node_def = node_def
return True
def _lookup(self):
return None
def _link_to_sink(self):
edge = self._new_edge()
self._add_out_edge(edge)
self._dag.sink_node._add_in_edge(edge)
class SinkNode(DagNode):
def __init__(self, dag):
super(SinkNode, self).__init__(dag, "Sink")
self._dag.sink_node = self
def alias(self, alias):
self._set_alias(alias, temp=True)
return self
def _lookup(self):
# Override
return None
def _link_to_sink(self):
# Override
pass
class TraverseVertexDagNode(DagNode):
def __init__(self, dag, op_name="", params={}):
super(TraverseVertexDagNode, self).__init__(dag, op_name, params)
def outV(self, edge_type=None):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_vertex_node("Sampler", edge_type, in_edge)
self._pos_downstreams.append(next_node)
self._add_degree_node(edge_type, pywrap.NodeFrom.EDGE_SRC)
return next_node
def inV(self, edge_type=None):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_vertex_node("Sampler",
self._reverse_edge(edge_type), in_edge)
self._pos_downstreams.append(next_node)
self._add_degree_node(self._reverse_edge(edge_type), pywrap.NodeFrom.EDGE_SRC)
return next_node
def outE(self, edge_type):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_edge_node("Sampler", edge_type, in_edge)
self._pos_downstreams.append(next_node)
return next_node
def inE(self, edge_type):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_edge_node("Sampler",
self._reverse_edge(edge_type), in_edge)
self._pos_downstreams.append(next_node)
return next_node
def outNeg(self, edge_type):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_vertex_node("NegativeSampler", edge_type, in_edge)
self._neg_downstreams.append(next_node)
return next_node
def inNeg(self, edge_type):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_vertex_node("NegativeSampler",
self._reverse_edge(edge_type), in_edge)
self._neg_downstreams.append(next_node)
return next_node
def Neg(self, node_type):
self._set_alias()
in_edge = self._new_edge(dst_input=pywrap.kSrcIds)
next_node = self._new_vertex_node("NegativeSampler", node_type, in_edge,
node_from=pywrap.NodeFrom.NODE)
self._neg_downstreams.append(next_node)
return next_node
def _lookup(self):
# Override
    # Generate an edge from the traverse node to its lookup node.
edge = self._new_edge(dst_input=pywrap.kNodeIds)
self._add_out_edge(edge)
return LookupDagNode("LookupNodes", self, [edge],
{pywrap.kNodeType: self._type,
pywrap.kPartitionKey: pywrap.kNodeIds})
def feed_values(self, dag_values):
shape, degrees = self._get_shape_and_degrees(dag_values)
return self._graph.get_nodes(
self._type, pywrap.get_dag_value(
dag_values, self._nid, pywrap.kNodeIds), degrees, shape)
class TraverseNegVertexDagNode(TraverseVertexDagNode):
def __init__(self, dag, op_name="", params={}):
super(TraverseNegVertexDagNode, self).__init__(dag, op_name, params)
class TraverseEdgeDagNode(DagNode):
def __init__(self, dag, op_name="", params={}, upstream=None):
super(TraverseEdgeDagNode, self).__init__(dag, op_name, params)
self._upstream = upstream
def inV(self):
self._set_alias()
next_node = FakeNode(self)
next_node.set_path(self._path, pywrap.NodeFrom.EDGE_DST)
next_node.set_output_field(pywrap.kNodeIds)
return next_node
def outV(self):
    warnings.warn("outV() of an edge is just the upstream node; returning it.")
    return self._upstream
def _lookup(self):
# Override
    # Generate an edge from the current node to its lookup node.
edge = self._new_edge(dst_input=pywrap.kEdgeIds)
# Add an edge from upstream node to lookup node.
extra_edge = self._new_edge(
src_output=self._upstream.output_field, dst_input=pywrap.kSrcIds)
self._add_out_edge(edge)
self._upstream._add_out_edge(extra_edge)
return LookupDagNode(
"LookupEdges", self, [edge, extra_edge],
{pywrap.kEdgeType: self._reverse_edge(self._type, False),
pywrap.kPartitionKey: pywrap.kSrcIds})
def feed_values(self, dag_values):
shape, degrees = self._get_shape_and_degrees(dag_values)
edge_ids = pywrap.get_dag_value(dag_values, self._nid, pywrap.kEdgeIds)
assert isinstance(shape, tuple) and len(shape) == 2
nbr_counts = degrees if self._sparse else [shape[1]] * shape[0]
src_ids = pywrap.get_dag_value(dag_values,
self._upstream.nid,
pywrap.kNodeIds)
src_ids = src_ids.reshape(self._upstream.shape)
src_ids = np.concatenate(
[src_ids[idx].repeat(d) for idx, d in enumerate(nbr_counts)])
dst_ids = pywrap.get_dag_value(dag_values, self._nid, pywrap.kNodeIds)
return self._graph.get_edges(
self._type, src_ids, dst_ids, edge_ids, degrees, shape)
class TraverseSourceEdgeDagNode(TraverseEdgeDagNode):
def __init__(self, dag, op_name="", params={}):
super(TraverseSourceEdgeDagNode, self).__init__(
dag, op_name=op_name, params=params)
def outV(self):
self._set_alias()
next_node = FakeNode(self)
next_node.set_path(self._path, pywrap.NodeFrom.EDGE_SRC)
next_node.set_output_field(pywrap.kSrcIds)
return next_node
def inV(self, edge_type=None):
self._set_alias()
next_node = FakeNode(self)
next_node.set_path(self._path, pywrap.NodeFrom.EDGE_DST)
next_node.set_output_field(pywrap.kDstIds)
return next_node
def _lookup(self):
# Override
edge = self._new_edge(dst_input=pywrap.kEdgeIds)
extra_edge = self._new_edge(src_output=pywrap.kSrcIds,
dst_input=pywrap.kSrcIds)
self._add_out_edge(edge)
self._add_out_edge(extra_edge)
return LookupDagNode(
"LookupEdges", self, [edge, extra_edge],
{pywrap.kEdgeType: self._reverse_edge(self._type, False),
pywrap.kPartitionKey: pywrap.kSrcIds})
def feed_values(self, dag_values):
shape, degrees = self._get_shape_and_degrees(dag_values)
edge_ids = pywrap.get_dag_value(dag_values, self._nid, pywrap.kEdgeIds)
src_ids = pywrap.get_dag_value(dag_values, self._nid, pywrap.kSrcIds)
dst_ids = pywrap.get_dag_value(dag_values, self._nid, pywrap.kDstIds)
return self._graph.get_edges(
self._type, src_ids, dst_ids, edge_ids, degrees, shape)
class LookupDagNode(DagNode):
def __init__(self, op_name="", upstream=None, in_edges=[], params={}):
super(LookupDagNode, self).__init__(upstream._dag, op_name, params)
self._upstream = upstream
self._shape = upstream._shape
for edge in in_edges:
self._add_in_edge(edge)
self._set_alias(temp=True)
self.set_output_field("properties")
class DegreeDagNode(DagNode):
def __init__(self, op_name="", upstream=None, in_edges=[], params={}):
super(DegreeDagNode, self).__init__(upstream._dag, op_name, params)
self._upstream = upstream
self._shape = upstream._shape
for edge in in_edges:
self._add_in_edge(edge)
self._set_alias(temp=True)
self.set_output_field(pywrap.kDegrees)
@property
def edge_type(self):
return self._params[pywrap.kEdgeType]
@property
def node_from(self):
return self._params[pywrap.kNodeFrom]
class FakeNode(TraverseVertexDagNode):
""" FakeNode is used for adding corresponding DagNode of E.outV()/inV()
to Dag. E.outV()/inV() doesn't raise any operator, but only changes the
field which downstream absorbs.
"""
def __init__(self, dag_node):
super(FakeNode, self).__init__(dag_node._dag)
if not isinstance(dag_node, TraverseEdgeDagNode):
raise ValueError("FakeNode is a fake for TraverseEdgeDagNode, not {}"
.format(type(dag_node)))
self._upstream = dag_node
self._shape = dag_node._shape
@property
def nid(self):
return self._upstream.nid
def set_ready(self, node_id):
# Override
return False
def _add_out_edge(self, edge):
# Override
self._upstream._add_out_edge(edge)
def _link_to_sink(self):
# Override
pass
def feed_values(self, dag_values):
edges = self._upstream.feed_values(dag_values)
return (edges.dst_nodes, edges.src_nodes)[int(self._output_field == pywrap.kSrcIds)]
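# Hedged usage sketch (added; not part of the original module). A GSL query
# chains these DagNodes; `g` is assumed to be an initialized graphlearn Graph
# and the "user"/"click" type names are hypothetical:
#   query = g.V("user").batch(64).alias("src") \
#            .outV("click").sample(10).by("random").alias("nbr") \
#            .values()
# `batch` configures the traversal source, `outV(...).sample(...).by(...)`
# appends a TraverseVertexDagNode backed by a Sampler op, and `values()`
# marks the Dag ready for execution.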
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
import sys
from typing import Any, List
import pkg_resources
from hydra.experimental import compose, initialize_config_module
from omegaconf import OmegaConf
logger = logging.getLogger("vissl")
# List all the config files, used to generate the unit tests on the fly
def list_config_files(dir_path, exclude_folders):
resource_name = "configs"
assert pkg_resources.resource_isdir(resource_name, dir_path)
all_items = pkg_resources.resource_listdir(resource_name, dir_path)
config_files = []
def valid_file(filename):
if not filename.endswith("yaml"):
return False
if exclude_folders and any(x in filename for x in exclude_folders):
return False
return True
for item in all_items:
subpath = f"{dir_path}/{item}"
if pkg_resources.resource_isdir(resource_name, subpath):
            # Recurse into subdirectories to collect the whole tree
config_files.extend(list_config_files(subpath, exclude_folders))
if valid_file(subpath):
# If valid leaf, return the config file
config_files.append(subpath)
return config_files
def create_valid_input(input_list):
out_list = []
for item in input_list:
out_list.append(re.sub("config/", "config=", item))
return out_list
# we skip object detection configs since they are for detectron2 codebase
BENCHMARK_CONFIGS = create_valid_input(
list_config_files("config/benchmark", exclude_folders=["object_detection"])
)
PRETRAIN_CONFIGS = create_valid_input(
list_config_files("config/pretrain", exclude_folders=None)
)
INTEGRATION_TEST_CONFIGS = create_valid_input(
list_config_files("config/test/integration_test", exclude_folders=None)
)
ROOT_CONFIGS = create_valid_input(
list_config_files(
"config", exclude_folders=["models", "optimization", "object_detection"]
)
)
ROOT_OSS_CONFIGS = create_valid_input(
list_config_files(
"config", exclude_folders=["models", "optimization", "object_detection", "fb"]
)
)
# configs that require loss optimization and hence trainable
ROOT_LOSS_CONFIGS = create_valid_input(
list_config_files(
"config",
exclude_folders=[
"models",
"optimization",
"object_detection",
"nearest_neighbor",
"feature_extraction",
"fb",
],
)
)
UNIT_TEST_CONFIGS = create_valid_input(
list_config_files("config/test/cpu_test", exclude_folders=None)
)
initialize_config_module(config_module="vissl.config")
class SSLHydraConfig(object):
def __init__(self, overrides: List[Any] = None):
self.overrides = []
if overrides is not None and len(overrides) > 0:
self.overrides.extend(overrides)
cfg = compose(config_name="defaults", overrides=self.overrides)
self.default_cfg = cfg
@classmethod
def from_configs(cls, config_files: List[Any] = None):
return cls(config_files)
def override(self, config_files: List[Any]):
sys.argv = config_files
cli_conf = OmegaConf.from_cli(config_files)
self.default_cfg = OmegaConf.merge(self.default_cfg, cli_conf)
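# Hedged usage sketch (added): composing a config and applying a command-line
# style override; the config path below is hypothetical.
#   cfg = SSLHydraConfig.from_configs(
#       ["config=test/integration_test/quick_simclr"])
#   cfg.override(["config.OPTIMIZER.num_epochs=1"])
#   print(OmegaConf.to_yaml(cfg.default_cfg))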
|
from setuptools import find_packages
from setuptools import setup
setup(
name="daves_utilities",
version="1.0.0",
description="daves utilities are some fun little functions.",
author="David Kuchelmeister",
author_email="[email protected]",
packages=find_packages(exclude=("test*", "testing*")),
)
|
import pytest
from django.db.utils import IntegrityError
from tags.models import Tag
pytestmark = [pytest.mark.django_db]
def test_invalid_color():
color = 'invalid-color'
with pytest.raises(
IntegrityError,
match='CHECK constraint failed: HEX_color',
):
Tag.objects.create(
            name='Полдник',  # Russian for "afternoon tea" (test data)
color=color,
slug='afternoon-tea',
)
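def test_valid_color():
    # Companion happy-path sketch (added): assumes, per the test above, that
    # Tag enforces a CHECK constraint named HEX_color on the color field, so
    # a well-formed hex color should be accepted.
    tag = Tag.objects.create(
        name='Полдник',
        color='#FFAA00',
        slug='afternoon-tea-2',
    )
    assert tag.color == '#FFAA00'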
|
import copy
import voluptuous as vol
from esphomeyaml import core
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ABOVE, CONF_ACTION_ID, CONF_AND, CONF_AUTOMATION_ID, \
CONF_BELOW, CONF_CONDITION, CONF_CONDITION_ID, CONF_DELAY, \
CONF_ELSE, CONF_ID, CONF_IF, CONF_LAMBDA, \
CONF_OR, CONF_RANGE, CONF_THEN, CONF_TRIGGER_ID
from esphomeyaml.core import ESPHomeYAMLError
from esphomeyaml.helpers import App, ArrayInitializer, Pvariable, TemplateArguments, add, add_job, \
esphomelib_ns, float_, process_lambda, templatable, uint32, get_variable, PollingComponent, \
Action, Component, Trigger
from esphomeyaml.util import ServiceRegistry
def maybe_simple_id(*validators):
validator = vol.All(*validators)
def validate(value):
if isinstance(value, dict):
return validator(value)
return validator({CONF_ID: value})
return validate
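# Hedged illustration (added): maybe_simple_id lets a config use either the
# bare-id short form or the full mapping form; both normalize identically.
#   validate = maybe_simple_id(vol.Schema({vol.Required(CONF_ID): cv.string}))
#   validate('my_id') == validate({CONF_ID: 'my_id'})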
def validate_recursive_condition(value):
return CONDITIONS_SCHEMA(value)
def validate_recursive_action(value):
value = cv.ensure_list(value)[:]
for i, item in enumerate(value):
item = copy.deepcopy(item)
if not isinstance(item, dict):
raise vol.Invalid(u"Action must consist of key-value mapping! Got {}".format(item))
key = next((x for x in item if x != CONF_ACTION_ID), None)
if key is None:
raise vol.Invalid(u"Key missing from action! Got {}".format(item))
if key not in ACTION_REGISTRY:
raise vol.Invalid(u"Unable to find action with the name '{}', is the component loaded?"
u"".format(key))
item.setdefault(CONF_ACTION_ID, None)
key2 = next((x for x in item if x != CONF_ACTION_ID and x != key), None)
if key2 is not None:
raise vol.Invalid(u"Cannot have two actions in one item. Key '{}' overrides '{}'! "
u"Did you forget to indent the action?"
u"".format(key, key2))
validator = ACTION_REGISTRY[key][0]
value[i] = {
CONF_ACTION_ID: cv.declare_variable_id(Action)(item[CONF_ACTION_ID]),
key: validator(item[key])
}
return value
ACTION_REGISTRY = ServiceRegistry()
# pylint: disable=invalid-name
DelayAction = esphomelib_ns.class_('DelayAction', Action, Component)
LambdaAction = esphomelib_ns.class_('LambdaAction', Action)
IfAction = esphomelib_ns.class_('IfAction', Action)
UpdateComponentAction = esphomelib_ns.class_('UpdateComponentAction', Action)
Automation = esphomelib_ns.class_('Automation')
Condition = esphomelib_ns.class_('Condition')
AndCondition = esphomelib_ns.class_('AndCondition', Condition)
OrCondition = esphomelib_ns.class_('OrCondition', Condition)
RangeCondition = esphomelib_ns.class_('RangeCondition', Condition)
LambdaCondition = esphomelib_ns.class_('LambdaCondition', Condition)
CONDITIONS_SCHEMA = vol.All(cv.ensure_list, [cv.templatable({
cv.GenerateID(CONF_CONDITION_ID): cv.declare_variable_id(Condition),
vol.Optional(CONF_AND): validate_recursive_condition,
vol.Optional(CONF_OR): validate_recursive_condition,
vol.Optional(CONF_RANGE): vol.All(vol.Schema({
vol.Optional(CONF_ABOVE): vol.Coerce(float),
vol.Optional(CONF_BELOW): vol.Coerce(float),
}), cv.has_at_least_one_key(CONF_ABOVE, CONF_BELOW)),
vol.Optional(CONF_LAMBDA): cv.lambda_,
})])
def validate_automation(extra_schema=None, extra_validators=None, single=False):
schema = AUTOMATION_SCHEMA.extend(extra_schema or {})
def validator_(value):
if isinstance(value, list):
try:
# First try as a sequence of actions
return [schema({CONF_THEN: value})]
except vol.Invalid as err:
# Next try as a sequence of automations
try:
return vol.Schema([schema])(value)
except vol.Invalid as err2:
if 'Unable to find action' in str(err):
raise err2
raise vol.MultipleInvalid([err, err2])
elif isinstance(value, dict):
if CONF_THEN in value:
return [schema(value)]
return [schema({CONF_THEN: value})]
# This should only happen with invalid configs, but let's have a nice error message.
return [schema(value)]
def validator(value):
value = validator_(value)
if extra_validators is not None:
value = vol.Schema([extra_validators])(value)
if single:
if len(value) != 1:
raise vol.Invalid("Cannot have more than 1 automation for templates")
return value[0]
return value
return validator
AUTOMATION_SCHEMA = vol.Schema({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(Trigger),
cv.GenerateID(CONF_AUTOMATION_ID): cv.declare_variable_id(Automation),
vol.Optional(CONF_IF): CONDITIONS_SCHEMA,
vol.Required(CONF_THEN): validate_recursive_action,
})
def build_condition(config, arg_type):
template_arg = TemplateArguments(arg_type)
if isinstance(config, core.Lambda):
lambda_ = None
for lambda_ in process_lambda(config, [(arg_type, 'x')]):
yield
yield LambdaCondition.new(template_arg, lambda_)
elif CONF_AND in config:
yield AndCondition.new(template_arg, build_conditions(config[CONF_AND], template_arg))
elif CONF_OR in config:
yield OrCondition.new(template_arg, build_conditions(config[CONF_OR], template_arg))
elif CONF_LAMBDA in config:
lambda_ = None
for lambda_ in process_lambda(config[CONF_LAMBDA], [(arg_type, 'x')]):
yield
yield LambdaCondition.new(template_arg, lambda_)
elif CONF_RANGE in config:
conf = config[CONF_RANGE]
rhs = RangeCondition.new(template_arg)
type = RangeCondition.template(template_arg)
condition = Pvariable(config[CONF_CONDITION_ID], rhs, type=type)
if CONF_ABOVE in conf:
template_ = None
for template_ in templatable(conf[CONF_ABOVE], arg_type, float_):
yield
condition.set_min(template_)
if CONF_BELOW in conf:
template_ = None
for template_ in templatable(conf[CONF_BELOW], arg_type, float_):
yield
condition.set_max(template_)
yield condition
else:
raise ESPHomeYAMLError(u"Unsupported condition {}".format(config))
def build_conditions(config, arg_type):
conditions = []
for conf in config:
condition = None
for condition in build_condition(conf, arg_type):
yield None
conditions.append(condition)
yield ArrayInitializer(*conditions)
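# Hedged note (added): the build_* functions here are esphomeyaml-style
# coroutines. Each bare `yield`/`yield None` suspends until dependencies
# (e.g. variables fetched via get_variable) have been resolved by the
# add_job scheduler in helpers; the final yield hands back the constructed
# expression.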
DELAY_ACTION_SCHEMA = cv.templatable(cv.positive_time_period_milliseconds)
@ACTION_REGISTRY.register(CONF_DELAY, DELAY_ACTION_SCHEMA)
def delay_action_to_code(config, action_id, arg_type):
template_arg = TemplateArguments(arg_type)
rhs = App.register_component(DelayAction.new(template_arg))
type = DelayAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
for template_ in templatable(config, arg_type, uint32):
yield
add(action.set_delay(template_))
yield action
IF_ACTION_SCHEMA = vol.All({
vol.Required(CONF_CONDITION): validate_recursive_condition,
vol.Optional(CONF_THEN): validate_recursive_action,
vol.Optional(CONF_ELSE): validate_recursive_action,
}, cv.has_at_least_one_key(CONF_THEN, CONF_ELSE))
@ACTION_REGISTRY.register(CONF_IF, IF_ACTION_SCHEMA)
def if_action_to_code(config, action_id, arg_type):
template_arg = TemplateArguments(arg_type)
for conditions in build_conditions(config[CONF_CONDITION], arg_type):
yield None
rhs = IfAction.new(template_arg, conditions)
type = IfAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
if CONF_THEN in config:
for actions in build_actions(config[CONF_THEN], arg_type):
yield None
add(action.add_then(actions))
if CONF_ELSE in config:
for actions in build_actions(config[CONF_ELSE], arg_type):
yield None
add(action.add_else(actions))
yield action
LAMBDA_ACTION_SCHEMA = cv.lambda_
@ACTION_REGISTRY.register(CONF_LAMBDA, LAMBDA_ACTION_SCHEMA)
def lambda_action_to_code(config, action_id, arg_type):
template_arg = TemplateArguments(arg_type)
for lambda_ in process_lambda(config, [(arg_type, 'x')]):
yield None
rhs = LambdaAction.new(template_arg, lambda_)
type = LambdaAction.template(template_arg)
yield Pvariable(action_id, rhs, type=type)
CONF_COMPONENT_UPDATE = 'component.update'
COMPONENT_UPDATE_ACTION_SCHEMA = maybe_simple_id({
vol.Required(CONF_ID): cv.use_variable_id(PollingComponent),
})
@ACTION_REGISTRY.register(CONF_COMPONENT_UPDATE, COMPONENT_UPDATE_ACTION_SCHEMA)
def component_update_action_to_code(config, action_id, arg_type):
template_arg = TemplateArguments(arg_type)
for var in get_variable(config[CONF_ID]):
yield None
rhs = UpdateComponentAction.new(var)
type = UpdateComponentAction.template(template_arg)
yield Pvariable(action_id, rhs, type=type)
def build_action(full_config, arg_type):
action_id = full_config[CONF_ACTION_ID]
key, config = next((k, v) for k, v in full_config.items() if k in ACTION_REGISTRY)
builder = ACTION_REGISTRY[key][1]
for result in builder(config, action_id, arg_type):
yield None
yield result
def build_actions(config, arg_type):
actions = []
for conf in config:
action = None
for action in build_action(conf, arg_type):
yield None
actions.append(action)
yield ArrayInitializer(*actions, multiline=False)
def build_automation_(trigger, arg_type, config):
rhs = App.make_automation(TemplateArguments(arg_type), trigger)
type = Automation.template(arg_type)
obj = Pvariable(config[CONF_AUTOMATION_ID], rhs, type=type)
if CONF_IF in config:
conditions = None
for conditions in build_conditions(config[CONF_IF], arg_type):
yield None
add(obj.add_conditions(conditions))
actions = None
for actions in build_actions(config[CONF_THEN], arg_type):
yield None
add(obj.add_actions(actions))
yield obj
def build_automation(trigger, arg_type, config):
add_job(build_automation_, trigger, arg_type, config)
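# Hedged illustration (added): the two YAML shapes validate_automation()
# accepts (keys below are hypothetical examples). A bare list of actions:
#   on_press:
#     - delay: 100ms
# and a full automation with a condition:
#   on_press:
#     if:
#       lambda: 'return x > 10;'
#     then:
#       - delay: 100ms
# Both normalize to a list of automation dicts containing CONF_THEN.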
|
from .ModelParametersEstimation import *
from .LoadData import * |
from llvmlite import binding as ll
from llvmlite.llvmpy import core as lc
from numba.core.codegen import BaseCPUCodegen, CodeLibrary
from numba.core import utils
from .cudadrv import nvvm
CUDA_TRIPLE = {32: 'nvptx-nvidia-cuda',
64: 'nvptx64-nvidia-cuda'}
class CUDACodeLibrary(CodeLibrary):
# We don't optimize the IR at the function or module level because it is
# optimized by NVVM after we've passed it on.
def _optimize_functions(self, ll_module):
pass
def _optimize_final_module(self):
pass
def _finalize_specific(self):
# Fix global naming
for gv in self._final_module.global_variables:
if '.' in gv.name:
gv.name = gv.name.replace('.', '_')
def get_asm_str(self):
# Return nothing: we can only dump assembly code when it is later
# generated (in numba.cuda.compiler).
return None
class JITCUDACodegen(BaseCPUCodegen):
"""
This codegen implementation for CUDA actually only generates optimized LLVM
IR. Generation of PTX code is done separately (see numba.cuda.compiler).
"""
_library_class = CUDACodeLibrary
def _init(self, llvm_module):
assert list(llvm_module.global_variables) == [], "Module isn't empty"
self._data_layout = nvvm.default_data_layout
self._target_data = ll.create_target_data(self._data_layout)
def _create_empty_module(self, name):
ir_module = lc.Module(name)
ir_module.triple = CUDA_TRIPLE[utils.MACHINE_BITS]
if self._data_layout:
ir_module.data_layout = self._data_layout
return ir_module
def _module_pass_manager(self):
raise NotImplementedError
def _function_pass_manager(self, llvm_module):
raise NotImplementedError
def _add_module(self, module):
pass
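# Hedged usage sketch (added; mirrors how numba drives its CPU codegen, so
# treat the exact names as assumptions): build a codegen and a code library
# whose unoptimized LLVM IR is later handed to NVVM for PTX generation.
#   codegen = JITCUDACodegen('numba.cuda.jit')
#   library = codegen.create_library('my_kernel_library')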
|
import jax
import jax.numpy as jnp
import numpy as np
import numpyro.distributions as dists
from numpyro.primitives import sample, plate
import pandas as pd
from typing import Tuple, Iterable
def preprocess(orig_data: pd.DataFrame) -> Tuple[Iterable[pd.DataFrame], int]:
return (orig_data['first'].to_frame(), orig_data['second'].to_frame()), len(orig_data)
def postprocess(syn_data: pd.DataFrame) -> pd.DataFrame:
return syn_data.copy()
def model(x_first = None, x_second = None, num_obs_total = None) -> None:
batch_size = 1
if x_first is not None:
batch_size = x_first.shape[0]
if num_obs_total is None:
num_obs_total = batch_size
mu = sample('mu', dists.Normal())
sigma = sample('sigma', dists.InverseGamma(1.))
with plate('batch', num_obs_total, batch_size):
sample('x_first', dists.Normal(mu, sigma), obs=x_first)
sample('x_second', dists.Normal(mu, sigma), obs=x_second)
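# Hedged smoke test (added sketch, standard numpyro/jax APIs only): draw one
# synthetic batch from the prior predictive of the model above.
#   from jax import random
#   from numpyro.infer import Predictive
#   predictive = Predictive(model, num_samples=1)
#   draws = predictive(random.PRNGKey(0), num_obs_total=10)
#   # draws['x_first'] / draws['x_second'] hold the prior-predictive samples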
|
from Tkinter import *
import numpy as np
import json
import pfilter
from tkanvas import TKanvas
from collections import defaultdict
class Recogniser(object):
def __init__(self, pfilter, gestures):
self.screen_size = 500
c = TKanvas(draw_fn=self.draw, event_fn=self.event, quit_fn=self.quit, w=self.screen_size, h=self.screen_size)
self.mouse = [0,0] # position of the mouse
self.pfilter = pfilter
self.pfilter.init_filter()
self.toast_state = 50
self.toast = "Start!"
self.gestures = gestures
self.complete_threshold = 0.9 # point at which gesture is considered complete
self.entropy_threshold = 0.65 # point at which we will classify a gesture
def quit(self, src):
pass
def event(self, src, event_type, event):
if event_type=='mousemotion':
self.mouse = (event.x, event.y)
def draw(self, src):
src.clear()
self.toast_state -= 1
colors = ["red", "blue", "yellow", "green", "orange", "cyan", "magenta"]
letters = ["e", "s", "n", "d", "a", "r","",""]
src.circle(self.mouse[0], self.mouse[1], 3, fill="grey")
n_gestures = len(self.gestures)
self.pfilter.update(np.array(self.mouse))
particles = self.pfilter.original_particles
observations = self.pfilter.hypotheses
weights = self.pfilter.weights
classes = np.zeros(n_gestures,)
completed = np.zeros(n_gestures,)
for pos,particle,weight in zip(observations, particles, weights):
src.circle(pos[0], pos[1], 2, fill=colors[int(particle[0])])
if not np.isnan(weight):
ix = int(particle[0])
classes[int(particle[0])] += weight
gesture_length = len(self.gestures[ix])
                # count how much of each gesture has been completed
if particle[5]>self.complete_threshold * gesture_length:
completed[ix] += weight
entropy = np.sum([-p*np.log(p)/np.log(2) for p in classes])
# we have a decision (possibly!)
if entropy<self.entropy_threshold:
if np.max(completed)>0.5:
recognised = np.argmax(completed)
self.toast = letters[recognised]
self.toast_state = 100
self.pfilter.init_filter() # force filter to restart
x = 0
width = 50
for i in range(n_gestures):
h = classes[i] * 50.0
src.rectangle(x, src.h-h, x+width, src.h, fill=colors[i])
src.text(x+width/2, src.h-20, text=letters[i], fill="white", font=("Arial", 20))
x+=width
if self.toast_state>0:
src.text(src.w/2, src.h/2, text=self.toast, fill="gray", font=("Arial", 60))
def interactive_recogniser(dynamics, observation, prior, weight, gestures):
pf = pfilter.ParticleFilter(initial=prior,
observe_fn=observation,
n_particles=200,
dynamics_fn=dynamics,
weight_fn=weight,
resample_proportion=0.01)
recogniser = Recogniser(pf, gestures)
class GestureData(object):
def __init__(self, jsonfile):
with open(jsonfile, "r") as f:
gestures_json = json.load(f)
self.screen_size = max(gestures_json["width"], gestures_json["height"])
self.gestures = [np.array(path) for path in gestures_json["gestures"]]
self.n_gestures = len(self.gestures)
def get_template(self, i, t):
        if 0 <= i < self.n_gestures:
gesture = self.gestures[int(i)]
t = np.floor(np.clip(t,0,len(gesture)-1))
x, y = gesture[int(t)]
return [x,y]
else:
return [0,0]
def get_speed(self):
return 1
class Gesture(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.master = Tk()
self.master.call('wm', 'attributes', '.', '-topmost', True)
self.w = Canvas(self.master, width=300, height=300)
self.w.pack()
self.gesture = None
self.gestures = []
self.w.bind('<Motion>', self.motion)
self.w.bind('<Button-1>', self.click)
self.master.bind('<Escape>', self.exit)
self.ox, self.oy = None, None
def exit(self, event):
if self.gesture is not None:
            self.click(None)  # close the in-progress gesture; click() ignores its event arg
with open("gestures.txt", "w") as f:
f.write(self.json())
print "%d gestures recorded to gestures.txt" % (len(self.gestures))
self.master.destroy()
    def redraw(self):
        # Unused legacy helper; fixed to reference instance attributes.
        # It assumes a `self.line_id` canvas item created elsewhere (hypothetical).
        self.w.move(self.line_id, 0, 1)
        self.master.after(50, self.redraw)
def click(self, event):
if self.gesture is None:
self.gesture = []
else:
self.gestures.append(self.gesture)
self.gesture = None
self.w.delete("all")
    def motion(self, event):
        if self.gesture is not None:
            x, y = event.x, self.height - event.y
            self.gesture.append([x, y])
            if self.ox is not None:  # skip drawing until a previous point exists
                self.w.create_line(self.ox, self.oy, x, y)
            self.ox, self.oy = x, y
def json(self):
return json.dumps({"width":self.width, "height":self.height, "gestures":self.gestures})
def record_gestures():
gesture = Gesture(400,400)
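# Hedged usage note (added): record strokes with record_gestures() (click to
# start/stop a gesture, Escape to save gestures.txt), then wire the data into
# the recognizer; dynamics/observation/prior/weight are user-supplied pfilter
# callables, matching interactive_recogniser's signature above.
#   data = GestureData("gestures.txt")
#   interactive_recogniser(dynamics, observation, prior, weight, data.gestures)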
|
__all__ = ['TAPTestRunner', 'TAPTestResult', 'run']
from .taprunner import TAPTestRunner, TAPTestResult
from .cli import run
|
import os
import nltk
import pickle
# in case cant download wordnet, run the following code:
# import ssl
# try:
# _create_unverified_https_context = ssl._create_unverified_context
# except AttributeError:
# pass
# else:
# ssl._create_default_https_context = _create_unverified_https_context
# nltk.download('wordnet')
wordnet = nltk.corpus.wordnet
words = [w for w in list(set(w for w in wordnet.words())) if ('_' not in w)]
pos_map = {'nouns': ['n'], 'adjectives': ['a', 's'], 'verbs': ['v'], 'adverbs': ['r']}
all_synonyms = {'nouns': {}, 'verbs': {}, 'adjectives': {}, 'adverbs': {}}
for idx, word in enumerate(words):
if (idx % 5000) == 0:
print ('processing word ' + str(idx) + '...')
for pos in pos_map.keys():
synonyms = []
for synset in wordnet.synsets(word, pos=pos_map[pos]):
synonyms.extend([syn.lower() for syn in synset.lemma_names() if ('_' not in syn)])
synonyms = list(set(synonyms) - set([word]))
if len(synonyms):
all_synonyms[pos][word] = synonyms
# print (all_synonyms)
# 'so-so': ['acceptably', 'tolerably']. pay attention to '-'
synonyms_path = os.path.join(os.getcwd(), 'nlp_data/synonyms.pickle')
# os.getenv('HOME')
if not os.path.isfile(synonyms_path):
    with open(synonyms_path, 'wb') as file:
        pickle.dump(all_synonyms, file, pickle.HIGHEST_PROTOCOL)
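# Hedged usage example (added): reload the pickle and look up synonyms for a
# word under a part of speech; contents depend on the installed wordnet data.
#   with open(synonyms_path, 'rb') as f:
#       syns = pickle.load(f)
#   print(syns['nouns'].get('dog'))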
|
from subsystems.snowveyorsubsystem import SnowveyorSubsystem
import commands2
import wpilib
from networktables import NetworkTables
class dropOff(commands2.CommandBase):
def __init__(self, duration: float, speed: float, snowveyor: SnowveyorSubsystem) -> None:
super().__init__()
self.snowveyor = snowveyor
self.speed = speed
self.duration = duration
self.timer = wpilib.Timer()
def initialize(self) -> None:
self.timer.reset()
self.timer.start()
def execute(self) -> None:
self.snowveyor.tankDrive(self.speed, self.speed)
def end(self, interrupted: bool) -> None:
self.snowveyor.tankDrive(0, 0)
def isFinished(self) -> bool:
return self.timer.get() > self.duration |
# -*- coding: utf-8 -*-
# /usr/bin/python3
import logging
import os
import tensorflow as tf
from models.encoderdecoder import EncoderDecoder
from utils.data_load import get_batch
from utils.hparams import Hparams
from utils.utils import get_hypotheses, load_hparams
logging.basicConfig(level=logging.INFO)
logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.ckpt)
logging.info("# Prepare test batches")
test_batches, num_test_batches, num_test_samples = get_batch(hp.test, hp.test_batch_size, shuffle=False)
iter = tf.data.Iterator.from_structure(test_batches.output_types, test_batches.output_shapes)
id, xs, ys = iter.get_next()
test_init_op = iter.make_initializer(test_batches)
logging.info("# Load model")
m = EncoderDecoder(hp)
y_hat, _ = m.eval(id, xs, ys)
logging.info("# Session")
with tf.Session() as sess:
ckpt_ = tf.train.latest_checkpoint(hp.ckpt)
    ckpt = hp.ckpt if ckpt_ is None else ckpt_  # if no latest checkpoint is found, hp.ckpt is itself a checkpoint file; otherwise it is a directory
saver = tf.train.Saver()
saver.restore(sess, ckpt)
sess.run(test_init_op)
logging.info("# get hypotheses")
hypotheses = get_hypotheses(num_test_batches, num_test_samples, sess, y_hat, m.vec2word)
logging.info("# write results")
model_output = ckpt.split("/")[-1]
if not os.path.exists(hp.testdir):
os.makedirs(hp.testdir)
captions = os.path.join(hp.testdir, model_output)
with open(captions, 'w') as fout:
fout.write("\n".join(hypotheses))
|
import sys, os
def eprint(*args, **kwargs):
"""
Print to stderr - see https://stackoverflow.com/a/14981125/8545455
"""
print(*args, file=sys.stderr, **kwargs)
def stripEnd(h, s):
if h.endswith(s):
h = h[:-len(s)]
return h
def hostCleanup(host):
"""
Condense URL into a standard form
:param host: str
:return: str
"""
if not host.startswith('https://'):
host = 'https://' + host # Add schema
host = stripEnd(host, '/')
host = stripEnd(host, '/api/v1')
host = stripEnd(host, '/')
return host
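# Examples (added; follow directly from the logic above):
#   hostCleanup('myhost.example.com/api/v1/')   -> 'https://myhost.example.com'
#   hostCleanup('https://myhost.example.com/')  -> 'https://myhost.example.com'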
def getenv_check(e):
    res = os.getenv(e)
    if res is None:
        print(e, 'environment variable not set - stopping.')
        sys.exit(1)
    return res
def getenv(*args, **kwargs):
return os.getenv(*args, **kwargs)
|
from django.shortcuts import render
from django.views.generic import ListView
from acheve_mgt.models import Student, MyClass, ScoreShip, Course
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
# Create your views here.
@login_required
def person(request, pk):
user = request.user
myclass = MyClass.objects.all()[0]
student = myclass.student.get(pk=pk)
scoreship = ScoreShip.objects.get(student=student, course=student.course.all()[0])
term = scoreship.get_term_display()
return render(request, 'acheve_mgt/person.html', context={
'user': user,
'myclass': myclass,
'student': student,
'term': term,
})
@login_required
def person_data(request, pk):
myclass = MyClass.objects.all()[0]
students = myclass.student.all()
data = []
s = students.get(pk=pk)
count = s.course.count()
id = 1
for c in s.course.all():
d = {}
d['id'] = id
id = id + 1
d['course_name'] = c.name
score = ScoreShip.objects.get(student=s, course=c)
d['exam_score'] = score.exam_score
d['daily_score'] = score.daily_score
d['sum_score'] = score.exam_score*0.7 + score.daily_score*0.3
data.append(d)
result = {
"code": 0,
"msg":"",
"count": count,
"data": data,
}
return JsonResponse(result)
@login_required
def single_course(request, pk):
user = request.user
myclass = MyClass.objects.all()[0]
student = myclass.student.all()[0]
course = student.course.get(pk=pk)
scoreship = ScoreShip.objects.get(student=student, course=course)
term = scoreship.get_term_display()
students_of_course = course.score.all()
student_score_data = []
count = 1
for s in students_of_course:
d = {}
d['order'] = count
count += 1
d['name'] = s.name
d['number'] = s.number
scoreship_tmp = ScoreShip.objects.get(student=s, course=course)
d['exam_score'] = scoreship_tmp.exam_score
d['daily_score'] = scoreship_tmp.daily_score
d['sum_score'] = scoreship_tmp.exam_score*0.7 + scoreship_tmp.daily_score*0.3
d['id'] = s.id
student_score_data.append(d)
return render(request, 'acheve_mgt/single_course.html', context={
'user': user,
'myclass': myclass,
'course': course,
'student_score_data': student_score_data,
'term': term,
})
@login_required
def view_course(request):
user = request.user
myclass = MyClass.objects.all()[0]
student = myclass.student.all()[0]
courses = student.course.all()
course_list = []
count = 1
for course in courses:
d = {}
d['order'] = count
count += 1
d['name'] = course.name
d['teacher_name'] = course.teacher_name
d['id'] = course.id
course_list.append(d)
scoreship = ScoreShip.objects.get(student=student, course=courses[0])
term = scoreship.get_term_display()
return render(request, 'acheve_mgt/view_course.html', context={
'user': user,
'myclass': myclass,
'student': student,
'course_list': course_list,
'term': term,
})
@login_required
def score_together(request):
user = request.user
myclass = MyClass.objects.all()[0]
students = myclass.student.all()
courses = students[0].course.all()
scoreship = ScoreShip.objects.get(student=students[0], course=courses[0])
term = scoreship.get_term_display()
score_together_data = []
count = 1
for s in students:
d = {}
d['order'] = count
count += 1
d['number'] = s.number
d['name'] = s.name
four_course_score = 0
courses_score = []
courses_name = []
for c in courses:
tmp = ScoreShip.objects.get(student=s, course=c)
each_course_score = tmp.exam_score*0.7 + tmp.daily_score*0.3
four_course_score += each_course_score
courses_score.append(each_course_score)
courses_name.append(c.name)
d['courses_score'] = courses_score
d['courses_name'] = courses_name
        d['avg_score'] = four_course_score / courses.count()  # average over all courses (was hard-coded to 4)
d['id'] = s.id
score_together_data.append(d)
return render(request, 'acheve_mgt/score_together.html', context={
'user': user,
'myclass': myclass,
'score_together_data': score_together_data,
'row_score_together': score_together_data[0],
'term': term,
})
@login_required
def score_rating(request):
user = request.user
myclass = MyClass.objects.all()[0]
students = myclass.student.all()
courses = students[0].course.all()
scoreship = ScoreShip.objects.get(student=students[0], course=courses[0])
term = scoreship.get_term_display()
rating_data = []
order = 1
for c in courses:
d = {}
d['order'] = order
order += 1
d['course_name'] = c.name
ra, rb, rc, rd, re = 0, 0, 0, 0, 0
for s in students:
scoreship = ScoreShip.objects.get(student=s, course=c)
score = scoreship.daily_score*0.3 + scoreship.exam_score*0.7
if score >=90 and score <= 100:
ra += 1
elif score>=80 and score <90:
rb += 1
elif score>=70 and score<80:
rc += 1
elif score>=60 and score<70:
rd += 1
else:
re += 1
d['a'] = ra
d['b'] = rb
d['c'] = rc
d['d'] = rd
d['e'] = re
rating_data.append(d)
all_course_rating = {}
ra, rb, rc, rd, re = 0, 0, 0, 0, 0
for s in students:
sum_score = 0
for c in courses:
scoreship = ScoreShip.objects.get(student=s, course=c)
score = scoreship.daily_score * 0.3 + scoreship.exam_score * 0.7
sum_score += score
if score < 60:
re += 1
break
else:
avg_score = sum_score/s.course.count()
if avg_score >=90 and avg_score <= 100:
ra += 1
elif avg_score>=80 and avg_score <90:
rb += 1
elif avg_score>=70 and avg_score<80:
rc += 1
elif avg_score>=60 and avg_score<70:
rd += 1
else:
re += 1
all_course_rating['a'] = ra
all_course_rating['b'] = rb
all_course_rating['c'] = rc
all_course_rating['d'] = rd
all_course_rating['e'] = re
all_course_rating['order'] = order
    all_course_rating['four_courses'] = '四门课程总评'  # UI label: "overall rating of the four courses"
return render(request, 'acheve_mgt/score_rating.html', context={
'user': user,
'myclass': myclass,
'rating_data': rating_data,
'all_course_rating': all_course_rating,
'term': term,
})
|
from . import utils # noqa
from .dfinity_gitlab_config import DfinityGitLabConfig # noqa
from .gitrepo import GitRepo # noqa
|
from django import forms
from .models import UserInfo
class UserForm(forms.ModelForm):
Discription = forms.CharField( widget=forms.Textarea (
attrs={
'placeholder':'Description',
'class': 'form-control',
}
))
UserBookName = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Book Name',
'class': 'form-control',
}
))
UserBookPrice = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Book Price',
'class': 'form-control'
}
))
UserName = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Name',
'class': 'form-control'
}
))
UserPhoneNumber = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Number(optional)',
'class': 'form-control'
}
))
UserEmail = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Email address',
'class': 'form-control'
}
))
UserLocation = forms.CharField( widget=forms.TextInput (
attrs={
'placeholder':'Your City Name',
'class': 'form-control'
}
))
class Meta:
model = UserInfo
fields = (
"UserBookName",
"Discription",
"UserName",
"UserLocation",
"UserEmail",
"UserPhoneNumber",
"UserBookPrice",
"UserBookLogo",
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 17:01:45 2020
@author: natnem
"""
class Graph(object):
def __init__(self,adj):
self.adj = adj
def itervertices(self):
return self.adj.keys()
def neighbors(self,v):
return self.adj[v]
class dfsResult(object):
"""Object Oriented way"""
def __init__(self):
self.parent = {}
self.order = []
def dfs(g):
results = dfsResult()
for vertex in g.itervertices():
if vertex not in results.parent:
dfsVisit(g,vertex,results)
return results
def dfsVisit(g, v, results, parent=None):
results.parent[v] = parent
if v in g.adj.keys():
for n in g.neighbors(v):
if n not in results.parent:
dfsVisit(g,n,results,v)
results.order.append(v)
def TopologicalSort(g):
    topsort = dfs(g).order
    topsort.reverse()
    return topsort
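# Note (added): dfs() appends each vertex only after all of its descendants
# (post-order), so reversing the finish order yields a valid topological
# order for a DAG; the traversal runs in O(V + E).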
#G = {"shorts":["pants","shoes"],"pants":["belt","shoes"],
# "belt":["jacket"],"shirt":["tie","belt"],
# "socks":["shoes"],"watch":[],"tie":["jacket"]}
#g = Graph(G)
#print(TopologicalSort(g)) |
import os
import glob
import requests
from bs4 import BeautifulSoup
from tinydb import TinyDB
with open("./db.json", "w") as fp:
pass
DB = TinyDB("./db.json")
HOST = "https://batotoo.com"
SESSION = requests.Session()
def browse_pages(skip_saved=False):
if not os.path.exists("./browse"):
os.mkdir("browse")
PAGE = 1
MAX_PAGE = 1431
IDENTIFIER = "Browse - Bato.To"
while PAGE < MAX_PAGE:
if skip_saved:
if PAGE < len(glob.glob("./browse/*.html")):
PAGE += 1
continue
print(f"BROWSE: {PAGE} of {MAX_PAGE}")
response = SESSION.get(f"{HOST}/browse?page={PAGE}")
soup = BeautifulSoup(response.text, "html.parser")
if soup.title.get_text() == IDENTIFIER:
file_path = "./browse/{PAGE}.html"
file = open(file_path, "w")
file.write(response.text)
file.close()
browse_series(file_path)
PAGE += 1
else:
print(f"BOT DETECTED: {soup.title.get_text()}")
break
def browse_series(file_path):
if not os.path.exists("./series"):
os.mkdir("series")
html = open(file_path, "r").read()
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("a", class_="item-title")
for title in titles:
filename = " ".join(title
.findAll(text=True, recursive=False)[0]
.split('/'))
response = SESSION.get(f"{HOST}{title['href']}")
soup = BeautifulSoup(response.text, "html.parser")
if title.get_text() in soup.title.get_text():
print(f"TITLE: {filename}")
series_filepath = f"./series/{filename}.html"
series_file = open(series_filepath, "w")
series_file.write(response.text)
series_file.close()
browse_meta(series_filepath)
break
else:
print(f"BOT DETECTED: {soup.title.get_text()}")
break
def browse_meta(file_path):
html = open(file_path, "r").read()
soup = BeautifulSoup(html, "html.parser")
title = soup.title.get_text()
print(title)
if __name__ == '__main__':
browse_pages()
|
# -*- coding: utf-8 -*-
"""
@author: Zheng Fang
"""
class Fetch:
k__ = None
|
# -*- coding: utf-8 -*-
"""
Created on 2020.05.19
@author: Jiahua Rao, Weiming Li, Hui Yang, Jiancong Xie
Code based on:
Shang et al "Edge Attention-based Multi-Relational Graph Convolutional Networks" -> https://github.com/Luckick/EAGCN
Coley et al "Convolutional Embedding of Attributed Molecular Graphs for Physical Property Prediction" -> https://github.com/connorcoley/conv_qsar_fast
Maziarka, Łukasz, et al. "Molecule Attention Transformer." -> https://github.com/ardigen/MAT
"""
import os
import pickle
import numpy as np
import pandas as pd
import math
from pathlib import Path
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import MolFromSmiles
from sklearn.metrics import pairwise_distances
class CoMPTEmbeddings():
def __init__(self, data_df, model_name, features_path, dataset_config, use_data_saving=True):
self.model_name = model_name
self.whole_data_df = data_df
self.features_path = features_path
self.dataset_config = dataset_config
self.use_data_saving = use_data_saving
self.smiles_col = self.dataset_config["smiles_column"]
self.target_cols = self.dataset_config["target_columns"]
@property
def dim_features(self):
return self._dim_features
@property
def max_num_nodes(self):
return None
def process(self):
"""
Load and featurize data stored in a CSV file.
"""
features_path = self.features_path
if self.use_data_saving and os.path.exists(features_path):
x_all, y_all = pickle.load(open(features_path, "rb"))
self._dim_features = x_all[0][0].shape[1]
else:
data_x = self.whole_data_df.loc[:,self.smiles_col].values
data_y = self.whole_data_df.loc[:,self.target_cols].values
x_all, y_all = self.load_data_from_smiles(data_x, data_y, atom_hidden=115, bond_hidden=13) # revised!
self._dim_features = x_all[0][0].shape[1]
if self.use_data_saving and not os.path.exists(features_path):
pickle.dump((x_all, y_all), open(features_path, "wb"))
def load_data_from_smiles(self, x_smiles, labels, atom_hidden, bond_hidden):
"""
Load and featurize data from lists of SMILES strings and labels.
Args:
- x_smiles (list[str]): A list of SMILES strings.
- labels (list[float]): A list of the corresponding labels.
Returns:
A tuple (X, y) in which X is a list of graph descriptors (node features, adjacency matrices, distance matrices),
and y is a list of the corresponding labels.
"""
x_all, y_all = [], []
for smiles, label in zip(x_smiles, labels):
try:
mol = MolFromSmiles(smiles)
                # Set stereochemistry (the 3D-based assignments need conformers;
                # a ValueError here is caught and the molecule is skipped below)
Chem.rdmolops.AssignAtomChiralTagsFromStructure(mol)
Chem.rdmolops.AssignStereochemistryFrom3D(mol)
AllChem.ComputeGasteigerCharges(mol)
afm, efm, adj = self.featurize_mol(mol, atom_hidden, bond_hidden)
x_all.append([afm, efm, adj])
y_all.append(label)
except ValueError as e:
print('the SMILES ({}) can not be converted to a graph.\nREASON: {}'.format(smiles, e))
return x_all, np.array(y_all)
def featurize_mol(self, mol, atom_hidden, bond_hidden):
"""Featurize molecule.
Args:
- mol (rdchem.Mol): An RDKit Mol object.
- add_dummy_node (bool): If True, a dummy node will be added to the molecular graph.
- one_hot_formal_charge (bool): If True, formal charges on atoms are one-hot encoded.
Returns:
A tuple of molecular graph descriptors (node features, adjacency matrix, distance matrix).
"""
node_features = np.array([self.get_atom_features(atom, atom_hidden) for atom in mol.GetAtoms()])
# Get Bond features
bond_features = np.zeros((mol.GetNumAtoms(), mol.GetNumAtoms(), bond_hidden))
for bond in mol.GetBonds():
begin_atom_idx = bond.GetBeginAtom().GetIdx()
end_atom_idx = bond.GetEndAtom().GetIdx()
bond_features[begin_atom_idx, end_atom_idx, :] = bond_features[end_atom_idx, begin_atom_idx, :] = self.get_bond_features(bond, bond_hidden)
# Get Adjacency matrix without self loop
adjacency_matrix = Chem.rdmolops.GetDistanceMatrix(mol).astype(np.float32)
return node_features, bond_features, adjacency_matrix
def get_atom_features(self, atom, atom_hidden):
# 100+1=101 dimensions
v1 = self.one_hot_vector(atom.GetAtomicNum(), [i for i in range(1, 101)])
# 5+1=6 dimensions
v2 = self.one_hot_vector(
atom.GetHybridization(), [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2]
)
# 8 dimensions
v4 = [
atom.GetTotalNumHs(includeNeighbors=True) / 8,
atom.GetDegree() / 4,
atom.GetFormalCharge() / 8,
atom.GetTotalValence() / 8,
0 if math.isnan(atom.GetDoubleProp('_GasteigerCharge')) or math.isinf(
atom.GetDoubleProp('_GasteigerCharge')) else atom.GetDoubleProp('_GasteigerCharge'),
0 if math.isnan(atom.GetDoubleProp('_GasteigerHCharge')) or math.isinf(
atom.GetDoubleProp('_GasteigerHCharge')) else atom.GetDoubleProp('_GasteigerHCharge'),
int(atom.GetIsAromatic()),
int(atom.IsInRing())
]
# index for position encoding
v5 = [
atom.GetIdx() + 1 # start from 1
]
attributes = np.concatenate([v1, v2, v4, v5], axis=0)
        # total of 101 + 6 + 8 + 1 = 116 = atom_hidden + 1 dimensions
assert len(attributes) == atom_hidden + 1
return attributes
def get_bond_features(self, bond, bond_hidden):
# 4 dimensions
v1 = self.one_hot_vector(
bond.GetBondType(), [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC
], add_unknown=False)
# 6 dimensions
v2 = self.one_hot_vector(
bond.GetStereo(), [
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOTRANS,
Chem.rdchem.BondStereo.STEREOZ], add_unknown=False)
# 3 dimensions
v4 = [
int(bond.GetIsConjugated()),
int(bond.GetIsAromatic()),
int(bond.IsInRing())
]
        # total of 4 + 6 + 3 = 13 = bond_hidden dimensions
attributes = np.concatenate([v1, v2, v4])
assert len(attributes) == bond_hidden
return attributes
def one_hot_vector(self, val, lst, add_unknown=True):
"""Converts a value to a one-hot vector based on options in lst"""
if add_unknown:
vec = np.zeros(len(lst) + 1)
else:
vec = np.zeros(len(lst))
        # caveat: with add_unknown=False, an unseen val silently marks the last slot
        vec[lst.index(val) if val in lst else -1] = 1
return vec
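# Hedged usage note (added): one_hot_vector with add_unknown=True and a list
# of length 3 returns e.g. [0, 1, 0, 0] for a known value and [0, 0, 0, 1]
# for an unseen one (the trailing slot is the "unknown" class).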
|
from enum import Enum
class ScPythonEventType(Enum):
AddInputEdge = 0
AddOutputEdge = 1
ContentChanged = 2
EraseElement = 3
RemoveInputEdge = 4
RemoveOutputEdge = 5 |
MOCK = False
def get_saml_authenticator(*args, **kwargs):
if MOCK:
from . import mock
return mock.SamlAuthenticator(*args, **kwargs)
else:
from onelogin.saml2.auth import OneLogin_Saml2_Auth
return OneLogin_Saml2_Auth(*args, **kwargs)
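# Hedged usage note (added): tests can flip MOCK before constructing the
# authenticator so the OneLogin package is never imported; the argument
# names below are hypothetical.
#   saml.MOCK = True
#   auth = get_saml_authenticator(request_data, saml_settings)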
|
from unittest.mock import Mock, patch
from weakref import ref
import pytest
try:
from django.db import models
# from parasolr.django.indexing import ModelIndexable
from parasolr.django.signals import IndexableSignalHandler
from parasolr.django.tests import test_models
except ImportError:
IndexableSignalHandler = None
from parasolr.tests.utils import skipif_django, skipif_no_django
def setup_module():
# connect indexing signal handlers for this test module only
if IndexableSignalHandler:
IndexableSignalHandler.connect()
def teardown_module():
# disconnect indexing signal handlers
if IndexableSignalHandler:
IndexableSignalHandler.disconnect()
@skipif_django
def test_no_django_indexable():
# should not be defined when django is not installed
with pytest.raises(ImportError):
from parasolr.django.signals import IndexableSignalHandler
@skipif_no_django
class TestIndexableSignalHandler:
def test_connect(self):
# check that signal handlers are connected as expected
# - model save and delete
post_save_handlers = [item[1] for item in
models.signals.post_save.receivers]
assert ref(IndexableSignalHandler.handle_save) in post_save_handlers
post_del_handlers = [item[1] for item in
models.signals.post_delete.receivers]
assert ref(IndexableSignalHandler.handle_delete) in post_del_handlers
# many to many
m2m_handlers = [item[1] for item in
models.signals.m2m_changed.receivers]
assert ref(IndexableSignalHandler.handle_relation_change) \
in m2m_handlers
# testing related handlers based on test models
post_save_handlers = [item[1] for item in
models.signals.post_save.receivers]
assert ref(test_models.signal_method) in post_save_handlers
pre_del_handlers = [item[1] for item in
models.signals.pre_delete.receivers]
assert ref(test_models.signal_method) in pre_del_handlers
def test_handle_save(self):
instance = test_models.IndexItem()
with patch.object(instance, 'index') as mockindex:
# call directly
IndexableSignalHandler.handle_save(Mock(), instance)
mockindex.assert_any_call()
# call via signal
mockindex.reset_mock()
models.signals.post_save.send(test_models.IndexItem,
instance=instance)
mockindex.assert_any_call()
# non-indexable object should be ignored
nonindexable = Mock()
IndexableSignalHandler.handle_save(Mock(), nonindexable)
nonindexable.index.assert_not_called()
def test_handle_delete(self):
with patch.object(test_models.IndexItem, 'remove_from_index') as \
mock_rmindex:
instance = test_models.IndexItem()
IndexableSignalHandler.handle_delete(Mock(), instance)
mock_rmindex.assert_called_with()
# non-indexable object should be ignored
nonindexable = Mock()
IndexableSignalHandler.handle_delete(Mock(), nonindexable)
nonindexable.remove_from_index.assert_not_called()
@pytest.mark.django_db
def test_handle_relation_change(self):
instance = test_models.IndexItem()
with patch.object(instance, 'index') as mockindex:
# call directly - supported actions
for action in ['post_add', 'post_remove', 'post_clear']:
mockindex.reset_mock()
IndexableSignalHandler.handle_relation_change(
test_models.IndexItem, instance, action)
mockindex.assert_any_call()
# if action is not one we care about, should be ignored
mockindex.reset_mock()
IndexableSignalHandler.handle_relation_change(
test_models.IndexItem, instance, 'pre_remove')
mockindex.assert_not_called()
# non-indexable object should be ignored
nonindexable = Mock()
IndexableSignalHandler.handle_relation_change(
Mock(), nonindexable, 'post_add')
nonindexable.index.assert_not_called()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""SExtractor wrapper (limited functionality, but simple to use)
SExtractor: http://www.astromatic.net/software/sextractor
Other SExtractor Python wrappers (not BSD licensed!):
* http://chimera.googlecode.com/svn/trunk/src/chimera/util/sextractor.py
* https://pypi.python.org/pypi/pysex/
* http://gitorious.org/pysextractor/pysextractor/trees/master/pysextractor
"""
from __future__ import print_function, division
import logging
import subprocess
import tempfile
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from astropy.table import Table
__all__ = ['sex']
def sex(image,
image2='',
catalog_name=None,
config_name=None,
parameters_name=None,
checkimage_name=None,
detect_thresh=5,
detect_minarea=1,
deblend_mincont=1,
):
"""Run SExtractor to detect sources in an image.
Parameters
----------
image : str
Detection image filename
image2 : str
Measurement image filename (same as image if '')
catalog_name : str
Output catalog filename
config_name : str
Config filename
parameters_name : str
Name of the file describing the catalog output parameters
checkimage_name : str
Filename for the check-image (TODO: none for '')
detect_thresh : float
Detection threshold
detect_minarea : int
Minimum number of pixels above threshold
deblend_mincont : float in range 0 to 1
Minimum contrast parameter for deblending.
* 0 = each peak is a single source
* 1 = no deblending, one source per segment
Returns
-------
catalog : `~astropy.table.Table`
Catalog of detected objects
checkimage : `~astropy.io.fits.PrimaryHDU`
Segmented image
Examples
--------
TODO: look what other Python sextractor wrappers do:
TODO: where to run the command and put the output files?
TODO: return filenames or dict with results?
"""
if catalog_name is None:
catalog_name = tempfile.mktemp('.fits')
if checkimage_name is None:
checkimage_name = tempfile.mktemp('.fits')
if config_name is None:
config_name = get_pkg_data_filename('sex.cfg')
if parameters_name is None:
parameters_name = get_pkg_data_filename('sex.param')
logging.info('Running SExtractor')
logging.info('INPUT image: {0}'.format(image))
logging.info('INPUT image2: {0}'.format(image2))
logging.info('INPUT config_name: {0}'.format(config_name))
logging.info('INPUT parameters_name: {0}'.format(parameters_name))
logging.info('OUTPUT catalog_name: {0}'.format(catalog_name))
logging.info('OUTPUT checkimage_name: {0}'.format(checkimage_name))
cmd = ['sex', image, image2,
'-c', config_name,
'-catalog_name', catalog_name,
'-parameters_name', parameters_name,
'-checkimage_name', checkimage_name,
'-detect_thresh', str(detect_thresh),
'-detect_minarea', str(detect_minarea),
'-deblend_mincont', str(deblend_mincont)
]
logging.info('Executing the following command now:\n\n{0}\n'.format(' '.join(cmd)))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
# Read output files
catalog = Table.read(catalog_name)
checkimage = fits.open(checkimage_name)[0]
logging.info('Number of objects detected: {0}'.format(len(catalog)))
return catalog, checkimage
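# Usage sketch, assuming SExtractor is on the PATH and 'image.fits' exists
# (illustrative filename, not shipped with this module):
#     catalog, checkimage = sex('image.fits', detect_thresh=10)
#     print(len(catalog), catalog.colnames)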
|
import numpy as np
import pygame
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
# Tornio, Matti, and Tapani Raiko. "Variational Bayesian approach for
# nonlinear identification and control." Proc. of the IFAC Workshop on
# Nonlinear Model Predictive Control for Fast Systems, NMPC FS06. 2006.
class CartpoleSwingupEnv(Box2DEnv, Serializable):
@autoargs.inherit(Box2DEnv.__init__)
def __init__(self, *args, **kwargs):
super(CartpoleSwingupEnv, self).__init__(
self.model_path("cartpole.xml.mako"),
*args, **kwargs
)
self.max_cart_pos = 3
self.max_reward_cart_pos = 3
self.cart = find_body(self.world, "cart")
self.pole = find_body(self.world, "pole")
Serializable.__init__(self, *args, **kwargs)
@overrides
def reset(self):
self._set_state(self.initial_state)
self._invalidate_state_caches()
bounds = np.array([
[-1, -2, np.pi - 1, -3],
[1, 2, np.pi + 1, 3],
])
low, high = bounds
xpos, xvel, apos, avel = np.random.uniform(low, high)
self.cart.position = (xpos, self.cart.position[1])
self.cart.linearVelocity = (xvel, self.cart.linearVelocity[1])
self.pole.angle = apos
self.pole.angularVelocity = avel
return self.get_current_obs()
@overrides
def compute_reward(self, action):
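        # rllab's Box2DEnv drives compute_reward as a generator: the first
        # bare yield pauses so the simulation can advance one step, and the
        # second yield returns the reward computed from the post-step state.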
yield
if self.is_current_done():
yield -100
else:
if abs(self.cart.position[0]) > self.max_reward_cart_pos:
yield -1
else:
yield np.cos(self.pole.angle)
@overrides
def is_current_done(self):
return abs(self.cart.position[0]) > self.max_cart_pos
@overrides
def action_from_keys(self, keys):
if keys[pygame.K_LEFT]:
return np.asarray([-10])
elif keys[pygame.K_RIGHT]:
return np.asarray([+10])
else:
return np.asarray([0])
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from statsmodels.graphics.gofplots import ProbPlot
class DiagnosticPlots:
"""Reproduces the 4 base plots of an OLS model in R.
Original code from here: https://bit.ly/3a4YGH1, with modifications
by gt2447.
"""
def __init__(self, X, y):
self.X = X
self.y = y
self.fig, self.ax = plt.subplots(2, 2)
self.set_properties()
self.model()
self.resid_vs_fitted()
self.qq_plot()
self.heteroscedasticity()
self.resid_vs_leverage()
def model(self):
"""Creates a linear regression model"""
self.model = sm.OLS(self.y, sm.add_constant(self.X)).fit()
# create df from X, y for easier plot handling
self.df = pd.concat([self.X, self.y], axis=1)
        # Getting miscellaneous properties
        self.model_fitted_y = self.model.fittedvalues
self.model_residuals = self.model.resid
self.model_norm_residuals = self.model.get_influence().resid_studentized_internal
self.model_norm_residuals_abs_sqrt = np.sqrt(
np.abs(self.model_norm_residuals))
self.model_abs_resid = np.abs(self.model_residuals)
self.model_leverage = self.model.get_influence().hat_matrix_diag
self.model_cooks = self.model.get_influence().cooks_distance[0]
print(self.model.summary())
def graph(self, formula, x_range, label=None):
"""Helper function for plotting cook's distance lines"""
x = x_range
y = formula(x)
plt.plot(x, y, label=label, lw=1, ls='--', color='red')
def resid_vs_fitted(self):
        sns.residplot(self.model_fitted_y, self.df.columns[-1], data=self.df,
ax=self.ax[0, 0],
lowess=True,
scatter_kws={'alpha': 0.5},
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
# Annotations
abs_resid = self.model_abs_resid.sort_values(ascending=False)
abs_resid_top_3 = abs_resid[:3]
for i in abs_resid_top_3.index:
self.ax[0, 0].annotate(i,
                                   xy=(self.model_fitted_y[i],
self.model_residuals[i]))
def qq_plot(self):
QQ = ProbPlot(self.model_norm_residuals)
QQ.qqplot(line='45',
alpha=0.5,
color='#4C72B0',
lw=1,
ax=self.ax[0, 1])
# Annotations
abs_norm_resid = np.flip(np.argsort(
np.abs(self.model_norm_residuals)), 0)
self.abs_norm_resid_top_3 = abs_norm_resid[:3]
for r, i in enumerate(self.abs_norm_resid_top_3):
self.ax[0, 1].annotate(i,
xy=(np.flip(QQ.theoretical_quantiles,
0)[r],
self.model_norm_residuals[i]))
def heteroscedasticity(self):
        self.ax[1, 0].scatter(self.model_fitted_y,
self.model_norm_residuals_abs_sqrt,
alpha=0.5)
        sns.regplot(self.model_fitted_y,
self.model_norm_residuals_abs_sqrt,
ax=self.ax[1, 0],
scatter=False,
ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
# Annotations
abs_sq_norm_resid = np.flip(
np.argsort(self.model_norm_residuals_abs_sqrt), 0)
abs_sq_norm_resid_top_3 = abs_sq_norm_resid[:3]
        for i in abs_sq_norm_resid_top_3:
            self.ax[1, 0].annotate(i,
                                   xy=(self.model_fitted_y[i],
self.model_norm_residuals_abs_sqrt[i]))
def resid_vs_leverage(self):
self.ax[1, 1].scatter(self.model_leverage,
self.model_norm_residuals, alpha=0.5)
sns.regplot(self.model_leverage, self.model_norm_residuals,
ax=self.ax[1, 1],
scatter=False,
ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
# Annotations
leverage_top_3 = np.flip(np.argsort(self.model_cooks), 0)[:3]
for i in leverage_top_3:
self.ax[1, 1].annotate(i,
xy=(self.model_leverage[i],
self.model_norm_residuals[i]))
p = len(self.model.params) # number of model parameters
self.graph(lambda x: np.sqrt((0.5 * p * (1 - x)) / x),
np.linspace(0.001, max(self.model_leverage), 50),
'Cook\'s distance') # 0.5 line
self.graph(lambda x: np.sqrt((1 * p * (1 - x)) / x),
np.linspace(0.001, max(self.model_leverage), 50)) # 1 line
self.ax[1, 1].legend(loc='upper right')
def set_properties(self):
self.ax[0, 0].set_title('Residuals vs Fitted')
self.ax[0, 0].set_xlabel('Fitted values')
self.ax[0, 0].set_ylabel('Residuals')
self.ax[0, 0].xaxis.tick_top()
self.ax[0, 0].xaxis.set_label_position('top')
self.ax[0, 1].set_title('Normal Q-Q')
self.ax[0, 1].set_xlabel('Theoretical Quantiles')
self.ax[0, 1].set_ylabel('Standardized Residuals')
self.ax[0, 1].yaxis.tick_right()
self.ax[0, 1].xaxis.tick_top()
self.ax[0, 1].xaxis.set_label_position('top')
self.ax[0, 1].yaxis.set_label_position('right')
        self.ax[1, 0].set_title('Scale-Location')
        self.ax[1, 0].set_xlabel('Fitted values')
        self.ax[1, 0].set_ylabel(r'$\sqrt{|Standardized Residuals|}$')
self.ax[1, 1].yaxis.tick_right()
self.ax[1, 1].set_xlabel('Leverage')
self.ax[1, 1].set_ylabel('Std. Residuals')
self.ax[1, 1].yaxis.set_label_position('right')
self.ax[1, 1].set_xlim(0, max(self.model_leverage)+0.01)
self.ax[1, 1].set_ylim(-3, 5)
self.ax[1, 1].set_title('Residuals vs Leverage') |
import copy
import random
import re
from datetime import datetime
from functools import cached_property
from unittest.mock import MagicMock, Mock
import pytest
from bot import Bot
from command_handlers import HelpHandler
CHAT_ID = -593555199
UPDATE = {
"update_id": 360316438,
"message": {
"message_id": 125,
"from": {
"id": 427258479,
"is_bot": False,
"first_name": "Иван",
"username": "iivanov",
},
"chat": {
"id": CHAT_ID,
"title": "Bot Test (dev)",
"type": "group",
"all_members_are_administrators": True,
},
"date": 1612207828,
"text": "Hello World!",
},
}
def make_message(text):
body = copy.deepcopy(UPDATE)
body["message"]["text"] = text
re_to_type = {
r"/\w*\b": "bot_command",
r"#\w*\b": "hashtag",
}
entities = []
for regexp, entity_type in re_to_type.items():
for match in re.finditer(regexp, text):
start, end = match.span()
entities.append(
{
"offset": start,
"length": end - start,
"type": entity_type,
}
)
if entities:
body["message"]["entities"] = entities
return body
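# Illustrative example: make_message("/gc #5m") tags "/gc" as a bot_command
# entity (offset 0, length 3) and "#5m" as a hashtag (offset 4, length 3).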
def new_message(bot, text):
message = make_message(text)
bot.client.get_updates = Mock(side_effect=[[message], KeyboardInterrupt])
bot.start()
return message
def get_response(client):
return client.reply.call_args.args
@pytest.fixture
def client():
return Mock()
@pytest.fixture
def collector():
mock = MagicMock()
mock.status = Mock(return_value={})
mock.cancel = Mock(return_value=5)
return mock
class FakeMessageRecord:
@cached_property
def message_id(self):
return random.randint(1000, 10000)
@cached_property
def delete_after(self):
return int(datetime.utcnow().timestamp()) + random.randint(100, 1000)
class FakeQuery:
def __init__(self, records=None) -> None:
self.records = records if records is not None else []
def offset(self, *args):
return self
def limit(self, *args):
return self
def count(self):
return len(self.records)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.records):
record = self.records[self.n]
self.n += 1
return record
else:
raise StopIteration
@classmethod
def populate(cls, n):
return cls([FakeMessageRecord() for _ in range(n)])
class TestBot:
@pytest.fixture
def bot(self, client, collector):
return Bot(client, collector)
def test_message(self, collector, bot):
new_message(bot, "Hi there!")
assert collector.add_message.called
def test_ping(self, bot):
new_message(bot, "/ping")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == "pong"
def test_gc(self, collector, bot):
new_message(bot, "/gc")
response = f"Please choose an expiration time for new messages"
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
@pytest.mark.parametrize(
"message",
[
"/gc 15",
"/gc 15s",
"/gc 15 seconds",
"/gc 0.25m",
],
)
def test_gc_params(self, collector, bot, message):
new_message(bot, message)
        response = (
            "Garbage collector enabled - automatically removing all new messages "
            "after 15 seconds."
        )
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
assert collector.enable.call_args.args == (CHAT_ID, 15)
@pytest.mark.parametrize(
"message",
[
"/gc abcd",
"/gc -15",
"/gc 2.34",
"/gc 345123",
"/gc qwefno oenf wqoiefn wqefoin",
],
)
def test_gc_param_validation(self, bot, message):
new_message(bot, message)
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert "valid integer" in text
def test_gc_off(self, collector, bot):
new_message(bot, "/gcoff")
response = (
"Garbage collector disabled - new messages won't be removed automatically."
)
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
assert collector.disable.call_args.args == (CHAT_ID,)
def test_cancel(self, collector, bot):
new_message(bot, "/cancel")
response = "Cancelled removal of 5 pending messages."
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
assert collector.cancel.call_args.args == (CHAT_ID,)
assert bot.collector.retry.call_count == 0
def test_retry(self, bot):
bot.collector.count_failed = Mock(return_value=5)
new_message(bot, "/retry")
response = "Attempting to delete 5 failed message(s)."
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
assert bot.collector.retry.call_args.args == (CHAT_ID, None)
assert bot.client.send_chat_action.call_args.args == (CHAT_ID, "typing")
def test_retry_param(self, bot):
bot.collector.count_failed = Mock(return_value=5)
new_message(bot, "/retry 1")
response = "Attempting to delete 5 failed message(s)."
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
assert bot.collector.retry.call_args.args == (CHAT_ID, 1)
assert bot.client.send_chat_action.call_args.args == (CHAT_ID, "typing")
def test_retry_no_failed(self, bot):
bot.collector.count_failed = Mock(return_value=0)
new_message(bot, "/retry")
response = "No failed messages found, not re-trying."
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
@pytest.mark.parametrize(
"message",
[
"/retry abcd",
"/retry -15",
"/retry 2.34",
"/retry 0",
"/retry 1001",
"/retry qwefno oenf wqoiefn wqefoin",
],
)
def test_retry_param_validation(self, bot, message):
new_message(bot, message)
response = (
"Please provide a valid integer between 1 and 1000 for the "
"<i>max_attempts</i> parameter."
)
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == response
def test_queue(self, bot):
bot.collector.get_removal_queue = Mock(return_value=FakeQuery.populate(10))
new_message(bot, "/queue")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert "Message IDs to be deleted next" in text
def test_queue_empty(self, bot):
bot.collector.get_removal_queue = Mock(return_value=FakeQuery())
new_message(bot, "/queue")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert "No messages queued for removal." in text
def test_status(self, bot):
new_message(bot, "/status")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert "Status:" in text
def test_help(self, bot):
new_message(bot, "/help")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == HelpHandler.HELP
def test_noop(self, bot):
new_message(bot, "/noop")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text.startswith("Aborting")
def test_username_command(self, bot):
bot.USERNAME = "gcservantbot"
new_message(bot, "/ping@gcservantbot")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert text == "pong"
def test_invalid_command(self, bot):
new_message(bot, "/invalid")
message, text = get_response(bot.client)
assert message.chat.id == CHAT_ID
assert "unrecognized command" in text.lower()
def test_tags(self, bot):
new_message(bot, "Hi there #5m #10m #test")
message = bot.collector.add_message.call_args.args[0]
assert bot.collector.add_message.call_args.args[1] == 5 * 60
@pytest.mark.parametrize(
"message",
[
"Hi #whatsupdog",
"Hi #2days5s",
"Hi #2secodns",
],
)
def test_invalid_tags(self, bot, message):
new_message(bot, message)
args = bot.collector.add_message.call_args.args
assert len(args) == 1
|
import os
import torch
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.tokenization import (
BertTokenizer, PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP,
)
TF_PYTORCH_BERT_NAME_MAP = {
"bert-base-uncased": "uncased_L-12_H-768_A-12",
"bert-large-uncased": "uncased_L-24_H-1024_A-16",
}
def get_bert_config_path(bert_model_name):
return os.path.join(os.environ["BERT_ALL_DIR"], TF_PYTORCH_BERT_NAME_MAP[bert_model_name])
def load_overall_state(bert_load_path, relaxed=True):
if bert_load_path is None:
if relaxed:
return None
else:
raise RuntimeError("Need 'bert_load_path'")
else:
return torch.load(bert_load_path)
def create_tokenizer(bert_model_name, bert_load_mode, do_lower_case, bert_vocab_path=None):
if bert_load_mode == "from_pretrained":
assert bert_vocab_path is None
tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
elif bert_load_mode in ["model_only", "state_model_only", "state_all", "state_full_model",
"full_model_only",
"state_adapter"]:
tokenizer = load_tokenizer(
bert_model_name=bert_model_name,
do_lower_case=do_lower_case,
bert_vocab_path=bert_vocab_path,
)
else:
raise KeyError(bert_load_mode)
return tokenizer
def load_tokenizer(bert_model_name, do_lower_case, bert_vocab_path=None):
if bert_vocab_path is None:
bert_vocab_path = os.path.join(get_bert_config_path(bert_model_name), "vocab.txt")
max_len = min(PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[bert_model_name], int(1e12))
tokenizer = BertTokenizer(
vocab_file=bert_vocab_path,
do_lower_case=do_lower_case,
max_len=max_len,
)
return tokenizer
def get_opt_train_steps(num_train_examples, args):
num_train_steps = int(
num_train_examples
/ args.train_batch_size
/ args.gradient_accumulation_steps
* args.num_train_epochs,
)
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
return t_total
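# Worked example (hypothetical args): 10000 examples, train_batch_size=32,
# gradient_accumulation_steps=2, num_train_epochs=3 gives
# int(10000 / 32 / 2 * 3) = 468 optimization steps, before any division by
# the distributed world size.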
def create_optimizer(model, learning_rate, t_total, loss_scale, fp16, warmup_proportion, state_dict):
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = [
'bias', 'LayerNorm.bias', 'LayerNorm.weight',
'adapter.down_project.weight', 'adapter.up_project.weight',
]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
"to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=t_total)
if state_dict is not None:
optimizer.load_state_dict(state_dict)
return optimizer
def stage_model(model, fp16, device, local_rank, n_gpu):
if fp16:
model.half()
model.to(device)
if local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
"to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
return model
def get_tunable_state_dict(model, verbose=True):
# Drop non-trainable params
    # Sort of a hack, because it's not really clear when we want/don't want
    # state params; for now layer norm works in our favor, but this will be
    # annoying to maintain.
model_state_dict = model.state_dict()
for name, param in model.named_parameters():
if not param.requires_grad:
if verbose:
print(" Skip {}".format(name))
del model_state_dict[name]
return model_state_dict
|
# coding: utf-8
import pprint
import re # noqa: F401
import six
class Compare(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'base_commit': 'str',
'merge_base_commit': 'str',
'commits': 'str',
'files': 'str'
}
attribute_map = {
'base_commit': 'base_commit',
'merge_base_commit': 'merge_base_commit',
'commits': 'commits',
'files': 'files'
}
def __init__(self, base_commit=None, merge_base_commit=None, commits=None, files=None): # noqa: E501
"""Compare - a model defined in Swagger""" # noqa: E501
self._base_commit = None
self._merge_base_commit = None
self._commits = None
self._files = None
self.discriminator = None
if base_commit is not None:
self.base_commit = base_commit
if merge_base_commit is not None:
self.merge_base_commit = merge_base_commit
if commits is not None:
self.commits = commits
if files is not None:
self.files = files
@property
def base_commit(self):
"""Gets the base_commit of this Compare. # noqa: E501
:return: The base_commit of this Compare. # noqa: E501
:rtype: str
"""
return self._base_commit
@base_commit.setter
def base_commit(self, base_commit):
"""Sets the base_commit of this Compare.
:param base_commit: The base_commit of this Compare. # noqa: E501
:type: str
"""
self._base_commit = base_commit
@property
def merge_base_commit(self):
"""Gets the merge_base_commit of this Compare. # noqa: E501
:return: The merge_base_commit of this Compare. # noqa: E501
:rtype: str
"""
return self._merge_base_commit
@merge_base_commit.setter
def merge_base_commit(self, merge_base_commit):
"""Sets the merge_base_commit of this Compare.
:param merge_base_commit: The merge_base_commit of this Compare. # noqa: E501
:type: str
"""
self._merge_base_commit = merge_base_commit
@property
def commits(self):
"""Gets the commits of this Compare. # noqa: E501
:return: The commits of this Compare. # noqa: E501
:rtype: str
"""
return self._commits
@commits.setter
def commits(self, commits):
"""Sets the commits of this Compare.
:param commits: The commits of this Compare. # noqa: E501
:type: str
"""
self._commits = commits
@property
def files(self):
"""Gets the files of this Compare. # noqa: E501
:return: The files of this Compare. # noqa: E501
:rtype: str
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this Compare.
:param files: The files of this Compare. # noqa: E501
:type: str
"""
self._files = files
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Compare, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Compare):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# ---------------------------------------------------------------
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for OSCAR. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
"""
Helper functions / classes for using Isaac Gym
"""
from isaacgym import gymtorch
from isaacgym import gymapi
import torch
from collections.abc import Iterable
class SimInterface:
"""
Base class for central interfaces with sim. Subclasses should serve as singular interface points for directly
interfacing with the sim.
Args:
gym (Gym): Active gym object
sim (Sim): Active sim object
device (str or int): Device to send action tensors to
actors_with_dof (int or list): Actor handle(s) corresponding to actors with nDOF > 0
"""
def __init__(self, gym, sim, device, actors_with_dof):
# Store internal references
self._gym = gym
self._sim = sim
self.device = device
        self.actors_with_dof = [actors_with_dof] if isinstance(actors_with_dof, int) else actors_with_dof
# Get relevant sim metadata
self.n_envs = self._gym.get_env_count(self._sim)
self.n_bodies = self._gym.get_sim_rigid_body_count(self._sim)
self.n_bodies_per_env = self.n_bodies // self.n_envs
self.n_actors = self._gym.get_sim_actor_count(self._sim)
self.n_actors_per_env = self.n_actors // self.n_envs
self.n_dof = self._gym.get_sim_dof_count(self._sim)
self.n_dof_per_env = self.n_dof // self.n_envs
def _ids_to_global_ids(self, actor_ids=None, env_ids=None, only_actors_with_dof=False):
"""
Converts the requested @actor_ids and @env_ids into a single 1D torch tensor of equivalent global IDs
Args:
actor_ids (None or int or list or tensor): Actor (relative) ID(s) corresponding to actors that
will be modified. If None, we assume that all actors will be modified
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
only_actors_with_dof (bool): If True, if actor_ids is None, will only populate actor ids with ids
corresponding to actors that have nDOF > 0
Returns:
tensor: 1D tensor of length len(actor_ids) * len(env_ids)
"""
# First make sure both inputs are iterables
if not isinstance(actor_ids, Iterable):
if actor_ids is None:
                actor_ids = self.actors_with_dof if only_actors_with_dof else torch.arange(self.n_actors_per_env)
else:
actor_ids = [actor_ids]
if not isinstance(env_ids, Iterable):
env_ids = torch.arange(self.n_envs) if env_ids is None else [env_ids]
else:
env_ids = env_ids.clone()
# Compose array
global_ids = torch.arange(self.n_actors, dtype=torch.int32, device=self.device, requires_grad=False).view(self.n_envs, -1)
# Grab relevant indices, flatten, and return
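        # e.g. with n_envs=2 and n_actors_per_env=3, global_ids is
        # [[0, 1, 2], [3, 4, 5]]; env_ids=[1] with actor_ids=[0, 2]
        # selects tensor([3, 5], dtype=torch.int32)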
return global_ids[env_ids][:, actor_ids].flatten()
class SimStates(SimInterface):
"""
Simple class that should serve a singular reference to all relevant simulation states
(root states, dof states, rigid body states). Only one instance should exist per sim, and
any external objects should take views / slices of this object's tensor attributes in order
to maintain the singular reference.
Main attributes that should be shared with external objects are the following:
self.actor_root_states (tensor) (n_env, n_actor_per_env, 13), where 13 = (pos, quat, lin_vel, ang_vel)
self.dof_states (tensor) (n_env, total_dof_per_env, 2), where 2 = (pos, vel)
self.rigid_body_states (tensor) (n_env, n_rigid_bodies_per_env, 13), where 13 = (pos, quat, lin_vel, ang_vel)
self.contact_forces (tensor) (n_env, n_rigid_bodies_per_env, 3), where 3 = (f_x, f_y, f_z)
Args:
gym (Gym): Active gym object
sim (Sim): Active sim object
device (str or int): Device to send action tensors to
actors_with_dof (int or list): Actor handle(s) corresponding to actors with nDOF > 0
"""
def __init__(self, gym, sim, device, actors_with_dof):
# Run super init first
super().__init__(gym=gym, sim=sim, device=device, actors_with_dof=actors_with_dof)
# Setup GPU state tensors
_actor_root_state_tensor = self._gym.acquire_actor_root_state_tensor(self._sim)
_dof_state_tensor = self._gym.acquire_dof_state_tensor(self._sim)
_rigid_body_state_tensor = self._gym.acquire_rigid_body_state_tensor(self._sim)
_contact_forces_tensor = self._gym.acquire_net_contact_force_tensor(self._sim)
# Wrap references in an actual tensor that we can call later
self.actor_root_states = gymtorch.wrap_tensor(_actor_root_state_tensor).view(self.n_envs, -1, 13)
self.dof_states = gymtorch.wrap_tensor(_dof_state_tensor).view(self.n_envs, -1, 2)
self.rigid_body_states = gymtorch.wrap_tensor(_rigid_body_state_tensor).view(self.n_envs, -1, 13)
self.contact_forces = gymtorch.wrap_tensor(_contact_forces_tensor).view(self.n_envs, -1, 3)
def refresh(self, contact_forces=True):
"""
Refreshes all internal tensors. Should only occur ONCE per sim.simulate() step
Args:
contact_forces (bool): If True, will refresh contact forces. Should be set to True if a sim.simulate() step
has occurred.
"""
self._gym.refresh_actor_root_state_tensor(self._sim)
self._gym.refresh_dof_state_tensor(self._sim)
self._gym.refresh_rigid_body_state_tensor(self._sim)
if contact_forces:
self._gym.refresh_net_contact_force_tensor(self._sim)
def set_actor_root_states(self):
"""
Sets the actor root states based on the current references. Should only occur ONCE per sim.simulate() step
"""
self._gym.set_actor_root_state_tensor(self._sim, gymtorch.unwrap_tensor(self.actor_root_states))
def set_actor_root_states_indexed(self, actor_ids=None, env_ids=None):
"""
Sets a subset of all actor root states based on the current references. Should only occur ONCE
per sim.simulate() step.
Args:
actor_ids (None or int or list or tensor): Actor (relative) ID(s) corresponding to actors that
will be modified. If None, we assume that all actors will be modified
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
"""
# If both inputs are None, we simply run the non-indexed version for speed
if actor_ids is None and env_ids is None:
self.set_actor_root_states()
else:
# Convert relative IDs into global ids
global_ids = self._ids_to_global_ids(actor_ids=actor_ids, env_ids=env_ids, only_actors_with_dof=False)
self._gym.set_actor_root_state_tensor_indexed(
self._sim, gymtorch.unwrap_tensor(self.actor_root_states),
gymtorch.unwrap_tensor(global_ids), len(global_ids))
def set_dof_states(self):
"""
Sets the DOF states based on the current references. Should only occur ONCE per sim.simulate() step
"""
self._gym.set_dof_state_tensor(self._sim, gymtorch.unwrap_tensor(self.dof_states))
def set_dof_states_indexed(self, actor_ids=None, env_ids=None):
"""
Sets a subset of all DOF states based on the current references. Should only occur ONCE
per sim.simulate() step.
Args:
actor_ids (None or int or list or tensor): Actor (relative) ID(s) corresponding to actors that
will be modified. If None, we assume that all actors will be modified
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
"""
# If both inputs are None, we simply run the non-indexed version for speed
if actor_ids is None and env_ids is None:
self.set_dof_states()
else:
# Convert relative IDs into global ids
global_ids = self._ids_to_global_ids(actor_ids=actor_ids, env_ids=env_ids, only_actors_with_dof=True)
self._gym.set_dof_state_tensor_indexed(
self._sim, gymtorch.unwrap_tensor(self.dof_states),
gymtorch.unwrap_tensor(global_ids), len(global_ids))
def set_rigid_body_states(self):
"""
Sets the rigid body states based on the current references. Should only occur ONCE per sim.simulate() step
"""
self._gym.set_rigid_body_state_tensor(self._sim, gymtorch.unwrap_tensor(self.rigid_body_states))
def set_rigid_body_states_indexed(self, actor_ids=None, env_ids=None):
"""
Sets a subset of all rigid body states based on the current references. Should only occur ONCE
per sim.simulate() step.
Args:
actor_ids (None or int or list or tensor): Actor (relative) ID(s) corresponding to actors that
will be modified. If None, we assume that all actors will be modified
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
"""
raise NotImplementedError
def clear_contact_forces(self):
"""
Clears the contact forces.
NOTE: Calling self.refresh(contact_forces=True) will override these values!
"""
self.contact_forces[:] = torch.zeros_like(self.contact_forces[:])
def clear_contact_forces_indexed(self, env_ids=None):
"""
Clears a subset of all contact forces based on the current references.
NOTE: Calling self.refresh(contact_forces=True) will override these values!
Args:
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
"""
# If both inputs are None, we simply run the non-indexed version for speed
if env_ids is None:
self.clear_contact_forces()
else:
            # Standardize env_ids
            if not isinstance(env_ids, Iterable):
                env_ids = [env_ids]  # env_ids is known to be non-None here
else:
env_ids = env_ids.clone()
# Clear requested contact forces
self.contact_forces[env_ids] = torch.zeros_like(self.contact_forces[env_ids])
class SimActions(SimInterface):
"""
Simple class that should serve a singular reference to all relevant simulation actions
(dof pos, vel, effort). Only one instance should exist per sim, and
any external objects should take views / slices of this object's tensor attributes in order
to maintain the singular reference.
NOTE: We assume all envs have the same number of DOFs
Main attributes that should be shared with external objects are the following:
self.pos_actions (tensor) (n_env, n_dof_per_env)
self.vel_actions (tensor) (n_env, n_dof_per_env)
self.effort_actions (tensor) (n_env, n_dof_per_env)
Args:
gym (Gym): Active gym object
sim (Sim): Active sim object
device (str or int): Device to send action tensors to
actors_with_dof (int or list): Actor handle(s) corresponding to actors with nDOF > 0
modes (int or list or set): Modes that actions cover. Should be one / list of
(gymapi.DOF_MODE_POS, gymapi.DOF_MODE_VEL, gymapi.DOF_MODE_EFFORT)
"""
def __init__(self, gym, sim, device, actors_with_dof, modes=[gymapi.DOF_MODE_POS]):
# Run super init first
super().__init__(gym=gym, sim=sim, device=device, actors_with_dof=actors_with_dof)
# Store modes
self.modes = set(modes) if isinstance(modes, Iterable) else {modes}
# Setup action tensors
self.pos_actions = torch.zeros((self.n_envs, self.n_dof_per_env), dtype=torch.float, device=self.device)
self.vel_actions = torch.zeros_like(self.pos_actions)
self.effort_actions = torch.zeros_like(self.pos_actions)
def deploy(self):
"""
Applies the internal actions in sim. Should only occur ONCE per sim.simulate() step
"""
if gymapi.DOF_MODE_POS in self.modes:
self._gym.set_dof_position_target_tensor(self._sim, gymtorch.unwrap_tensor(self.pos_actions))
if gymapi.DOF_MODE_VEL in self.modes:
self._gym.set_dof_velocity_target_tensor(self._sim, gymtorch.unwrap_tensor(self.vel_actions))
if gymapi.DOF_MODE_EFFORT in self.modes:
self._gym.set_dof_actuation_force_tensor(self._sim, gymtorch.unwrap_tensor(self.effort_actions))
def deploy_indexed(self, actor_ids=None, env_ids=None):
"""
Applies subset of internal actions in sim. Should only occur ONCE per sim.simulate() step
Args:
actor_ids (None or int or list or tensor): Actor (relative) ID(s) corresponding to actors that
will be modified. If None, we assume that all actors will be modified
env_ids (None or int or list or tensor): Environment ID(s) corresponding to envs that will be modified.
If None, we assume that all envs will be modified
"""
# If both inputs are None, we simply run the non-indexed version for speed
if actor_ids is None and env_ids is None:
self.deploy()
else:
# Convert relative IDs into global ids
global_ids = self._ids_to_global_ids(actor_ids=actor_ids, env_ids=env_ids, only_actors_with_dof=True)
n_ids = len(global_ids)
# Apply actions
if gymapi.DOF_MODE_POS in self.modes:
self._gym.set_dof_position_target_tensor_indexed(
self._sim, gymtorch.unwrap_tensor(self.pos_actions), gymtorch.unwrap_tensor(global_ids), n_ids)
if gymapi.DOF_MODE_VEL in self.modes:
self._gym.set_dof_velocity_target_tensor_indexed(
self._sim, gymtorch.unwrap_tensor(self.vel_actions), gymtorch.unwrap_tensor(global_ids), n_ids)
if gymapi.DOF_MODE_EFFORT in self.modes:
self._gym.set_dof_actuation_force_tensor_indexed(
self._sim, gymtorch.unwrap_tensor(self.effort_actions), gymtorch.unwrap_tensor(global_ids), n_ids)
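# Wiring sketch (names are illustrative; assumes `gym` and `sim` were already
# created via the isaacgym API and actor handle 0 has DOFs):
#     states = SimStates(gym, sim, device="cuda:0", actors_with_dof=0)
#     actions = SimActions(gym, sim, "cuda:0", 0, modes=[gymapi.DOF_MODE_POS])
#     actions.pos_actions[:] = targets   # write position targets each step
#     actions.deploy()                   # push actions ONCE per step
#     gym.simulate(sim)
#     states.refresh()                   # re-sync the wrapped state tensors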
|
#!./venv/bin/python3.7
import argparse
import sys
import os
import cv2
import numpy as np
from src import lib
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help="Input image path", required=True)
parser.add_argument('-o', '--outdir', help="Output directory", required=True)
parser.add_argument(
'-m', '--model', help="Model to predict", required=True)
parser.add_argument('-it', '--interactive',
help='Interactive mode', action='store_true')
parser.add_argument(
    '-a', '--alpha', type=float,
    help="Minimum contour area as a fraction of the image area")
args = parser.parse_args()
alpha = args.alpha
img_path = args.input
if not os.path.isfile(img_path):
raise RuntimeError(f"Could not find image {img_path}")
output_dir = args.outdir
if not os.path.isdir(output_dir):
raise RuntimeError(f"Could not find dir {output_dir}")
model_path = args.model
if not os.path.isfile(model_path):
raise RuntimeError(f"Could not find model {model_path}")
original_img = cv2.imread(img_path)
img = cv2.bitwise_not(cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY))
model_name = lib.extract_name(model_path)
name, extension = lib.split_name(img_path)
out_path = f'{output_dir}/{name}_{model_name}_described'
if not args.interactive:
img = lib.binarize(img)
img = lib.morph_open(img, (3, 3))
img = lib.drop_contacting_objects(img)
else:
img, alpha = lib.interactive_edit_image(img)
cnts, hierarchy = cv2.findContours(
img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
h, w = img.shape
model = lib.load_trained_model(model_path)
labels = {}
alpha = 0.0001 if alpha is None else alpha
AR_THRESHOLD = w * h * alpha
nn = len(cnts)
i = 1
for c in cnts:
print(f'Predicting {i}/{nn}')
i += 1
ar = cv2.contourArea(c)
if ar < AR_THRESHOLD:
print('Skipped, too small probe')
continue
res = lib.create_probe_from_contour(img, c, original_img=original_img)
if res is None:
continue
probe, x, y = res
probe = lib.reshape_probe(probe)
label = lib.predict_single_image(model, probe)
text_label = lib.NUM_TO_LABEL[str(label)]
if text_label in labels:
labels[text_label] += 1
else:
labels[text_label] = 1
lib.mark_object(original_img, c, text_label, (x, y))
print("Predicted and marked")
print(labels)
print(out_path)
cv2.imwrite(out_path + f'.{extension}', original_img)
|
import sys
str1 = sys.argv[1]
str2 = str1.swapcase()
print(str2)
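# Example: `python swapcase.py FooBar` prints fOObAR.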
|
# -*- coding: utf-8 -*-
"""This module is designed to hold custom components with their classes and
associated individual constraints (blocks) and groupings.
Therefore this module holds the class definition and the block directly located
by each other.
SPDX-FileCopyrightText: Uwe Krien <[email protected]>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Patrik Schönfeldt
SPDX-FileCopyrightText: Johannes Röder
SPDX-FileCopyrightText: jakob-wo
SPDX-FileCopyrightText: gplssm
SPDX-License-Identifier: MIT
"""
import logging
from pyomo.core.base.block import SimpleBlock
from pyomo.environ import Binary
from pyomo.environ import BuildAction
from pyomo.environ import Constraint
from pyomo.environ import Expression
from pyomo.environ import NonNegativeReals
from pyomo.environ import Set
from pyomo.environ import Var
from oemof.network.network import Transformer as NetworkTransformer
from oemof.solph.network import Bus
from oemof.solph.network import Flow
from oemof.solph.network import Sink
from oemof.solph.network import Transformer
from oemof.solph.plumbing import sequence
class ElectricalBus(Bus):
r"""A electrical bus object. Every node has to be connected to Bus. This
Bus is used in combination with ElectricalLine objects for linear optimal
power flow (lopf) calculations.
Parameters
----------
slack: boolean
If True Bus is slack bus for network
v_max: numeric
Maximum value of voltage angle at electrical bus
v_min: numeric
        Minimum value of voltage angle at electrical bus
Note: This component is experimental. Use it with care.
Notes
-----
The following sets, variables, constraints and objective parts are created
* :py:class:`~oemof.solph.blocks.Bus`
The objects are also used inside:
* :py:class:`~oemof.solph.custom.ElectricalLine`
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.slack = kwargs.get('slack', False)
self.v_max = kwargs.get('v_max', 1000)
self.v_min = kwargs.get('v_min', -1000)
class ElectricalLine(Flow):
r"""An ElectricalLine to be used in linear optimal power flow calculations.
based on angle formulation. Check out the Notes below before using this
component!
Parameters
----------
reactance : float or array of floats
Reactance of the line to be modelled
Note: This component is experimental. Use it with care.
Notes
-----
* To use this object the connected buses need to be of the type
:py:class:`~oemof.solph.custom.ElectricalBus`.
* It does not work together with flows that have set the attr.`nonconvex`,
i.e. unit commitment constraints are not possible
* Input and output of this component are set equal, therefore just use
either only the input or the output to parameterize.
* Default attribute `min` of in/outflows is overwritten by -1 if not set
differently by the user
The following sets, variables, constraints and objective parts are created
* :py:class:`~oemof.solph.custom.ElectricalLineBlock`
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reactance = sequence(kwargs.get('reactance', 0.00001))
# set input / output flow values to -1 by default if not set by user
if self.nonconvex is not None:
raise ValueError(
("Attribute `nonconvex` must be None for " +
"component `ElectricalLine` from {} to {}!").format(
self.input, self.output))
if self.min is None:
self.min = -1
# to be used in grouping for all bidi flows
self.bidirectional = True
def constraint_group(self):
return ElectricalLineBlock
class ElectricalLineBlock(SimpleBlock):
r"""Block for the linear relation of nodes with type
class:`.ElectricalLine`
Note: This component is experimental. Use it with care.
**The following constraints are created:**
Linear relation :attr:`om.ElectricalLine.electrical_flow[n,t]`
.. math::
        flow(n, o, t) = 1 / reactance(n, t) \cdot
        (voltage\_angle(i(n), t) - voltage\_angle(o(n), t)), \\
        \forall t \in \textrm{TIMESTEPS}, \\
        \forall n \in \textrm{ELECTRICAL\_LINES}.
TODO: Add equate constraint of flows
**The following variable are created:**
TODO: Add voltage angle variable
TODO: Add fix slack bus voltage angle to zero constraint / bound
TODO: Add tests
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
""" Creates the linear constraint for the class:`ElectricalLine`
block.
Parameters
----------
group : list
List of oemof.solph.ElectricalLine (eline) objects for which
the linear relation of inputs and outputs is created
e.g. group = [eline1, eline2, ...]. The components inside the
list need to hold a attribute `reactance` of type Sequence
containing the reactance of the line.
"""
if group is None:
return None
m = self.parent_block()
# create voltage angle variables
self.ELECTRICAL_BUSES = Set(initialize=[n for n in m.es.nodes
if isinstance(n, ElectricalBus)])
def _voltage_angle_bounds(block, b, t):
return b.v_min, b.v_max
self.voltage_angle = Var(self.ELECTRICAL_BUSES, m.TIMESTEPS,
bounds=_voltage_angle_bounds)
if True not in [b.slack for b in self.ELECTRICAL_BUSES]:
# TODO: Make this robust to select the same slack bus for
# the same problems
bus = [b for b in self.ELECTRICAL_BUSES][0]
logging.info(
"No slack bus set,setting bus {0} as slack bus".format(
bus.label))
bus.slack = True
def _voltage_angle_relation(block):
for t in m.TIMESTEPS:
for n in group:
if n.input.slack is True:
self.voltage_angle[n.output, t].value = 0
self.voltage_angle[n.output, t].fix()
try:
lhs = m.flow[n.input, n.output, t]
rhs = 1 / n.reactance[t] * (
self.voltage_angle[n.input, t] -
self.voltage_angle[n.output, t])
except ValueError:
raise ValueError("Error in constraint creation",
"of node {}".format(n.label))
block.electrical_flow.add((n, t), (lhs == rhs))
self.electrical_flow = Constraint(group, m.TIMESTEPS, noruleinit=True)
self.electrical_flow_build = BuildAction(
rule=_voltage_angle_relation)
class Link(Transformer):
"""A Link object with 1...2 inputs and 1...2 outputs.
Parameters
----------
conversion_factors : dict
Dictionary containing conversion factors for conversion of each flow.
Keys are the connected tuples (input, output) bus objects.
The dictionary values can either be a scalar or an iterable with length
of time horizon for simulation.
Note: This component is experimental. Use it with care.
Notes
-----
The sets, variables, constraints and objective parts are created
* :py:class:`~oemof.solph.custom.LinkBlock`
Examples
--------
>>> from oemof import solph
>>> bel0 = solph.Bus(label="el0")
>>> bel1 = solph.Bus(label="el1")
>>> link = solph.custom.Link(
... label="transshipment_link",
... inputs={bel0: solph.Flow(), bel1: solph.Flow()},
... outputs={bel0: solph.Flow(), bel1: solph.Flow()},
... conversion_factors={(bel0, bel1): 0.92, (bel1, bel0): 0.99})
>>> print(sorted([x[1][5] for x in link.conversion_factors.items()]))
[0.92, 0.99]
>>> type(link)
<class 'oemof.solph.custom.Link'>
>>> sorted([str(i) for i in link.inputs])
['el0', 'el1']
>>> link.conversion_factors[(bel0, bel1)][3]
0.92
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if len(self.inputs) > 2 or len(self.outputs) > 2:
raise ValueError("Component `Link` must not have more than \
2 inputs and 2 outputs!")
self.conversion_factors = {
k: sequence(v)
for k, v in kwargs.get('conversion_factors', {}).items()}
def constraint_group(self):
return LinkBlock
class LinkBlock(SimpleBlock):
r"""Block for the relation of nodes with type
:class:`~oemof.solph.custom.Link`
Note: This component is experimental. Use it with care.
**The following constraints are created:**
TODO: Add description for constraints
TODO: Add tests
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
""" Creates the relation for the class:`Link`.
Parameters
----------
group : list
            List of oemof.solph.custom.Link objects for which
            the relation of inputs and outputs is created,
            e.g. group = [link1, link2, link3, ...]. The components inside
the list need to hold an attribute `conversion_factors` of type
dict containing the conversion factors for all inputs to outputs.
"""
if group is None:
return None
m = self.parent_block()
all_conversions = {}
for n in group:
all_conversions[n] = {
k: v for k, v in n.conversion_factors.items()}
def _input_output_relation(block):
for t in m.TIMESTEPS:
for n, conversion in all_conversions.items():
for cidx, c in conversion.items():
try:
expr = (m.flow[n, cidx[1], t] ==
c[t] * m.flow[cidx[0], n, t])
except ValueError:
raise ValueError(
"Error in constraint creation",
"from: {0}, to: {1}, via: {2}".format(
cidx[0], cidx[1], n))
block.relation.add((n, cidx[0], cidx[1], t), (expr))
self.relation = Constraint(
[(n, cidx[0], cidx[1], t)
for t in m.TIMESTEPS
for n, conversion in all_conversions.items()
for cidx, c in conversion.items()], noruleinit=True)
self.relation_build = BuildAction(rule=_input_output_relation)
class GenericCAES(NetworkTransformer):
"""
Component `GenericCAES` to model arbitrary compressed air energy storages.
The full set of equations is described in:
Kaldemeyer, C.; Boysen, C.; Tuschy, I.
A Generic Formulation of Compressed Air Energy Storage as
Mixed Integer Linear Program – Unit Commitment of Specific
Technical Concepts in Arbitrary Market Environments
Materials Today: Proceedings 00 (2018) 0000–0000
[currently in review]
Parameters
----------
electrical_input : dict
Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
for the electrical input.
fuel_input : dict
Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
for the fuel input.
electrical_output : dict
Dictionary with key-value-pair of `oemof.Bus` and `oemof.Flow` object
for the electrical output.
Note: This component is experimental. Use it with care.
Notes
-----
The following sets, variables, constraints and objective parts are created
* :py:class:`~oemof.solph.blocks.GenericCAES`
TODO: Add description for constraints. See referenced paper until then!
Examples
--------
>>> from oemof import solph
>>> bel = solph.Bus(label='bel')
>>> bth = solph.Bus(label='bth')
>>> bgas = solph.Bus(label='bgas')
>>> # dictionary with parameters for a specific CAES plant
>>> concept = {
... 'cav_e_in_b': 0,
... 'cav_e_in_m': 0.6457267578,
... 'cav_e_out_b': 0,
... 'cav_e_out_m': 0.3739636077,
... 'cav_eta_temp': 1.0,
... 'cav_level_max': 211.11,
... 'cmp_p_max_b': 86.0918959849,
... 'cmp_p_max_m': 0.0679999932,
... 'cmp_p_min': 1,
... 'cmp_q_out_b': -19.3996965679,
... 'cmp_q_out_m': 1.1066036114,
... 'cmp_q_tes_share': 0,
... 'exp_p_max_b': 46.1294016678,
... 'exp_p_max_m': 0.2528340303,
... 'exp_p_min': 1,
... 'exp_q_in_b': -2.2073411014,
... 'exp_q_in_m': 1.129249765,
... 'exp_q_tes_share': 0,
... 'tes_eta_temp': 1.0,
... 'tes_level_max': 0.0}
>>> # generic compressed air energy storage (caes) plant
>>> caes = solph.custom.GenericCAES(
... label='caes',
... electrical_input={bel: solph.Flow()},
... fuel_input={bgas: solph.Flow()},
... electrical_output={bel: solph.Flow()},
... params=concept, fixed_costs=0)
>>> type(caes)
<class 'oemof.solph.custom.GenericCAES'>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.electrical_input = kwargs.get('electrical_input')
self.fuel_input = kwargs.get('fuel_input')
self.electrical_output = kwargs.get('electrical_output')
self.params = kwargs.get('params')
# map specific flows to standard API
self.inputs.update(kwargs.get('electrical_input'))
self.inputs.update(kwargs.get('fuel_input'))
self.outputs.update(kwargs.get('electrical_output'))
def constraint_group(self):
return GenericCAESBlock
class GenericCAESBlock(SimpleBlock):
r"""Block for nodes of class:`.GenericCAES`.
Note: This component is experimental. Use it with care.
**The following constraints are created:**
.. _GenericCAES-equations:
.. math::
&
(1) \qquad P_{cmp}(t) = electrical\_input (t)
\quad \forall t \in T \\
&
(2) \qquad P_{cmp\_max}(t) = m_{cmp\_max} \cdot CAS_{fil}(t-1)
+ b_{cmp\_max}
\quad \forall t \in\left[1, t_{max}\right] \\
&
(3) \qquad P_{cmp\_max}(t) = b_{cmp\_max}
\quad \forall t \notin\left[1, t_{max}\right] \\
&
(4) \qquad P_{cmp}(t) \leq P_{cmp\_max}(t)
\quad \forall t \in T \\
&
(5) \qquad P_{cmp}(t) \geq P_{cmp\_min} \cdot ST_{cmp}(t)
\quad \forall t \in T \\
&
        (6) \qquad P_{cmp}(t) \leq m_{cmp\_max} \cdot CAS_{fil\_max}
+ b_{cmp\_max} \cdot ST_{cmp}(t)
\quad \forall t \in T \\
&
(7) \qquad \dot{Q}_{cmp}(t) =
m_{cmp\_q} \cdot P_{cmp}(t) + b_{cmp\_q} \cdot ST_{cmp}(t)
\quad \forall t \in T \\
&
(8) \qquad \dot{Q}_{cmp}(t) = \dot{Q}_{cmp_out}(t)
+ \dot{Q}_{tes\_in}(t)
\quad \forall t \in T \\
&
(9) \qquad r_{cmp\_tes} \cdot\dot{Q}_{cmp\_out}(t) =
\left(1-r_{cmp\_tes}\right) \dot{Q}_{tes\_in}(t)
\quad \forall t \in T \\
&
(10) \quad\; P_{exp}(t) = electrical\_output (t)
\quad \forall t \in T \\
&
(11) \quad\; P_{exp\_max}(t) = m_{exp\_max} CAS_{fil}(t-1)
+ b_{exp\_max}
\quad \forall t \in\left[1, t_{\max }\right] \\
&
(12) \quad\; P_{exp\_max}(t) = b_{exp\_max}
\quad \forall t \notin\left[1, t_{\max }\right] \\
&
(13) \quad\; P_{exp}(t) \leq P_{exp\_max}(t)
\quad \forall t \in T \\
&
(14) \quad\; P_{exp}(t) \geq P_{exp\_min}(t) \cdot ST_{exp}(t)
\quad \forall t \in T \\
&
(15) \quad\; P_{exp}(t) \leq m_{exp\_max} \cdot CAS_{fil\_max}
+ b_{exp\_max} \cdot ST_{exp}(t)
\quad \forall t \in T \\
&
(16) \quad\; \dot{Q}_{exp}(t) = m_{exp\_q} \cdot P_{exp}(t)
        + b_{exp\_q} \cdot ST_{exp}(t)
\quad \forall t \in T \\
&
(17) \quad\; \dot{Q}_{exp\_in}(t) = fuel\_input(t)
\quad \forall t \in T \\
&
(18) \quad\; \dot{Q}_{exp}(t) = \dot{Q}_{exp\_in}(t)
        + \dot{Q}_{tes\_out}(t) + \dot{Q}_{exp\_add}(t)
\quad \forall t \in T \\
&
(19) \quad\; r_{exp\_tes} \cdot \dot{Q}_{exp\_in}(t) =
(1 - r_{exp\_tes})(\dot{Q}_{tes\_out}(t) + \dot{Q}_{exp\_add}(t))
\quad \forall t \in T \\
&
(20) \quad\; \dot{E}_{cas\_in}(t) = m_{cas\_in}\cdot P_{cmp}(t)
+ b_{cas\_in}\cdot ST_{cmp}(t)
\quad \forall t \in T \\
&
(21) \quad\; \dot{E}_{cas\_out}(t) = m_{cas\_out}\cdot P_{cmp}(t)
+ b_{cas\_out}\cdot ST_{cmp}(t)
\quad \forall t \in T \\
&
(22) \quad\; \eta_{cas\_tmp} \cdot CAS_{fil}(t) = CAS_{fil}(t-1)
+ \tau\left(\dot{E}_{cas\_in}(t) - \dot{E}_{cas\_out}(t)\right)
\quad \forall t \in\left[1, t_{max}\right] \\
&
(23) \quad\; \eta_{cas\_tmp} \cdot CAS_{fil}(t) =
\tau\left(\dot{E}_{cas\_in}(t) - \dot{E}_{cas\_out}(t)\right)
\quad \forall t \notin\left[1, t_{max}\right] \\
&
(24) \quad\; CAS_{fil}(t) \leq CAS_{fil\_max}
\quad \forall t \in T \\
&
(25) \quad\; TES_{fil}(t) = TES_{fil}(t-1)
+ \tau\left(\dot{Q}_{tes\_in}(t)
- \dot{Q}_{tes\_out}(t)\right)
\quad \forall t \in\left[1, t_{max}\right] \\
&
(26) \quad\; TES_{fil}(t) =
\tau\left(\dot{Q}_{tes\_in}(t)
- \dot{Q}_{tes\_out}(t)\right)
\quad \forall t \notin\left[1, t_{max}\right] \\
&
(27) \quad\; TES_{fil}(t) \leq TES_{fil\_max}
\quad \forall t \in T \\
&
**Table: Symbols and attribute names of variables and parameters**
.. csv-table:: Variables (V) and Parameters (P)
:header: "symbol", "attribute", "type", "explanation"
:widths: 1, 1, 1, 1
":math:`ST_{cmp}` ", ":py:obj:`cmp_st[n,t]` ", "V", "Status of
compression"
":math:`{P}_{cmp}` ", ":py:obj:`cmp_p[n,t]`", "V", "Compression power"
":math:`{P}_{cmp\_max}`", ":py:obj:`cmp_p_max[n,t]`", "V", "Max.
compression power"
":math:`\dot{Q}_{cmp}` ", ":py:obj:`cmp_q_out_sum[n,t]`", "V", "Summed
heat flow in compression"
":math:`\dot{Q}_{cmp\_out}` ", ":py:obj:`cmp_q_waste[n,t]`", "V", "
Waste heat flow from compression"
":math:`ST_{exp}(t)`", ":py:obj:`exp_st[n,t]`", "V", "Status of
expansion (binary)"
":math:`P_{exp}(t)`", ":py:obj:`exp_p[n,t]`", "V", "Expansion power"
":math:`P_{exp\_max}(t)`", ":py:obj:`exp_p_max[n,t]`", "V", "Max.
expansion power"
":math:`\dot{Q}_{exp}(t)`", ":py:obj:`exp_q_in_sum[n,t]`", "V", "
Summed heat flow in expansion"
":math:`\dot{Q}_{exp\_in}(t)`", ":py:obj:`exp_q_fuel_in[n,t]`", "V", "
Heat (external) flow into expansion"
":math:`\dot{Q}_{exp\_add}(t)`", ":py:obj:`exp_q_add_in[n,t]`", "V", "
Additional heat flow into expansion"
":math:`CAV_{fil}(t)`", ":py:obj:`cav_level[n,t]`", "V", "Filling level
if CAE"
":math:`\dot{E}_{cas\_in}(t)`", ":py:obj:`cav_e_in[n,t]`", "V", "
Exergy flow into CAS"
":math:`\dot{E}_{cas\_out}(t)`", ":py:obj:`cav_e_out[n,t]`", "V", "
Exergy flow from CAS"
":math:`TES_{fil}(t)`", ":py:obj:`tes_level[n,t]`", "V", "Filling
level of Thermal Energy Storage (TES)"
":math:`\dot{Q}_{tes\_in}(t)`", ":py:obj:`tes_e_in[n,t]`", "V", "Heat
flow into TES"
":math:`\dot{Q}_{tes\_out}(t)`", ":py:obj:`tes_e_out[n,t]`", "V", "Heat
flow from TES"
":math:`b_{cmp\_max}`", ":py:obj:`cmp_p_max_b[n,t]`", "P", "Specific
y-intercept"
":math:`b_{cmp\_q}`", ":py:obj:`cmp_q_out_b[n,t]`", "P", "Specific
y-intercept"
":math:`b_{exp\_max}`", ":py:obj:`exp_p_max_b[n,t]`", "P", "Specific
y-intercept"
":math:`b_{exp\_q}`", ":py:obj:`exp_q_in_b[n,t]`", "P", "Specific
y-intercept"
":math:`b_{cas\_in}`", ":py:obj:`cav_e_in_b[n,t]`", "P", "Specific
y-intercept"
":math:`b_{cas\_out}`", ":py:obj:`cav_e_out_b[n,t]`", "P", "Specific
y-intercept"
":math:`m_{cmp\_max}`", ":py:obj:`cmp_p_max_m[n,t]`", "P", "Specific
slope"
":math:`m_{cmp\_q}`", ":py:obj:`cmp_q_out_m[n,t]`", "P", "Specific
slope"
":math:`m_{exp\_max}`", ":py:obj:`exp_p_max_m[n,t]`", "P", "Specific
slope"
":math:`m_{exp\_q}`", ":py:obj:`exp_q_in_m[n,t]`", "P", "Specific
slope"
":math:`m_{cas\_in}`", ":py:obj:`cav_e_in_m[n,t]`", "P", "Specific
slope"
":math:`m_{cas\_out}`", ":py:obj:`cav_e_out_m[n,t]`", "P", "Specific
slope"
":math:`P_{cmp\_min}`", ":py:obj:`cmp_p_min[n,t]`", "P", "Min.
compression power"
":math:`r_{cmp\_tes}`", ":py:obj:`cmp_q_tes_share[n,t]`", "P", "Ratio
between waste heat flow and heat flow into TES"
":math:`r_{exp\_tes}`", ":py:obj:`exp_q_tes_share[n,t]`", "P", "Ratio
between external heat flow into expansion and heat flows from TES and
additional source"
":math:`\tau`", ":py:obj:`m.timeincrement[n,t]`", "P", "Time interval
length"
":math:`TES_{fil\_max}`", ":py:obj:`tes_level_max[n,t]`", "P", "Max.
filling level of TES"
":math:`CAS_{fil\_max}`", ":py:obj:`cav_level_max[n,t]`", "P", "Max.
filling level of CAS"
":math:`\eta_{cas\_tmp}`", ":py:obj:`cav_eta_tmp[n,t]`", "P", "Temporal
efficiency (loss factor to take intertemporal losses into account)"
":math:`electrical\_input`", "
:py:obj:`flow[list(n.electrical_input.keys())[0], n, t]`", "P", "
Electr. power input into compression"
":math:`electrical\_output`", "
:py:obj:`flow[n, list(n.electrical_output.keys())[0], t]`", "P", "
Electr. power output of expansion"
":math:`fuel\_input`", "
:py:obj:`flow[list(n.fuel_input.keys())[0], n, t]`", "P", "Heat input
(external) into Expansion"
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
"""
Create constraints for GenericCAESBlock.
Parameters
----------
group : list
List containing `.GenericCAES` objects.
e.g. groups=[gcaes1, gcaes2,..]
"""
m = self.parent_block()
if group is None:
return None
self.GENERICCAES = Set(initialize=[n for n in group])
# Compression: Binary variable for operation status
self.cmp_st = Var(self.GENERICCAES, m.TIMESTEPS, within=Binary)
# Compression: Realized capacity
self.cmp_p = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Compression: Max. Capacity
self.cmp_p_max = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Compression: Heat flow
self.cmp_q_out_sum = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Compression: Waste heat
self.cmp_q_waste = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Expansion: Binary variable for operation status
self.exp_st = Var(self.GENERICCAES, m.TIMESTEPS, within=Binary)
# Expansion: Realized capacity
self.exp_p = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Expansion: Max. Capacity
self.exp_p_max = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Expansion: Summed heat flow in expansion
self.exp_q_in_sum = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Expansion: Heat flow of natural gas co-firing
self.exp_q_fuel_in = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Expansion: Heat flow of additional firing
self.exp_q_add_in = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Cavern: Filling level
self.cav_level = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Cavern: Energy inflow
self.cav_e_in = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Cavern: Energy outflow
self.cav_e_out = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# TES: Filling level
self.tes_level = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# TES: Energy inflow
self.tes_e_in = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# TES: Energy outflow
self.tes_e_out = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Spot market: Positive capacity
self.exp_p_spot = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Spot market: Negative capacity
self.cmp_p_spot = Var(self.GENERICCAES, m.TIMESTEPS,
within=NonNegativeReals)
# Compression: Capacity on markets
def cmp_p_constr_rule(block, n, t):
expr = 0
expr += -self.cmp_p[n, t]
expr += m.flow[list(n.electrical_input.keys())[0], n, t]
return expr == 0
self.cmp_p_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_p_constr_rule)
# Compression: Max. capacity depending on cavern filling level
def cmp_p_max_constr_rule(block, n, t):
if t != 0:
return (self.cmp_p_max[n, t] ==
n.params['cmp_p_max_m'] * self.cav_level[n, t-1] +
n.params['cmp_p_max_b'])
else:
return self.cmp_p_max[n, t] == n.params['cmp_p_max_b']
self.cmp_p_max_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_p_max_constr_rule)
def cmp_p_max_area_constr_rule(block, n, t):
return self.cmp_p[n, t] <= self.cmp_p_max[n, t]
self.cmp_p_max_area_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_p_max_area_constr_rule)
# Compression: Status of operation (on/off)
def cmp_st_p_min_constr_rule(block, n, t):
return (
self.cmp_p[n, t] >= n.params['cmp_p_min'] * self.cmp_st[n, t])
self.cmp_st_p_min_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_st_p_min_constr_rule)
def cmp_st_p_max_constr_rule(block, n, t):
return (self.cmp_p[n, t] <=
(n.params['cmp_p_max_m'] * n.params['cav_level_max'] +
n.params['cmp_p_max_b']) * self.cmp_st[n, t])
self.cmp_st_p_max_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_st_p_max_constr_rule)
# (7) Compression: Heat flow out
def cmp_q_out_constr_rule(block, n, t):
return (self.cmp_q_out_sum[n, t] ==
n.params['cmp_q_out_m'] * self.cmp_p[n, t] +
n.params['cmp_q_out_b'] * self.cmp_st[n, t])
self.cmp_q_out_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_q_out_constr_rule)
# (8) Compression: Definition of single heat flows
def cmp_q_out_sum_constr_rule(block, n, t):
return (self.cmp_q_out_sum[n, t] == self.cmp_q_waste[n, t] +
self.tes_e_in[n, t])
self.cmp_q_out_sum_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_q_out_sum_constr_rule)
# (9) Compression: Heat flow out ratio
def cmp_q_out_shr_constr_rule(block, n, t):
return (self.cmp_q_waste[n, t] * n.params['cmp_q_tes_share'] ==
self.tes_e_in[n, t] * (1 - n.params['cmp_q_tes_share']))
self.cmp_q_out_shr_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cmp_q_out_shr_constr_rule)
# (10) Expansion: Capacity on markets
def exp_p_constr_rule(block, n, t):
expr = 0
expr += -self.exp_p[n, t]
expr += m.flow[n, list(n.electrical_output.keys())[0], t]
return expr == 0
self.exp_p_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_p_constr_rule)
# (11-12) Expansion: Max. capacity depending on cavern filling level
def exp_p_max_constr_rule(block, n, t):
if t != 0:
return (self.exp_p_max[n, t] ==
n.params['exp_p_max_m'] * self.cav_level[n, t-1] +
n.params['exp_p_max_b'])
else:
return self.exp_p_max[n, t] == n.params['exp_p_max_b']
self.exp_p_max_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_p_max_constr_rule)
# (13)
def exp_p_max_area_constr_rule(block, n, t):
return self.exp_p[n, t] <= self.exp_p_max[n, t]
self.exp_p_max_area_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_p_max_area_constr_rule)
# (14) Expansion: Status of operation (on/off)
def exp_st_p_min_constr_rule(block, n, t):
return (
self.exp_p[n, t] >= n.params['exp_p_min'] * self.exp_st[n, t])
self.exp_st_p_min_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_st_p_min_constr_rule)
# (15)
def exp_st_p_max_constr_rule(block, n, t):
return (self.exp_p[n, t] <=
(n.params['exp_p_max_m'] * n.params['cav_level_max'] +
n.params['exp_p_max_b']) * self.exp_st[n, t])
self.exp_st_p_max_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_st_p_max_constr_rule)
# (16) Expansion: Heat flow in
def exp_q_in_constr_rule(block, n, t):
return (self.exp_q_in_sum[n, t] ==
n.params['exp_q_in_m'] * self.exp_p[n, t] +
n.params['exp_q_in_b'] * self.exp_st[n, t])
self.exp_q_in_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_q_in_constr_rule)
# (17) Expansion: Fuel allocation
def exp_q_fuel_constr_rule(block, n, t):
expr = 0
expr += -self.exp_q_fuel_in[n, t]
expr += m.flow[list(n.fuel_input.keys())[0], n, t]
return expr == 0
self.exp_q_fuel_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_q_fuel_constr_rule)
# (18) Expansion: Definition of single heat flows
def exp_q_in_sum_constr_rule(block, n, t):
return (self.exp_q_in_sum[n, t] == self.exp_q_fuel_in[n, t] +
self.tes_e_out[n, t] + self.exp_q_add_in[n, t])
self.exp_q_in_sum_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_q_in_sum_constr_rule)
# (19) Expansion: Heat flow in ratio
def exp_q_in_shr_constr_rule(block, n, t):
return (n.params['exp_q_tes_share'] * self.exp_q_fuel_in[n, t] ==
(1 - n.params['exp_q_tes_share']) *
(self.exp_q_add_in[n, t] + self.tes_e_out[n, t]))
self.exp_q_in_shr_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=exp_q_in_shr_constr_rule)
# (20) Cavern: Energy inflow
def cav_e_in_constr_rule(block, n, t):
return (self.cav_e_in[n, t] ==
n.params['cav_e_in_m'] * self.cmp_p[n, t] +
n.params['cav_e_in_b'])
self.cav_e_in_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cav_e_in_constr_rule)
# (21) Cavern: Energy outflow
def cav_e_out_constr_rule(block, n, t):
return (self.cav_e_out[n, t] ==
n.params['cav_e_out_m'] * self.exp_p[n, t] +
n.params['cav_e_out_b'])
self.cav_e_out_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cav_e_out_constr_rule)
# (22-23) Cavern: Storage balance
def cav_eta_constr_rule(block, n, t):
if t != 0:
return (n.params['cav_eta_temp'] * self.cav_level[n, t] ==
self.cav_level[n, t-1] + m.timeincrement[t] *
(self.cav_e_in[n, t] - self.cav_e_out[n, t]))
else:
return (n.params['cav_eta_temp'] * self.cav_level[n, t] ==
m.timeincrement[t] *
(self.cav_e_in[n, t] - self.cav_e_out[n, t]))
self.cav_eta_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cav_eta_constr_rule)
# (24) Cavern: Upper bound
def cav_ub_constr_rule(block, n, t):
return self.cav_level[n, t] <= n.params['cav_level_max']
self.cav_ub_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=cav_ub_constr_rule)
# (25-26) TES: Storage balance
def tes_eta_constr_rule(block, n, t):
if t != 0:
return (self.tes_level[n, t] ==
self.tes_level[n, t-1] + m.timeincrement[t] *
(self.tes_e_in[n, t] - self.tes_e_out[n, t]))
else:
return (self.tes_level[n, t] ==
m.timeincrement[t] *
(self.tes_e_in[n, t] - self.tes_e_out[n, t]))
self.tes_eta_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=tes_eta_constr_rule)
# (27) TES: Upper bound
def tes_ub_constr_rule(block, n, t):
return self.tes_level[n, t] <= n.params['tes_level_max']
self.tes_ub_constr = Constraint(
self.GENERICCAES, m.TIMESTEPS, rule=tes_ub_constr_rule)
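# A minimal sketch of the cavern balance in eqs. (22)-(23), outside of
# Pyomo (illustrative names, not part of oemof): once the exergy flows
# are known, the filling level follows a simple recursion.
#
#     def cav_level_series(e_in, e_out, eta_tmp, tau=1.0):
#         """Cavern filling level per time step, eqs. (22)-(23)."""
#         levels, prev = [], 0.0  # eq. (23): no carry-over before t=0
#         for ein, eout in zip(e_in, e_out):
#             prev = (prev + tau * (ein - eout)) / eta_tmp
#             levels.append(prev)
#         return levels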
class SinkDSM(Sink):
r"""
Demand Side Management implemented as Sink with flexibility potential.
Based on the paper by Zerrahn, Alexander and Schill, Wolf-Peter (2015):
`On the representation of demand-side management in power system models
<https://www.sciencedirect.com/science/article/abs/pii/S036054421500331X>`_,
in: Energy (84), pp. 840-845, 10.1016/j.energy.2015.03.037,
accessed 17.09.2019, pp. 842-843.
SinkDSM adds additional constraints that allow shifting energy within a
certain time window, constrained by :attr:`~capacity_up` and
:attr:`~capacity_down`.
Parameters
----------
demand: numeric
original electrical demand
capacity_up: int or array
maximum DSM capacity that may be increased
capacity_down: int or array
maximum DSM capacity that may be reduced
method: 'interval', 'delay'
Choose one of the DSM modelling approaches. Read the notes about which
parameters apply to which approach.
interval :
Simple model in which the load shift must be compensated in a
predefined fixed interval (:attr:`~shift_interval` is mandatory).
Within time windows of the length :attr:`~shift_interval` DSM
up and down shifts are balanced. See
:class:`~SinkDSMIntervalBlock` for details.
delay :
Sophisticated model based on the formulation by
Zerrahn & Schill (2015). The load-shift of the component must be
compensated in a predefined delay-time (:attr:`~delay_time` is
mandatory).
For details see :class:`~SinkDSMDelayBlock`.
shift_interval: int
Only used when :attr:`~method` is set to 'interval'. Otherwise, can be
None.
The interval within which :math:`DSM_{t}^{up}` and
:math:`DSM_{t}^{down}` have to be balanced.
delay_time: int
Only used when :attr:`~method` is set to 'delay'. Otherwise, can be
None.
Length of symmetrical time windows around :math:`t` in which
:math:`DSM_{t}^{up}` and :math:`DSM_{t,tt}^{down}` have to be
compensated.
cost_dsm_up : :obj:`int`
Cost per unit of DSM activity that increases the demand
cost_dsm_down : :obj:`int`
Cost per unit of DSM activity that decreases the demand
Note
----
* This component is a candidate component. It's implemented as a custom
component for users who would like to use and test it at an early
stage. Please report issues to improve the component.
* As many constraints and dependencies are created in method 'delay',
computational cost might be high for a large 'delay_time' and for models
of high temporal resolution.
* Using :attr:`~method` 'delay' might result in demand shifts that exceed
the specified delay time by activating up and down shifts simultaneously
in the time steps between two DSM events.
* It's not recommended to assign cost to the flow that connects
:class:`~SinkDSM` with a bus. Instead, use :attr:`~SinkDSM.cost_dsm_up`
or :attr:`~cost_dsm_down`
"""
def __init__(self, demand, capacity_up, capacity_down, method,
shift_interval=None, delay_time=None, cost_dsm_up=0,
cost_dsm_down=0, **kwargs):
super().__init__(**kwargs)
self.capacity_up = sequence(capacity_up)
self.capacity_down = sequence(capacity_down)
self.demand = sequence(demand)
self.method = method
self.shift_interval = shift_interval
self.delay_time = delay_time
self.cost_dsm_up = cost_dsm_up
self.cost_dsm_down = cost_dsm_down
def constraint_group(self):
possible_methods = ['delay', 'interval']
if self.method == possible_methods[0]:
if self.delay_time is None:
raise ValueError('Please define: **delay_time'
' is a mandatory parameter')
return SinkDSMDelayBlock
elif self.method == possible_methods[1]:
if self.shift_interval is None:
raise ValueError('Please define: **shift_interval'
' is a mandatory parameter')
return SinkDSMIntervalBlock
else:
raise ValueError(
'The "method" must be one of the following set: '
'"{}"'.format('" or "'.join(possible_methods)))
class SinkDSMIntervalBlock(SimpleBlock):
r"""Constraints for SinkDSM with "interval" method
**The following constraints are created for method = 'interval':**
.. _SinkDSMInterval-equations:
.. math::
&
(1) \quad \dot{E}_{t} = demand_{t} + DSM_{t}^{up} - DSM_{t}^{do}
\quad \forall t \in \mathbb{T}\\
&
(2) \quad DSM_{t}^{up} \leq E_{t}^{up} \quad \forall t \in
\mathbb{T}\\
&
(3) \quad DSM_{t}^{do} \leq E_{t}^{do} \quad \forall t \in
\mathbb{T}\\
&
(4) \quad \sum_{t=t_s}^{t_s+\tau} DSM_{t}^{up} =
\sum_{t=t_s}^{t_s+\tau} DSM_{t}^{do} \quad \forall t_s \in \{k
\in \mathbb{T} \mid k \mod \tau = 0\} \\
&
**Table: Symbols and attribute names of variables and parameters**
.. csv-table:: Variables (V) and Parameters (P)
:header: "symbol", "attribute", "type", "explanation"
:widths: 1, 1, 1, 1
":math:`DSM_{t}^{up}` ",":attr:`~SinkDSM.capacity_up` ","V", "DSM
up shift"
":math:`DSM_{t}^{do}` ",":attr:`~SinkDSM.capacity_down` ","V","DSM
down shift"
":math:`\dot{E}_{t}`",":attr:`~SinkDSM.inputs`","V", "Energy
flowing in from electrical bus"
":math:`demand_{t}`",":attr:`demand[t]`","P", "Electrical demand
series"
":math:`E_{t}^{do}`",":attr:`capacity_down[tt]`","P", "DSM down
shift capacity"
":math:`E_{t}^{up}`",":attr:`capacity_up[tt]`","P", "DSM up
shift capacity"
":math:`\tau` ",":attr:`~SinkDSM.shift_interval` ","P", "Shift
interval"
":math:`\mathbb{T}` "," ","P", "Time steps"
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
if group is None:
return None
m = self.parent_block()
# for all DSM components get inflow from bus_elec
for n in group:
n.inflow = list(n.inputs)[0]
# ************* SETS *********************************
# Set of DSM Components
self.dsm = Set(initialize=[n for n in group])
# ************* VARIABLES *****************************
# Variable load shift down
self.dsm_do = Var(self.dsm, m.TIMESTEPS, initialize=0,
within=NonNegativeReals)
# Variable load shift up
self.dsm_up = Var(self.dsm, m.TIMESTEPS, initialize=0,
within=NonNegativeReals)
# ************* CONSTRAINTS *****************************
# Demand Production Relation
def _input_output_relation_rule(block):
"""
Relation between input data and pyomo variables.
The actual demand after DSM.
Generator Production == Demand_el +- DSM
"""
for t in m.TIMESTEPS:
for g in group:
# Generator loads directly from bus
lhs = m.flow[g.inflow, g, t]
# Demand + DSM_up - DSM_down
rhs = g.demand[t] + self.dsm_up[g, t] - self.dsm_do[g, t]
# add constraint
block.input_output_relation.add((g, t), (lhs == rhs))
self.input_output_relation = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.input_output_relation_build = BuildAction(
rule=_input_output_relation_rule)
# Upper bounds relation
def dsm_up_constraint_rule(block):
"""
Realised upward load shift at time t has to be smaller than
upward DSM capacity at time t.
"""
for t in m.TIMESTEPS:
for g in group:
# DSM up
lhs = self.dsm_up[g, t]
# Capacity dsm_up
rhs = g.capacity_up[t]
# add constraint
block.dsm_up_constraint.add((g, t), (lhs <= rhs))
self.dsm_up_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_up_constraint_build = BuildAction(rule=dsm_up_constraint_rule)
# Upper bounds relation
def dsm_down_constraint_rule(block):
"""
Realised downward load shift at time t has to be smaller than
downward DSM capacity at time t.
"""
for t in m.TIMESTEPS:
for g in group:
# DSM down
lhs = self.dsm_do[g, t]
# Capacity dsm_down
rhs = g.capacity_down[t]
# add constraint
block.dsm_down_constraint.add((g, t), (lhs <= rhs))
self.dsm_down_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_down_constraint_build = BuildAction(
rule=dsm_down_constraint_rule)
def dsm_sum_constraint_rule(block):
"""
Relation to compensate the total amount of positive
and negative DSM within the shift_interval.
This constraint builds the balance over full intervals starting
at index 0. The last interval might not be full.
"""
for g in group:
intervals = range(m.TIMESTEPS.value_list[0],
m.TIMESTEPS.value_list[-1],
g.shift_interval)
for interval in intervals:
if (interval + g.shift_interval - 1) \
> m.TIMESTEPS.value_list[-1]:
timesteps = range(interval,
m.TIMESTEPS.value_list[-1] + 1)
else:
timesteps = range(interval, interval +
g.shift_interval)
# DSM up/down
lhs = sum(self.dsm_up[g, tt]
for tt in timesteps)
# value
rhs = sum(self.dsm_do[g, tt]
for tt in timesteps)
# add constraint
block.dsm_sum_constraint.add((g, interval), (lhs == rhs))
self.dsm_sum_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_sum_constraint_build = BuildAction(
rule=dsm_sum_constraint_rule)
def _objective_expression(self):
"""Adding cost terms for DSM activity to obj. function"""
m = self.parent_block()
dsm_cost = 0
for t in m.TIMESTEPS:
for g in self.dsm:
dsm_cost += self.dsm_up[g, t] * g.cost_dsm_up
dsm_cost += self.dsm_do[g, t] * g.cost_dsm_down
self.cost = Expression(expr=dsm_cost)
return self.cost
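# Illustrative check of constraint (4) outside of Pyomo: within every full
# shift interval, up- and down-shifts must cancel. The values are made up.
#
#     tau = 4
#     dsm_up = [1, 0, 0, 1, 0, 2, 0, 0]
#     dsm_do = [0, 2, 0, 0, 1, 0, 1, 0]
#     assert all(sum(dsm_up[i:i + tau]) == sum(dsm_do[i:i + tau])
#                for i in range(0, len(dsm_up), tau))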
class SinkDSMDelayBlock(SimpleBlock):
r"""Constraints for SinkDSM with "delay" method
**The following constraints are created for method = 'delay':**
.. _SinkDSMDelay-equations:
.. math::
&
(1) \quad \dot{E}_{t} = demand_{t} + DSM_{t}^{up} -
\sum_{tt=t-L}^{t+L} DSM_{t,tt}^{do} \quad \forall t \in \mathbb{T} \\
&
(2) \quad DSM_{t}^{up} = \sum_{tt=t-L}^{t+L} DSM_{t,tt}^{do}
\quad \forall t \in \mathbb{T} \\
&
(3) \quad DSM_{t}^{up} \leq E_{t}^{up} \quad \forall t \in
\mathbb{T} \\
&
(4) \quad \sum_{tt=t-L}^{t+L} DSM_{t,tt}^{do} \leq E_{t}^{do}
\quad \forall t \in \mathbb{T} \\
&
(5) \quad DSM_{t}^{up} + \sum_{tt=t-L}^{t+L} DSM_{t,tt}^{do}
\leq max \{ E_{t}^{up}, E_{t}^{do} \}\quad \forall t \in \mathbb{T} \\
&
**Table: Symbols and attribute names of variables and parameters**
.. csv-table:: Variables (V) and Parameters (P)
:header: "symbol", "attribute", "type", "explanation"
:widths: 1, 1, 1, 1
":math:`DSM_{t}^{up}` ",":attr:`dsm_up[g,t]`", "V","DSM up
shift (additional load)"
":math:`DSM_{t,tt}^{do}` ",":attr:`dsm_do[g,t,tt]`","V","DSM down
shift (less load)"
":math:`\dot{E}_{t}` ",":attr:`flow[g,t]`","V","Energy
flowing in from electrical bus"
":math:`L`",":attr:`delay_time`","P", "Delay time for
load shift"
":math:`demand_{t}` ",":attr:`demand[t]`","P","Electrical
demand series"
":math:`E_{t}^{do}` ",":attr:`capacity_down[tt]`","P","Capacity
DSM down shift "
":math:`E_{t}^{up}` ", ":attr:`capacity_up[tt]`", "P","Capacity
DSM up shift"
":math:`\mathbb{T}` "," ","P", "Time steps"
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
if group is None:
return None
m = self.parent_block()
# for all DSM components get inflow from bus_elec
for n in group:
n.inflow = list(n.inputs)[0]
# ************* SETS *********************************
# Set of DSM Components
self.dsm = Set(initialize=[g for g in group])
# ************* VARIABLES *****************************
# Variable load shift down
self.dsm_do = Var(self.dsm, m.TIMESTEPS, m.TIMESTEPS, initialize=0,
within=NonNegativeReals)
# Variable load shift up
self.dsm_up = Var(self.dsm, m.TIMESTEPS, initialize=0,
within=NonNegativeReals)
# ************* CONSTRAINTS *****************************
# Demand Production Relation
def _input_output_relation_rule(block):
"""
Relation between input data and pyomo variables. The actual demand
after DSM. Generator Production == Demand +- DSM
"""
for t in m.TIMESTEPS:
for g in group:
# first time steps: 0 + delay time
if t <= g.delay_time:
# Generator loads from bus
lhs = m.flow[g.inflow, g, t]
# Demand +- DSM
rhs = g.demand[t] + self.dsm_up[g, t] - sum(
self.dsm_do[g, tt, t]
for tt in range(t + g.delay_time + 1))
# add constraint
block.input_output_relation.add((g, t), (lhs == rhs))
# main use case
elif (g.delay_time < t <=
m.TIMESTEPS[-1] - g.delay_time):
# Generator loads from bus
lhs = m.flow[g.inflow, g, t]
# Demand +- DSM
rhs = g.demand[t] + self.dsm_up[g, t] - sum(
self.dsm_do[g, tt, t]
for tt in range(t - g.delay_time,
t + g.delay_time + 1))
# add constraint
block.input_output_relation.add((g, t), (lhs == rhs))
# last time steps: end - delay time
else:
# Generator loads from bus
lhs = m.flow[g.inflow, g, t]
# Demand +- DSM
rhs = g.demand[t] + self.dsm_up[g, t] - sum(
self.dsm_do[g, tt, t]
for tt in range(t - g.delay_time,
m.TIMESTEPS[-1] + 1))
# add constraint
block.input_output_relation.add((g, t), (lhs == rhs))
self.input_output_relation = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.input_output_relation_build = BuildAction(
rule=_input_output_relation_rule)
# Equation 7
def dsm_up_down_constraint_rule(block):
"""
Equation 7 by Zerrahn, Schill:
Every upward load shift has to be compensated by downward load
shifts in a defined time frame. Slightly modified equations for
the first and last time steps due to variable initialization.
"""
for t in m.TIMESTEPS:
for g in group:
# first time steps: 0 + delay time
if t <= g.delay_time:
# DSM up
lhs = self.dsm_up[g, t]
# DSM down
rhs = sum(self.dsm_do[g, t, tt]
for tt in range(t + g.delay_time + 1))
# add constraint
block.dsm_updo_constraint.add((g, t), (lhs == rhs))
# main use case
elif g.delay_time < t <= (
m.TIMESTEPS[-1] - g.delay_time):
# DSM up
lhs = self.dsm_up[g, t]
# DSM down
rhs = sum(self.dsm_do[g, t, tt]
for tt in range(t - g.delay_time,
t + g.delay_time + 1))
# add constraint
block.dsm_updo_constraint.add((g, t), (lhs == rhs))
# last time steps: end - delay time
else:
# DSM up
lhs = self.dsm_up[g, t]
# DSM down
rhs = sum(self.dsm_do[g, t, tt]
for tt in range(t - g.delay_time,
m.TIMESTEPS[-1] + 1))
# add constraint
block.dsm_updo_constraint.add((g, t), (lhs == rhs))
self.dsm_updo_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_updo_constraint_build = BuildAction(
rule=dsm_up_down_constraint_rule)
# Equation 8
def dsm_up_constraint_rule(block):
"""
Equation 8 by Zerrahn, Schill:
Realised upward load shift at time t has to be smaller than
upward DSM capacity at time t.
"""
for t in m.TIMESTEPS:
for g in group:
# DSM up
lhs = self.dsm_up[g, t]
# Capacity dsm_up
rhs = g.capacity_up[t]
# add constraint
block.dsm_up_constraint.add((g, t), (lhs <= rhs))
self.dsm_up_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_up_constraint_build = BuildAction(rule=dsm_up_constraint_rule)
# Equation 9
def dsm_do_constraint_rule(block):
"""
Equation 9 by Zerrahn, Schill:
Realised downward load shift at time t has to be smaller than
downward DSM capacity at time t.
"""
for tt in m.TIMESTEPS:
for g in group:
# first times steps: 0 + delay
if tt <= g.delay_time:
# DSM down
lhs = sum(self.dsm_do[g, t, tt]
for t in range(tt + g.delay_time + 1))
# Capacity DSM down
rhs = g.capacity_down[tt]
# add constraint
block.dsm_do_constraint.add((g, tt), (lhs <= rhs))
# main use case
elif g.delay_time < tt <= (
m.TIMESTEPS[-1] - g.delay_time):
# DSM down
lhs = sum(self.dsm_do[g, t, tt]
for t in range(tt - g.delay_time,
tt + g.delay_time + 1))
# Capacity DSM down
rhs = g.capacity_down[tt]
# add constraint
block.dsm_do_constraint.add((g, tt), (lhs <= rhs))
# last time steps: end - delay time
else:
# DSM down
lhs = sum(self.dsm_do[g, t, tt]
for t in range(tt - g.delay_time,
m.TIMESTEPS[-1] + 1))
# Capacity DSM down
rhs = g.capacity_down[tt]
# add constraint
block.dsm_do_constraint.add((g, tt), (lhs <= rhs))
self.dsm_do_constraint = Constraint(group, m.TIMESTEPS,
noruleinit=True)
self.dsm_do_constraint_build = BuildAction(
rule=dsm_do_constraint_rule)
# Equation 10
def c2_constraint_rule(block):
"""
Equation 10 by Zerrahn, Schill:
The realised DSM up or down at time t has to be smaller than
the maximum downward or upward capacity at time t. Therefore, in
total, each DSM unit can only be shifted up OR down.
"""
for tt in m.TIMESTEPS:
for g in group:
# first times steps: 0 + delay time
if tt <= g.delay_time:
# DSM up/down
lhs = self.dsm_up[g, tt] + sum(
self.dsm_do[g, t, tt]
for t in range(tt + g.delay_time + 1))
# max capacity at tt
rhs = max(g.capacity_up[tt], g.capacity_down[tt])
# add constraint
block.C2_constraint.add((g, tt), (lhs <= rhs))
elif g.delay_time < tt <= (
m.TIMESTEPS[-1] - g.delay_time):
# DSM up/down
lhs = self.dsm_up[g, tt] + sum(
self.dsm_do[g, t, tt]
for t in range(tt - g.delay_time,
tt + g.delay_time + 1))
# max capacity at tt
rhs = max(g.capacity_up[tt], g.capacity_down[tt])
# add constraint
block.C2_constraint.add((g, tt), (lhs <= rhs))
else:
# DSM up/down
lhs = self.dsm_up[g, tt] + sum(
self.dsm_do[g, t, tt]
for t in range(tt - g.delay_time,
m.TIMESTEPS[-1] + 1))
# max capacity at tt
rhs = max(g.capacity_up[tt], g.capacity_down[tt])
# add constraint
block.C2_constraint.add((g, tt), (lhs <= rhs))
self.C2_constraint = Constraint(group, m.TIMESTEPS, noruleinit=True)
self.C2_constraint_build = BuildAction(rule=c2_constraint_rule)
def _objective_expression(self):
"""Adding cost terms for DSM activity to obj. function"""
m = self.parent_block()
dsm_cost = 0
for t in m.TIMESTEPS:
for g in self.dsm:
dsm_cost += self.dsm_up[g, t] * g.cost_dsm_up
dsm_cost += sum(self.dsm_do[g, t, tt] for tt in m.TIMESTEPS
) * g.cost_dsm_down
self.cost = Expression(expr=dsm_cost)
return self.cost
|
"""Base Configuration File."""
from typing import TextIO, Type, TypeVar
from pydantic import Extra
from pydantic.dataclasses import dataclass
from ruamel.yaml import YAML
T = TypeVar("T", bound='ConfigModel')
@dataclass
class ConfigModel:
"""A base configuration class."""
class Config:
"""Configure the configuration model."""
extra = Extra.forbid
validate_assignment = True
@classmethod
def load_from_file(cls: Type[T], fp: TextIO) -> T:
"""Load a ConfigModel object from a file."""
yaml = YAML()
data = yaml.load(fp)
if data is None:
return cls()
else:
return cls(**data) # type: ignore
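# A minimal usage sketch (the subclass and the YAML file are hypothetical):
#
#     @dataclass
#     class ServerConfig(ConfigModel):
#         host: str = "localhost"
#         port: int = 8080
#
#     with open("config.yml") as fp:  # e.g. "host: example.org\nport: 9000"
#         cfg = ServerConfig.load_from_file(fp)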
|
import logging
logger = logging.getLogger(__name__)
from typing import List
from . import SizeDistributionBaseModel as PSDBase
from .GaudinMeloy import GaudinMeloy
from .GGSSizeDistributionModel import GGS
from .LogNormalSizeDistributionModel import LogNormal
from .RRBSizeDistributionModel import RRB
from .SigmoidSizeDistributionModel import Sigmoid
def getPSDModelsList() -> List[PSDBase.SizeDistributionBaseModel]:
return [RRB(), GGS(), LogNormal(), Sigmoid()]
m: PSDBase.SizeDistributionBaseModel
available_models = [m.getModelName() for m in getPSDModelsList()]
|
from .middleware import RequestIDMiddleware, get_request_id
|
import csv
from io import StringIO
def csv_res2_dict_lst(res):
"""Convert CSV string with a header into list of dictionaries"""
return list(csv.DictReader(StringIO(res), delimiter=","))
def expected_repnames(repos_cfg):
"""Generate expected repository names '{account_name}/{repo_name}'"""
templ = "{account_name}/{repo_name}"
lst = []
for account_name, rep_cfg in repos_cfg.items():
for repo_name in rep_cfg.keys():
lst.append(
templ.format(account_name=account_name, repo_name=repo_name)
)
return sorted(lst)
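# Usage sketch with a hypothetical repos_cfg fixture:
#
#     expected_repnames({"acme": {"tools": {}, "docs": {}}})
#     # -> ["acme/docs", "acme/tools"]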
def test_simple_call(binbb):
# Just try to run it and hope it will not fail
binbb.sysexec("repo", "list")
binbb.sysexec("repo", "list", "-f", "csv")
binbb.sysexec("repo", "list", "-f", "value")
binbb.sysexec("repo", "list", "-c", "Owner", "-c", "Repo Name")
binbb.sysexec("repo", "list", "-c", "Owner", "-c", "Repo Name", "-f", "csv")
def test_listed_names_csv(binbb, repos_cfg):
res = binbb.sysexec("repo", "list", "-f", "csv")
recs = csv_res2_dict_lst(res)
resnames = ["{rec[Owner]}/{rec[Repo Name]}".format(rec=rec) for rec in recs]
resnames = sorted(resnames)
expected = expected_repnames(repos_cfg)
assert resnames == expected
def test_listed_names_value(binbb, repos_cfg):
# Just try to run it and hope it will not fail
bbcmd = ["repo", "list", "-f", "value", "-c", "Owner", "-c", "Repo Name"]
res = binbb.sysexec(*bbcmd)
recs = res.strip().splitlines()
recs = [line.split(" ", 1) for line in recs]
templ = "{owner}/{repo}"
resnames = [templ.format(owner=owner, repo=repo) for owner, repo in recs]
resnames = sorted(resnames)
expected = expected_repnames(repos_cfg)
assert resnames == expected
|
from .resnest import build_resnest_backbone, build_resnest_fpn_backbone
from .config import add_resnest_config
|
# Generated by Django 2.0.8 on 2018-11-16 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workplace', '0019_model_translation'),
]
operations = [
migrations.AddField(
model_name='historicalreservation',
name='is_present',
field=models.BooleanField(default=False, verbose_name='Present'),
),
migrations.AddField(
model_name='reservation',
name='is_present',
field=models.BooleanField(default=False, verbose_name='Present'),
),
]
|
from flask import Flask
from flask_restful import Api, Resource, reqparse
app = Flask(__name__)
api = Api(app)
animals = [
{
"id": "1",
"name": "Gallinazo Rey",
"class": "Ave",
"order": "Incertae Sedis",
"family": "Cathartidae",
"gender": "Sarcoramphus",
"species": "Arcoramphus papa",
"commonName": "Zopilote rey, condor real, cuervo real"
},
{
"id": "2",
"name": "Pavon negro o Crax rubra",
"class": "Ave",
"order": "galliformes",
"family": "cracidae",
"gender": "crax",
"species": "crax rubra",
"commonName": "pavon negro, hocofaisan, pavon norteno"
},
{
"id": "3",
"name": "Guacamaya Amarilla",
"class": "Ave",
"order": "Psittaciformes",
"family": "Psittacidae",
"gender": "Ara",
"species": "Ara ararauna",
"commonName": "Guacamaya azul o azul amarillo, papagayo o paraba azul amarillo"
},
{
"id": "4",
"name": "Guacamaya Bandera",
"class": "Ave",
"order": "Psittaciformes",
"family": "Psittacidae",
"gender": "ara",
"species": "Ara ararauna",
"commonName": "guacamaya bandera, guacamayo macao, guacamayo rojo"
},
{
"id": "5",
"name": "Tapir",
"class": "mammalia",
"order": "perissodactyla",
"family": "tapiridae",
"gender": "tapirus",
"species": "tapirus bairdii",
"commonName": "tapir centroamericano, danta, anteburro, macho de monte"
},
{
"id": "6",
"name": "Venado cola blanca",
"class": " mammalia",
"order": " artiodactyla",
"family": ": cervidae",
"gender": " odocoileus",
"species":" odocoileus virginiaus",
"commonName": " Venado de cola blanca, ciervo de cola blanca, ciervo de virginia"
},
{
"id": "7",
"name": "Jaguar",
"class": " Mammalia",
"order": " Carnívora",
"family": ": felidae",
"gender": " panthera",
"species":" panthera onca",
"commonName": " jaguar, Yaguar, Yaguerete Balam, Barum"
},
{
"id": "8",
"name": "Zorro cangrejero",
"class": " mammalia",
"order": " carnivora",
"family": ": canidae",
"gender": " cersocyon",
"species":" cerdocyon thous",
"commonName": " zorro de monte, zorro sabanero"
},
{
"id": "9",
"name": "Nutria",
"class": " Mammalia",
"order": " carnívora ",
"family": ": Mustelidae",
"gender": " Sanguinus",
"species":" Lontra longicaudis",
"commonName": " nutria, lobito de río"
},
{
"id": "10",
"name": "Saino",
"class": " Mammalia",
"order": " artiodactyla",
"family": ": tayassuidae",
"gender": " tayassu",
"species":" tayassu tajacu",
"commonName": " saino, pecarí de collar, jabalí"
},
{
"id": "11",
"name": " puma",
"class": " Mammalia",
"order": " carnivora",
"family": " feliade",
"gender": " puma",
"species":" puma con color",
"commonName": " leon de montaña"
},
{
"id": "12",
"name": " mono cara blanca ",
"class": " Mammalia",
"order": " primate",
"family": " cedibae",
"gender": " cebus",
"species":" cebius capuchino",
"commonName": " cari blanco maicero capuchino tanque manchin"
},
{
"id": "13",
"name": " mono titi panameño",
"class": " Mammalia",
"order": " primates",
"family": " calitrichidae",
"gender": " saguinus",
"species":" saguinus geoffroyi",
"commonName": " titi tamarindo panameño,tamarindo de nuca café, pinche de morron"
},
{
"id": "14",
"name": " Loro comun",
"class": " aves",
"order": " psittaciformes",
"family": " psiittacidae",
"gender": " Amazona",
"species":" amazona ochrocephala",
"commonName": " Amazonas Harinoso , Loro Harinoso, amazónico"
},
{
"id": "15",
"name": " taira",
"class": " Mammalia",
"order": " carnivora",
"family": ": mustelidae",
"gender": " eira",
"species":" eira barbara",
"commonName": " huron mayor,cabeza de viejo"
},
{
"id": "16",
"name": " tucan de pico castaño",
"class": " Aves",
"order": " piciformes",
"family": " ramphastidae",
"gender": " ramphastos",
"species":" ramphastos swainsonii",
"commonName": " tucan Dio te de"
},
{
"id": "17",
"name": " tortuga terrestre de patas rojas",
"class": " Sauropsida",
"order": " Testudin",
"family": " Testudinidae",
"gender": " chelonoidis",
"species":" chelonoidis carbonaria",
"commonName": "tortuga morrocoya"
},
{
"id": "18",
"name": " Tigrillo",
"class": " Mammalia",
"order": " carnivora",
"family": " felidae",
"gender": " leopardus",
"species":" leopardus wiedii",
"commonName": " gato tigre, caucel, maracaya"
},
{
"id": "19",
"name": " gato solo",
"class": " Mammalia",
"order": " carnivora",
"family": " procyonidae",
"gender": " nasua",
"species":" nasua narica",
"commonName": "coati"
},
{
"id": "20",
"name": " mono araña colorado",
"class": " Mammalia",
"order": " primates",
"family": " cebidae",
"gender": " ateles",
"species":" ateles geoffroy",
"commonName": "mono araña de manos negras"
},
{
"id": "21",
"name": " suirirí piquirrojo",
"class": " aves",
"order": " anseriformes",
"family": " anatidae",
"gender": " dendrocygna",
"species":" Dendrocygna autumnalis",
"commonName": "güichichi "
},
{
"id": "22",
"name": " guacamaya rojo",
"class": " ave",
"order": " psittaciforme",
"family": " psittacidae",
"gender": " Ara",
"species":" Ara chloropterus",
"commonName": " guacamayo aliverde"
},
{
"id": "23",
"name": " águila harpía",
"class": " ave",
"order": " accipitriforme",
"family": " accipitriforme",
"gender": " harpia",
"species":" harpia harpyja",
"commonName": " harpía mayor"
},
{
"id": "24",
"name": " capibara ronsoco",
"class": " Mammalia",
"order": " rodentia",
"family": ": caviidae",
"gender": " hydrochoerus",
"species":" Hydrochoerus hydrochaeris",
"commonName": " chigüire, pancho, chigüiro"
}
]
class Animal(Resource):
def get(self, id):
for animal in animals:
if(id == animal["id"]):
return animal, 200
return "User not found", 404
def post(self, id):
parser = reqparse.RequestParser()
parser.add_argument("name")
parser.add_argument("class")
parser.add_argument("order")
parser.add_argument("family")
parser.add_argument("gender")
parser.add_argument("species")
parser.add_argument("commonName")
args = parser.parse_args()
for animal in animals:
if(id == animal["id"]):
return "Animal with name {} already exists".format(id), 400
animal = {
"id": id,
"name": args["name"],
"class": args["class"],
"order": args["order"],
"family": args["family"],
"gender": args["gender"],
"species": args["species"],
"commonName": args["commonName"]
}
animals.append(animal)
return animal, 201
def put(self, id):
parser = reqparse.RequestParser()
parser.add_argument("id")
parser.add_argument("class")
parser.add_argument("order")
parser.add_argument("family")
parser.add_argument("gender")
parser.add_argument("species")
parser.add_argument("commonName")
args = parser.parse_args()
for animal in animals:
if(id == animal["id"]):
animal["class"] = args["class"]
animal["order"] = args["order"]
animal["family"] = args["family"]
animal["gender"] = args["species"]
animal["species"] = args["species"]
animal["commonName"] = args["commonName"]
return animal, 200
animal = {
"id": id,
"name": args["name"],
"class": args["class"],
"order": args["order"],
"family": args["family"],
"gender": args["gender"],
"species": args["species"],
"commonName": args["commonName"]
}
animals.append(animal)
return animal, 201
def delete(self, id):
global animals
animals = [animal for animal in animals if animal["id"] != id]
return "{} is deleted.".format(id), 200
api.add_resource(Animal, "/animal/<string:id>")
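# Quick manual smoke test (assumes the Flask default port 5000):
#
#     curl http://127.0.0.1:5000/animal/1
#     curl -X PUT http://127.0.0.1:5000/animal/25 \
#          -d "name=Ocelote" -d "class=Mammalia" -d "order=carnivora" \
#          -d "family=felidae" -d "gender=leopardus" \
#          -d "species=leopardus pardalis" -d "commonName=ocelote"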
app.run(debug=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: drawMSATopo.py
#
# Description:
# Visualize aligned membrane proteins by highlighting features of membrane
# topology
#
# Author:
# Nanjiang Shu [email protected]
import string
import sys
import re
import os
import myfunc
import math
import libtopologycmp as lcmp
import numpy as np
import Bio.SubsMat.MatrixInfo
import subprocess
from matplotlib.lines import *
from colour import Color
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
rundir = os.path.dirname(os.path.realpath(__file__))
# pySVG
# import pysvg.structure
# import pysvg.builders
# import pysvg.text
# matplotlib
# from matplotlib.font_manager import FontProperties
# from pylab import *
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import tempfile
import logging
import logging.config
import yaml
logger = logging.getLogger(__name__)
# PyX
#import pyx
nodename = os.uname()[1]
colorList = ["red", "blue", "green", "cyan","pink"]
colorList_DG_profile = ["red", "black", "green", "cyan", "yellow"]
ylabel_DGprofile = "\u0394G (kcal/mol)"
PIL_user_path = os.environ['HOME'] + "/usr/lib64/python2.6/site-packages/PIL"
if nodename.find("uppmax") != -1:
sys.path.append(PIL_user_path)
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
GAP = myfunc.GAP
#import cProfile
# ChangeLog 2011-11-21 #{{{
# 1. Bug solved for height overflow. When the separation line is added, canvas
# height should be extended as well.
# 2. AA sequences are obtained from the original sequence source, not the
# alignment. In this case, un-aligned sequences will be matched to aligned
# topology accordingly.
# ChangeLog 2011-11-23
# 1. columns with mostly gaps and no TM regions are shrunk according to
# their reciprocal gap percentage.
# ChangeLog 2011-11-24
# 1. Bug solved for ShrinkGapInMSA. The function ShrinkGapInMSA_0 is used by
# default now.
# ChangeLog 2011-11-25
# 1. Bug solved in IsSafetoDeleteTheRegion so that the beginning and end
# topology state will not be deleted.
# 2. For blocks with no 'M' state but with both 'i' and 'o', if each
# subsequence in the block has at most one state, it is OK to shrink
# the block to one column
# ChangeLog 2011-12-01
# 1. Bug in ShrinkGapInMSA_0 fixed
# ChangeLog 2012-02-22
# 1. For pairwise topology alignment, annotation is the sequence id
# ChangeLog 2012-02-23
# 1. Draw DG profile
# ChangeLog 2012-03-21
# when aaseq file is added, only use this file
# ChangeLog 2012-09-28
# Location of dgscanProg and font_dir set to user folder. Using environ
# DATADIR3
# ChangeLog 2013-04-17
# Add krbias to outfile name when -krbias is enabled
# ChangeLog 2013-11-20
# Add option -colorTMbox, so that the whole TM helix will be colored red,
# including the gaps within the TM helix
# Add option -showTMidx, so that the index of TM helices will be shown
# instead of the sequence.
# ChangeLog 2015-03-17
# 1. for matplotlib, set the font by fontpath, avoid "font not found
# problem" if not in the system
# 2. the shrinkrate is set so that the length of the normalized sequence
# length is 100, unless set globally
# 3. set also MAX_SIZE_ANNOTATION
# ChangeLog 2016-09-13
# 1. draw signal peptide as green, now only in PIL
# ChangeLog 2017-06-29
# Added a new function to draw core-rainbow
# ChangeLog 2017-07-03
# Moved the font_dir fonts within the script folder, removed the DATADIR3
# env variable
# ChangeLog 2017-08-07
# 1. add the flag "-ptag"
#}}}
# global constant
usage="""
Usage: drawMSATopo.py [-i] topomsa-in-fasta-format
Options:
-method STR Modules to use for plotting, (default: pil)
Can be pyx, svg, pil, mat, core-rainbow
-of STR Output format, can be png
-l FILE Set input file list
-mode STR Image mode, P or RGB, (default: P)
-fontsize INT Set the font size, (default: 9)
-text y|n Whether to draw text i, o or M in the alignment, (default: yes)
if no, then only the background color is shown
red for M, faded-yellow for i and faded-blue for o
-sep y|n Whether to draw a separator line in grey between each group
(default: yes)
-pfm y|n Whether to draw profile for 'M' state, (default: yes)
-pdg y|n Whether to draw DG profile, (default: no)
-pmsa y|n Whether to draw MSA region, (default: yes)
-pscale y|n Whether to draw the residue position scale bar, (default: yes)
If MSA region is not drawn, this will also be disabled.
-ptag y|n Whether to draw a vertical bar for proteins in different groups, (default: no)
-dgpfile FILE DG profile file produced by myscanDG.pl
-aapath DIR Set path for amino acid sequence file, if set, sequence file
will be searched as $ID.fa
-outpath DIR Set outpath, (default: $dirname(infile))
-autosize y|n Whether to autosize font, (default: yes)
-shrink y|n Whether to shrink gap regions, (default: yes)
-m-shrink INT method of shrinking, (default: 1)
0: shrink both non-TM region and TM region
1: just shrink non-TM region
-aaseq FILE Set aaseq file for all
-krbias Draw krbias
-maxdistkr INT Maximal distance to TM for KR residue, (default: 100)
-win INT Window size for text and html format alignment output. (default: 70)
-h, --help Print this help message and exit
-htmlheader STR Set header text for HTML output
-colorhtml Use colorful output for HTML alignment
-colorTMbox Color the whole TM helix as a red rectangle, even gaps
-colorkingdom Color the TM regions by kingdom (Archaea green, Bacteria Red, Eukaryota blue) as present in annotations.
-advtopo Show advanced topology at top (reentrant regions and in/out helices)
-showTMidx Display index of TM helix as the text for the sequence, e.g. TM1 TM2
-shrinkrate FLOAT Proportional shrink rate, (default: 1.0)
-shrinkrateTM FLOAT Proportional shrink rate for TM regions, (default: 2.0)
-max-hold-loop INT Maximal positions to keep for loop regions (default: 12)
-imagescale FLOAT Overall scale of the image (default: None). If not set, it will be calculated automatically
-h2wratio FLOAT Set the height to width ratio (default: None). If not set, it use the original ratio
-cleanplot Make clean plot, works only for the PIL mode
-showgap Show gap as blank region, works only for the PIL mode
-debug Print debug information, (default: no)
Created 2011-09-05, updated 2020-06-26, Nanjiang Shu
Examples:
# Draw topology alignment with aligned and unaligned regions, aligned regions
# shown in rainbow color
%s -pdg n -shrink yes -method core-rainbow -text no -shrink yes topoalignFile
"""%(sys.argv[0])
def PrintHelp():
print(usage)
def sig2(x, scale=1):
""" Calculate sigmoid value
"""
return 1/(1+math.exp(-x/scale))
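# For example, sig2(0) == 0.5 and sig2(x) tends to 1 for large positive x;
# a larger `scale` flattens the transition.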
def WriteTXTAlignment(idList, annoList, alignedTopoSeqList,#{{{
originalAlignedTopoSeqList, aaSeqList, final2seq_idxMapList,
outfile):
WIDTH = g_params['window_size']
maxSizeAnno = max([len(x) for x in annoList])
lengthAlignment = len(alignedTopoSeqList[0])
numSeq = len(idList)
posTMList = [myfunc.GetTMPosition(x) for x in alignedTopoSeqList]
fpout = open(outfile, "w")
strs = [""]*numSeq
j = 0 # iterator for the alignment position
isStart = True
cnt = 0
while j < lengthAlignment:
if isStart:
strs = [""]*numSeq
for i in range(numSeq):
try:
strs[i] += "%-*s %4d "%(maxSizeAnno, annoList[i],
final2seq_idxMapList[i][j])
except KeyError:
print("final2seq_idxMapList error i=%d, j=%d"%(i,j))
pass
isStart = False
isWithinTMregion = False
for i in range(numSeq):
if lcmp.IsWithinTMRegion(j, posTMList[i]):
aa = aaSeqList[i][j].upper()
isWithinTMregion = True # if hit TM region of any sequence, set as TRUE
else:
aa = aaSeqList[i][j].lower()
strs[i] += aa
if (cnt >= WIDTH and isWithinTMregion == False) or (j >= lengthAlignment-1):
for i in range(numSeq):
strs[i] += " %4d"%(final2seq_idxMapList[i][j])
for i in range(numSeq):
fpout.write("%s\n"%(strs[i]))
fpout.write("\n")
fpout.write("\n")
strs = [""]*numSeq
isStart = True
cnt = 0
j += 1
cnt += 1
fpout.close()
#}}}
def WriteHTMLAlignment(idList, annoList, alignedTopoSeqList,#{{{
originalAlignedTopoSeqList, aaSeqList, final2seq_idxMapList,
outfile):
# but do not break in the middle of a helix
WIDTH = g_params['window_size']
maxSizeAnno = max([len(x) for x in annoList])
lengthAlignment = len(alignedTopoSeqList[0])
numSeq = len(idList)
posTMList = [myfunc.GetTMPosition(x) for x in alignedTopoSeqList]
fpout = open(outfile, "w")
header = """
<!DOCTYPE html>
<html>
<body>
<table border="0" cellspacing="0" cellpadding="0">
"""
tail = """
</table>
</body>
</html>
"""
print(header, file=fpout)
strs = [""]*numSeq
j = 0 # iterator for the alignment position
isStart = True
cnt = 0
while j < lengthAlignment:
if isStart:
strs = [""]*numSeq
for i in range(numSeq):
strs[i] += "<tr><td>%s</td><td>%d</td>"%(annoList[i],
final2seq_idxMapList[i][j])
isStart = False
isWithinTMregion = False
for i in range(numSeq):
if lcmp.IsWithinTMRegion(j, posTMList[i]):
aa = aaSeqList[i][j].upper()
isWithinTMregion = True # if hit TM region of any sequence, set as TRUE
strs[i] += "<td><b><font color=\"black\">%s</font></b></td>"%(aa)
else:
aa = aaSeqList[i][j].lower()
strs[i] += "<td><font color=\"grey\">%s</font></td>"%(aa)
if (cnt >= WIDTH and isWithinTMregion == False) or (j >= lengthAlignment-1):
for i in range(numSeq):
strs[i] += "<td>%d</td></tr>"%(final2seq_idxMapList[i][j])
for i in range(numSeq):
fpout.write("%s\n"%(strs[i]))
fpout.write("\n")
strs = [""]*numSeq
isStart = True
cnt = 0
j += 1
cnt += 1
print(tail, file=fpout)
fpout.close()
#}}}
def WriteHTMLAlignment2(idList, annoList, alignedTopoSeqList,#{{{
originalAlignedTopoSeqList, aaSeqList, final2seq_idxMapList,
outfile):
logger = logging.getLogger(__name__)
# for two sequence pairwise alignment
# assign also the identical and similarity by using BLOSUM62
annoList = idList
#WIDTH = 90 # but do not break in the middle of a helix, adjust 1
#WIDTH = 60 # but do not break in the middle of a helix
WIDTH = g_params['window_size']
maxSizeAnno = max([len(x) for x in annoList])
lengthAlignment = len(alignedTopoSeqList[0])
numSeq = len(idList)
posTMList = [myfunc.GetTMPosition(x) for x in alignedTopoSeqList]
blosum62 = Bio.SubsMat.MatrixInfo.blosum62
if g_params['colorhtml']:
color_TM = 'red'
color_nonTM = 'grey'
else:
color_TM = 'black'
color_nonTM = 'grey'
fpout = open(outfile, "w")
header = """
<!DOCTYPE html>
<html>
<body>
<h3>%s</h3>
<pre>
"""%(g_params['htmlheader'])
tail = """
</pre>
</body>
</html>
"""
print(header, file=fpout)
strs = [""]*numSeq
j = 0 # iterator for the alignment position
isStart = True
cnt = 0
while j < lengthAlignment:
if isStart:
strs = [""]*(numSeq+1)
for i in range(numSeq):
try:
strs[i] += "%-*s %4d "%(maxSizeAnno, annoList[i],
final2seq_idxMapList[i][j])
except KeyError:
logger.debug("final2seq_idxMapList error i=%d, j=%d"%(i,j))
pass
strs[2] += "%-*s %4s "%(maxSizeAnno, "", "")
isStart = False
isWithinTMregion = False
aa1 = aaSeqList[0][j].upper()
aa2 = aaSeqList[1][j].upper()
if aa1 == GAP or aa2 == GAP:
char_rel = " "
else:
if (aa1,aa2) in blosum62:
blosum_score = blosum62[(aa1,aa2)]
elif (aa2,aa1) in blosum62:
blosum_score = blosum62[(aa2,aa1)]
else:
blosum_score = -1
if aa1 == aa2:
char_rel = "|"
elif blosum_score > 0:
char_rel = "."
else:
char_rel = " "
strs[2] += char_rel
for i in range(numSeq):
if lcmp.IsWithinTMRegion(j, posTMList[i]):
aa = aaSeqList[i][j].upper()
isWithinTMregion = True # if hit TM region of any sequence, set as TRUE
strs[i] += "<b><font color=\"%s\">%s</font></b>"%(color_TM, aa)
else:
aa = aaSeqList[i][j].lower()
strs[i] += "<font color=\"%s\">%s</font>"%(color_nonTM, aa)
if ((cnt >= WIDTH and isWithinTMregion == False)
or (j >= lengthAlignment-1)
or j == 190):
for i in range(numSeq):
strs[i] += " %4d"%(final2seq_idxMapList[i][j])
fpout.write("%s\n"%(strs[0]))
fpout.write("%s\n"%(strs[2])) #relationship
fpout.write("%s\n"%(strs[1]))
fpout.write("\n\n")
strs = [""]*(numSeq+1)
isStart = True
cnt = 0
j += 1
cnt += 1
print(tail, file=fpout)
fpout.close()
#}}}
def WriteHTMLAlignment3(idList, annoList, alignedTopoSeqList,#{{{
originalAlignedTopoSeqList, aaSeqList, final2seq_idxMapList,
outfile):
logger = logging.getLogger(__name__)
annoList = idList
WIDTH = g_params['window_size']
maxSizeAnno = max([len(x) for x in annoList])
lengthAlignment = len(alignedTopoSeqList[0])
numSeq = len(idList)
posTMList = [myfunc.GetTMPosition(x) for x in alignedTopoSeqList]
fpout = open(outfile, "w")
header = """
<!DOCTYPE html>
<html>
<body>
<pre>
"""
tail = """
</pre>
</body>
</html>
"""
print(header, file=fpout)
strs = [""]*numSeq
j = 0 # iterator for the alignment position
isStart = True
cnt = 0
while j < lengthAlignment:
if isStart:
strs = [""]*numSeq
for i in range(numSeq):
try:
strs[i] += "%-*s %4d "%(maxSizeAnno, annoList[i],
final2seq_idxMapList[i][j])
except KeyError:
logger.debug( "final2seq_idxMapList error i=%d, j=%d"%(i,j))
pass
isStart = False
isWithinTMregion = False
for i in range(numSeq):
if lcmp.IsWithinTMRegion(j, posTMList[i]):
aa = aaSeqList[i][j].upper()
isWithinTMregion = True # if hit TM region of any sequence, set as TRUE
strs[i] += "<b><font color=\"black\">%s</font></b>"%(aa)
else:
aa = aaSeqList[i][j].lower()
strs[i] += "<font color=\"grey\">%s</font>"%(aa)
#print "isWithinTMregion=", isWithinTMregion
if ((cnt >= WIDTH and isWithinTMregion == False)
or (j >= lengthAlignment-1)
or j == 190):
for i in range(numSeq):
strs[i] += " %4d"%(final2seq_idxMapList[i][j])
for i in range(numSeq):
fpout.write("%s\n"%(strs[i]))
fpout.write("\n")
strs = [""]*numSeq
isStart = True
cnt = 0
j += 1
cnt += 1
print(tail, file=fpout)
fpout.close()
#}}}
def GetAAPath(topomsafile):#{{{
"""
Get the path for amino acid sequences
"""
if g_params['aapath'] != "":
return g_params['aapath']
else:
return myfunc.my_dirname(topomsafile)
#}}}
def GetDGProfileFileName(inFile, seqID):# {{{
"""
Auto determine the DG profile file name based on the inFile
"""
DGProfileFile = ""
dirname_infile = myfunc.my_dirname(inFile)
rootname_infile = os.path.basename(os.path.splitext(inFile)[0])
if not os.path.exists(g_params['DGProfileFile']):
DGProfileFile = dirname_infile + os.sep + rootname_infile + "_dg.txt"
if not os.path.exists(DGProfileFile):
DGProfileFile = dirname_infile + os.sep + rootname_infile + "-%s"%(seqID) + "_dg.txt"
if not os.path.exists(DGProfileFile):
logger.debug("DGProfileFile not found for inFile %s"%(inFile))
DGProfileFile = ""
else:
DGProfileFile = g_params['DGProfileFile']
logger.debug("In function GetDGProfileFileName: seqID=%s, DGProfileFile=%s"%(seqID, DGProfileFile))
return DGProfileFile
# }}}
def GetAASeqDict(topomsafile):#{{{
"""
Get the amino acid sequence dictionary, keys are seqids
"""
#if (not g_params['isDrawText']) or g_params['isShrink']:
#if (not g_params['isDrawText']):
# return {}
#else:
logger = logging.getLogger(__name__)
if g_params['aaSeqDict'] != {}:
return g_params['aaSeqDict']
else:
aaSeqDict = {}
aapath = GetAAPath(topomsafile)
fastaID = os.path.basename(topomsafile).split('.')[0]
if topomsafile.find('homology') >= 0:
fastaAASeqFile = aapath + os.sep + fastaID + '.homology.fa'
else:
fastaAASeqFile = aapath + os.sep + fastaID + '.fa'
if os.path.exists(fastaAASeqFile):
logger.info("Seqfile %s found"%fastaAASeqFile)
(aaSeqIDList, aaSeqList) = myfunc.ReadFasta_without_annotation(
fastaAASeqFile)
if len(aaSeqList) <= 0:
msg = "Failed to read aaSeqFile %s"
logger.debug(msg)
else:
for i in range (len(aaSeqIDList)):
aaSeqDict[aaSeqIDList[i]] = aaSeqList[i]
else:
logger.debug("aaSeqFile %s does not exist."%(fastaAASeqFile))
return aaSeqDict
#}}}
def HideNonKRResidue(aaseq):#{{{
newseq = ""
for aa in aaseq:
if aa in ["K","R"]:
newseq += aa
else:
newseq += " "
return newseq
#}}}
def GetKRStateFraction(alignedSeqList):#{{{
"""return (cnt_K, cnt_R, per_K, per_R)"""
lengthAlignment=len(alignedSeqList[0])
numSeq = len(alignedSeqList)
cnt_K = [0]*lengthAlignment
cnt_R = [0]*lengthAlignment
for i in range(numSeq):
alignedSeq = alignedSeqList[i]
for j in range(lengthAlignment):
s = alignedSeq[j]
if s == 'K':
cnt_K[j] += 1
elif s == 'R':
cnt_R[j] += 1
per_K = [0.0]*lengthAlignment
per_R = [0.0]*lengthAlignment
numSeq_float = float(numSeq)
for j in range(lengthAlignment):
per_K[j] = cnt_K[j]/(numSeq_float)
per_R[j] = cnt_R[j]/(numSeq_float)
return (cnt_K, cnt_R, per_K, per_R)
#}}}
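# Usage sketch: for alignedSeqList = ["KR-", "K-R"] the function returns
#     cnt_K = [2, 0, 0], cnt_R = [0, 1, 1],
#     per_K = [1.0, 0.0, 0.0], per_R = [0.0, 0.5, 0.5]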
def MatchToAlignedSeq(unalignedseq, alignedseq, seqID): #{{{
"""match the unaligned seq to the aligned seq, gaps are added
return alignedseq at failure"""
logger = logging.getLogger(__name__)
newseq = ""
j = 0
GAP = g_params['GAP']
for i in range(len(alignedseq)):
if alignedseq[i] != GAP:
newseq += unalignedseq[j]
j += 1
else:
newseq += GAP
if len(newseq) != len(alignedseq):
logger.debug("failed to match sequence for ID %s" %seqID)
return alignedseq
else:
return newseq
#}}}
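# Usage sketch (assuming the gap character is '-'): the unaligned sequence
# inherits the gap pattern of the aligned one, e.g. matching the amino-acid
# sequence "MKT" onto the aligned topology "i-Mo" gives "M-KT".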
def ReadInDGProfile(infile):#{{{
"""Read in DG profile output by myscanDG.pl"""
dgpDict = {}
try:
fpin = open(infile, 'r')
buff = fpin.read()
fpin.close()
lines = buff.split('\n')
numLine = len(lines)
i = 0
seqid = ''
while i < numLine:
line = lines[i]
if line.find("#SeqID") == 0:
seqid = line.split()[1]
elif line.find("#Number of sliding windows") == 0:
numWin = int(line.split(':')[1])
dgp = []
for j in range(numWin):
strs = lines[i+j+1].split()
if len(strs) != 2:
logger.debug("dgscan file error. strs=%s"%(str(strs)))
sys.exit(1)
dgp.append((int(strs[0]), float(strs[1])))
i += numWin
dgpDict[seqid] = dgp
i += 1
#print (dgpDict)
return dgpDict
except IOError:
print("Failed to read dgprofile", infile, file=sys.stderr)
#}}}
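# Expected input layout (inferred from the parser above; the exact format
# is defined by myscanDG.pl, so treat this as an assumption):
#
#     #SeqID Q9XYZ1
#     #Number of sliding windows: 3
#     10 -1.2
#     11 -0.8
#     12 0.3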
def MatchAlignedDGP(dgp, idxmap_aligne2seq, posindexmap, aligned_toposeq):#{{{
"""
match dgp (a list of tuples) to the aligned toposeq
posindexmap is the index map from the shrinked seq to the original seq
idxmap_aligne2seq is a dictionary of index map from the original (non-shrinked) MSA to the gapless seq
"""
aligned_dgp = []
lenAlignedSeq = len(aligned_toposeq)
resMap = {}
inew = 0
if len(posindexmap) == 0:
isShrink = False
else:
isShrink = True
# convert dgp in to dictionary
dgp_dt = {}
for (idx, dg) in dgp:
dgp_dt[idx] = dg
for j in range(lenAlignedSeq):
if aligned_toposeq[j] != '-':
if isShrink:
j_origseq = idxmap_aligne2seq[posindexmap[j]]
else:
j_origseq = idxmap_aligne2seq[j]
try:
dg = dgp_dt[j_origseq]
aligned_dgp.append((j, dg))
except KeyError:
pass
return aligned_dgp
#}}}
def GetFontDimension(font_size):#{{{
    """Return (fontWidth, fontHeight) in pixels for the given font size"""
    dimension_dict = {
             3: (2, 4),   4: (3, 5),   5: (4, 6),   6: (5, 7),
             7: (5, 8),   8: (6, 9),   9: (7, 10), 10: (7, 11),
            11: (8, 12), 12: (8, 13), 13: (9, 14), 14: (10, 15)
            }
    return dimension_dict.get(font_size, (8, 13))
#}}}
def AutoSizeFontHistogram(ylabel, yticList, widthBox, heightBox, #{{{
        spaceToLeftBorder):
    """Autosize the font so that the y-label and y-tic text of the histogram
    fit within spaceToLeftBorder"""
    maxSizeYtic = max([len(str(x)) for x in yticList])
    numCharSpaceRequired = len(ylabel) + maxSizeYtic*2 + 1
    maxAllowedFontWidth = spaceToLeftBorder / numCharSpaceRequired
    maxAllowedFontHeight = heightBox / (len(yticList)-1)
    fs = 100
    while 1:
        if fs < 9:
            break
        fnt = ImageFont.truetype(g_params['font_dir'] + g_params['font'], fs)
        (fw, fh) = fnt.getsize("a")
        if fw <= maxAllowedFontWidth and fh <= maxAllowedFontHeight:
            break
        else:
            fs -= 1
    return fs
#}}}
def AutoSizeFontTMBox(fontWidthAlign, fontHeightAlign, numSeq, specialProIdxDict, posTMList, TMnameList ): #{{{
"""Autosize the font for text written in TM box so that it fits the
narrowest box """
    # Get the maximum allowed font width for each box
    margin = 1  # pixels
# scale is roughly 50 seqs -> 0.5, 1500 seqs -> 1.5
#scaleTMBox = myfunc.FloatDivision(numSeq, 1450)+ 27.0/58.0
scaleTMBox = 1
specialProIdxList = specialProIdxDict['reppro'] + specialProIdxDict['pdb'] + specialProIdxDict['final']
    fs = 50
    itr = 0
    MAX_ITR = 300
    textHeight = 0 # fallback in case specialProIdxList is empty
while 1:
if fs < 2:
break
fnt = ImageFont.truetype(g_params['font_dir'] + g_params['font'], fs)
maxMargin = -9999999
minMargin = 9999999
for idx in specialProIdxList: # check all topologies with TMbox
posTM = posTMList[idx]
TMname = TMnameList[idx]
for j in range(len(posTM)):
(b, e) = posTM[j]
try:
ss = TMname[j]
except IndexError:
ss = "TM %d"%(j+1)
ss = "%d"%(j+1)
boxWidth = fontWidthAlign * (e-b)
textWidth, textHeight = fnt.getsize(ss)
margin = boxWidth - textWidth
#print ("margin, boxwidth, textwidth)=", (margin, boxWidth, textWidth))
if margin > maxMargin:
maxMargin = margin
if margin < minMargin:
minMargin = margin
#logger.debug("itr=%s"%str(itr) + " fs=%s"%str(fs)+ " (minMargin,maxMargin, fontHeightAlign) = "%(minMargin, maxMargin, fontHeightAlign))
if minMargin < 5:
fs -= 1
elif minMargin >= fontHeightAlign*2:
fs += 1
else:
break
itr += 1
if textHeight < fontHeightAlign*numSeq*0.1 and textHeight > fontHeightAlign*numSeq*0.05:
break
if itr > MAX_ITR:
break
g_params['font_size_TMbox'] = fs
g_params['font_size_scalebar'] = int(fs*0.9+0.5)
g_params['fntScaleBar'] = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], g_params['font_size_scalebar'])
g_params['fntTMbox'] = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], g_params['font_size_TMbox'])
g_params['fntTMbox_label'] = ImageFont.truetype(g_params['font_dir'] +
"DejaVuSerif.ttf", g_params['font_size_TMbox']+1)
fnt = ImageFont.truetype(g_params['font_dir'] + g_params['font'], fs)
logger.debug("font_size_TMbox=%d", g_params['font_size_TMbox'])
#print "fs=",fs
return fnt.getsize("M")
#}}}
def AutoSizeFontDGProfileLabel(dgprofileRegionHeight):# {{{
"""
Resize the font for the label of dg profile
"""
margin = int(dgprofileRegionHeight*0.1+0.5)
fs = 50
itr = 0
MAX_ITR = 300
while 1:
if fs < 2:
break
fnt = ImageFont.truetype(g_params['font_dir']+"DejaVuSerif-Bold.ttf", fs)
textWidth = fnt.getsize(ylabel_DGprofile)[0]
diff = dgprofileRegionHeight - textWidth
if diff < margin:
fs -= 1
elif diff >= margin*2:
fs += 1
else:
break
itr += 1
#print ("itr=", itr, "fs=", fs, "diff=", diff, "margin=", margin)
if itr > MAX_ITR:
break
g_params['fntDGprofileLable'] = fnt
g_params['fntDGprofileTic'] = ImageFont.truetype(g_params['font_dir']+"DejaVuSerif.ttf", max(2, int(fs*0.65)))
g_params['fntDGprofileLegend'] = ImageFont.truetype(g_params['font_dir']+"DejaVuSerif.ttf", max(2, int(fs*0.7)))
# }}}
def GetPositionIdenticalAdjacentNumber(lst, start, minLength): #{{{
    """Given a list of numbers, e.g.
    [1, 1, 0, 4, 3, 4, 2, 3, 3, 3, 54, 4, 3, 44, 44, 44, 44, 3, 3, 3, 3]
    return the positions (offset by start) of runs of identical adjacent
    numbers with length >= minLength"""
posList = []
N = len(lst)
if N <= 0 :
return posList
i = 0
# print 'N=', N
while i < N:
j = 0
while i+j < N and lst[i+j] == lst[i]:
j += 1
if j > 0:
if j >= minLength:
posList.append((i+start, i+j+start))
i += j
else:
i += 1
return posList
#}}}
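# Illustrative example (a sketch, not executed on import):
#   GetPositionIdenticalAdjacentNumber([1,1,1,2,2,1,1,1,1], 0, 3)
#   -> [(0, 3), (5, 9)]
# The run of two 2's is shorter than minLength and is skipped; with
# start=10 the same call would return [(10, 13), (15, 19)].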
def GetRemainingSegmentList(start, end, posListToRemove):#{{{
"""get the remaining segments from start to end by removing segment defined
in posListToRemove"""
numPosToRemove = len(posListToRemove)
if numPosToRemove < 1:
return [(start, end)]
length = end-start
if length <= 0:
return [(start, end)]
lst = [1]*length
# print "====================="
# print lst
# print "====================="
for (b, e) in posListToRemove:
b1 = max(0, b-start)
e1 = max(0, e-start)
for i in range(b1, e1):
lst[i] = 0
# print lst
# print "====================="
posRemainList = []
i = 0
while i < length:
j = 0
while i+j < length and lst[i+j] == 1:
j += 1
if j > 0:
posRemainList.append((i+start, i+j+start))
i += j
else:
i += 1
return posRemainList
#}}}
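# Illustrative example (a sketch, not executed on import):
#   GetRemainingSegmentList(0, 10, [(3, 5)]) -> [(0, 3), (5, 10)]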
def CalDistPointToFragment(x, fragment):#{{{
"""
MMMMMMM
K dist = 1
K dist = 0
R dist = 0
"""
if x <= fragment[0]:
dist = fragment[0]-x
elif x >= fragment[1]:
dist = x-fragment[1]+1
else:
d1 = x - fragment[0]
d2 = (fragment[1]-1) - x
dist = min(d1,d2)
return dist
#}}}
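# Illustrative examples (a sketch, not executed on import) for
# fragment = (5, 8), i.e. the half-open interval used for TM segments:
#   CalDistPointToFragment(3, (5, 8)) -> 2   # left of the fragment
#   CalDistPointToFragment(9, (5, 8)) -> 2   # right of the fragment
#   CalDistPointToFragment(6, (5, 8)) -> 1   # inside the fragment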
def IsOutofMaxDistKR(posTM, x, maxDistKR):#{{{
numTM = len(posTM)
for i in range(numTM):
d = CalDistPointToFragment(x, posTM[i])
if d > 0 and d <= maxDistKR:
if not lcmp.IsWithinTMRegion(x, posTM):
return False
return True
#}}}
def IsSafetoDeleteTheRegion(topoSeqList, start, end):#{{{
"""Check whether the deletion of one block in the topology MSA will affect
the topology of any of the sequence"""
GAP = g_params['GAP']
lengthAlignment = len(topoSeqList[0])
numSeq = len(topoSeqList)
stateList = 'ioM'
numState = len(stateList)
if start < 1 or end >= lengthAlignment -1:
return False
for i in range(numSeq):
topo = topoSeqList[i]
cntFoundState = 0
pList = [-1]*numState
for j in range(numState):
pList[j] = topo[start:end].find(stateList[j])
if pList[j] >= 0:
cntFoundState += 1
if cntFoundState >= 3:
return False
elif cntFoundState >= 1:
gaplesssubstr = topo[start:end].replace(GAP, '')
if len(gaplesssubstr) > 0:
# get the first char and last char of the gapless substr
firstT = gaplesssubstr[0]
lastT = gaplesssubstr[len(gaplesssubstr)-1]
# check the first non GAP state on the right side and left side, if both are
# 'M', it is not safe to delete, otherwise safe
# scan the left side
j = start -1
while j >= 1:
if topo[j] != GAP:
break
j -= 1
firstLeftSideState = topo[j]
p1 = j
# go the right side
j = end
while j < lengthAlignment -1:
if topo[j] != GAP:
break
j += 1
firstRightSideState = topo[j]
p2 = j
# 1. leaving the beginning and end topology state unchanged
# 2. do not remove the region of both sides are TM helices, otherwise,
# two TM helices will be merged into one
if (p1 == 0 or p2 == lengthAlignment-1 ):
return False
else:
if cntFoundState == 2:
if not (lastT == firstRightSideState and
firstT == firstLeftSideState):
return False
elif cntFoundState == 1:
if not (lastT == firstRightSideState or
firstT == firstLeftSideState):
return False
return True
#}}}
def IsSafetoDeleteTheRegionNew(origTopoSeqList, startOrig, endOrig,#{{{
newTopoSeqList, startNew, per_K, per_R):
"""Check whether the deletion of one block in the topology MSA will affect
the topology of any of the sequence"""
# The left side should be checked with the newTopoSeqList
# subsequence is obtained from origTopoSeqList[i][startOrig:endOrig]
# startNew is the position in the newTopoSeqList
try:
GAP = g_params['GAP']
lengthAlignment = len(origTopoSeqList[0])
numSeq = len(origTopoSeqList)
stateList = 'ioM'
numState = len(stateList)
if g_params['isDrawKRBias'] and (sum(per_K[startOrig:endOrig]) +
sum(per_R[startOrig:endOrig])) > 0.0:
return False
if startOrig < 1 or endOrig >= lengthAlignment -1:
return False
for i in range(numSeq):
topoOrig = origTopoSeqList[i]
topoNew = newTopoSeqList[i]
cntFoundState = 0
pList = [-1]*numState
for j in range(numState):
pList[j] = topoOrig[startOrig:endOrig].find(stateList[j])
if pList[j] >= 0:
cntFoundState += 1
if cntFoundState >= 3:
return False
elif cntFoundState >= 1:
gaplesssubstr = topoOrig[startOrig:endOrig].replace(GAP, '')
if len(gaplesssubstr) > 0:
# get the first char and last char of the gapless substr
firstT = gaplesssubstr[0]
lastT = gaplesssubstr[len(gaplesssubstr)-1]
# check the first non GAP state on the right side and left side, if both are
# 'M', it is not safe to delete, otherwise safe
# scan the left side
j = startNew -1
while j >= 1:
if topoNew[j] != GAP:
break
j -= 1
if j >= 0:
firstLeftSideState = topoNew[j]
else:
firstLeftSideState = 'X'
p1 = j
# go the right side
j = endOrig
while j < lengthAlignment -1:
if topoOrig[j] != GAP:
break
j += 1
firstRightSideState = topoOrig[j]
p2 = j
# 1. leaving the beginning and end topology state unchanged
# 2. do not remove the region of both sides are TM helices, otherwise,
# two TM helices will be merged into one
if (p1 < 0 or p2 == lengthAlignment-1 ):
return False
else:
if cntFoundState == 2:
if not (lastT == firstRightSideState and
firstT == firstLeftSideState):
return False
elif cntFoundState == 1:
if not (lastT == firstRightSideState or
firstT == firstLeftSideState):
return False
except IndexError:
return False
return True
#}}}
def IsAtTMregionOfSpecialPro(i, topoSeqList, specialProIdxList):# {{{
    """
    Check whether alignment column i is within the TM region of any of the
    special proteins
    """
    for idx in specialProIdxList:
        if topoSeqList[idx][i] == "M":
            return True
    return False
# }}}
def GetPosTM_MSA(posTMList, specialProIdxList):# {{{
"""
Get the beginning and end position of the MSA which has TM helices
"""
beg = 9999999999
end = -1
for i in range(len(posTMList)):
if not i in specialProIdxList:
posTM = posTMList[i]
if posTM[0][0] < beg:
beg = posTM[0][0]
if posTM[-1][1] > end:
end = posTM[-1][1]
return (beg, end)
# }}}
def ShrinkSeq(seq, shrinkedwidth):#{{{
"""Shrink the seq to shrinkedwidth"""
N = len(seq)
if N <= shrinkedwidth:
return seq
newseq = ""
for i in range(shrinkedwidth):
idx = int(round(i/float(shrinkedwidth-1)*(N-1)))
newseq += seq[idx]
return newseq
#}}}
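# Illustrative example for ShrinkSeq (a sketch, not executed on import):
# the sequence is sampled evenly down to the target width, always keeping
# the first and the last position:
#   ShrinkSeq("ABCDEFGHIJ", 4) -> "ADGJ"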
def ShrinkGapInMSA_obsolete(topoSeqList, specialProIdxList=[]):#{{{
"""Shrink the gap regions
topoSeqList will be updated and return the maparray"""
    # For columns without 'M', shrink the region of each sequence in the block to
    # 1. ''  if there is no 'i' or 'o' in the block
    # 2. 'i' or ' ' if there is no 'o' in the block
    # 3. 'o' or ' ' if there is no 'i' in the block
    # 4. 'io' or 'oi' or ' ' if both 'i' and 'o' exist in the block
    # Whether it becomes 'io', 'i' or ' ' depends on the subsequence in the
    # region for each topology.
    # Furthermore, if 'i' or 'o' exists but removing this column does not
    # change the topology of any sequence, the column can be removed.
    # For columns with 'M', shrink the regions of continuous 'M' depending on
    # the number of 'M' in the column:
    # for each continuous 'M' region, make a profile of the 'M' percentage;
    # for flat regions with length > 5, shrink them to min(L, L/5*N/2);
    # for a smooth profile with a peak, take the region above 50%.
    #
    # For TM regions of proteins in specialProIdxList, do not shrink them if
    # they are at the beginning or the end
(cnt_i, cnt_o, cnt_M, cnt_GAP,
per_i, per_o, per_M, per_GAP) = lcmp.GetTopoStateFraction(
topoSeqList)
lengthAlignment = len(topoSeqList[0])
i = 0
numSeq = len(topoSeqList)
newList = [""]*numSeq
posindexmap = {}
num_specialpro = len(specialProIdxList)
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
(begTM_MSA, endTM_MSA) = GetPosTM_MSA(posTMList, specialProIdxList)
cnt = 0
while i < lengthAlignment:
j = 0
sumPer_i = 0.0
sumPer_o = 0.0
while i+j < lengthAlignment and per_M[i+j] == 0.0:
sumPer_i += per_i[i+j]
sumPer_o += per_o[i+j]
j += 1
if j >= 1: #{{{
if sumPer_i > 0.0 or sumPer_o > 0.0:
if sumPer_i == 0.0:
for iseq in range(numSeq):
if topoSeqList[iseq][i:i+j].find("o") >= 0:
newList[iseq] += 'o'
else:
newList[iseq] += ' '
posindexmap[cnt] = i+j-1
cnt += 1
elif sumPer_o == 0.0:
for iseq in range(numSeq):
if topoSeqList[iseq][i:i+j].find("i") >= 0:
newList[iseq] += 'i'
else:
newList[iseq] += ' '
posindexmap[cnt] = i+j-1
cnt += 1
else:
for iseq in range(numSeq):
ss = topoSeqList[iseq][i:i+j]
p1 = ss.find('i')
p2 = ss.find('o')
if p1 >= 0 and p2 >= 0:
if p1 < p2:
newList[iseq] += 'io'
else:
newList[iseq] += 'oi'
else:
if p1 >= 0:
newList[iseq]+='ii'
elif p2 >= 0:
newList[iseq] += 'oo'
else:
newList[iseq] += ' '
posindexmap[cnt] = i
posindexmap[cnt+1] = i+j-1
cnt += 2
i += j;#}}}
else: # starts a region with M#{{{
if num_specialpro > 0:
                if (IsAtTMregionOfSpecialPro(i, topoSeqList, specialProIdxList) and
                        (i < begTM_MSA or i >= endTM_MSA)):
                    for iseq in range(numSeq):
                        newList[iseq] += topoSeqList[iseq][i]
                    i += 1
else:
                sumPer_i = 0.0
                sumPer_o = 0.0
                while i+j < lengthAlignment and per_M[i+j] > 0.0:
                    sumPer_i += per_i[i+j]
                    sumPer_o += per_o[i+j]
                    j += 1
#find all flat regions with >=5 residues
# print cnt_M[i:i+j]
posFlatRegionList = GetPositionIdenticalAdjacentNumber(
cnt_M[i:i+j], i, 5)
# get the rest regions
posNonFlatRegionList = GetRemainingSegmentList(i, i+j,
posFlatRegionList)
mergedRegionList = []
for (b,e)in posFlatRegionList:
mergedRegionList.append(('flat', b, e))
for (b,e)in posNonFlatRegionList:
mergedRegionList.append(('nonflat', b, e))
mergedRegionList = sorted(mergedRegionList, key=lambda tup:tup[1])
for (state, b, e) in mergedRegionList:
if state == 'flat':
shrinkedwidth = max(2, int(round((e-b)* min(1.0,
per_M[b]*10))))
for iseq in range(numSeq):
newList[iseq] += ShrinkSeq(topoSeqList[iseq][b:e],
shrinkedwidth)
for k in range(cnt, cnt+shrinkedwidth):
posindexmap[k] = (b +
int(round((k-cnt)*(e-b)/float(shrinkedwidth-1))))
cnt += shrinkedwidth
else:
selectedPosList = []
minPerM = min(per_M[b:e])
maxPerM = max(per_M[b:e])
middlePerM = minPerM + (maxPerM - minPerM)*0.6
for k in range(b,e):
if per_M[k] >= middlePerM:
selectedPosList.append(k)
selectedPosListSet = set(selectedPosList)
for k in range(b, e):
if (k in selectedPosListSet or not
IsSafetoDeleteTheRegion(topoSeqList, k, k+1)):
for iseq in range(numSeq):
newList[iseq] += topoSeqList[iseq][k]
posindexmap[cnt] = k
cnt += 1
i += j
#}}}
for iseq in range(numSeq):
topoSeqList[iseq] = newList[iseq]
return posindexmap
#}}}
def ShrinkGapInMSA_0(idList, topoSeqList, specialProIdxList=[]): #{{{
"""Shrink the gap regions
topoSeqList will be updated and return the maparray
by default ShrinkGapInMSA_0 is used"""
    # For columns without 'M', shrink the region of each sequence in the block to
    # 1. ''  if there is no 'i' or 'o' in the block
    # 2. 'i' or ' ' if there is no 'o' in the block
    # 3. 'o' or ' ' if there is no 'i' in the block
    # 4. 'io' or 'oi' or ' ' if both 'i' and 'o' exist in the block
    # Whether it becomes 'io', 'i' or ' ' depends on the subsequence in the
    # region for each topology.
    # Furthermore, if 'i' or 'o' exists but removing this column does not
    # change the topology of any sequence, the column can be removed.
    # For columns with 'M', shrink the regions of continuous 'M' depending on
    # the number of 'M' in the column:
    # for each continuous 'M' region, make a profile of the 'M' percentage;
    # for flat regions with length > 5, shrink them to min(L, L/5*N/2);
    # for a smooth profile with a peak, take the region above 50%.
    #
(cnt_i, cnt_o, cnt_M, cnt_GAP,
per_i, per_o, per_M, per_GAP) = lcmp.GetTopoStateFraction(
topoSeqList)
isDrawKRBias = g_params['isDrawKRBias']
num_specialpro = len(specialProIdxList)
#print ("num_specialpro=%d"%(num_specialpro))
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
(begTM_MSA, endTM_MSA) = GetPosTM_MSA(posTMList, specialProIdxList)
if isDrawKRBias:
aaSeqDict = g_params['aaSeqDict']
alignedSeqList = []
for i in range(len(idList)):
toposeq = topoSeqList[i]
seqid = idList[i]
try:
aaseq = aaSeqDict[seqid]
aaseq = MatchToAlignedSeq(aaseq, toposeq, seqid)
alignedSeqList.append(aaseq)
except KeyError:
pass
(cnt_K, cnt_R, per_K, per_R) = GetKRStateFraction(alignedSeqList)
else:
(cnt_K, cnt_R, per_K, per_R) = ([],[],[],[])
lengthAlignment = len(topoSeqList[0])
i = 0
numSeq = len(topoSeqList)
newList = [""]*numSeq
posindexmap = {}
cnt = 0
while i < lengthAlignment:
j = 0
sumPer_i = 0.0
sumPer_o = 0.0
while i+j < lengthAlignment and per_M[i+j] == 0.0:
sumPer_i += per_i[i+j]
sumPer_o += per_o[i+j]
j += 1
if j >= 1: #{{{ # non TM region
# print "per_i:", per_i[i:i+j]
# print "per_o:", per_o[i:i+j]
# print "sumPer_i:", sumPer_i, "sumPer_o:", sumPer_o
# print "Non M region: (%d, %d)"%(i,i+j)
if sumPer_i > 0.0 or sumPer_o > 0.0:
# print "With i or o: region: (%d, %d)"%(i,i+j)
if not IsSafetoDeleteTheRegionNew(topoSeqList, i, i+j, newList,
cnt, per_K, per_R):
# otherwise, just delete this region
if isDrawKRBias:
repStatList = [] # state to be replaced
for iseq in range(numSeq):
subseq = topoSeqList[iseq][i:i+j].replace('-','')
if len(subseq) == 0:
repStatList.append(' ')
else:
repStatList.append(subseq[0])
tmpcnt = 0
for pp in range(i, i+j):
if per_K[pp] > 0.0 or per_R[pp] > 0.0:
for iseq in range(numSeq):
newList[iseq] += repStatList[iseq]
posindexmap[cnt] = pp
cnt += 1
tmpcnt += 1
if tmpcnt == 0:
pp = i
for iseq in range(numSeq):
newList[iseq] += repStatList[iseq]
posindexmap[cnt] = pp
cnt += 1
else:
if sumPer_i == 0.0 or sumPer_o == 0.0:#{{{
for iseq in range(numSeq):
segment = topoSeqList[iseq][i:i+j]
if segment.find('o') >= 0:
newList[iseq] += 'o'
elif segment.find('i') >= 0:
newList[iseq] += 'i'
else:
newList[iseq] += ' '
posindexmap[cnt] = i+j-1
cnt += 1
else:
stateList = 'io'
maxCntFoundState = 0
for iseq in range(numSeq):
cntFoundState = 0
segment = topoSeqList[iseq][i:i+j]
for state in stateList:
if segment.find(state) >= 0:
cntFoundState += 1
if cntFoundState > maxCntFoundState:
maxCntFoundState = cntFoundState
if maxCntFoundState >= 2:
break
if maxCntFoundState == 2:
for iseq in range(numSeq):
ss = topoSeqList[iseq][i:i+j]
p1 = ss.find('i')
p2 = ss.find('o')
if p1 >= 0 and p2 >= 0:
if p1 < p2:
newList[iseq] += 'io'
else:
newList[iseq] += 'oi'
else:
if p1 >= 0:
newList[iseq]+='ii'
elif p2 >= 0:
newList[iseq] += 'oo'
else:
newList[iseq] += ' '
posindexmap[cnt] = i
posindexmap[cnt+1] = i+j-1
cnt += 2
else:
for iseq in range(numSeq):
segment = topoSeqList[iseq][i:i+j]
if segment.find('o') >= 0:
newList[iseq] += 'o'
elif segment.find('i') >= 0:
newList[iseq] += 'i'
else:
newList[iseq] += ' '
posindexmap[cnt] = i+j-1
cnt += 1#}}}
i += j;#}}}
else: # starts a region with M#{{{
            if num_specialpro > 0:
                if (IsAtTMregionOfSpecialPro(i, topoSeqList, specialProIdxList)
                        #and (i < begTM_MSA and i>=endTM_MSA)
                        ):
                    for iseq in range(numSeq):
                        newList[iseq] += topoSeqList[iseq][i]
                    posindexmap[cnt] = i
                    cnt += 1
                    logger.debug("keep special-protein TM column %d of %d"%(i, lengthAlignment))
                    i += 1
                else: # keep the column as is; without this branch i never advances
                    for iseq in range(numSeq):
                        newList[iseq] += topoSeqList[iseq][i]
                    posindexmap[cnt] = i
                    cnt += 1
                    i += 1
else:
                sumPer_i = 0.0
                sumPer_o = 0.0
                while i+j < lengthAlignment and per_M[i+j] > 0.0:
                    sumPer_i += per_i[i+j]
                    sumPer_o += per_o[i+j]
                    j += 1
if j > 0:
# print "M region: (%d, %d)"%(i,i+j)
#find all flat regions with >=5 residues
posFlatRegionList = GetPositionIdenticalAdjacentNumber(
cnt_M[i:i+j], i, 5)
# get remaining regions
posNonFlatRegionList = GetRemainingSegmentList(i, i+j,
posFlatRegionList)
mergedRegionList = []
for (b,e)in posFlatRegionList:
mergedRegionList.append(('flat', b, e))
for (b,e)in posNonFlatRegionList:
mergedRegionList.append(('nonflat', b, e))
mergedRegionList = sorted(mergedRegionList, key=lambda
tup:tup[1])
# if i >= 1320 and i+j <= 1460:
# print "region (%d, %d)"%(i, i+j)
# print cnt_M[i:i+j]
# print "posFlatRegionList:", posFlatRegionList
for (state, b, e) in mergedRegionList:
if state == 'flat':
if (per_GAP[b] > 0.95 and
IsSafetoDeleteTheRegionNew(topoSeqList, b, e,
newList, cnt, per_K, per_R)):
shrinkedwidth = 0
else:
shrinkedwidth = max(5, int(round((e-b)* min(1.0,
per_M[b]*1.5))))
# if b >= 1320 and e <= 1460:
# print ("per_M[b]:",per_M[b], "len(%d, %d)="%(b, e),
# e-b, "shrinkedwidth=",shrinkedwidth)
selectedIndexList = [b +
int(round(k*(e-b-1)/float(shrinkedwidth-1)))
for k in range(shrinkedwidth)]
if isDrawKRBias:
for pp in range(b, e):
if (per_K[pp] + per_R[pp] > 0.0):
selectedIndexList.append(pp)
selectedIndexList = sorted(
list(set(selectedIndexList)))
for k in range(b, e):
if (k in selectedIndexList or
not IsSafetoDeleteTheRegionNew(
topoSeqList, k, k+1, newList,
cnt, per_K, per_R)):
for iseq in range(numSeq):
newList[iseq] += topoSeqList[iseq][k]
posindexmap[cnt] = k
cnt += 1
else: #'nonflat'
minPerM = min(per_M[b:e])
maxPerM = max(per_M[b:e])
middlePerM = minPerM + (maxPerM - minPerM)*0.5
selectedIndexList = []
for k in range(b,e):
if ((per_GAP[k] < 0.95 and per_M[k] > middlePerM) or
per_M[k] > 0.65 or
(isDrawKRBias and (per_K[k]+per_R[k])>0.0)):
selectedIndexList.append(k)
for k in range(b, e):
if (k in selectedIndexList or
not IsSafetoDeleteTheRegionNew(topoSeqList,
k, k+1, newList, cnt, per_K, per_R)):
for iseq in range(numSeq):
newList[iseq] += topoSeqList[iseq][k]
posindexmap[cnt] = k
cnt += 1
# if b >= 1320 and e <= 1460:
# print ("numSelectedColumn=", numSelectedColumn, maxPerM,
# "len(%d, %d)=%d"%(b, e, e-b))
i += j
else:
i += 1
#}}}
for iseq in range(numSeq):
topoSeqList[iseq] = newList[iseq].replace(" ", "-")
#print ("%10s: %s"%(idList[iseq], topoSeqList[iseq]))
return posindexmap
#}}}
def ShrinkGapInMSA_exclude_TMregion(idList, topoSeqList): #{{{
"""
Shrink non TM region and gap region
topoSeqList will be updated and return the index map
Return posindexmap
posindexmap shrink -> non-shrink
"""
# updated 2016-09-13, shrink exclude signal peptide
    # For columns without 'M', shrink the region of each sequence in the block to
    # 1. ''  if there is no 'i' or 'o' in the block
    # 2. 'i' or ' ' if there is no 'o' in the block
    # 3. 'o' or ' ' if there is no 'i' in the block
    # 4. 'io' or 'oi' or ' ' if both 'i' and 'o' exist in the block
    # Whether it becomes 'io', 'i' or ' ' depends on the subsequence in the
    # region for each topology.
    # Furthermore, if 'i' or 'o' exists but removing this column does not
    # change the topology of any sequence, the column can be removed.
(cnt_i, cnt_o, cnt_M, cnt_SP, cnt_GAP,
per_i, per_o, per_M, per_SP, per_GAP) = lcmp.GetTopoStateFraction_withSP(
topoSeqList)
    NMargin = 4 # keep <= NMargin/2 residues at the two sides of aligned TM helices
    halfMargin = NMargin//2 # integer division, halfMargin is used as a range() bound
lengthAlignment = len(topoSeqList[0])
i = 0
numSeq = len(topoSeqList)
newList = [""]*numSeq
posindexmap = {}
cnt = 0
while i < lengthAlignment:
j = 0
sumPer_i = 0.0
sumPer_o = 0.0
while i+j < lengthAlignment and (per_M[i+j] == 0.0 and per_SP[i+j] == 0.0):
sumPer_i += per_i[i+j]
sumPer_o += per_o[i+j]
j += 1
poslist_to_keep = []
if j >= 1: # non TM region, non SP region
# print "per_i:", per_i[i:i+j]
# print "per_o:", per_o[i:i+j]
# print "sumPer_i:", sumPer_i, "sumPer_o:", sumPer_o
# print "Non M region: (%d, %d)"%(i,i+j)
#if ((sumPer_i > 0.0 and sumPer_o == 0.0) or (sumPer_o > 0.0 and sumPer_i == 0.0)):
if (sumPer_i > 0.0 or sumPer_o > 0.0):
if i == 0:
poslist_to_keep = list(range(max(i+j-halfMargin,0), i+j))
else:
poslist_to_keep = list(range(i,min(i+halfMargin,
lengthAlignment)))+list(range(max(i+j-halfMargin,0),i+j))
else:
poslist_to_keep = list(range(i, i+j))
i += j
else:
poslist_to_keep = list(range(i, i+1))
i += 1
poslist_to_keep = sorted(set(poslist_to_keep), reverse=False)
        for pp in poslist_to_keep:
            for iseq in range(numSeq):
                try:
                    newList[iseq] += topoSeqList[iseq][pp]
                except IndexError:
                    print("Error! iseq=%d, pp=%d, lengthAlignment=%d"%(iseq, pp, lengthAlignment))
            posindexmap[cnt] = pp
            cnt += 1
for iseq in range(numSeq):
topoSeqList[iseq] = newList[iseq].replace(" ", "-")
return posindexmap
#}}}
def ShrinkMSA_Method_2(topoSeqList, aaSeqList=[], posTMList=[],#{{{
shrinkrate_TM=2.0, max_hold_loop=3, isDrawKRBias=False):
"""
Shrink multiple alignment of topologies
Input:
topoSeqList A list of aligned topologies
aaSeqList A list of aligned sequences
shrinkrate_TM shrink rate for aligned TM regions,
max_hold_loop maximal positions to hold for the loop region
Output:
updated topoSeqList
        idxmap_align2shrink   map original-alignment position -> shrunken-alignment position
        idxmap_shrink2align   map shrunken-alignment position -> original-alignment position
"""
    if not posTMList: # do not mutate the shared default argument
        posTMList = [myfunc.GetTMPosition(topo) for topo in topoSeqList]
numSeq = len(topoSeqList)
lengthAlignment = len(topoSeqList[0])
# first get positions to keep
array = ["l"]*lengthAlignment # l -- loop
# P -- positively charged residues K or R
# M -- common TM region
for posTM in posTMList:
for tup in posTM:
for j in range(tup[0], tup[1]):
array[j] = "M"
if isDrawKRBias:
for i in range(numSeq):
seq = aaSeqList[i]
for j in range(len(seq)):
if (seq[j] in ["K", "R"] and
(not lcmp.IsWithinTMRegion(j, posTMList[i]))):
array[j] = "P"
#debug
if g_params['isPrintDebugInfo']:
print("array for KR and TM regions, (l: loop, P: K or R, M, TM region)")
print("%s"%("".join(array)))
poslist_to_keep = []
i = 0
while i < lengthAlignment:
if array[i] == "M":
j = 0
while i+j<lengthAlignment and array[i+j] == "M":
j+=1
length_segment = j
shrinked_len_seg = max(2, int(round(length_segment/shrinkrate_TM)))
for k in range(shrinked_len_seg):
poslist_to_keep.append(i +
int(round((length_segment-1)*k/float(shrinked_len_seg-1))))
i += j
else:
j = 0
while i+j<lengthAlignment and array[i+j] != "M":
j+=1
length_segment = j
if length_segment < max_hold_loop:
poslist_to_keep += list(range(i,i+j))
else:
for k in range(i,i+j):
if (k-i < max_hold_loop/2 or
i+j-k < max_hold_loop or
(isDrawKRBias and array[k] == "P")):
poslist_to_keep.append(k)
i += j
idxmap_align2shrink = {}
idxmap_shrink2align = {}
for i in range(len(poslist_to_keep)):
pp = poslist_to_keep[i]
idxmap_align2shrink[pp] = i
idxmap_shrink2align[i] = pp
poslist_to_keep = sorted(set(poslist_to_keep), reverse=False)
cnt = 0
newList = [""]*numSeq
for pp in poslist_to_keep:
for iseq in range(numSeq):
try:
newList[iseq] += topoSeqList[iseq][pp]
except IndexError:
print("Error! iseq=%d, pp=%d, lengthAlignment=%d"%(iseq, pp,
lengthAlignment))
cnt += 1
for iseq in range(numSeq):
topoSeqList[iseq] = newList[iseq].replace(" ", "-")
return (idxmap_align2shrink, idxmap_shrink2align)
#}}}
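# Minimal usage sketch for ShrinkMSA_Method_2 (illustrative, not executed
# on import). The topologies must be aligned strings of equal length; TM
# segments are located with myfunc.GetTMPosition when posTMList is empty:
#   topoList = ["iiiMMMMMMoo", "ii-MMMMMMoo"]
#   (a2s, s2a) = ShrinkMSA_Method_2(topoList, shrinkrate_TM=2.0)
#   # topoList is shrunken in place; a2s/s2a translate column indices
#   # between the original and the shrunken alignment.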
def RunDGScan(aaseq, seqID):# {{{
"""
Calculate the DG profile by using the dgscanProg
return dgp
"""
    dgp = None
    tmpaaseqfile = tempfile.mktemp() # NOTE: mktemp is race-prone; kept for backward compatibility
    tmpdgpfile = tempfile.mktemp()
tmpfp = open(tmpaaseqfile, 'w')
tmpfp.write(">%s\n"%seqID)
tmpfp.write("%s\n"%aaseq)
tmpfp.close()
os.system("%s %s -lmin 21 -lmax 21 -o %s"%(g_params['dgscanProg'],
tmpaaseqfile, tmpdgpfile))
dgpDict = ReadInDGProfile(tmpdgpfile)
os.system("rm -f %s %s" %(tmpaaseqfile, tmpdgpfile))
if dgpDict and seqID in dgpDict:
dgp = dgpDict[seqID]
return dgp
# }}}
def DrawTMOfConsensus(posTM, xy0, fontWidth, fontHeight, draw): #{{{
"""Draw TM box"""
widthAnnotation = g_params['widthAnnotation']
annoSeqInterval = g_params['annoSeqInterval']
font_size_TMbox = g_params['font_size_TMbox']
    fntTMbox = g_params['fntTMbox']
    heightTMbox = g_params['heightTMbox']
    (fontWidthTMbox, fontHeightTMbox) = fntTMbox.getsize("M")
(x0,y0) = xy0
x0 = x0 + widthAnnotation * fontWidth + annoSeqInterval * fontWidthTMbox
y0 = y0 - fontHeightTMbox/2
marginTop = 0
marginBottom = 0
cnt = 0
for (b, e) in posTM:
x1 = x0 + b*fontWidth
y1 = y0 - marginTop
x2 = x0 + e*fontWidth
y2 = y0 + int(heightTMbox*fontHeightTMbox+0.5) + marginBottom
box=[x1, y1 , x2, y2]
draw.rectangle(box, fill="violet", outline="black")
# draw text
s = "TM %d"%(cnt+1)
(textwidth, textheight) = fntTMbox.getsize(s)
textheight+=2
x3 = int(round((x1+x2-textwidth)/2.0))
y3 = int(round((y1+y2-textheight)/2.0))
draw.text((x3, y3), s, font=fntTMbox, fill="black")
cnt += 1
#}}}
def DrawDGProfile(dgpList, lengthAlignment, maxDG, minDG, xy0, #{{{
dgprofileRegionWidth, dgprofileRegionHeight, spaceToLeftBorder,
isDrawSeqID, line_mode, draw):
"""Draw DG profile
support drawing multiple DG profiles in one box
"""
logger.debug("Draw DG profile")
(x0, y0) = xy0
paddingtop = int(dgprofileRegionHeight*0.05+0.5)
paddingbottom = int(dgprofileRegionHeight*0.05+0.5)
heightDrawRegion = dgprofileRegionHeight - paddingtop - paddingbottom
widthDrawRegion = dgprofileRegionWidth
font_size = g_params['font_size_scalebar']
line_width = max(1, int(dgprofileRegionHeight*0.02+0.5))
outline_width = max(1, int(line_width*0.75+0.5))
ticline_width = max(1, int(outline_width*0.75+0.5))
logger.debug("(minDG, maxDG)=(%f,%f)"%(minDG, maxDG))
num_dgp = len(dgpList)
blue = Color("blue")
red = Color("red")
colorpalette = list(blue.range_to(red,num_dgp))
# draw outline box
x1 = x0
y1 = y0 + paddingtop
x2 = x1 + dgprofileRegionWidth
y2 = y0 + dgprofileRegionHeight - paddingbottom
box = [x1,y1,x2,y2]
draw.rectangle(box, outline='black', width=outline_width)
yMiddle = int(round((y1 + y2) / 2.0))
# draw x, axis
x1 = x0
y1 = y0 + paddingtop + int(round(heightDrawRegion*maxDG/(maxDG-minDG)))
x2 = x1 + widthDrawRegion
y2 = y1
draw.line([x1, y1, x2, y2],fill="grey")
yZero = y1
# draw ytics and text
fnt = g_params['fntDGprofileTic']
step = max(0.5, round((maxDG-minDG)/5))
lengthtic = max(5, int(widthDrawRegion*0.01+0.5))
ytic = 0.0
while ytic <= maxDG:
x1 = x0 - lengthtic
y1 = yZero - int(round(heightDrawRegion*ytic/(maxDG-minDG)))
x2 = x1 + lengthtic
y2 = y1
draw.line([x1, y1, x2, y2],fill="black", width=ticline_width)
text = "%.1f"%ytic
(textWidth,textHeight) = fnt.getsize(text)
draw.text((x1-textWidth-lengthtic,y1-textHeight/2), text, font=fnt,
fill='black')
ytic += step
ytic = -step
while ytic > minDG:
x1 = x0 - lengthtic
y1 = yZero - int(round(heightDrawRegion*ytic/(maxDG-minDG)))
x2 = x1 + lengthtic
y2 = y1
draw.line([x1, y1, x2, y2],fill="black")
text = "%.1f"%ytic
(textWidth,textHeight) = fnt.getsize(text)
draw.text((x1-textWidth-lengthtic,y1-textHeight/2), text, font=fnt,
fill='black')
ytic -= step
for ii in range(num_dgp):
seqID, dgp = dgpList[ii]
line_color = colorpalette[ii].get_hex_l()
# draw profile
sizeSquare = int(g_params['image_scale']* 4+0.5)
pointList= []
papd = pointList.append
for (idx, dg) in dgp:
#print (idx, dg)
h = int(round(dg/(maxDG-minDG)*heightDrawRegion))
x1 = (x0 + int(round(widthDrawRegion*float(idx)/lengthAlignment)) -
sizeSquare/2)
y1 = yZero - h - sizeSquare/2
x2 = x1+sizeSquare
y2 = y1+sizeSquare
box=[x1,y1,x2,y2]
if line_mode == "dot":
draw.ellipse(box, outline=line_color)
papd((x1+sizeSquare/2, y1+sizeSquare/2))
if line_mode == "line":
for i in range(0,len(pointList)-1,1):
draw.line([pointList[i],pointList[i+1]],fill=line_color, width=line_width)
# draw legend
if g_params['isDrawDGProfileLegend']:
fnt = g_params['fntDGprofileLegend']
textWidth, textHeight = fnt.getsize("Initial Topology 1")
bar_width = textWidth/3
gap_item = textWidth/4
gap_text_bar = bar_width/2
item_width = textWidth + bar_width + gap_item + gap_text_bar
x_padding = int(item_width*num_dgp*0.05+0.5)
if isDrawSeqID and num_dgp > 1:
# draw outline box
x1 = x0
y1 = y0 + dgprofileRegionHeight
x2 = x1 + item_width*num_dgp+x_padding*2
y2 = y1 + textHeight*2
box = [x1,y1,x2,y2]
draw.rectangle(box, outline='black', width=outline_width)
for ii in range(num_dgp):
seqID, dgp = dgpList[ii]
line_color = colorpalette[ii].get_hex_l()
if (num_dgp) == 1:
text = "Initial Topology"
else:
text = "Initial Topology %s"%(seqID.lstrip("rep"))
(textWidth,textHeight) = fnt.getsize(text)
x = x0 + ii*item_width + x_padding
y = y0 + dgprofileRegionHeight + textHeight/4
draw.text((x,y), text, font=fnt, fill='black')
# draw bar
x1 = x + textWidth + gap_text_bar
y1 = y + textHeight/2
x2 = x1 + bar_width
y2 = y1
draw.line([x1, y1, x2, y2],fill=line_color, width=line_width*2)
#}}}
def DrawTMOfConsensus2(topoSeq, posTM, typeTM, TMname, foldType, xy0, fontWidth, fontHeight, draw,length): #{{{
"""Draw TM box, for loop regions, GAPs are shown in blank
"""
widthAnnotation = g_params['widthAnnotation']
annoSeqInterval = g_params['annoSeqInterval']
font_size_TMbox = g_params['font_size_TMbox']
    fntTMbox = g_params['fntTMbox']
    heightTMbox = g_params['heightTMbox']
    (fontWidthTMbox, fontHeightTMbox) = fntTMbox.getsize("M")
(x0,y0) = xy0
x0 = x0 + widthAnnotation * fontWidth + annoSeqInterval * fontWidthTMbox
y0 = y0 - fontHeightTMbox/2
marginTop = 0
marginBottom = 0
incolor="#F2EABD"
#outcolor="#CCFFFF"
incolor = g_params['loopcolor_in']
outcolor = g_params['loopcolor_out']
base_outline_color = "black"
base_text_color = "black"
base_outline_width = int(fontHeightTMbox*g_params['heightTMbox']*0.06+0.5)
loop_height = fontHeightTMbox*0.4
cnt = 0
last=x0
for (b, e) in posTM:
x1 = x0 + b*fontWidth
y1 = y0 - marginTop
x2 = x0 + e*fontWidth
y2 = y0 + int(heightTMbox*fontHeightTMbox +0.5) + marginBottom
box=[x1, y1 , x2, y2]
(text_TMlabel, text_color, outline_color, outline_width) = lcmp.SetMakeTMplotColor(
cnt, TMname, foldType, base_outline_width, base_text_color, base_outline_color)
logger.debug("text_TMlabel=%s, outline_color=%s, outline_width=%d"%(text_TMlabel, outline_color, outline_width))
if (typeTM[cnt]=="M"): # out to in
draw.rectangle(box, fill=g_params['memcolor_out_to_in'], outline=outline_color, width=outline_width)
#draw.line([last,y2,x1,y2],g_params['loopcolor_in'])
draw.rectangle([last,y2-loop_height,x1,y2],fill=incolor) #draw loop
elif (typeTM[cnt]=="W"): # in to out
draw.rectangle(box, fill=g_params['memcolor_in_to_out'], outline=outline_color, width=outline_width)
#draw.line([last,y1,x1,y1],outcolor)
draw.rectangle([last,y1,x1,y1+loop_height],fill=outcolor)
elif (typeTM[cnt]=="R"): # Reeentrant inside
y1 = y0 - marginTop + int(heightTMbox*fontHeightTMbox/3.0+0.5)
y2 = y0 + int(heightTMbox*fontHeightTMbox+0.5) + marginBottom
box=[x1, y1 , x2, y2]
draw.rectangle(box, fill=incolor, outline=outline_color, width=outline_width)
#draw.line([last,y2,x1,y2],incolor)
draw.rectangle([last,y2-loop_height,x1,y2],fill=incolor) # draw loop
elif (typeTM[cnt]=="r"): # Reentrant outside
y1 = y0 - marginTop
y2 = y0 + int(heightTMbox*fontHeightTMbox/3.0*2+0.5) + marginBottom
box=[x1, y1 , x2, y2]
draw.rectangle(box, fill=outcolor, outline=outline_color, width=outline_width)
#draw.line([last,y1,x1,y1],outcolor)
draw.rectangle([last,y1,x1,y1+loop_height],fill=outcolor) # draw loop
else:
draw.rectangle(box, fill="violet", outline=outline_color, width=outline_width)
last=x2
# draw text_TMlabel
(textwidth, textheight) = fntTMbox.getsize(text_TMlabel)
textheight+=2
x3 = int(round((x1+x2-textwidth)/2.0))
y3 = int(round((y1+y2-textheight)/2.0))
draw.text((x3, y3), text_TMlabel, font=fntTMbox, fill=text_color)
cnt += 1
if (typeTM[cnt-1]=="R" or typeTM[cnt-1]=="W"):
#draw.line([x2,y2,x0 + length*fontWidth,y2],incolor)
draw.rectangle([x2,y2-loop_height,x0 + length*fontWidth,y2],fill=incolor) # loop
elif (typeTM[cnt-1]=="r" or (typeTM[cnt-1]=="M")):
draw.line([x2,y1,x0 + length*fontWidth,y1],outcolor)
draw.rectangle([x2,y1,x0 + length*fontWidth,y1+loop_height],fill=outcolor) # loop
if g_params['isShowGap']:
# show gap with blank, draw rectangle with fill=(0, 0, 0, 0)
posGAP = myfunc.GetGapPosition(topoSeq)
for (b, e) in posGAP:
if not lcmp.IsWithinTMRegion(b, posTM):
x1 = x0 + b*fontWidth
y1 = y0 - marginTop
x2 = x0 + e*fontWidth
y2 = y0 + int(heightTMbox*fontHeightTMbox +0.5) + marginBottom
io_left = lcmp.Get_IOState_upstream(topoSeq, b)
io_right = lcmp.Get_IOState_downstream(topoSeq, e-1)
if io_left == 'i' or io_right == 'i':
loop_type = 'inside'
y1 = y2 - loop_height
else:
loop_type = 'outside'
y2 = y1 + loop_height
box=[x1, y1 , x2, y2]
#fillcolor = (0,0,0,0)
fillcolor = (255,255,255,255)
draw.rectangle(box, fill=fillcolor) # erase the gap region
#}}}
def DrawScale(length, posindexmap, xy0, font_size_alignment, #{{{
fontWidth, fontHeight, draw):
"""Draw horizontal scale bar. font_size_alignment is the font_size for the
MSA"""
widthAnnotation = g_params['widthAnnotation']
annoSeqInterval = g_params['annoSeqInterval']
fntTMbox = g_params['fntTMbox']
(fontWidthTMbox, fontHeightTMbox) = fntTMbox.getsize("a")
isShrinked = False
if len(posindexmap) > 0:
length = len(posindexmap)
isShrinked = True
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
(x0,y0) = xy0
x = x0 + widthAnnotation*fontWidth +annoSeqInterval* fontWidthTMbox
y = y0
fg = "black"
step = 20 * max(1,int(math.ceil(fontWidthScaleBar / float(fontWidth) *
len("%d"%length) /10 + 0.5)))
# print "step=", step
i = step
while i < length:
if isShrinked:
s = "%s"%(posindexmap[i])
else:
s = "%s"%(i)
#str=string.rjust(str,step," ")
numdigit = len(s)
shiftx = numdigit
draw.text((x+step*fontWidth - shiftx*fontWidthScaleBar, y), s,
font=fntScaleBar, fill=fg)
i += step
x += step * fontWidth
y += int(fontHeightScaleBar*1.5+0.5)
x = x0 + widthAnnotation*fontWidth + annoSeqInterval*fontWidthTMbox
i = step
shiftx = step-1
while i < length:
s = "|"
draw.text((x+shiftx*fontWidth,y), s, font=fntScaleBar, fill=fg)
i += step
x += step * fontWidth
#}}}
def DrawHistogram(histoList, xy0, histoRegionWidth, histoRegionHeight, #{{{
color_fill, color_outline, spaceToLeftBorder, draw):
"""Draw histogram"""
(x0, y0) = xy0
marginTop = max(20, int(round(histoRegionHeight * 0.1)))
marginBottom = int(round(histoRegionHeight * 0.1))
heightBox = histoRegionHeight - marginTop - marginBottom
widthBox = histoRegionWidth
# Draw outline box
box = [x0, y0 + marginTop, x0 + widthBox, y0 + marginTop + heightBox]
draw.rectangle(box, outline="black")
yticList = [0, 50, 100]
ylabel = "% state M"
# draw tics and text
font_size = AutoSizeFontHistogram(ylabel, yticList, widthBox, heightBox,
spaceToLeftBorder)
#font_size = 12
fnt = ImageFont.truetype(g_params['font_dir'] + g_params['font'],
font_size)
(fw, fh) = fnt.getsize('-')
lengthtic = fw
maxTicWidth = 0
for ytic in yticList:
x1 = x0 - lengthtic
heightInPixel = int(round(ytic/100.0 * heightBox ))
y1 = y0 + marginTop + heightBox - heightInPixel
x2 = x0
y2 = y1
box=[x1, y1 , x2, y2]
draw.line(box, fill="black")
text = str(ytic)
(textWidth, textHeight) = fnt.getsize(text)
if textWidth > maxTicWidth:
maxTicWidth = textWidth
draw.text((x1-textWidth-lengthtic-3, y1-textHeight/2), text, font=fnt,
fill='black')
text = ylabel
(textWidth, textHeight) = fnt.getsize(text)
x = x0
y = (y0 + marginTop + y0 + marginTop + heightBox) / 2
xt = x - textWidth - 2*lengthtic - maxTicWidth - 3 - 10
yt = y - textHeight/2
draw.text((xt , yt), text, font=fnt, fill='black')
# Draw histogram bars
x = x0
for (w, h) in histoList:
widthInPixel = int(round(w * widthBox))
heightInPixel = int(round(h * heightBox ))
x1 = x
y1 = y0 + marginTop + heightBox - heightInPixel
x2 = x1 + widthInPixel
y2 = y0 + marginTop + heightBox
box = [x1, y1, x2, y2]
draw.rectangle(box, fill=color_fill, outline=color_outline)
x += widthInPixel
#}}}
def GetSizeAnnotationToDraw(annotationList):#{{{
"""Get the size of annotation field"""
maxSize = 0
for anno in annotationList:
m1 = re.search("nTM\s*=\s*[0-9]*", anno)
m2 = re.search("group of [0-9]*", anno)
m3 = re.search("[^\s]+", anno)
pos1 = 0
pos2 = 0
pos3 = 0
if m1:
pos1 = m1.end(0)
if m2:
pos2 = m2.end(0)
if m3:
pos3 = m3.end(0)
size = max(pos1,pos2, pos3)
if size > maxSize:
maxSize = size
return maxSize
#}}}
def GetSpecialProIndex(seqIDIndexDict):# {{{
"""
Get index for special proteins
return a dict with keys
'reppro' = []
'pdb' = []
'final' = []
"""
li_reppro = []
li_pdb = []
li_final = []
for seqID in sorted(seqIDIndexDict.keys()):
if seqID.find("rep") == 0:
li_reppro.append(seqIDIndexDict[seqID])
elif seqID.find("pdb_") == 0:
li_pdb.append(seqIDIndexDict[seqID])
elif seqID.find("final") == 0:
li_final.append(seqIDIndexDict[seqID])
dt = {}
dt['reppro'] = li_reppro
dt['pdb'] = li_pdb
dt['final'] = li_final
return dt
# }}}
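# Illustrative example for GetSpecialProIndex (a sketch, not executed on
# import): IDs prefixed with 'rep', 'pdb_' or 'final' are special, e.g.
#   GetSpecialProIndex({'rep1': 0, 'pdb_1xyz': 1, 'finalA': 2, 'seq9': 3})
#   -> {'reppro': [0], 'pdb': [1], 'final': [2]}   ('seq9' is ignored)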
def CalculateImageScale(numSeq):# {{{
"""
Calculate Image Scale based on the number of sequences
"""
    if g_params['image_scale'] == None:
        if numSeq < 50:
            g_params['image_scale'] = 2.0
        elif numSeq < 100:
            g_params['image_scale'] = 1.5
        elif numSeq < 500:
            g_params['image_scale'] = 1.2
        else:
            g_params['image_scale'] = 1.0
    else:
        pass # image_scale was set explicitly; keep the user-provided value
# }}}
def VerifyTerminalStatus(topoSeqList, posTMList):# {{{
"""
Add i/o status before/after the TM helices if it is missing
"""
numSeq = len(topoSeqList)
lengthAlignment = len(topoSeqList[0])
IOSTATE_LIST = ["i","o"]
    for i in range(numSeq):
        topo = topoSeqList[i]
        if len(posTMList[i]) == 0: # no TM helix, nothing to verify
            continue
        l_topo = [x for x in topo]
        posTM_Nterm = posTMList[i][0]
        posTM_Cterm = posTMList[i][-1]
if lcmp.Get_IOState_upstream(topo, posTM_Nterm[0]) == '':
io_after = lcmp.Get_IOState_downstream(topo, posTM_Nterm[1])
io_before = IOSTATE_LIST[(IOSTATE_LIST.index(io_after)+1)%2]
if posTM_Nterm[0] == 0:
l_topo[posTM_Nterm[0]] = io_before
else:
l_topo[posTM_Nterm[0]-1] = io_before
if lcmp.Get_IOState_downstream(topo, posTM_Cterm[1]) == '':
io_before = lcmp.Get_IOState_upstream(topo, posTM_Cterm[0])
io_after = IOSTATE_LIST[(IOSTATE_LIST.index(io_before)+1)%2]
if posTM_Cterm[1] == lengthAlignment:
l_topo[posTM_Cterm[1]-1] = io_after
else:
l_topo[posTM_Cterm[1]] = io_after
topoSeqList[i] = ''.join(l_topo)
# }}}
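# Illustrative example for VerifyTerminalStatus (a sketch, not executed on
# import), assuming lcmp.Get_IOState_upstream/_downstream return '' when no
# i/o state exists on that side: for topoSeqList = ["MMMoo"] and
# posTMList = [[(0, 3)]], the N-terminus gets the state opposite to the
# downstream 'o', so the list entry becomes "iMMoo".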
def GetSeqTag(anno):#{{{
"get sequence tag from annotation"
tag = ""
if anno.find("ClusterNo=") != -1:
tag = re.search("ClusterNo=[0-9]+", anno).group(0)
elif anno.find(" IDT ") != -1:
tag = "IDT"
elif anno.find("Consensus ") != -1:
tag = "Consensus"
elif anno.find(" OK ") != -1:
tag = "OK"
elif anno.find(" SHIFT ") != -1:
tag = "SHIFT"
elif anno.find(" INV ") != -1:
tag = "INV"
elif anno.find(" INV_SHIFT ") != -1:
tag = "INV_SHIFT"
elif anno.find(" DIFF ") != -1:
tag = "DIFF"
elif anno.find(" TM2GAP ") != -1:
tag = "TM2GAP"
elif anno.find(" TM2SEQ ") != -1:
tag = "TM2SEQ"
elif anno.find(" TM2GAP_AND_TM2SEQ ") != -1:
tag = "TM2GAP_AND_TM2SEQ"
elif anno.find(" Mixed ") != -1:
tag = "Mixed"
elif anno.find("Eukaryota") != -1:
tag = "Eukaryota"
elif anno.find("Archaea") != -1:
tag = "Archaea"
elif anno.find("Bacteria") != -1:
tag = "Bacteria"
else:
tag = ""
return tag
#}}}
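# Illustrative examples for GetSeqTag (a sketch, not executed on import):
# the tag comes from the first matching keyword in the annotation line:
#   GetSeqTag("sp|P12345 OK nTM=4") -> "OK"
#   GetSeqTag("ClusterNo=3 nTM=7")  -> "ClusterNo=3"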
def DrawTopology(anno, tag, toposeq, aaseq, xy0, fnt, fontWidth, #{{{
fontHeight, isDrawText, draw):
"""Draw the topology MSA region with the PIL library"""
# draw background
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
fntTMbox = g_params['fntTMbox']
(fontWidthTMbox, fontHeightTMbox) = fntTMbox.getsize("a")
(x0,y0) = xy0
x = x0
y = y0
    # leave width for the annotation text; the same shift is needed even when
    # the text is not drawn, so that the columns stay aligned
    x += widthAnnotation * fontWidth
#Draw a vertical bar for proteins in different groups
if g_params['isDrawTagColumn']:
if tag.find("ClusterNo") != -1:#{{{
numCluster = int(tag.split("=")[1])
numTM = int(re.search("nTM=[0-9]+", anno).group(0).split("=")[1])
            fillColor = "white"
            if numTM == 1: # single-TM proteins are drawn in black
fillColor = "black"
else:
if numCluster == 1:
fillColor = "#008000"
elif numCluster == 2:
fillColor = "#239C23"
elif numCluster == 3:
fillColor = "#35A835"
elif numCluster == 4:
fillColor = "#53B953"
elif numCluster == 5:
fillColor = "#6CC66C"
elif numCluster == 6:
fillColor = "#84D084"
elif numCluster == 7:
fillColor = "#A5DEA5"
elif numCluster == 8:
fillColor = "#CEEECE"
elif numCluster == 9:
fillColor = "#E2F5E2"
elif numCluster == 10:
fillColor = "#F5FCF5"
                else:
                    fillColor = "black"
box=[x+fontWidthTMbox*1,y,x+fontWidthTMbox*3,y+fontHeight]
draw.rectangle(box, fill=fillColor)
else:
fill_color = "white"
if tag == "IDT":
fill_color = "red"
elif tag == "INV":
fill_color ="blue"
elif tag == "TM2GAP":
fill_color ="green"
elif tag == "TM2SEQ":
fill_color ="violet"
elif tag == "TM2GAP_AND_TM2SEQ":
fill_color ="cyan"
elif tag == "Consensus":
fill_color = "purple"
elif tag == "OK":
fill_color = "red"
elif tag == "SHIFT":
fill_color = "pink"
elif tag == "INV_SHIFT":
fill_color ="lightgreen"
elif tag == "DIFF":
fill_color ="black"
elif tag == "Archaea":
fill_color ="blue"
elif tag == "Bacteria":
fill_color ="purple"
elif tag == "Eukaryota":
fill_color ="green"
#print ("TEST",x,y,tag,fill_color)
#fill_color='green'
box=[x+fontWidthTMbox*1,y,x+fontWidthTMbox*3,y+fontHeight]
draw.rectangle(box, fill=fill_color)
x += annoSeqInterval * fontWidthTMbox
#}}}
bg="#FFFFFF"; #white
memcolor = "#FF0000"
lengthSeq = len(toposeq)
(posTM, typeTM) = lcmp.GetTMType(toposeq)
    # seq_typeTM names the TM type at every TM residue position (it defines
    # only the types at TM positions, not loops)
    seq_typeTM = [' '] * lengthSeq
for jj in range(len(posTM)):
tp = typeTM[jj]
b,e = posTM[jj]
for kk in range(b,e):
seq_typeTM[kk] = tp
# it is much faster to draw a block of text than drawing characters one by one
i=0
if g_params['isColorByKingdom']:
if ("Eukaryota" in anno):
memcolor="#0000FF"; #blue
elif ("Archaea" in anno):
memcolor="#00FF00"; #Green
elif ("Bacteria" in anno):
memcolor="#FF0000"; #red
else:
memcolor="#808080"; #grey
while i < lengthSeq:
j=i
while j < lengthSeq and toposeq[j] == toposeq[i]:
j += 1
lengthSegment = j-i
if toposeq[i] == "M":
if seq_typeTM[i] == "M":
bg=g_params['memcolor_out_to_in_MSA']
elif seq_typeTM[i] == "W":
bg = g_params['memcolor_in_to_out_MSA']
#bg = "red"
elif toposeq[i] == "i":
#bg = "#F2EABD"; # light yellow
bg = g_params['loopcolor_in_MSA']
elif toposeq[i] == "o":
#bg="#CCFFFF"; # faded blue
bg = g_params['loopcolor_out_MSA']
elif toposeq[i] == "S":
#bg="#228B22"; # forestgreen for signal peptide
bg = g_params['spcolor']
else:
if g_params['isColorWholeTMbox'] and lcmp.IsWithinTMRegion(i, posTM):
bg = memcolor #"#FF0000"
else:
bg = "#FFFFFF" #white
box=[x,y,x+fontWidth*lengthSegment,y+fontHeight]
draw.rectangle(box, fill=bg)
x+=fontWidth*lengthSegment
i=j
# draw text, foreground
if isDrawText:
x = x0
y = y0
        #ss = string.ljust(anno[0:widthAnnotation], widthAnnotation, " ")
        ss = anno[0:widthAnnotation].ljust(widthAnnotation, " ")
# print "ss=%s, anno=%s" %(ss, anno)
fg="#000000";# black
draw.text((x,y), ss, font=fnt, fill=fg)
x += (widthAnnotation*fontWidth)
x += (annoSeqInterval*fontWidthTMbox)
fg = "#000000" #black
# it is much faster to draw a block of text than drawing characters one by one
if g_params['isShowTMIndex']: # show TM1 TM2 TM3 ... in the middle of the TMbox
tmpli = [' ']*lengthSeq
for kk in range(len(posTM)):
(bb, ee) = posTM[kk]
tmpstr = "TM%d"%(kk+1)
                mid = (bb+ee)//2
                bb1 = min(mid - len(tmpstr)//2, lengthSeq - len(tmpstr)-1)
for jj in range(len(tmpstr)):
tmpli[bb1+jj] = tmpstr[jj]
seq = "".join(tmpli)
else:
if aaseq != "":
seq = aaseq
else:
seq = toposeq
seq = seq.replace('-',' ')
draw.text((x,y), seq, font=fnt, fill=fg)
# i=0
# while i < lengthSeq:
# j=i
# while j < lengthSeq and seq[j] == seq[i]:
# j+=1
# lengthSegment=j-i
# if seq[i] == "-":
# fg="#FFFFFF"
# else:
# fg="#000000"
# draw.text((x,y), seq[i:j], font=fnt, fill=fg)
# x+=(fontWidth*lengthSegment)
# i=j
#}}}
def CalculateImageParameter(fontWidth, fontHeight, lengthAlignment, numSeq, numSeprationLine, sectionSepSpace, specialProIdxDict, posTMList, TMnameList, widthAdjustRatio):# {{{
"""
Calculate image parameters for the PIL method
"""
(fontWidthScaleBar, fontHeightScaleBar) = g_params['fntScaleBar'].getsize("a")
fontHeightDGProfileLegend = g_params['fntDGprofileLegend'].getsize("a")[1]
(fontWidthTMbox, fontHeightTMbox) = AutoSizeFontTMBox(fontWidth, fontHeight, numSeq, specialProIdxDict, posTMList, TMnameList)
width = ((g_params['widthAnnotation'] + lengthAlignment) * (fontWidth) +
g_params['annoSeqInterval']*fontWidthTMbox + int(widthAdjustRatio*g_params['marginX'] * 2+0.5))
dgprofileRegionWidth = lengthAlignment * fontWidth
dgprofileRegionHeight = max(30, int(width*0.15+0.5), int(round(numSeq * fontHeight * 0.2)), g_params['heightTMbox']*fontHeightTMbox*2)
histoRegionWidth = lengthAlignment * fontWidth
histoRegionHeight = max(50, int(round(lengthAlignment*fontHeight*widthAdjustRatio* 0.1)), int(round(numSeq*fontHeight* 0.1)))
AutoSizeFontDGProfileLabel(dgprofileRegionHeight)
width = ((g_params['widthAnnotation'] + lengthAlignment) * (fontWidth) +
g_params['annoSeqInterval']*fontWidthTMbox + g_params['marginX'] * 2)
height = (
g_params['marginY']* int(myfunc.maprange((0.5,1), (2,6), sig2(numSeq, scale=5))) +
#g_params['marginY']* 6+
len(specialProIdxDict['reppro'])*int(g_params['heightTMbox']*fontHeightTMbox*1.5+0.5) +
g_params['isDrawScaleBar']*int(fontHeightScaleBar*2.5+0.5)+
g_params['isDrawMSA']*numSeq*fontHeight +
g_params['isDrawSeprationLine'] * numSeprationLine * g_params['scaleSeprationLine']* fontHeight +
(len(specialProIdxDict['pdb'])+len(specialProIdxDict['final']) >0)*(int(g_params['heightTMbox']*fontHeightTMbox+0.5)+sectionSepSpace*fontHeightScaleBar)+
len(specialProIdxDict['pdb'])*int(g_params['heightTMbox']*fontHeightTMbox*1.5+0.5)+
len(specialProIdxDict['final'])*int(g_params['heightTMbox']*fontHeightTMbox*1.5+0.5)+
g_params['isDrawDGprofile'] *(int(dgprofileRegionHeight*1.1+0.5)+sectionSepSpace*fontHeightScaleBar)+
g_params['isDrawDGprofile'] *((len(specialProIdxDict['reppro'])>1)*fontHeightDGProfileLegend*4)+
g_params['isDrawPerMDistribution'] * (histoRegionHeight)
)
return (width, height, fontWidthTMbox, fontHeightTMbox, dgprofileRegionWidth, dgprofileRegionHeight, histoRegionWidth, histoRegionHeight)
# }}}
def DrawMSATopo_PIL(inFile, g_params):#{{{
"""Draw multiple alignment of topologies using the PIL library"""
lcmp.SetMakeTMplotColor_g_params(g_params)
isDrawSeqLable = True
(idList, annotationList, topoSeqList) = myfunc.ReadFasta(inFile)
H2W_ratio = g_params['H2W_ratio']
widthAdjustRatio = 1.0
topoSeqList = lcmp.RemoveUnnecessaryGap(topoSeqList)
numSeq = len(idList)
if numSeq < 1:
print("No sequence in the file %s. Ignore." %(inFile), file=sys.stderr)
return 1
CalculateImageScale(numSeq)
logger.debug("image_scale=%g"%(g_params['image_scale']))
g_params['marginX'] = int(g_params['marginX']*g_params['image_scale']+0.5)
g_params['marginY'] = int(g_params['marginY']*g_params['image_scale']+0.5)
seqIDIndexDict = {}
for i in range(numSeq):
seqIDIndexDict[idList[i]] = i
(fontWidthScaleBar, fontHeightScaleBar) = g_params['fntScaleBar'].getsize("a")
sectionSepSpace = int(g_params['image_scale']*g_params['heightTMbox']/2+0.5)
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
str_krbias = ""
if g_params['isDrawKRBias'] == True:
str_krbias = ".krbias"
outFile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, g_params['outFormat'])
# ============================
# get index of special proteins
# * representative protein
# * topology of a PDB structure
# * the final topology
specialProIdxDict = GetSpecialProIndex(seqIDIndexDict)
specialProIdxList = specialProIdxDict['reppro'] + specialProIdxDict['pdb'] + specialProIdxDict['final']
if len(specialProIdxList) == 0:
if g_params['makeCleanPlot']:
# make all as final proteins for Sudha's case
specialProIdxDict['final'] = list(range(len(idList)))
# print("FATAL ERROR! specialPro is not set, TM box can not be plotted. "\
# "Please check your input file %s"%(inFile), file=sys.stderr)
# return 1
else:
specialProIdxDict['reppro'] = [0]
specialProIdxList = specialProIdxDict['reppro'] + specialProIdxDict['pdb'] + specialProIdxDict['final']
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
    TMnameList = [] # TMname is itself a list, so TMnameList is a list of lists
foldTypeList = []
for i in range(numSeq):
if i in specialProIdxList:
TMname = myfunc.GetTMnameFromAnnotation(annotationList[i]) # annotation from the file topomsa
foldType = myfunc.GetFoldTypeFromAnnotation(annotationList[i])
TMnameList.append(TMname)
foldTypeList.append(foldType)
else:
TMnameList.append([])
foldTypeList.append("")
origPosTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
VerifyTerminalStatus(topoSeqList, origPosTMList)
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
origTopoSeqList = []
for seq in topoSeqList:
origTopoSeqList.append(seq)
posindexmap = {}
if g_params['isShrink']:
if g_params['method_shrink'] == 0:
posindexmap = ShrinkGapInMSA_0(idList, topoSeqList, specialProIdxList=[])
elif g_params['method_shrink'] == 1:
posindexmap = ShrinkGapInMSA_exclude_TMregion(idList, topoSeqList)
# get posTMList for the shink version of MSA
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
# for i in range(len(topoSeqList)):
# print ("%10s: %s" %(idList[i], topoSeqList[i]))
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
if g_params['makeCleanPlot']:
g_params['widthAnnotation'] = 4
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
fnt = ImageFont.truetype(g_params['font_dir'] + g_params['font'],
int(g_params['image_scale']*g_params['font_size']))
(fontWidth, fontHeight) = fnt.getsize("a")
(width, height, fontWidthTMbox, fontHeightTMbox, dgprofileRegionWidth, dgprofileRegionHeight, histoRegionWidth, histoRegionHeight) = CalculateImageParameter(fontWidth, fontHeight, lengthAlignment, numSeq, numSeprationLine, sectionSepSpace, specialProIdxDict, posTMList, TMnameList, 1.0)
if (H2W_ratio != None and height/float(width)!=H2W_ratio):
widthAdjustRatio = height/float(width)/H2W_ratio
fontWidth = int(fontWidth * widthAdjustRatio + 0.5)
g_params['marginY'] += int(widthAdjustRatio*10+0.5)
(width, height, fontWidthTMbox, fontHeightTMbox, dgprofileRegionWidth, dgprofileRegionHeight, histoRegionWidth, histoRegionHeight) = CalculateImageParameter(fontWidth, fontHeight, lengthAlignment, numSeq, numSeprationLine, sectionSepSpace,specialProIdxDict, posTMList, TMnameList, widthAdjustRatio)
isDrawText = g_params['isDrawText']
font_size = g_params['font_size']
if g_params['isAutoSize']:
while height*width > g_params['MAXIMAGESIZE']:
if font_size > 3:
font_size -= 1
fnt = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], font_size)
(fontWidth, fontHeight) = fnt.getsize("a")
else:
if fontWidth > 1:
fontWidth -= 1
if fontHeight > 1:
fontHeight -= 1
(width, height, fontWidthTMbox, fontHeightTMbox, dgprofileRegionWidth, dgprofileRegionHeight, histoRegionWidth, histoRegionHeight) = CalculateImageParameter(fontWidth, fontHeight, lengthAlignment, numSeq, numSeprationLine, sectionSepSpace,specialProIdxDict, posTMList, TMnameList, 1.0)
if font_size < 3:
isDrawText = False
if fontWidth < 2 and fontHeight < 2:
break
logger.debug("height (%d) *width (%d) = %d"%(height, width, height*width))
if (H2W_ratio != None and height/float(width)!=H2W_ratio):
widthAdjustRatio = height/float(width)/H2W_ratio
fontWidth = int(fontWidth * widthAdjustRatio + 0.5)
g_params['marginY'] += int(widthAdjustRatio*10+0.5)
(width, height, fontWidthTMbox, fontHeightTMbox, dgprofileRegionWidth, dgprofileRegionHeight, histoRegionWidth, histoRegionHeight) = CalculateImageParameter(fontWidth, fontHeight, lengthAlignment, numSeq, numSeprationLine, sectionSepSpace, specialProIdxDict, posTMList, TMnameList, widthAdjustRatio)
if height*width > g_params['MAXIMAGESIZE']:
msg = "%s: (fontWidth, fontHeight) have been reduced to (%d, %d)"\
", but the image size is still too big (%dM)"%(
inFile, fontWidth, fontHeight, height*width/1024/1024)
print(msg)
else:
msg = "font is autosized to %d, isDrawText = %s, "\
"(fontWidth, fontHeight) = (%d, %d)"%(
font_size,isDrawText, fontWidth, fontHeight)
print(msg.format(font_size, isDrawText, fontWidth, fontHeight))
bg_color="#FFFFFF"; # white
if g_params['mode'] == "RGB":
newImage = Image.new(g_params['mode'], (width, height),bg_color)
elif g_params['mode'] == "P":
newImage = Image.new(g_params['mode'], (width, height),255)
draw = ImageDraw.Draw(newImage); # setup to draw on the main image
x = g_params['marginX']
y = g_params['marginY']
    # Draw TM helices of the consensus (or the representative topologies).
    # Do not draw the consensus if the topology alignment contains only final
    # topologies
if not len(specialProIdxDict['final']) == len(specialProIdxList):
if len(specialProIdxDict['reppro']) > 0:
idxRepProList = specialProIdxDict['reppro']
else:
idxRepProList = [0] #if reppro is not set, take the first one
cnt = 0
for idx in idxRepProList:
(posTM_rep,typeTM_rep) = lcmp.GetTMType(topoSeqList[idx])
if isDrawSeqLable:
xt = g_params['marginX'] + fontWidth*g_params['widthAnnotation']*0
                if not g_params['makeCleanPlot']:
                    if len(idxRepProList) == 1:
                        label = "Initial Topology"
                    else:
                        label = "Initial Topology %d"%(cnt+1)
                    label = rootname # NOTE: this overrides the generic label with the file rootname
                else:
                    label = ""
#ss = string.ljust(label[0:widthAnnotation], widthAnnotation, " ")
ss = label[0:widthAnnotation].ljust(widthAnnotation, " ")
fg="#000000";# black
draw.text((xt,y), ss, font=g_params['fntTMbox_label'], fill=fg)
DrawTMOfConsensus2(topoSeqList[idx], posTM_rep, typeTM_rep,
TMnameList[idx], foldTypeList[idx], (x,y),
fontWidth, fontHeight, draw,lengthAlignment)
y += int(g_params['heightTMbox'] * fontHeightTMbox*1.5+0.5)
cnt += 1
#y += sectionSepSpace*fontHeightTMbox
# Draw a scale bar of the residue position
(fontWidthScaleBar, fontHeightScaleBar) = g_params['fntScaleBar'].getsize("a")
if g_params['isDrawMSA']:
if g_params['isDrawScaleBar']:
DrawScale(lengthAlignment, posindexmap, (x,y), font_size, fontWidth,
fontHeight, draw)
y += int(fontHeightScaleBar*2.5+0.5)
maxDistKR = g_params['maxDistKR']
isDrawKRBias = g_params['isDrawKRBias']
tagFormer = tagList[0]
for i in range(numSeq):
tagCurrent = tagList[i]
seqID = idList[i]
if i in specialProIdxList: #do not draw the topology of special proteins
continue
if tagCurrent != tagFormer:
tagFormer = tagCurrent
            if g_params['isDrawSeprationLine']:
box = [x, y+1, width-marginX, y+fontHeight*scaleSeprationLine-1]
draw.rectangle(box, fill="grey",outline="white")
y += fontHeight
anno = annotationList[i]
tag = tagList[i]
toposeq = topoSeqList[i]
aaseq = ""
if seqID in aaSeqDict:
aaseq = aaSeqDict[seqID]
#print aaseq
#print origTopoSeqList[i]
            # origTopoSeqList is the original (non-shrunk) MSA
aaseq = MatchToAlignedSeq(aaseq, origTopoSeqList[i], seqID)
if posindexmap != {}:
tmpaaseq = ""
for pp in range(len(posindexmap)):
aa = aaseq[posindexmap[pp]]
if (isDrawKRBias and aa in ["K", "R"] and
IsOutofMaxDistKR(posTMList[i], posindexmap[pp],
maxDistKR)):
aa = " "
if g_params['isPrintDebugInfo']:
print(seqID, aa, posindexmap[pp], posTMList[i])
tmpaaseq += (aa)
aaseq = tmpaaseq
if isDrawKRBias:
aaseq = HideNonKRResidue(aaseq)
# print aaseq
DrawTopology(anno, tag, toposeq, aaseq, (x,y), fnt, fontWidth,
fontHeight, isDrawText, draw)
# if tagCurrent == "Consensus":
# y+=fontHeight
y += fontHeight
if y >= height:
print(("Error! position y(%d) exceeds height (%d)" %
(y, height)), file=sys.stderr)
# Draw special topologies
idxPDBList = specialProIdxDict['pdb']
idxFinalProList = specialProIdxDict['final']
if len(idxPDBList)+len(idxFinalProList) > 0:
y += int(g_params['heightTMbox']*fontHeightTMbox+0.5)
logger.debug("idxPDBList=%s, idxFinalProList=%s", str(idxPDBList), str(idxFinalProList))
for idx in idxPDBList+idxFinalProList:
if idx != -1:
if isDrawSeqLable:
xt = g_params['marginX'] + fontWidth*g_params['widthAnnotation']*0
if idx in idxPDBList:
                        label = idList[idx]
                        if label.startswith('pdb_'): # strip the literal prefix; lstrip('pdb_') would drop any leading p/d/b/_ characters
                            label = label[len('pdb_'):]
else:
if g_params['makeCleanPlot']:
label = alphabet[idx]
else:
if len(idxFinalProList) == 1:
label = "Final Topology"
else:
label = "Final Topology "+ idList[idx].lstrip("final")
#ss = string.ljust(label[0:widthAnnotation], widthAnnotation, " ")
ss = label[0:widthAnnotation].ljust(widthAnnotation, " ")
fg="#000000";# black
draw.text((xt,y), ss, font=g_params['fntTMbox_label'], fill=fg)
(posTM,typeTM) = lcmp.GetTMType(topoSeqList[idx])
# draw topology of the representative protein
DrawTMOfConsensus2(topoSeqList[idx], posTM, typeTM,
TMnameList[idx], foldTypeList[idx], (x,y), fontWidth,
fontHeight, draw, lengthAlignment)
y += int(g_params['heightTMbox']*fontHeightTMbox*1.5+0.5)
y += sectionSepSpace*fontHeightScaleBar
# Draw DGprofile
if g_params['isDrawDGprofile']:
dgpList = []
for idx in specialProIdxDict['reppro']:
seqID = idList[idx]
toposeq = topoSeqList[idx]
lengthAlignment = len(toposeq)
DGProfileFile = GetDGProfileFileName(inFile, seqID)
isDrawSeqID = True
dgprofileDict = None
logger.debug("seqID=%s, DGProfileFile=%s"%(seqID, DGProfileFile))
if os.path.exists(DGProfileFile):
dgprofileDict = ReadInDGProfile(DGProfileFile)
dgp = None
if dgprofileDict:
if seqID in dgprofileDict:
dgp = dgprofileDict[seqID]
elif 'query' in dgprofileDict:
dgp = dgprofileDict['query']
elif seqID in aaSeqDict: #if dg profile file is not provided, calculate it
aaseq = aaSeqDict[seqID]
dgp = RunDGScan(aaseq, seqID)
if dgp:
idxmap_aligne2seq = lcmp.GetAlign2SeqMap(origTopoSeqList[idx],
origTopoSeqList[idx].replace(GAP,""))
aligned_dgp = MatchAlignedDGP(dgp, idxmap_aligne2seq, posindexmap, toposeq)
dgpList.append([seqID, aligned_dgp])
if len(dgpList) > 0:
dgList = []
for ii in range(len(dgpList)):
dgp = dgpList[ii][1]
ldg = [x[1] for x in dgp]
dgList += ldg
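            # Clamp the y range of the DG plot so that it always spans at
            # least [-1.0, 2.0] and the zero line stays visible.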
minDG = min(-1.0, min(dgList))
maxDG = max(2, max(dgList))
line_mode = "line"
x = (g_params['marginX'] + g_params['widthAnnotation'] * fontWidth + g_params['annoSeqInterval'] *
fontWidthTMbox)
spaceToLeftBorder = (g_params['annoSeqInterval'] * fontWidthTMbox +
g_params['widthAnnotation'] * fontWidth)
DrawDGProfile(dgpList, lengthAlignment, maxDG, minDG,
(x,y), dgprofileRegionWidth,
dgprofileRegionHeight, spaceToLeftBorder, isDrawSeqID,
line_mode, draw)
# Add ylabel deltaG (kcal/mol)
fnt_label = g_params['fntDGprofileLable']
(fw_l, fh_l) = fnt_label.getsize(ylabel_DGprofile)
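            # PIL cannot draw rotated text directly, so the y-axis label is
            # drawn on a temporary RGBA image, rotated 90 degrees, and pasted
            # onto the main canvas with its own alpha channel as the mask.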
image2 = Image.new('RGBA', (int(fw_l*1.1+0.5)+5, int(fh_l*1.2+0.5)+5))
draw2 = ImageDraw.Draw(image2)
draw2.text((5, 5), text=ylabel_DGprofile, font=fnt_label, fill="black")
image2 = image2.rotate(90, resample=1, expand=1)
logger.debug ("widthAdjustRatio=%f"%widthAdjustRatio)
px, py = int(g_params['marginX']*1.5), y + 0*int(fw_l/2)
sx, sy = image2.size
newImage.paste(image2, (px, py), mask=image2)
del image2
y += dgprofileRegionHeight
y += sectionSepSpace*fontHeightScaleBar
# draw distribution of 'M' percentage
if g_params['isDrawPerMDistribution']:
(cnt_i, cnt_o, cnt_M, cnt_GAP, per_i, per_o, per_M, per_GAP) =\
lcmp.GetTopoStateFraction( topoSeqList)
        # histoList is a list of 2-tuples (width, height) where height is a
        # value in [0, 1] and the widths sum to 1.
histoList = []
for i in range(lengthAlignment):
histoList.append((1.0/lengthAlignment, per_M[i]))
x = (g_params['marginX'] + g_params['widthAnnotation'] * fontWidth + g_params['annoSeqInterval'] *
fontWidthTMbox)
color_fill = 'red'
color_outline = 'black'
spaceToLeftBorder = (g_params['annoSeqInterval'] * fontWidthTMbox + g_params['widthAnnotation']
* fontWidth)
DrawHistogram(histoList, (x,y), histoRegionWidth, histoRegionHeight,
color_fill, color_outline, spaceToLeftBorder,
draw)
if not g_params['isQuiet']:
print(("Topology MSA is drawn and output to \"%s\"" %(outFile)))
del draw
newImage.save(outFile)
del newImage
#}}}
def DrawMSATopo_SVG(inFile, g_params):#{{{
(idList, annotationList, topoSeqList) = myfunc.ReadFasta(inFile)
topoSeqList = lcmp.RemoveUnnecessaryGap(topoSeqList)
numSeq = len(idList)
if numSeq < 1:
print("No sequence in the file %s. Ignore." %(inFile), file=sys.stderr)
return 1
marginX = g_params['marginX']
marginY = g_params['marginY']
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
heightScaleBar = g_params['heightScaleBar']
heightTMbox = g_params['heightTMbox']
scaleSeprationLine = g_params['scaleSeprationLine']
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
#rootname=rootname.split('.')[0]
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
str_krbias = ""
    if g_params['isDrawKRBias']:
str_krbias = ".krbias"
svgfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'svg')
pdffile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'pdf')
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
alignedTopoSeqList = []
posTMList = []
for seq in topoSeqList:
alignedTopoSeqList.append(seq)
posTMList.append(myfunc.GetTMPosition(seq))
posindexmap = {}
if g_params['isShrink']:
posindexmap = ShrinkGapInMSA_0(idList, topoSeqList)
posTM = myfunc.GetTMPosition(topoSeqList[0])
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
# print tagList
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
svg_document = pysvg.structure.svg()
# shape_builder = pysvg.builders.ShapeBuilder()
#
# svg_document.addElement(shape_builder.createRect(0, 0,
# "200px", "100px",
# strokewidth = 1,
# stroke = "black",
# fill = "rgb(255, 255, 0)"))
myStyle = pysvg.builders.StyleBuilder()
myStyle.setFontFamily(fontfamily="monospace")
# myStyle.setFontSize('5pt')
myStyle.setFillOpacity('0.5')
myStyle.setFilling(fill="grey")
# myStyle.setStroke("green")
# myStyle.fill = "blue"
newAnnoList = []
for i in range(len(topoSeqList)):
newAnnoList.append("%s %s"%(idList[i], tagList[i]))
maxSizeAnno = max([len(x) for x in newAnnoList])
print("maxSizeAnno=",maxSizeAnno)
x0 = 10
y0 = 10
for i in range(len(topoSeqList)):
y = y0 + (i)*20
string = "%-*s %s"%(maxSizeAnno+5, newAnnoList[i], topoSeqList[i])
#print len(string)
print(string)
t1 = pysvg.text.text(string, x=x0, y=y)
t1.set_style(myStyle.getStyle())
svg_document.addElement(t1)
svg_document.save(svgfile)
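    # convert the SVG to PDF with the external svg2pdf tool (must be in PATH)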
cmd = "svg2pdf %s %s"%(svgfile, pdffile)
os.system(cmd)
print("%s output"%(pdffile))
#}}}
def DrawMSATopo_MAT(inFile, g_params):#{{{
(idList, annotationList, topoSeqList) = myfunc.ReadFasta(inFile)
numSeq = len(idList)
if numSeq < 1:
print("No sequence in the file %s. Ignore." %(inFile), file=sys.stderr)
return 1
topoSeqList = lcmp.RemoveUnnecessaryGap(topoSeqList)
lengthAlignmentOriginal = len(topoSeqList[0])
maxDistKR = g_params['maxDistKR']
marginX = g_params['marginX']
marginY = g_params['marginY']
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
heightScaleBar = g_params['heightScaleBar']
heightTMbox = g_params['heightTMbox']
scaleSeprationLine = g_params['scaleSeprationLine']
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
pdfcrop_margin_left = g_params['pdfcrop_margin_left']
pdfcrop_margin_top = g_params['pdfcrop_margin_top']
pdfcrop_margin_right = g_params['pdfcrop_margin_right']
pdfcrop_margin_bottom = g_params['pdfcrop_margin_bottom']
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
isDrawKRBias = g_params['isDrawKRBias']
str_krbias = ""
    if g_params['isDrawKRBias']:
str_krbias = ".krbias"
svgfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'svg')
pdffile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'pdf')
txtfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'txtplot')
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
alignedTopoSeqList = []
posTMList = []
for seq in topoSeqList:
alignedTopoSeqList.append(seq)
posTMList.append(myfunc.GetTMPosition(seq))
posindexmap = {}
method_shrink = g_params['method_shrink']
if g_params['isDrawDGprofile']:
method_shrink = 1
    if g_params['isShrink']:
        # use the local method_shrink, which is forced to 1 above when the
        # DG profile is drawn
        if method_shrink == 0:
            posindexmap = ShrinkGapInMSA_0(idList, topoSeqList)
        elif method_shrink == 1:
            posindexmap = ShrinkGapInMSA_exclude_TMregion(idList, topoSeqList)
posTM = myfunc.GetTMPosition(topoSeqList[0])
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
# print tagList
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
newAnnoList = []
for i in range(len(topoSeqList)):
newAnnoList.append("%s %s"%(myfunc.GetFirstWord(annotationList[i]), tagList[i]))
maxSizeAnno = max([len(x) for x in newAnnoList])
print("maxSizeAnno=",maxSizeAnno)
fonttype = 'monospace'
numSeq = len(topoSeqList)
    aaSeqList = [] # list of amino acid sequences, aligned and shrunk if enabled
    final2seq_idxMapList = [] # the index map from the final (shrunk or not)
                              # sequence to the original unaligned sequence.
for i in range(numSeq):
seqID = idList[i]
idxmap_aligne2seq = lcmp.GetAlign2SeqMap(alignedTopoSeqList[i],
alignedTopoSeqList[i].replace(GAP,""))
idxmap = {}
if seqID in aaSeqDict:
aaseq = aaSeqDict[seqID]
#print aaseq
#print alignedTopoSeqList[i]
            # alignedTopoSeqList is the original (non-shrunk) MSA
aaseq = MatchToAlignedSeq(aaseq, alignedTopoSeqList[i], seqID)
if posindexmap != {}:
tmpaaseq = ""
for pp in range(len(posindexmap)):
aa = aaseq[posindexmap[pp]]
idxmap[pp] = idxmap_aligne2seq[posindexmap[pp]]
if (isDrawKRBias and aa in ["K", "R"] and
IsOutofMaxDistKR(posTMList[i], posindexmap[pp],
maxDistKR)):
aa = " "
if g_params['isPrintDebugInfo']:
print(seqID, aa, posindexmap[pp], posTMList[i])
tmpaaseq += (aa)
aaseq = tmpaaseq
else:
idxmap = idxmap_aligne2seq
if isDrawKRBias:
aaseq = HideNonKRResidue(aaseq)
aaSeqList.append(aaseq)
else:
aaSeqList.append("")
final2seq_idxMapList.append(idxmap)
# setting font properties
#ffam = "monospace"
#ffam = "Fixed"
ffam = "Courier New"
fontpath = "%s/%s"%(g_params['font_dir'], "Courier_New.ttf")
fp = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=12,
weight='normal', stretch='normal')
ffam = "Arial"
fontpath = "%s/%s"%(g_params['font_dir'], "Arial.ttf")
fp_anno = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=14,
weight='normal', stretch='normal')
# get the text width and height in pixels
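    # Lay out a reference character as a TextPath and read its bounding box
    # to get per-character pixel metrics; with a monospace font this width
    # holds for every character in the alignment.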
x=0
y=0
linespaceInPixel = 12
ss = "M"*1
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp)
bb = pth.get_extents(transform=None)
widthSingleCharInPixel = float(bb.width)/len(ss)
heightSingleCharInPixel = float(bb.height)
print("charwidth, charheight", widthSingleCharInPixel, heightSingleCharInPixel)
widthAnnoInPixel = 0
for i in range(numSeq):
ss = newAnnoList[i]
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp_anno)
bb = pth.get_extents(transform=None)
wtd = float(bb.width)/len(ss.replace(" ",""))*len(ss)
if wtd > widthAnnoInPixel:
widthAnnoInPixel = wtd
print("widthAnnoInPixel=", widthAnnoInPixel)
# set aspect ratio
if g_params['isDrawDGprofile']:
heightRatios = [numSeq,10]
gs = gridspec.GridSpec(2, 1, height_ratios=heightRatios)
else:
heightRatios = [1]
print("heightRatios=", heightRatios)
sumTextHeightInPixel = (heightSingleCharInPixel + linespaceInPixel)*(numSeq+1)
sumTextWidthInPixel = (widthSingleCharInPixel)*(lengthAlignment+1)
# sumTextWidthAnnotationInPixel = widthSingleCharInPixel*(maxSizeAnno+5)
sumTextWidthAnnotationInPixel = widthAnnoInPixel+20
print("lengthAlignment=", lengthAlignment)
print("sumTextWidthAnnotationInPixel=", sumTextWidthAnnotationInPixel)
print("sumTextWidthInPixel=", sumTextWidthInPixel)
print("sumTextHeightInPixel=", sumTextHeightInPixel)
widthUnitFigureInPixel = 8*80
heightUnitFigureInPixel = 6*80
adjust_right = 0.99
#adjust_left = float(sumTextWidthInPixel)/(lengthAlignment*widthSingleCharInPixel)
adjust_left = (adjust_right) * (sumTextWidthAnnotationInPixel/(sumTextWidthAnnotationInPixel+lengthAlignment*widthSingleCharInPixel))
adjust_top = max(1.0 - float(2)/numSeq, 0.7)
adjust_bottom = min(float(2)/numSeq,0.3)
print("adjust_left=",adjust_left)
print("adjust_right=",adjust_right)
print("adjust_top=",adjust_top)
print("adjust_bottom=",adjust_bottom)
subplot1_width_ratio = (adjust_right-adjust_left)
subplot1_height_ratio = float(heightRatios[0])/sum(heightRatios)*(adjust_top-adjust_bottom)
#subplot1_width_ratio = 1.0/(1.0+0.2+0.2+adjust_left)
widthUnitSubplot1InPixel = widthUnitFigureInPixel*subplot1_width_ratio
heightUnitSubplot1InPixel = heightUnitFigureInPixel*subplot1_height_ratio
widthscale = float(sumTextWidthInPixel)/widthUnitSubplot1InPixel+0.00
heightscale = float(sumTextHeightInPixel)/heightUnitSubplot1InPixel+0.02
print("sumTextWidthInPixel, sumTextHeightInPixel=", (sumTextWidthInPixel, sumTextHeightInPixel))
print("scale=",(widthscale, heightscale))
widthSubplot1InPixel = widthUnitSubplot1InPixel * widthscale
heightSubplot1InPixel = heightUnitSubplot1InPixel * heightscale
print("widthSubplot1InPixel=",widthSubplot1InPixel)
print("heightSubplot1InPixel", heightSubplot1InPixel)
widthSingleCharInAxes = float(widthSingleCharInPixel)/widthSubplot1InPixel
heightSingleCharInAxes = float(heightSingleCharInPixel)/heightSubplot1InPixel
widthAnnotationInAxes = float(sumTextWidthAnnotationInPixel)/widthSubplot1InPixel
linespaceInAxes = float(linespaceInPixel)/heightSubplot1InPixel
print("widthSingleCharInAxes, heightSingleCharInAxes=", (widthSingleCharInAxes, heightSingleCharInAxes))
# create figure object
figsize = (8*widthscale, 6*heightscale) # fig size in inches (width,height)
fig = plt.figure(figsize = figsize) # set the figsize
fig.subplots_adjust(left=adjust_left, right=adjust_right, top=adjust_top, bottom=adjust_bottom)
if g_params['isDrawDGprofile']:
ax = fig.add_subplot(gs[0])
else:
ax = fig.add_subplot(111)
#ax.axis('off')
inv = ax.transAxes.inverted()
loc_xtics = []
label_xtics = []
if posindexmap != {}:
for jj in range(0, lengthAlignment, 10):
loc_xtics.append(jj)
label_xtics.append(posindexmap[jj])
ax.set_xlim(0, lengthAlignment)
#ax.set_xlabel("Sequence position", fontsize=16)
if posindexmap != {}:
plt.xticks(np.array(loc_xtics), np.array(label_xtics))
ax2 = ax.twiny()
ax2.set_xlabel("Alignment position", fontsize=16)
ax2.set_xlim(0,lengthAlignment)
if posindexmap != {}:
plt.xticks(np.array(loc_xtics), np.array(label_xtics))
ax.set_ylim(numSeq,0)
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
# make a plot of sequence indexes
l1 = []
l2 = []
for j in range(lengthAlignment):
if posindexmap != {}:
idxAlignedSeq = posindexmap[j]
else:
idxAlignedSeq = j
l1.append(idxAlignedSeq)
l2.append(0)
plt.plot(l1,l2, ' ')
x0 = 0
y0 = 1.0 - linespaceInAxes - heightSingleCharInAxes
x = x0
y = y0
    yshift = 0
    cnttextlength = 0 # total characters drawn; used by the debug line below
for i in range(len(topoSeqList)):
y -= yshift
anno = "%-*s"%(maxSizeAnno+5, newAnnoList[i])
x = x0 - widthAnnotationInAxes
plt.text(x, y, anno, fontproperties=fp_anno, transform=ax.transAxes)
xshift=0
x = x0
if aaSeqList[i] != "":
seq = aaSeqList[i]
else:
seq = topoSeqList[i]
topo = topoSeqList[i]
posTM = myfunc.GetTMPosition(topo)
if len(posTM) == 0:
txt = seq
plt.text(x, y, txt, fontproperties=fp, transform=ax.transAxes)
width = widthSingleCharInAxes * len(txt)
height = heightSingleCharInAxes + linespaceInAxes
edgecolor = 'none'
y2 = y - linespaceInAxes/2.0
rec = matplotlib.patches.Rectangle((x, y2), width, height,
transform=ax.transAxes)
ax.add_patch(rec)
xshift = width
yshift = height
else:
li = []
for (b, e) in posTM:
li.append(b)
li.append(e)
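            # li now holds the segment borders in order; consecutive pairs
            # alternate between loop regions and TM helices.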
for j in range(len(li)+1):
if j == 0:
begin = 0
else:
begin = li[j-1]
if j != len(li):
end = li[j]
else:
end = lengthAlignment
txt = seq[begin:end]
txt = txt.replace("-", " ")
txt_topo = topo[begin:end].replace(GAP," ")
x_char = x
for char in txt:
plt.text(x_char, y, char, fontproperties=fp, transform=ax.transAxes)
x_char += widthSingleCharInAxes
#t = matplotlib.textpath.TextPath((0,0), txt, prop=fp)
#bb = t.get_extents(transform=inv)
if txt_topo.find('M')!=-1:
edgecolor = 'black'
facecolor = 'red'
color = 'red'
elif txt_topo.find('i') != -1:
color = 'lightyellow'
elif txt_topo.find('o') != -1:
color = 'lightblue'
                else:
                    facecolor = 'none'
                    edgecolor = 'none'
                    color = 'none' # avoid reusing the colour of a previous segment
width = widthSingleCharInAxes * len(txt)
#width = bb.width
#print "w1, w2=", widthSingleCharInAxes*len(txt), bb.width
height = heightSingleCharInAxes + linespaceInAxes
y2 = y - linespaceInAxes/2.0
# rec = matplotlib.patches.Rectangle((x, y2), width, height,
# facecolor=facecolor, edgecolor=edgecolor, alpha=0.5,transform=ax.transAxes)
rec = matplotlib.patches.Rectangle((x, y2), width, height,
color=color, transform=ax.transAxes)
ax.add_patch(rec)
xshift = width
x += xshift
yshift = height
cnttextlength += len(txt)
#print "%3d: %4d %4d %s"%(i, cnttextlength, len(seq), seq), posTM
if g_params['isDrawDGprofile']:
dgprofileDict = {} #{{{
if os.path.exists(g_params['DGProfileFile']):
dgprofileDict = ReadInDGProfile(g_params['DGProfileFile'])
for i in range(numSeq):
seqID = idList[i]
toposeq = topoSeqList[i]
lengthAlignment = len(toposeq)
if (not seqID in dgprofileDict) and (seqID in aaSeqDict):
aaseq = aaSeqDict[seqID]
#print "aaseq=", aaseq
tmpaaseqfile = tempfile.mktemp()
tmpdgpfile = tempfile.mktemp()
tmpfp = open(tmpaaseqfile, 'w')
tmpfp.write(">%s\n"%seqID)
tmpfp.write("%s\n"%aaseq)
tmpfp.close()
os.system("%s %s -lmin 21 -lmax 21 -o %s"%(g_params['dgscanProg'],
tmpaaseqfile, tmpdgpfile))
tmpDGPDict = ReadInDGProfile(tmpdgpfile)
os.system("rm -f %s %s" %(tmpaaseqfile, tmpdgpfile))
for seqid in tmpDGPDict:
dgprofileDict[seqid] = tmpDGPDict[seqid]
#print dgprofileDict[seqid]
#}}}
ax = fig.add_subplot(gs[1])
#ax.set_xlim(0, lengthAlignmentOriginal)
ax.set_xlim(0, lengthAlignment)
ax.set_xlabel("Alignment position", fontsize=16)
ax.set_ylabel(r"${\Delta}G$", fontsize=16)
if posindexmap != {}:
plt.xticks(np.array(loc_xtics), np.array(label_xtics))
for i in range(numSeq):
seqid = idList[i]
alignedTopoOriginal = alignedTopoSeqList[i]
align2seqMap = lcmp.GetAlign2SeqMap(alignedTopoOriginal,
alignedTopoOriginal.replace(GAP,""))
#print "dgprofileDict[%s]"%seqid, dgprofileDict[seqid]
print("align2seqMap=", align2seqMap)
try:
dgp = dgprofileDict[seqid]
dt = {}
for tup in dgp:
dt[tup[0]] = tup[1]
x = []
y = []
for j in range(lengthAlignment):
if posindexmap != {}:
                        try:
                            idxAlignedSeq = posindexmap[j]
                        except KeyError:
                            #print "j=%d not in posindexmap"%j
                            continue  # skip unmapped positions instead of reusing a stale index
else:
idxAlignedSeq = j
                    try:
                        idxSeq = align2seqMap[idxAlignedSeq]
                    except KeyError:
                        # if g_params['isPrintDebugInfo']:
                        #     print "idxAlignedSeq=%d not in align2seqMap"%idxAlignedSeq
                        continue  # alignment column has no residue in this sequence
if idxSeq in dt:
#x.append(idxAlignedSeq)
x.append(j)
y.append(dt[idxSeq])
else:
# if g_params['isPrintDebugInfo']:
# print "idxSeq=%d not in dgp, idxAlignedSeq=%d"%(idxSeq, idxAlignedSeq)
pass
if i < len(colorList_DG_profile):
color = colorList_DG_profile[i]
else:
color = 'none'
# plot by line
plt.plot(x,y, label=seqid, color=color, linewidth=2.0)
# plot with '+' symbol
# plt.plot(x,y, '+', label=seqid, color=color)
plt.hlines(0, 0, lengthAlignmentOriginal)
plt.legend()
except KeyError:
print("no dgprofile for %s"%(seqid))
pass
plt.savefig(pdffile)
print("%s output"%(pdffile))
cmd = "pdfcrop --margins '%d %d %d %d' --clip %s"%(pdfcrop_margin_left,
pdfcrop_margin_top, pdfcrop_margin_right, pdfcrop_margin_bottom,
pdffile)
os.system(cmd)
# Write Txtformat alignment
#print final2seq_idxMapList
htmlfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'html')
# WriteTXTAlignment(idList, newAnnoList, topoSeqList, alignedTopoSeqList,
# aaSeqList, final2seq_idxMapList, txtfile)
if len(idList) == 2:
#WriteHTMLAlignment2(idList, newAnnoList, topoSeqList,
# alignedTopoSeqList, aaSeqList, final2seq_idxMapList, htmlfile)
tmpmapList = []
for i in range(len(alignedTopoSeqList)):
tmpmap = {}
for j in range(len(alignedTopoSeqList[i])):
tmpmap[j] = j
tmpmapList.append(tmpmap)
WriteHTMLAlignment2(idList, newAnnoList, alignedTopoSeqList,
alignedTopoSeqList, aaSeqList, tmpmapList, htmlfile)
elif len(idList) > 2:
WriteHTMLAlignment3(idList, newAnnoList, topoSeqList,
alignedTopoSeqList, aaSeqList, final2seq_idxMapList, htmlfile)
#}}}
def DrawMSATopo_MAT2(inFile, g_params):#{{{
"""
draw topology similar as
blue upperhyphen outside
red _____ inside
grey box TM helix (in->out)
white box TM helix (out->in)
"""
logger = logging.getLogger(__name__)
(idList, annotationList, topoSeqList) = myfunc.ReadFasta(inFile)
numSeq = len(idList)
if numSeq < 1:
logger.debug("No sequence in the file %s. Ignore." %(inFile))
return 1
topoSeqList = lcmp.RemoveUnnecessaryGap(topoSeqList)
lengthAlignmentOriginal = len(topoSeqList[0])
maxDistKR = g_params['maxDistKR']
marginX = g_params['marginX']
marginY = g_params['marginY']
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
heightScaleBar = g_params['heightScaleBar']
heightTMbox = g_params['heightTMbox']
scaleSeprationLine = g_params['scaleSeprationLine']
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
pdfcrop_margin_left = g_params['pdfcrop_margin_left']
pdfcrop_margin_top = g_params['pdfcrop_margin_top']
pdfcrop_margin_right = g_params['pdfcrop_margin_right']
pdfcrop_margin_bottom = g_params['pdfcrop_margin_bottom']
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
#rootname=rootname.split('.')[0]
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
isDrawKRBias = g_params['isDrawKRBias']
str_krbias = ""
    if g_params['isDrawKRBias']:
str_krbias = ".krbias"
svgfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'svg')
pdffile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'pdf')
txtfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'txtplot')
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
alignedTopoSeqList = []
posTMList = []
for seq in topoSeqList:
alignedTopoSeqList.append(seq)
posTMList.append(myfunc.GetTMPosition(seq))
posindexmap = {}
method_shrink = g_params['method_shrink']
# if g_params['isDrawDGprofile']:
# method_shrink = 1
aaSeqAlignList = [] # list of original aligned aa seq list
for i in range(numSeq):
seqid = idList[i]
try:
aaseq = aaSeqDict[seqid]
aaseq = MatchToAlignedSeq(aaseq, alignedTopoSeqList[i], seqid)
except KeyError:
aaseq = " "*lengthAlignmentOriginal
aaSeqAlignList.append(aaseq)
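    # Optionally shrink the alignment. Judging by the helper names and
    # parameters: method 0 shrinks gap columns, method 1 shrinks gaps outside
    # TM regions only, and method 2 also compresses TM helices by
    # g_params['shrinkrate_TM'] while holding loops up to
    # g_params['max_hold_loop'] columns.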
if g_params['isShrink']:
if g_params['method_shrink'] == 0:
posindexmap = ShrinkGapInMSA_0(idList, topoSeqList)
elif g_params['method_shrink'] == 1:
posindexmap = ShrinkGapInMSA_exclude_TMregion(idList, topoSeqList)
elif g_params['method_shrink'] == 2:
(idxmap_align2shrink, idxmap_shrink2align) =\
ShrinkMSA_Method_2(topoSeqList, aaSeqAlignList, posTMList,
g_params['shrinkrate_TM'], g_params['max_hold_loop'],
g_params['isDrawKRBias'])
posindexmap = idxmap_shrink2align
posTM = myfunc.GetTMPosition(topoSeqList[0])
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
# print tagList
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
# added 2013-12-04
    if g_params['shrinkrate'] is not None:
shrinkrate = g_params['shrinkrate'] # shrink the sequence proportionally
else:
shrinkrate = lengthAlignment/120.0
lengthAlignmentShrinked = int(lengthAlignment/shrinkrate+0.5)
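    # with the default shrinkrate the alignment is rescaled to roughly 120
    # character columns; x positions below are divided by shrinkrate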
logger.debug("lengthAlignment=%d"%lengthAlignment)
logger.debug("shrinkrate=%s"%(str(shrinkrate)))
logger.debug("lengthAlignmentShrinked=%d"%(lengthAlignmentShrinked))
newAnnoList = []
for i in range(len(topoSeqList)):
try:
anno = myfunc.GetFirstWord(annotationList[i])
except:
anno = ""
anno = anno[:g_params['MAX_SIZE_ANNOTATION']]
newAnnoList.append("%s %s"%(anno, tagList[i]))
maxSizeAnno = max([len(x) for x in newAnnoList])
logger.debug("maxSizeAnno=%d"%(maxSizeAnno))
fonttype = 'monospace'
numSeq = len(topoSeqList)
    aaSeqList = [] # list of amino acid sequences, aligned and shrunk if enabled
    final2seq_idxMapList = [] # the index map from the final (shrunk or not)
                              # sequence to the original unaligned sequence.
krSeqList = [] # seqlist for positively charged residues, KR
for i in range(numSeq):
seqID = idList[i]
idxmap_aligne2seq = lcmp.GetAlign2SeqMap(alignedTopoSeqList[i],
alignedTopoSeqList[i].replace(GAP,""))
idxmap = {}
if seqID in aaSeqDict:
aaseq = aaSeqDict[seqID]
#print aaseq
#print alignedTopoSeqList[i]
            # alignedTopoSeqList is the original (non-shrunk) MSA
aaseq = MatchToAlignedSeq(aaseq, alignedTopoSeqList[i], seqID)
tmpaaseq = ""
tmpKRseq = ""
for pp in range(lengthAlignment):
if posindexmap != {}:
jseq = posindexmap[pp]
else:
jseq = pp
idxmap[pp] = idxmap_aligne2seq[jseq]
aa = aaseq[jseq]
tmpaaseq += (aa)
if isDrawKRBias:
if (aa in ["K", "R"] and not
IsOutofMaxDistKR(posTMList[i], jseq,
maxDistKR)):
tmpKRseq += aa
if g_params['isPrintDebugInfo']:
print(seqID, aa, jseq, posTMList[i])
else:
tmpKRseq += " "
aaSeqList.append(tmpaaseq)
krSeqList.append(tmpKRseq)
else:
aaSeqList.append("")
krSeqList.append("")
final2seq_idxMapList.append(idxmap)
# debug
if g_params['isPrintDebugInfo'] and g_params['isDrawKRBias']:
print("print shrinked toposeq and krseq")
for k in range(numSeq):
print("%s\t%s"%(idList[k], topoSeqList[k]))
print("%s\t%s"%(idList[k], krSeqList[k]))
sys.stdout.flush()
# setting font properties
#ffam = "monospace"
#ffam = "Fixed"
ffam = "Courier New"
fontpath = "%s/%s"%(g_params['font_dir'], "Courier_New.ttf")
fontsize = 18
fp = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=fontsize,
weight='normal', stretch='normal')
ffam = "Arial"
fontpath = "%s/%s"%(g_params['font_dir'], "Arial.ttf")
fp_anno = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=20,
weight='normal', stretch='normal')
# get the text width and height in pixels
x=0
y=0
linespaceInPixel = 36
ss = "M"*1
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp)
bb = pth.get_extents(transform=None)
widthSingleCharInPixel = float(bb.width)/len(ss)
heightSingleCharInPixel = float(bb.height)
logger.debug("charwidth=%d, charheight=%d"%( widthSingleCharInPixel, heightSingleCharInPixel))
widthAnnoInPixel = 0
for i in range(numSeq):
        ss = newAnnoList[i] + "M"*2 # pad with two characters' width of space
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp_anno)
bb = pth.get_extents(transform=None)
wtd = float(bb.width)/len(ss.replace(" ",""))*len(ss)
if wtd > widthAnnoInPixel:
widthAnnoInPixel = wtd
logger.debug( "widthAnnoInPixel=%d"%(widthAnnoInPixel))
sumTextHeightInPixel = (heightSingleCharInPixel + linespaceInPixel)*(numSeq+1)
sumTextWidthInPixel = (widthSingleCharInPixel)*(lengthAlignmentShrinked+1)
# sumTextWidthAnnotationInPixel = widthSingleCharInPixel*(maxSizeAnno+5)
sumTextWidthAnnotationInPixel = widthAnnoInPixel
logger.debug("lengthAlignment=%d"% lengthAlignment)
logger.debug("sumTextWidthAnnotationInPixel=%d"% sumTextWidthAnnotationInPixel)
logger.debug("sumTextWidthInPixel=%d"% sumTextWidthInPixel)
logger.debug("sumTextHeightInPixel=%d"% sumTextHeightInPixel)
# set aspect ratio
if g_params['isDrawDGprofile']:
heightRatios = [numSeq, 5]
gs = gridspec.GridSpec(2, 1, height_ratios=heightRatios)
else:
heightRatios = [1]
logger.debug( "heightRatios=%s"%str( heightRatios))
widthUnitFigureInPixel = 8*80
heightUnitFigureInPixel = 6*80
adjust_left = float(maxSizeAnno+5)/lengthAlignmentShrinked
adjust_right = 0.99
#adjust_left = (adjust_right) * (sumTextWidthAnnotationInPixel/(sumTextWidthAnnotationInPixel+lengthAlignment*widthSingleCharInPixel))
adjust_top = max(1.0 - float(2)/numSeq, 0.7)
adjust_bottom = min(float(2)/numSeq,0.3)
logger.debug( "adjust_left=%d"%adjust_left)
logger.debug( "adjust_right=%d"%adjust_right)
logger.debug( "adjust_top=%d"%adjust_top)
logger.debug( "adjust_bottom=%d"%adjust_bottom)
subplot1_width_ratio = (adjust_right-adjust_left)
subplot1_height_ratio = float(heightRatios[0])/sum(heightRatios)*(adjust_top-adjust_bottom)
#subplot1_width_ratio = 1.0/(1.0+0.2+0.2+adjust_left)
widthUnitSubplot1InPixel = widthUnitFigureInPixel*subplot1_width_ratio
heightUnitSubplot1InPixel = heightUnitFigureInPixel*subplot1_height_ratio
widthscale = float(sumTextWidthInPixel)/widthUnitSubplot1InPixel+0.00
heightscale = float(sumTextHeightInPixel)/heightUnitSubplot1InPixel+0.02
logger.debug( "sumTextWidthInPixel=%d, sumTextHeightInPixel=%d"% (sumTextWidthInPixel, sumTextHeightInPixel))
logger.debug( "widthscale=%s, heightscale=%s"%(str(widthscale), str(heightscale)))
widthSubplot1InPixel = widthUnitSubplot1InPixel * widthscale
heightSubplot1InPixel = heightUnitSubplot1InPixel * heightscale
logger.debug( "widthSubplot1InPixel=%d"%widthSubplot1InPixel)
logger.debug( "heightSubplot1InPixel=%d"% heightSubplot1InPixel)
widthSingleCharInAxes = float(widthSingleCharInPixel)/widthSubplot1InPixel
heightSingleCharInAxes = float(heightSingleCharInPixel)/heightSubplot1InPixel
widthAnnotationInAxes = float(sumTextWidthAnnotationInPixel)/widthSubplot1InPixel
linespaceInAxes = float(linespaceInPixel)/heightSubplot1InPixel
logger.debug("widthSingleCharInAxes=%d, heightSingleCharInAxes=%d"%(
widthSingleCharInAxes, heightSingleCharInAxes))
fontsize_tics = 18
fontsize_label = 24
# fontsize_tics = 18/3
# fontsize_label = 24/3
# create figure object
figsize = (8*widthscale, 6*heightscale) # fig size in inches (width,height)
fig = plt.figure(figsize = figsize) # set the figsize
fig.subplots_adjust(left=adjust_left, right=adjust_right, top=adjust_top, bottom=adjust_bottom)
plt.rc('legend',**{'fontsize':fontsize_label})
if g_params['isDrawDGprofile']:
ax = fig.add_subplot(gs[0])
else:
ax = fig.add_subplot(111)
#ax.axis('off')
inv = ax.transAxes.inverted()
loc_xtics = []
label_xtics = []
if posindexmap != {}:
for jj in range(0, lengthAlignmentShrinked, 10):
loc_xtics.append(jj)
label_xtics.append(posindexmap[int(jj*shrinkrate)])
else:
for jj in range(0, lengthAlignmentShrinked, 10):
loc_xtics.append(jj)
label_xtics.append(int(jj*shrinkrate))
ax.set_xlim(0, lengthAlignmentShrinked)
ax.xaxis.set_visible(False)
#ax.set_xlabel("Sequence position", fontsize=16)
plt.xticks(np.array(loc_xtics), np.array(label_xtics))
ax2 = ax.twiny()
#ax2.set_xlabel("Alignment position", fontsize=16)
ax2.set_xlim(0,lengthAlignmentShrinked)
plt.xticks(np.array(loc_xtics), np.array(label_xtics), fontsize=fontsize_tics)
plt.tick_params(labelsize=fontsize_tics, direction='out', pad=10)
ax.set_ylim(numSeq,0)
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['right'].set_visible(True)
# # make a plot of sequence indexes
# l1 = []
# l2 = []
# for j in xrange(lengthAlignment):
# if posindexmap != {}:
# idxAlignedSeq = posindexmap[j]
# else:
# idxAlignedSeq = j
# l1.append(idxAlignedSeq)
# l2.append(0)
# plt.plot(l1,l2, ' ')
x0 = 0
y0 = 1.0 - linespaceInAxes - heightSingleCharInAxes
IOSTATE_LIST = ["i","o"]
row_height = heightSingleCharInAxes + linespaceInAxes
# plot symbol annotation above the alignment
# blue upper hyphen Outside loop
# red under hyphen Inside loop
# grey box TM helix (In -> out)
# white box TM helix (Out -> In)
line1 = Line2D(list(range(1)), list(range(1)), color="red", marker='', markersize=5, markerfacecolor="red")
line2 = Line2D(list(range(1)), list(range(1)), color="blue", marker='', markersize=5, markerfacecolor="blue")
line3 = Line2D(list(range(1)), list(range(1)), color="white", marker='s', markersize=20, markerfacecolor="grey")
line4 = Line2D(list(range(1)), list(range(1)), color="white", marker='s', markersize=20,markerfacecolor="white")
legend = plt.legend((line1,line2,line3,line4),('Inside loop','Outside loop', 'TM helix (In -> Out)',
'TM helix (Out -> In)'),numpoints=1, loc='upper center', bbox_to_anchor=(0.5,
2.0), ncol=4, fancybox=False, shadow=False)
legend.draw_frame(False)
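    # The Line2D objects above are proxy artists: they are never plotted and
    # only supply the legend entries for the topology symbols.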
for i in range(len(topoSeqList)):
# write sequence description
anno = "%-*s"%(maxSizeAnno+5, newAnnoList[i])
x = x0 - widthAnnotationInAxes
y = y0 - i*row_height
plt.text(x, y, anno, fontproperties=fp_anno, transform=ax.transAxes)
if isDrawKRBias:
seq = krSeqList[i]
else:
seq = ""
# if aaSeqList[i] != "":
# seq = aaSeqList[i]
# else:
# seq = topoSeqList[i]
topo = topoSeqList[i]
posTM = myfunc.GetTMPosition(topo)
NtermState = lcmp.GetNtermState(topo)
if len(posTM) == 0: # if non TM protein, just draw the sequence if specified
x = x0
y = y0 - row_height*i
txt = seq
plt.text(x, y, txt, fontproperties=fp, transform=ax.transAxes)
else: # draw TM regions and loops
# terminal gaps are ignored
li = []
for (b, e) in posTM: # get list of segment borders
li.append(b)
li.append(e)
for j in range(len(li)+1):
# for TM helices, j-1 is even, j is odd
if j == 0:
# ignore the N-terminal gaps
m = re.search('^-*', topo)
begin = len(m.group(0))
else:
begin = li[j-1]
if j != len(li):
end = li[j]
else:
# ignore the C-terminal gaps
m = re.search('-*$', topo)
end = lengthAlignment - len(m.group(0))
if isDrawKRBias: # draw positively charged K and R if enabled
y = y0 - row_height*i
for jpos in range(begin, end):
char = seq[jpos]
if char in ["K", "R"]:
x = x0 + jpos*widthSingleCharInAxes/shrinkrate
plt.text(x, y, char, fontproperties=fp,
transform=ax.transAxes)
txt_topo = topo[begin:end].replace(GAP," ")
type_topo_stat = "" #the state can be [IN, OUT, TM_IN_OUT, TM_OUT_IN]
# setting properties for loops and TM regions
if txt_topo.find('M')!=-1:
if lcmp.Get_IOState_upstream(topo, li[j-1]) == 'i':
type_topo_stat = "TM_IN_OUT"
edgecolor = 'black'
facecolor = 'grey'
else:
type_topo_stat = "TM_OUT_IN"
edgecolor = 'black'
facecolor = 'white'
elif txt_topo.find('i') != -1: #inside
type_topo_stat = "IN"
color = 'red'
elif txt_topo.find('o') != -1: #outside
type_topo_stat = "OUT"
color = 'blue'
else:
facecolor = 'none'
edgecolor = 'none'
width = widthSingleCharInAxes * (end-begin)/shrinkrate
height = heightSingleCharInAxes + linespaceInAxes/2.0
if type_topo_stat.find("TM") != -1: # draw TM regions
x = x0 + begin*widthSingleCharInAxes/shrinkrate
y = y0 - row_height*i - linespaceInAxes/4.0
rec = matplotlib.patches.Rectangle((x, y), width,
height, facecolor=facecolor,
edgecolor=edgecolor, transform=ax.transAxes)
ax.add_patch(rec)
elif type_topo_stat in ["IN", "OUT"]: # draw loops
if type_topo_stat == "IN":
x1 = x0 + begin*widthSingleCharInAxes/shrinkrate
y1 = y0 - row_height*i - linespaceInAxes/4.0
x2 = x1 + width
y2 = y1
else: #OUT
x1 = x0 + begin*widthSingleCharInAxes/shrinkrate
y1 = y0 - row_height*i + heightSingleCharInAxes + linespaceInAxes/4.0
x2 = x1 + width
y2 = y1
ax.plot([x1, x2], [y1, y2], color=color, linestyle='-',
linewidth=2, transform=ax.transAxes)
if g_params['isDrawDGprofile']:
dgprofileDict = {} #{{{
if os.path.exists(g_params['DGProfileFile']):
dgprofileDict = ReadInDGProfile(g_params['DGProfileFile'])
for i in range(numSeq):
seqID = idList[i]
toposeq = topoSeqList[i]
lengthAlignment = len(toposeq)
if (not seqID in dgprofileDict) and (seqID in aaSeqDict):
aaseq = aaSeqDict[seqID]
#print "aaseq=", aaseq
tmpaaseqfile = tempfile.mktemp()
tmpdgpfile = tempfile.mktemp()
tmpfp = open(tmpaaseqfile, 'w')
tmpfp.write(">%s\n"%seqID)
tmpfp.write("%s\n"%aaseq)
tmpfp.close()
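                # run the external dgscan program on the temporary FASTA file
                # to compute the DG profile with a fixed window length of 21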
cmd = [g_params['dgscanProg'], tmpaaseqfile, "-lmin", "21", "-lmax",
"21", "-o", tmpdgpfile]
cmdline = " ".join(cmd)
print(cmdline)
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
tmpDGPDict = ReadInDGProfile(tmpdgpfile)
os.remove(tmpaaseqfile)
os.remove(tmpdgpfile)
for seqid in tmpDGPDict:
dgprofileDict[seqid] = tmpDGPDict[seqid]
if g_params['isPrintDebugInfo']:
print(dgprofileDict)
#}}}
ax = fig.add_subplot(gs[1])
#ax.set_xlim(0, lengthAlignmentOriginal)
ax.set_xlim(0, lengthAlignmentShrinked)
ax.set_xlabel("Alignment position", fontsize=fontsize_label, labelpad=20)
ax.set_ylabel(r"${\Delta}G$ (kJ/mol)", fontsize=fontsize_label)
plt.xticks(np.array(loc_xtics), np.array(label_xtics), fontsize=fontsize_tics)
plt.tick_params(labelsize=fontsize_tics, direction='out', pad=10)
for i in range(numSeq):
seqid = idList[i]
alignedTopoOriginal = alignedTopoSeqList[i]
align2seqMap = lcmp.GetAlign2SeqMap(alignedTopoOriginal,
alignedTopoOriginal.replace(GAP,""))
#print "align2seqMap=", align2seqMap
print("posindexmap=", posindexmap)
#print "dgprofileDict[%s]"%seqid, dgprofileDict[seqid]
try:
dgp = dgprofileDict[seqid]
dt = {}
for tup in dgp:
dt[tup[0]] = tup[1]
x = []
y = []
for j in range(lengthAlignmentShrinked):
if posindexmap != {}:
                        try:
                            idxAlignedSeq = posindexmap[int(j*shrinkrate+0.5)]
                        except KeyError:
                            logger.debug("j=%d not in posindexmap"%j)
                            continue  # skip unmapped positions instead of reusing a stale index
else:
idxAlignedSeq = int(j*shrinkrate)
                    try:
                        idxSeq = align2seqMap[idxAlignedSeq]
                    except KeyError:
                        #print "idxAlignedSeq=%d not in align2seqMap"%idxAlignedSeq
                        continue  # alignment column has no residue in this sequence
if idxSeq in dt:
x.append(j)
y.append(dt[idxSeq])
else:
# print "idxSeq=%d not in dgp, idxAlignedSeq=%d"%(idxSeq, idxAlignedSeq)
pass
if i < len(colorList_DG_profile):
color = colorList_DG_profile[i]
else:
color = 'none'
if g_params['isPrintDebugInfo']:
print("DG-x:",x)
print("DG-y:",y)
# plot by line
plt.plot(x,y, label=seqid, color=color)
# plot with '+' symbol
# plt.plot(x,y, '+', label=seqid, color=color)
plt.hlines(0, 0, lengthAlignmentOriginal)
plt.legend()
except KeyError:
print("no dgprofile for %s"%(seqid))
pass
plt.savefig(pdffile)
print("%s output"%(pdffile))
cmd = "pdfcrop --margins '%d %d %d %d' --clip %s"%(pdfcrop_margin_left,
pdfcrop_margin_top, pdfcrop_margin_right, pdfcrop_margin_bottom,
pdffile)
os.system(cmd)
pdf_cropfile = os.path.splitext(pdffile)[0]+"-crop.pdf"
pngfile = os.path.splitext(pdffile)[0] + "-crop.png"
thumb_pngfile = os.path.splitext(pdffile)[0] + "-crop.thumb.png"
cmd = ["convert", pdffile, pngfile]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
cmd = ["convert", "-thumbnail", "100", pngfile, thumb_pngfile]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
# Write Txtformat alignment
#print final2seq_idxMapList
htmlfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'html')
# WriteTXTAlignment(idList, newAnnoList, topoSeqList, alignedTopoSeqList,
# aaSeqList, final2seq_idxMapList, txtfile)
if len(idList) == 2:
#WriteHTMLAlignment2(idList, newAnnoList, topoSeqList,
# alignedTopoSeqList, aaSeqList, final2seq_idxMapList, htmlfile)
tmpmapList = []
for i in range(len(alignedTopoSeqList)):
tmpmap = {}
for j in range(len(alignedTopoSeqList[i])):
tmpmap[j] = j
tmpmapList.append(tmpmap)
WriteHTMLAlignment2(idList, newAnnoList, alignedTopoSeqList,
alignedTopoSeqList, aaSeqAlignList, tmpmapList, htmlfile)
elif len(idList) > 2:
WriteHTMLAlignment3(idList, newAnnoList, topoSeqList,
alignedTopoSeqList, aaSeqList, final2seq_idxMapList, htmlfile)
#}}}
def GetAlignedRegion(annotationList, topoSeqList):#{{{
"""Get aligned region from the description line
return a list of tuples [(beg, end)], starting from 0
In the annotation line, starting and both indeces are included
"""
li = []
    for i in range(len(annotationList)):
aligned_region = myfunc.GetAlignedRegionFromAnnotation(annotationList[i], method=0)
if aligned_region[0] == -1:
logger.debug("seqNo %d, aligned_region bad format, desp=\"\""%(i+1, annotationList[i]))
aligned_region = (0, len(topoSeqList[i]))
li.append(aligned_region)
return li
#}}}
def DrawMSATopo_MAT_Core_unalign_rainbow(inFile, g_params):#{{{
"""Draw topology alignment core unaligned with rainbow color
"""
logger = logging.getLogger(__name__)
logger.debug("method=%s"%(g_params['method']))
(idList, annotationList, orig_topoSeqList) = myfunc.ReadFasta(inFile)
lst_aligned_region = GetAlignedRegion(annotationList, orig_topoSeqList)
numSeq = len(idList)
if numSeq < 1:
logger.debug("No sequence in the file %s. Ignore." %(inFile))
return 1
topoSeqList = []
lst_unalignedNterm = []
lst_unalignedCterm = []
for i in range(numSeq):
(beg, end) = lst_aligned_region[i]
topoSeqList.append(orig_topoSeqList[i][beg:end])
lst_unalignedNterm.append(orig_topoSeqList[i][0:beg])
lst_unalignedCterm.append(orig_topoSeqList[i][end:len(orig_topoSeqList[i])])
maxlen_unaligned_nterm = max([len(x) for x in lst_unalignedNterm])
maxlen_unaligned_cterm = max([len(x) for x in lst_unalignedCterm])
orig_posTMList = [ myfunc.GetTMPosition(topo) for topo in orig_topoSeqList]
    numTMList = [len(posTM) for posTM in orig_posTMList]
blue = Color("blue")
red = Color("red")
lst_color = [list(blue.range_to(red,numTM)) for numTM in numTMList]
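    # one colour gradient per sequence: TM helices are coloured from blue to
    # red along the sequence, so helix order is comparable across proteins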
lengthAlignmentOriginal = len(topoSeqList[0])
maxDistKR = g_params['maxDistKR']
marginX = g_params['marginX']
marginY = g_params['marginY']
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
heightScaleBar = g_params['heightScaleBar']
heightTMbox = g_params['heightTMbox']
scaleSeprationLine = g_params['scaleSeprationLine']
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
pdfcrop_margin_left = g_params['pdfcrop_margin_left']
pdfcrop_margin_top = g_params['pdfcrop_margin_top']
pdfcrop_margin_right = g_params['pdfcrop_margin_right']
pdfcrop_margin_bottom = g_params['pdfcrop_margin_bottom']
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
isDrawKRBias = g_params['isDrawKRBias']
str_krbias = ""
    if g_params['isDrawKRBias']:
str_krbias = ".krbias"
svgfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'svg')
pdffile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'pdf')
txtfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'txtplot')
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
logger.info("before adding Ms")
logger.info("topoSeqList")
for i in range(numSeq):
logger.debug("%s - numTM = %d"%(idList[i],len(myfunc.GetTMPosition(topoSeqList[i]))))
# add 'M's if the terminal at TM helix of the aligned region is chopped
# to be too short, topoSeqList is the aligned region
# 1. for the N-terminal at the aligned region
MIN_LENGTH_OF_TM_TO_DRAWN = 6
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
    lst_length_of_first_TM_withgaps = []
    for posTM in posTMList:
        if len(posTM) > 0:
            lenFirstTM = posTM[0][1]-posTM[0][0]
        else:
            lenFirstTM = 21
        lst_length_of_first_TM_withgaps.append(lenFirstTM)
min_length_of_first_TM_withgaps = min(lst_length_of_first_TM_withgaps)
if min_length_of_first_TM_withgaps < MIN_LENGTH_OF_TM_TO_DRAWN: # if the TM is chopped
num_Ms_to_add = MIN_LENGTH_OF_TM_TO_DRAWN - min_length_of_first_TM_withgaps
newTopoSeqList = []
for top in topoSeqList:
if lcmp.Get_nongap_downstream(top, 0) == "M":
newTopoSeqList.append("M"*num_Ms_to_add+top)
else:
newTopoSeqList.append("-"*num_Ms_to_add+top)
topoSeqList = newTopoSeqList
# 2. dealing with the last TM in the aligned region
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
    lst_length_of_last_TM_withgaps = []
    for posTM in posTMList:
        if len(posTM) > 0:
            lenLastTM = posTM[-1][1]-posTM[-1][0]
        else:
            lenLastTM = 21
        lst_length_of_last_TM_withgaps.append(lenLastTM)
min_length_of_last_TM_withgaps = min(lst_length_of_last_TM_withgaps)
if min_length_of_last_TM_withgaps < MIN_LENGTH_OF_TM_TO_DRAWN: # if the TM is chopped
num_Ms_to_add = MIN_LENGTH_OF_TM_TO_DRAWN - min_length_of_last_TM_withgaps
newTopoSeqList = []
for top in topoSeqList:
if lcmp.Get_nongap_uppstream(top, len(top)-1) == "M":
newTopoSeqList.append(top+ "M"*num_Ms_to_add)
else:
newTopoSeqList.append(top+ "-"*num_Ms_to_add)
topoSeqList = newTopoSeqList
posTMList = [myfunc.GetTMPosition(x) for x in topoSeqList]
alignedTopoSeqList = topoSeqList
logger.info("after adding Ms")
logger.info("topoSeqList")
for i in range(numSeq):
logger.debug("%s - numTM = %d"%(idList[i],len(myfunc.GetTMPosition(topoSeqList[i]))))
#===========================================
posindexmap = {}
method_shrink = g_params['method_shrink']
aaSeqAlignList = [] # list of original aligned aa seq list
for i in range(numSeq):
seqid = idList[i]
try:
aaseq = aaSeqDict[seqid]
aaseq = MatchToAlignedSeq(aaseq, alignedTopoSeqList[i], seqid)
except KeyError:
aaseq = " "*lengthAlignmentOriginal
aaSeqAlignList.append(aaseq)
if g_params['isShrink']:
if g_params['method_shrink'] == 0:
posindexmap = ShrinkGapInMSA_0(idList, topoSeqList)
elif g_params['method_shrink'] == 1:
posindexmap = ShrinkGapInMSA_exclude_TMregion(idList, topoSeqList)
elif g_params['method_shrink'] == 2:
(idxmap_align2shrink, idxmap_shrink2align) =\
ShrinkMSA_Method_2(topoSeqList, aaSeqAlignList, posTMList,
g_params['shrinkrate_TM'], g_params['max_hold_loop'],
g_params['isDrawKRBias'])
posindexmap = idxmap_shrink2align
logger.info("222222222222222222222")
logger.info("topoSeqList")
for i in range(numSeq):
logger.debug("%s - numTM = %d"%(idList[i],len(myfunc.GetTMPosition(topoSeqList[i]))))
posTM = myfunc.GetTMPosition(topoSeqList[0])
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
# print tagList
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
# added 2013-12-04
    if g_params['shrinkrate'] is not None:
shrinkrate = g_params['shrinkrate'] # shrink the sequence proportionally
else:
shrinkrate = lengthAlignment/120.0
lengthAlignmentShrinked = int(lengthAlignment/shrinkrate+0.5)
logger.debug("lengthAlignment=%d"%lengthAlignment)
logger.debug("shrinkrate=%s"%(str(shrinkrate)))
logger.debug("lengthAlignmentShrinked=%d"%(lengthAlignmentShrinked))
maxlen_unaligned_cterm_shrinked = int(maxlen_unaligned_cterm/shrinkrate+0.5)
maxlen_unaligned_nterm_shrinked = int(maxlen_unaligned_nterm/shrinkrate+0.5)
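    # the unaligned N- and C-terminal stretches are drawn on the same
    # shrunken x scale as the aligned region, so their maximum lengths are
    # rescaled here as well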
newAnnoList = []
for i in range(len(topoSeqList)):
try:
anno = myfunc.GetFirstWord(annotationList[i]).split('|')[0]
except:
anno = ""
anno = anno[:g_params['MAX_SIZE_ANNOTATION']]
newAnnoList.append("%s %s"%(anno, tagList[i]))
maxSizeAnno = max([len(x) for x in newAnnoList])
logger.debug("maxSizeAnno=%d"%(maxSizeAnno))
fonttype = 'monospace'
numSeq = len(topoSeqList)
logger.info("after shrinking")
logger.info("topoSeqList")
for i in range(numSeq):
logger.debug("%s - numTM = %d"%(idList[i],len(myfunc.GetTMPosition(topoSeqList[i]))))
    aaSeqList = [] # list of amino acid sequences, aligned and shrunk if enabled
    final2seq_idxMapList = [] # the index map from the final (shrunk or not)
                              # sequence to the original unaligned sequence.
krSeqList = [] # seqlist for positively charged residues, KR
for i in range(numSeq):
seqID = idList[i]
idxmap_aligne2seq = lcmp.GetAlign2SeqMap(alignedTopoSeqList[i],
alignedTopoSeqList[i].replace(GAP,""))
idxmap = {}
if seqID in aaSeqDict:
aaseq = aaSeqDict[seqID]
#print aaseq
#print alignedTopoSeqList[i]
            # alignedTopoSeqList is the original (non-shrunk) MSA
aaseq = MatchToAlignedSeq(aaseq, alignedTopoSeqList[i], seqID)
tmpaaseq = ""
tmpKRseq = ""
for pp in range(lengthAlignment):
if posindexmap != {}:
jseq = posindexmap[pp]
else:
jseq = pp
idxmap[pp] = idxmap_aligne2seq[jseq]
aa = aaseq[jseq]
tmpaaseq += (aa)
if isDrawKRBias:
if (aa in ["K", "R"] and not
IsOutofMaxDistKR(posTMList[i], jseq,
maxDistKR)):
tmpKRseq += aa
if g_params['isPrintDebugInfo']:
print(seqID, aa, jseq, posTMList[i])
else:
tmpKRseq += " "
aaSeqList.append(tmpaaseq)
krSeqList.append(tmpKRseq)
else:
aaSeqList.append("")
krSeqList.append("")
final2seq_idxMapList.append(idxmap)
# debug
if g_params['isPrintDebugInfo']:
print("print shrinked toposeq and krseq")
for k in range(numSeq):
print("%s\t%s"%(idList[k], topoSeqList[k]))
print("%s\t%s"%(idList[k], krSeqList[k]))
sys.stdout.flush()
# setting font properties
#ffam = "monospace"
#ffam = "Fixed"
ffam = "Courier New"
fontpath = "%s/%s"%(g_params['font_dir'], "Courier_New.ttf")
fontsize = 18
fp = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=fontsize,
weight='normal', stretch='normal')
ffam = "Arial"
fontpath = "%s/%s"%(g_params['font_dir'], "Arial.ttf")
fp_anno = matplotlib.font_manager.FontProperties(
fname=fontpath, family=ffam, style='normal', size=20,
weight='normal', stretch='normal')
# get the text width and height in pixels
x=0
y=0
linespaceInPixel = 36
ss = "M"*1
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp)
bb = pth.get_extents(transform=None)
widthSingleCharInPixel = float(bb.width)/len(ss)
heightSingleCharInPixel = float(bb.height)
logger.debug("charwidth=%d, charheight=%d"%( widthSingleCharInPixel, heightSingleCharInPixel))
widthAnnoInPixel = 0
for i in range(numSeq):
        ss = newAnnoList[i] + "M"*2 # pad with two characters' width of space
pth = matplotlib.textpath.TextPath((x, y), ss, prop=fp_anno)
bb = pth.get_extents(transform=None)
wtd = float(bb.width)/len(ss.replace(" ",""))*len(ss)
if wtd > widthAnnoInPixel:
widthAnnoInPixel = wtd
logger.debug( "widthAnnoInPixel=%d"%(widthAnnoInPixel))
sumTextHeightInPixel = (heightSingleCharInPixel + linespaceInPixel)*(numSeq+1)
sumTextWidthInPixel = (widthSingleCharInPixel)*(lengthAlignmentShrinked+maxlen_unaligned_nterm_shrinked+maxlen_unaligned_cterm_shrinked+1)
#sumTextWidthInPixel = (widthSingleCharInPixel)*(lengthAlignmentShrinked+maxlen_unaligned_cterm_shrinked+1)
sumTextWidthAnnotationInPixel = widthAnnoInPixel
logger.debug("lengthAlignment=%d"% lengthAlignment)
logger.debug("sumTextWidthAnnotationInPixel=%d"% sumTextWidthAnnotationInPixel)
logger.debug("sumTextWidthInPixel=%d"% sumTextWidthInPixel)
logger.debug("sumTextHeightInPixel=%d"% sumTextHeightInPixel)
# set aspect ratio
if g_params['isDrawDGprofile']:
heightRatios = [numSeq, 5]
gs = gridspec.GridSpec(2, 1, height_ratios=heightRatios)
else:
heightRatios = [1]
logger.debug( "heightRatios=%s"%str( heightRatios))
widthUnitFigureInPixel = 8*80
heightUnitFigureInPixel = 6*80
adjust_left = float(maxSizeAnno+5)/lengthAlignmentShrinked
adjust_right = 0.99
adjust_top = max(1.0 - float(2)/numSeq, 0.65)
adjust_bottom = min(float(2)/numSeq,0.3)
logger.debug( "adjust_left=%d"%adjust_left)
logger.debug( "adjust_right=%d"%adjust_right)
logger.debug( "adjust_top=%d"%adjust_top)
logger.debug( "adjust_bottom=%d"%adjust_bottom)
subplot1_width_ratio = (adjust_right-adjust_left)
subplot1_height_ratio = float(heightRatios[0])/sum(heightRatios)*(adjust_top-adjust_bottom)
#subplot1_width_ratio = 1.0/(1.0+0.2+0.2+adjust_left)
widthUnitSubplot1InPixel = widthUnitFigureInPixel*subplot1_width_ratio
heightUnitSubplot1InPixel = heightUnitFigureInPixel*subplot1_height_ratio
widthscale = float(sumTextWidthInPixel)/widthUnitSubplot1InPixel+0.00
heightscale = float(sumTextHeightInPixel)/heightUnitSubplot1InPixel+0.02
logger.debug( "sumTextWidthInPixel=%d, sumTextHeightInPixel=%d"% (sumTextWidthInPixel, sumTextHeightInPixel))
logger.debug( "widthscale=%s, heightscale=%s"%(str(widthscale), str(heightscale)))
widthSubplot1InPixel = widthUnitSubplot1InPixel * widthscale
heightSubplot1InPixel = heightUnitSubplot1InPixel * heightscale
logger.debug( "widthSubplot1InPixel=%d"%widthSubplot1InPixel)
logger.debug( "heightSubplot1InPixel=%d"% heightSubplot1InPixel)
widthSingleCharInAxes = float(widthSingleCharInPixel)/widthSubplot1InPixel
heightSingleCharInAxes = float(heightSingleCharInPixel)/heightSubplot1InPixel
widthAnnotationInAxes = float(sumTextWidthAnnotationInPixel)/widthSubplot1InPixel
linespaceInAxes = float(linespaceInPixel)/heightSubplot1InPixel
logger.debug("widthSingleCharInAxes=%d, heightSingleCharInAxes=%d"%(
widthSingleCharInAxes, heightSingleCharInAxes))
fontsize_tics = 18
fontsize_label = 24
# fontsize_tics = 18/3
# fontsize_label = 24/3
# create figure object
figsize = (8*widthscale, 6*heightscale) # fig size in inches (width,height)
fig = plt.figure(figsize = figsize) # set the figsize
fig.subplots_adjust(left=adjust_left, right=adjust_right, top=adjust_top, bottom=adjust_bottom)
plt.rc('legend',**{'fontsize':fontsize_label})
if g_params['isDrawDGprofile']:
ax = fig.add_subplot(gs[0])
else:
ax = fig.add_subplot(111)
#ax.axis('off')
inv = ax.transAxes.inverted()
# label_xtics is just for the aligned region
loc_xtics = []
label_xtics = []
if posindexmap != {}:
for jj in range(0, lengthAlignmentShrinked, 10):
loc_xtics.append(jj)
label_xtics.append(posindexmap[int(jj*shrinkrate)])
else:
for jj in range(0, lengthAlignmentShrinked, 10):
loc_xtics.append(jj)
label_xtics.append(int(jj*shrinkrate))
ax.set_xlim(0, lengthAlignmentShrinked)
ax.xaxis.set_visible(False)
#ax.set_xlabel("Sequence position", fontsize=16)
#plt.xticks(np.array(loc_xtics), np.array(label_xtics))
#ax2 = ax.twiny()
#ax2.set_xlabel("Alignment position", fontsize=16)
#ax2.set_xlim(0,lengthAlignmentShrinked)
#plt.xticks(np.array(loc_xtics), np.array(label_xtics), fontsize=fontsize_tics)
plt.tick_params(labelsize=fontsize_tics, direction='out', pad=10)
ax.set_ylim(numSeq,0)
ax.yaxis.set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
x0 = 0
y0 = 1.0 - linespaceInAxes - heightSingleCharInAxes
IOSTATE_LIST = ["i","o"]
row_height = heightSingleCharInAxes + linespaceInAxes
# plot symbol annotation above the alignment
# blue upper hyphen Outside loop
# red under hyphen Inside loop
# grey box TM helix (In -> out)
# white box TM helix (Out -> In)
line1 = Line2D(list(range(1)), list(range(1)), color="red", marker='', markersize=5, markerfacecolor="red")
line2 = Line2D(list(range(1)), list(range(1)), color="blue", marker='', markersize=5, markerfacecolor="blue")
#line3 = Line2D(range(1), range(1), color="white", marker='s', markersize=20, markerfacecolor="grey")
#line4 = Line2D(range(1), range(1), color="white", marker='s', markersize=20,markerfacecolor="white")
# legend = plt.legend((line1,line2,line3,line4),('Inside loop','Outside loop', 'TM helix (In -> Out)',
# 'TM helix (Out -> In)'),numpoints=1, loc='upper center', bbox_to_anchor=(0.5,
# 2.0), ncol=4, fancybox=False, shadow=False)
legend = plt.legend((line1,line2),('Inside loop','Outside loop'),
numpoints=1, loc='upper center', bbox_to_anchor=(0.5,
1.5), ncol=2, fancybox=False, shadow=False)
legend.draw_frame(True)
#=================================================
#draw topology of each sequence
#=================================================
# draw a transparent box for the aligned region
shiftx = (widthSingleCharInAxes*maxlen_unaligned_nterm)/shrinkrate
x = x0 - widthSingleCharInAxes/shrinkrate + shiftx
y = y0 - row_height*(numSeq) + linespaceInAxes*0.625
width = widthSingleCharInAxes * (lengthAlignment+2)/shrinkrate
height = row_height*(numSeq+1) - linespaceInAxes
facecolor = "#CCCCEF"
edgecolor = 'black'
rec = matplotlib.patches.Rectangle((x, y), width,
height, alpha=0.6, linewidth=3, linestyle="solid", facecolor=facecolor,
edgecolor=edgecolor, transform=ax.transAxes)
ax.add_patch(rec)
for i in range(numSeq):
# write sequence description
anno = "%-*s"%(maxSizeAnno+5, newAnnoList[i])
x = x0 - widthAnnotationInAxes
y = y0 - i*row_height
plt.text(x, y, anno, fontproperties=fp_anno, transform=ax.transAxes)
if isDrawKRBias:
seq = krSeqList[i]
else:
seq = ""
#------------------------------
# Draw aligned region and unaligned C-terminal
#------------------------------
idxTM_drawn_set = set([]) # set of TM indices already drawn, to avoid
# drawing an index twice when a TM helix crosses a region border
for item in ["unaligned_N_term", "aligned_region", "unaligned_C_term"]:
#for item in ["aligned_region"]:
#for item in ["unaligned_C_term"]:
#for item in ["unaligned_N_term"]:
if item == "aligned_region":
topo = topoSeqList[i]
shiftx = (widthSingleCharInAxes*maxlen_unaligned_nterm)/shrinkrate
elif item == "unaligned_C_term":
topo = lst_unalignedCterm[i]
shiftx = (widthSingleCharInAxes*(len(topoSeqList[0])+maxlen_unaligned_nterm))/shrinkrate
elif item == "unaligned_N_term":
topo = lst_unalignedNterm[i]
shiftx = (widthSingleCharInAxes*(maxlen_unaligned_nterm-len(lst_unalignedNterm[i])))/shrinkrate
posTM = myfunc.GetTMPosition(topo)
logger.debug("%s - %s: %s"%(anno, item, topo))
if len(posTM) == 0: # if non TM protein, just draw the sequence if specified
x = x0 + shiftx
y = y0 - row_height*i
txt = seq
plt.text(x, y, txt, fontproperties=fp, transform=ax.transAxes)
else: # draw TM regions and loops
# terminal gaps are ignored
li = []
for (b, e) in posTM: # get list of segment borders
li.append(b)
li.append(e)
for j in range(len(li)+1):
# for TM helices, j-1 is even, j is odd
numTM_unalignedNterm = len(myfunc.GetTMPosition(lst_unalignedNterm[i]))
if item == "unaligned_N_term":
idxTM = j//2
elif item == "aligned_region":
idxTM = j//2 + numTM_unalignedNterm
if (len(lst_unalignedNterm[i]) > 0
and (lst_unalignedNterm[i][-1] == "M" and topoSeqList[i][0]=="M")):
idxTM -= 1
elif item == "unaligned_C_term":
idxTM = j//2 + len(myfunc.GetTMPosition(lst_unalignedNterm[i]+topoSeqList[i]))
if topoSeqList[i][-1] == "M" and lst_unalignedCterm[i][0] == "M":
idxTM -= 1
try:
color = lst_color[i][idxTM].get_hex_l()
except (IndexError, KeyError):
color = "red"
# elif item in ["unaligned_C_term", "unaligned_N_term"]:
# color = "#EEEEEE"
if j == 0:
# ignore the N-terminal gaps
m = re.search('^-*', topo)
begin = len(m.group(0))
else:
begin = li[j-1]
if j != len(li):
end = li[j]
else:
# ignore the C-terminal gaps
m = re.search('-*$', topo)
end = len(topo) - len(m.group(0))
if isDrawKRBias: # draw positively charged K and R if enabled
y = y0 - row_height*i
for jpos in range(begin, end):
char = seq[jpos]
if char in ["K", "R"]:
x = x0 + jpos*widthSingleCharInAxes/shrinkrate
plt.text(x, y, char, fontproperties=fp,
transform=ax.transAxes)
txt_topo = topo[begin:end].replace(GAP," ")
type_topo_stat = "" #the state can be [IN, OUT, TM_IN_OUT, TM_OUT_IN]
# setting properties for loops and TM regions
if txt_topo.find('M')!=-1:
if lcmp.Get_IOState_upstream(topo, li[j-1]) == 'i':
type_topo_stat = "TM_IN_OUT"
edgecolor = 'black'
#facecolor = 'grey'
facecolor = color
else:
type_topo_stat = "TM_OUT_IN"
edgecolor = 'black'
#facecolor = 'white'
facecolor = color
elif txt_topo.find('i') != -1: #inside
type_topo_stat = "IN"
color = 'red'
elif txt_topo.find('o') != -1: #outside
type_topo_stat = "OUT"
color = 'blue'
else:
facecolor = 'none'
edgecolor = 'none'
width = widthSingleCharInAxes * (end-begin)/shrinkrate
height = heightSingleCharInAxes + linespaceInAxes/2.0
if type_topo_stat.find("TM") != -1: # draw TM regions
x = x0 + begin*widthSingleCharInAxes/shrinkrate + shiftx
y = y0 - row_height*i - linespaceInAxes/4.0
rec = matplotlib.patches.Rectangle((x, y), width,
height, facecolor=facecolor,
edgecolor=edgecolor, transform=ax.transAxes)
ax.add_patch(rec)
# draw TM indices
if idxTM not in idxTM_drawn_set:
x = x + width/2.5
y = y + height/4.0
plt.text(x, y, "%d"%(idxTM+1), fontproperties=fp_anno, transform=ax.transAxes)
idxTM_drawn_set.add(idxTM)
logger.debug("idxTM=%d"%(idxTM))
# draw loops
elif type_topo_stat in ["IN", "OUT"]: # draw loops
if type_topo_stat == "IN":
x1 = x0 + begin*widthSingleCharInAxes/shrinkrate + shiftx
y1 = y0 - row_height*i - linespaceInAxes/4.0
x2 = x1 + width
y2 = y1
else: #OUT
x1 = x0 + begin*widthSingleCharInAxes/shrinkrate + shiftx
y1 = y0 - row_height*i + heightSingleCharInAxes + linespaceInAxes/4.0
x2 = x1 + width
y2 = y1
ax.plot([x1, x2], [y1, y2], color=color, linestyle='-',
linewidth=2, transform=ax.transAxes)
if g_params['isDrawDGprofile']:
dgprofileDict = {} #{{{
if os.path.exists(g_params['DGProfileFile']):
dgprofileDict = ReadInDGProfile(g_params['DGProfileFile'])
for i in range(numSeq):
seqID = idList[i]
toposeq = topoSeqList[i]
lengthAlignment = len(toposeq)
if (not seqID in dgprofileDict) and (seqID in aaSeqDict):
aaseq = aaSeqDict[seqID]
#print "aaseq=", aaseq
fd, tmpaaseqfile = tempfile.mkstemp() # mkstemp avoids the race in deprecated mktemp
os.close(fd)
fd, tmpdgpfile = tempfile.mkstemp()
os.close(fd)
tmpfp = open(tmpaaseqfile, 'w')
tmpfp.write(">%s\n"%seqID)
tmpfp.write("%s\n"%aaseq)
tmpfp.close()
cmd = [g_params['dgscanProg'], tmpaaseqfile, "-lmin", "21", "-lmax",
"21", "-o", tmpdgpfile]
cmdline = " ".join(cmd)
print(cmdline)
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
tmpDGPDict = ReadInDGProfile(tmpdgpfile)
os.remove(tmpaaseqfile)
os.remove(tmpdgpfile)
for seqid in tmpDGPDict:
dgprofileDict[seqid] = tmpDGPDict[seqid]
if g_params['isPrintDebugInfo']:
print(dgprofileDict)
#}}}
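# dgprofileDict maps seqID -> list of (position, dG) tuples as returned by
# ReadInDGProfile; below, each profile is converted to a position-keyed dict
# so dG values can be looked up per alignment column while plotting.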
ax = fig.add_subplot(gs[1])
#ax.set_xlim(0, lengthAlignmentOriginal)
ax.set_xlim(0, lengthAlignmentShrinked)
ax.set_xlabel("Alignment position", fontsize=fontsize_label, labelpad=20)
ax.set_ylabel(r"${\Delta}G$ (kJ/mol)", fontsize=fontsize_label)
plt.xticks(np.array(loc_xtics), np.array(label_xtics), fontsize=fontsize_tics)
plt.tick_params(labelsize=fontsize_tics, direction='out', pad=10)
for i in range(numSeq):
seqid = idList[i]
alignedTopoOriginal = alignedTopoSeqList[i]
align2seqMap = lcmp.GetAlign2SeqMap(alignedTopoOriginal,
alignedTopoOriginal.replace(GAP,""))
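# align2seqMap maps positions in the gapped alignment to positions in the
# ungapped sequence, so dG values (computed on the raw sequence) can be
# placed at alignment coordinates.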
#print "align2seqMap=", align2seqMap
print("posindexmap=", posindexmap)
#print "dgprofileDict[%s]"%seqid, dgprofileDict[seqid]
try:
dgp = dgprofileDict[seqid]
dt = {}
for tup in dgp:
dt[tup[0]] = tup[1]
x = []
y = []
for j in range(lengthAlignmentShrinked):
if posindexmap != {}:
try:
idxAlignedSeq = posindexmap[int(j*shrinkrate+0.5)]
except KeyError:
print("j=%d not in posindexmap"%j)
continue
else:
idxAlignedSeq = int(j*shrinkrate)
try:
idxSeq = align2seqMap[idxAlignedSeq]
except KeyError:
#print "idxAlignedSeq=%d not in align2seqMap"%idxAlignedSeq
continue
if idxSeq in dt:
x.append(j)
y.append(dt[idxSeq])
else:
# print "idxSeq=%d not in dgp, idxAlignedSeq=%d"%(idxSeq, idxAlignedSeq)
pass
if i < len(colorList_DG_profile):
color = colorList_DG_profile[i]
else:
color = 'none'
if g_params['isPrintDebugInfo']:
print("DG-x:",x)
print("DG-y:",y)
# plot by line
plt.plot(x,y, label=seqid, color=color)
# plot with '+' symbol
# plt.plot(x,y, '+', label=seqid, color=color)
plt.hlines(0, 0, lengthAlignmentShrinked)
plt.legend()
except KeyError:
print("no dgprofile for %s"%(seqid))
pass
plt.savefig(pdffile)
print("%s output"%(pdffile))
cmd = "pdfcrop --margins '%d %d %d %d' --clip %s"%(pdfcrop_margin_left,
pdfcrop_margin_top, pdfcrop_margin_right, pdfcrop_margin_bottom,
pdffile)
os.system(cmd)
pdf_cropfile = os.path.splitext(pdffile)[0]+"-crop.pdf"
pngfile = os.path.splitext(pdffile)[0] + "-crop.png"
thumb_pngfile = os.path.splitext(pdffile)[0] + "-crop.thumb.png"
cmd = ["convert", pdffile, pngfile]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
cmd = ["convert", "-thumbnail", "100", pngfile, thumb_pngfile]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print(e)
# Write Txtformat alignment
#print final2seq_idxMapList
if len(aaSeqList) == numSeq and len(aaSeqList[0]) == len(orig_topoSeqList[0]):
htmlfile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'html')
if len(idList) == 2:
tmpmapList = []
for i in range(len(alignedTopoSeqList)):
tmpmap = {}
for j in range(len(alignedTopoSeqList[i])):
tmpmap[j] = j
tmpmapList.append(tmpmap)
WriteHTMLAlignment2(idList, newAnnoList, alignedTopoSeqList,
alignedTopoSeqList, aaSeqAlignList, tmpmapList, htmlfile)
elif len(idList) > 2:
WriteHTMLAlignment3(idList, newAnnoList, topoSeqList,
alignedTopoSeqList, aaSeqList, final2seq_idxMapList, htmlfile)
#}}}
def DrawMSATopo_PYX(inFile, g_params):#{{{
(idList, annotationList, topoSeqList) = myfunc.ReadFasta(inFile)
topoSeqList = lcmp.RemoveUnnecessaryGap(topoSeqList)
numSeq = len(idList)
if numSeq < 1:
print("No sequence in the file %s. Ignore." %(inFile), file=sys.stderr)
return 1
marginX = g_params['marginX']
marginY = g_params['marginY']
annoSeqInterval = g_params['annoSeqInterval']
widthAnnotation = g_params['widthAnnotation']
heightScaleBar = g_params['heightScaleBar']
heightTMbox = g_params['heightTMbox']
scaleSeprationLine = g_params['scaleSeprationLine']
font_size_scalebar = g_params['font_size_scalebar']
fntScaleBar = g_params['fntScaleBar']
(fontWidthScaleBar, fontHeightScaleBar) = fntScaleBar.getsize("a")
rootname = os.path.basename(os.path.splitext(inFile)[0])
aaSeqDict = GetAASeqDict(inFile)
#rootname=rootname.split('.')[0]
if g_params['outpath'] == "":
outpath = myfunc.my_dirname(inFile)
else:
outpath = g_params['outpath']
str_krbias = ""
if g_params['isDrawKRBias'] == True:
str_krbias = ".krbias"
pdffile = "%s%s%s%s.%s"%(outpath, os.sep, rootname, str_krbias, 'pdf')
stemname = "%s%s%s%s"%(outpath, os.sep, rootname, str_krbias)
# posindexmap: map of the residue position to the original MSA
# e.g. pos[0] = 5 means the first residue is actually the 6th residue position
# in the original MSA
# backup original aligned topoSeqList
alignedTopoSeqList = []
posTMList = []
for seq in topoSeqList:
alignedTopoSeqList.append(seq)
posTMList.append(myfunc.GetTMPosition(seq))
posindexmap = {}
if g_params['isShrink']:
posindexmap = ShrinkGapInMSA_0(idList, topoSeqList)
posTM = myfunc.GetTMPosition(topoSeqList[0])
g_params['widthAnnotation'] = GetSizeAnnotationToDraw(annotationList)
widthAnnotation = g_params['widthAnnotation']
tagList = []
for seqAnno in annotationList:
tagList.append(GetSeqTag(seqAnno))
# print tagList
numSeprationLine = len(set(tagList))
lengthAlignment = len(topoSeqList[0])
newAnnoList = []
for i in range(len(topoSeqList)):
newAnnoList.append("%s %s"%(idList[i], tagList[i]))
maxSizeAnno = max([len(x) for x in newAnnoList])
print("maxSizeAnno=",maxSizeAnno)
fonttype = 'monospace'
x0 = 10
y0 = 10
pyx.unit.set(xscale=1)
for i in range(len(topoSeqList)):
y = y0 + (i)*20
ss = r"%-*s %s"%(maxSizeAnno+5, newAnnoList[i], topoSeqList[i])
ss = ss.replace("_", "\\_")
print(ss)
#ss = r"Boxed text"
#ss = r"%s %s"%("Boxed text new strings -- -- -----", " ".join(["one"]*20))
#ss = r"Q81HG1 TM2SEQ -- -o ---oMMMMMMMMMMMMMMMMMMMMMiiiiiMMMMMMMMMMMMMMMMMMoMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiiiiiiiiii - -- --"
#ss = r"Q63UX3 TM2GAPANDM2SEQ -M MM-MMMMMMMiiiiiiiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMoMMMMMMM-MMMMMMMMMMMMMiMMMMMMMMMMMMMMMMMMMoo- - -"
tbox = pyx.text.text(x0, y, ss)
tpath = tbox.bbox().enlarged(3*pyx.unit.x_pt).path()
c = pyx.canvas.canvas()
c.draw(tpath, [pyx.deco.filled([pyx.color.cmyk.Yellow]), pyx.deco.stroked()])
c.insert(tbox)
c.writePDFfile(stemname)
print("%s output"%(pdffile))
#}}}
def main(g_params):#{{{
logger = logging.getLogger(__name__)
argv = sys.argv
numArgv = len(argv)
if numArgv < 2:
PrintHelp()
return 1
filelist = []
filelistfile = ""
aaSeqFile = ""
i = 1
isNonOptionArg=False
while i < numArgv:#{{{
if isNonOptionArg == True:
isNonOptionArg=False
i = i + 1
elif argv[i] == "--":
isNonOptionArg=True
i = i + 1
elif argv[i][0] == "-":
if argv[i] in ["-h", "--help"]:
PrintHelp()
return 1
elif argv[i] in ["-i", "--infile"]:
filelist.append(argv[i+1])
i = i + 2
elif argv[i] in ["-l", "--l", "-list", "--list"]:
filelistfile, i = myfunc.my_getopt_str(argv,i)
elif argv[i] in ["-outpath", "--outpath"]:
(g_params['outpath'],i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-method", "--method"]:
(g_params['method'],i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-aapath", "--aapath"]:
(g_params['aapath'], i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-of" , "--of", "--outformat"]:
(g_params['outFormat'], i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-fontsize", "--fontsize"]:
(g_params['font_size'], i) = myfunc.my_getopt_int(argv, i)
elif argv[i] in ["-mode", "--mode"]:
(g_params['mode'], i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-log", "--log"]:
(g_params['log_config_file'],i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-dgpfile", "--dgpfile"]:
(g_params['DGProfileFile'],i) = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-text", "--text"]:
(tmpstr, i) = myfunc.my_getopt_str(argv, i)
if (tmpstr.lower())[0] == "y":
g_params['isDrawText'] = True
else:
g_params['isDrawText'] = False
elif argv[i] in ["-krbias", "--krbias"]:
g_params['isDrawKRBias'] = True; i = i + 1
elif argv[i] in ["-maxdistkr", "--maxdistkr"]:
(g_params['maxDistKR'], i) = myfunc.my_getopt_int(argv, i)
elif argv[i] in ["-htmlheader", "--htmlheader"]:
g_params['htmlheader'], i = myfunc.my_getopt_str(argv, i)
elif argv[i] in ["-colorhtml", "--colorhtml"]:
g_params['colorhtml'] = True; i += 1
elif argv[i] in ["-win", "--win"]:
g_params['window_size'], i = myfunc.my_getopt_int(argv, i)
elif (argv[i] in ["-sep", "--sep"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawSeprationLine'] = True
else:
g_params['isDrawSeprationLine'] = False
i = i + 2
elif (argv[i] in ["-pfm", "--pfm"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawPerMDistribution'] = True
else:
g_params['isDrawPerMDistribution'] = False
i = i + 2
elif (argv[i] in ["-pdg", "--pdg"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawDGprofile'] = True
else:
g_params['isDrawDGprofile'] = False
i = i + 2
elif (argv[i] in ["-pmsa", "--pmsa"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawMSA'] = True
else:
g_params['isDrawMSA'] = False
i = i + 2
elif (argv[i] in ["-pscale", "--pscale"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawScaleBar'] = True
else:
g_params['isDrawScaleBar'] = False
i = i + 2
elif (argv[i] in ["-ptag", "--ptag"]):
if (argv[i+1].lower())[0] == "y":
g_params['isDrawTagColumn'] = True
else:
g_params['isDrawTagColumn'] = False
i = i + 2
elif (argv[i] in ["-shrink", "--shrink"]):
if (argv[i+1].lower())[0] == "y":
g_params['isShrink'] = True
else:
g_params['isShrink'] = False
i = i + 2
elif (argv[i] in ["-shrinkrate", "--shrinkrate"]):
g_params['shrinkrate'], i = myfunc.my_getopt_float(argv, i)
elif (argv[i] in ["-shrinkrateTM", "--shrinkrateTM"]):
g_params['shrinkrate_TM'], i = myfunc.my_getopt_float(argv, i)
elif (argv[i] in ["-imagescale", "--imagescale"]):
g_params['image_scale'], i = myfunc.my_getopt_float(argv, i)
elif (argv[i] in ["-h2wratio", "--h2wratio"]):
g_params['H2W_ratio'], i = myfunc.my_getopt_float(argv, i)
elif (argv[i] in ["-max-hold-loop", "--max-hold-loop"]):
g_params['max_hold_loop'], i = myfunc.my_getopt_int(argv, i)
elif (argv[i] in ["-m-shrink", "--m-shrink"]):
g_params['method_shrink'], i = myfunc.my_getopt_int(argv, i)
elif (argv[i] in ["-autosize", "--autosize"]):
if (argv[i+1].lower())[0] == "y":
g_params['isAutoSize'] = True
else:
g_params['isAutoSize'] = False
i = i + 2
elif argv[i] in ["--aaseq", "-aaseq"]:
aaSeqFile, i = myfunc.my_getopt_str(argv, i)
elif argv[i] in["-colorTMbox", "--colorTMbox"]:
g_params['isColorWholeTMbox'] = True; i += 1
elif argv[i] in["-advtopo", "--advtopo"]:
g_params['isAdvTopo'] = True; i += 1
elif argv[i] in["-TMname", "--TMname"]:
g_params['isTMname'] = True
g_params['TMname']=argv[i+1].split(',')
i += 2
elif argv[i] in["-colorkingdom", "--colorkingdom"]:
g_params['isColorByKingdom'] = True; i += 1
elif argv[i] in["-showTMidx", "--showTMidx"]:
g_params['isShowTMIndex'] = True; i += 1
elif argv[i] in["-cleanplot", "--cleanplot"]:
g_params['makeCleanPlot'] = True; i += 1
elif argv[i] in["-showgap", "--showgap"]:
g_params['isShowGap'] = True; i += 1
elif argv[i] in["-debug", "--debug"]:
g_params['isPrintDebugInfo'] = True; i += 1
elif argv[i] == "-q":
g_params['isQuiet'] = True; i += 1
else:
print(("Error! Wrong argument:%s" % argv[i]), file=sys.stderr)
return 1
else:
filelist.append(argv[i])
i=i+1
#}}}
if g_params['isDrawDGprofile']:
g_params['dgscanProg'] = "%s/myscanDG.pl"%(rundir)
myfunc.setup_logging(g_params['log_config_file'])
logger = logging.getLogger(__name__)
if g_params['outpath'] != "" and not os.path.exists(g_params['outpath']):
os.system("mkdir -p %s" % g_params['outpath'])
if g_params['method'].lower()[0:2] == "sv":
g_params['method'] = 'svg'
g_params['outFormat'] = 'svg'
elif g_params['method'].lower()[0:2] == 'ma':
g_params['method'] = 'mat'
g_params['outFormat'] = 'pdf'
elif g_params['method'].lower() == 'core-rainbow':
g_params['method'] = 'core-rainbow'
g_params['outFormat'] = 'pdf'
elif g_params['method'].lower()[0:2] == 'py':
g_params['method'] = 'pyx'
g_params['outFormat'] = 'pdf'
else:
g_params['method'] = 'pil'
logger.debug("font_dir = %s"%(g_params['font_dir']))
if filelistfile != "":
try:
fp = open(filelistfile,"r")
filelist += fp.read().split()
fp.close()
except IOError:
print("file %s does not exist." %(filelistfile), file=sys.stderr)
if len(filelist) < 1:
print("Error! Input file not set.", file=sys.stderr)
return 1
g_params['fntScaleBar'] = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], g_params['font_size_scalebar'])
g_params['fntTMbox'] = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], g_params['font_size_TMbox'])
g_params['fntDGprofileLegend'] = ImageFont.truetype(g_params['font_dir'] +
g_params['font'], g_params['font_size_scalebar'])
if aaSeqFile != "" and os.path.exists(aaSeqFile):
(idList, aaSeqList) = myfunc.ReadFasta_without_annotation(aaSeqFile)
dd = {}
for i in range(len(idList)):
dd[idList[i]] = aaSeqList[i].replace("-", "") # gapless aaseq
g_params['aaSeqDict'] = dd
if g_params['isDrawKRBias'] and len(aaSeqList) < 1:
print("aaSeq must be set when krbias is enabled", file=sys.stderr)
return 1
logger.debug("method=%s"%(g_params['method']))
for inFile in filelist:
if g_params['method'] == 'pil':
DrawMSATopo_PIL(inFile, g_params)
elif g_params['method'] == 'svg':
DrawMSATopo_SVG(inFile, g_params)
elif g_params['method'] == 'mat':
#DrawMSATopo_MAT(inFile, g_params)
DrawMSATopo_MAT2(inFile, g_params)
elif g_params['method'] == 'core-rainbow':
DrawMSATopo_MAT_Core_unalign_rainbow(inFile, g_params)
elif g_params['method'] == 'matfull':
DrawMSATopo_MAT(inFile, g_params)
elif g_params['method'] == 'pyx':
DrawMSATopo_PYX(inFile, g_params)
return 0
#}}}
def InitGlobalParameter():#{{{
g_params = {}
g_params['outpath'] = ""
g_params['isQuiet'] = False
g_params['outFormat'] = "png"
g_params['mode'] = "P"
g_params['image_scale'] = None # overall image scale; set it to a higher value to increase the DPI
g_params['font_dir'] = "%s/../fonts/truetype/ttf-dejavu/"%(rundir)
g_params['font_size'] = 16
# font size for scale bar always set to 11 for easy reading
g_params['font_size_scalebar'] = 11
g_params['heightTMbox'] = 3 # number of lines for the TM helices box
g_params['font_size_TMbox'] = 36 # font size for text written in the TM box
g_params['font'] = "DejaVuSansMono.ttf"
g_params['isAutoSize'] = True
g_params['isDrawSeprationLine'] = True
g_params['isDrawText'] = True
g_params['isDrawKRBias'] = False
g_params['DGProfileFile'] = ''
#draw distribution of percentage of M
g_params['isDrawPerMDistribution'] = True
g_params['isDrawDGprofile'] = False
g_params['isDrawTagColumn'] = False
g_params['maxDistKR'] = 12
g_params['isShrink'] = True
g_params['aapath'] = ""
g_params['MAXIMAGESIZE'] = 50*1024*1024 # 50M in pixels
g_params['marginX'] = 20 # marginX in pixels
g_params['marginY'] = 50 # marginY in pixels
g_params['H2W_ratio'] = None # set the height/width ratio for PIL image
# number of columns for the annotation text
g_params['widthAnnotation'] = 30
# number of columns between the annotation and alignment
g_params['annoSeqInterval'] = 4
# number of lines for the scale bar
g_params['heightScaleBar'] = 3
g_params['scaleSeprationLine'] = 1
g_params['GAP'] = "-"
g_params['aaSeqDict'] = {}
g_params['window_size'] = 70
g_params['htmlheader'] = ""
g_params['colorhtml'] = False
g_params['method_shrink'] = 1
g_params['isColorWholeTMbox'] = False
g_params['isAdvTopo'] = False
g_params['isShowGap'] = False
g_params['isTMname'] = False
g_params['TMname'] = []
g_params['isColorByKingdom'] = False
g_params['isShowTMIndex'] = False
g_params['shrinkrate'] = None # shrink the alignment proportionally
g_params['shrinkrate_TM'] = 2.0 # shrink the aligned TM region proportionally
g_params['max_hold_loop'] = 12 # maximal positions to keep for the loop region when doing shrinking
g_params['log_config_file'] = "%s/default_log.yml"%(rundir)
g_params['logger'] = None
g_params['dgscanProg'] = ""
g_params['makeCleanPlot'] = False
g_params['pdfcrop_margin_left'] = 20
g_params['pdfcrop_margin_top'] = 5
g_params['pdfcrop_margin_right'] = 10
g_params['pdfcrop_margin_bottom'] = 5
g_params['MAX_SIZE_ANNOTATION'] = 30 # maximum width of annotation
g_params['memcolor_out_to_in'] = "#DCDCDC" #very light grey, type = M
g_params['memcolor_in_to_out'] = "#808080" #grey , type = W
g_params['memcolor_out_to_in_MSA'] = "#FF6666" # light red, type M
g_params['memcolor_in_to_out_MSA'] = "#CC0000" # dark red, type W
#g_params['loopcolor_in'] = "#FFBFB3" # light red
g_params['loopcolor_in'] = "#FFFF00" # yellow
g_params['loopcolor_in_MSA'] = "#F2EABD" # yellow
#g_params['loopcolor_out'] = "#87CEFA" # light sky blue
g_params['loopcolor_out'] = "#3399FF" # blue
g_params['loopcolor_out_MSA'] = "#CCFFFF" # faded blue
g_params['spcolor'] = "#000000" # signal peptide, black
g_params['method'] = 'pil' #pyx
g_params['isPrintDebugInfo'] = False
g_params['isDrawMSA'] = True
g_params['isDrawScaleBar'] = True
g_params['isDrawDGProfileLegend'] = False
return g_params
#}}}
if __name__ == '__main__':
outpath_log = "logs"
if not os.path.exists(outpath_log):
os.makedirs(outpath_log)
g_params = InitGlobalParameter()
sys.exit(main(g_params))
#cProfile.run("main()")
# Check argv
|
"""
Provides a suite of special data configurations for testing segmented
regression.
Notation: geographic directions (N, S, E, W) indicate where the solution
lies on a square of the breakpoint search grid (cf. the function names below).
"""
# Author: Steven Lillywhite
# License: BSD 3 clause
from collections import namedtuple
from matplotlib import pyplot as plt
import numpy as np
from segreg.analysis import stats_plotting
from segreg.model import two_bkpt_segreg
from segreg.model import segreg_util
TwoBkptExample = namedtuple("TwoBkptExample", ["indep",
"dep",
"num_end_to_skip",
"num_between_to_skip",
"params",
"rss"])
def _plot(indep, dep, num_end_to_skip, num_between_to_skip):
(min_params,
min_value) = two_bkpt_segreg.estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip)
func = segreg_util.two_bkpt_segmented_func(*min_params)
stats_plotting.plot_models(func_arr=[func],
indep=indep,
dep=dep,
extra_pts_arr=[[min_params[0], min_params[2]]],
full_size_scatter=True)
plt.ylim([min(dep) - 2, max(dep) + 2])
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
def _create_muliple_y(indep_core, dep_core):
indep = [[x, x, x] for x in indep_core]
indep = np.array(indep, dtype=float)
indep = indep.flatten()
dep = [[y - 1, y, y + 1] for y in dep_core]
dep = np.array(dep, dtype=float)
dep = dep.flatten()
return indep, dep
def corner_NW_square_NW(multiple_y=False, plot=False):
"""
Solution at corner (u1, u2_next) of the most upper-left square.
rss = residual sum of squares
"""
indep = np.arange(9)
num_distinct_indep = len(indep)
dep = [1, 0, 0, 0, 0, 0, 0, 0, 1]
params = [1.0, 0.0, 7.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def corner_SE_square_SW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
dep = [2, 1, 0, 0, 0, 1, 2, 3, 4]
params = [2.0, 0.0, 4.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def corner_NE_square_NE(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
dep = [4, 3, 2, 1, 0, 0, 0, 0, 1]
params = [4.0, 0.0, 7.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def corner_NE_square_NW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
dep = [2, 1, 0, 0, 0, 0, 0, 0, 1]
params = [2.0, 0.0, 7.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_W_square_NW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 6.5
def line(x):
return x - x0
dep = [1, 0, 0, 0, 0, 0, 0, line(7), line(8)]
params = [1.0, 0.0, x0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_E_square_NW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 6.5
def line(x):
return x - x0
dep = [2, 1, 0, 0, 0, 0, 0, line(7), line(8)]
params = [2.0, 0.0, x0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_E_square_SW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 4.5
def line(x):
return x - x0
dep = [2, 1, 0, 0, 0, line(5), line(6), line(7), line(8)]
params = [2.0, 0.0, x0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_E_square_NE(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 6.5
def line(x):
return x - x0
dep = [4, 3, 2, 1, 0, 0, 0, line(7), line(8)]
params = [4.0, 0.0, x0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_S_square_SW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 1.5
def line(x):
return x0 - x
dep = [line(0), line(1), 0, 0, 0, 1, 2, 3, 4]
params = [x0, 0.0, 4.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_N_square_SW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 1.5
def line(x):
return x0 - x
dep = [line(0), line(1), 0, 0, 0, 0, 1, 2, 3]
params = [x0, 0.0, 5.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def side_N_square_NW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
x0 = 1.5
def line(x):
return x0 - x
dep = [line(0), line(1), 0, 0, 0, 0, 0, 0, 1]
params = [x0, 0.0, 7.0, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def interior_square_NW(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
u1 = 1.5
def line_left(x):
return u1 - x
u2 = 6.5
def line_right(x):
return x - u2
dep = [line_left(0), line_left(1), 0, 0, 0, 0, 0, line_right(7), line_right(8)]
params = [u1, 0.0, u2, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
def interior_square_NE(multiple_y=False, plot=False):
indep = np.arange(9)
num_distinct_indep = len(indep)
u1 = 3.5
def line_left(x):
return u1 - x
u2 = 6.5
def line_right(x):
return x - u2
dep = [line_left(0), line_left(1), line_left(2), line_left(3), 0, 0, 0, line_right(7), line_right(8)]
params = [u1, 0.0, u2, 0.0, -1.0, 1.0]
rss = 0.0
if multiple_y:
indep, dep = _create_muliple_y(indep, dep)
rss = 2.0 * num_distinct_indep
# ensure float arrays
indep = np.array(indep, dtype=float)
dep = np.array(dep, dtype=float)
num_end_to_skip = 0
num_between_to_skip = 3
if plot:
_plot(indep, dep, num_end_to_skip, num_between_to_skip)
return TwoBkptExample(indep=indep,
dep=dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
params=params,
rss=rss)
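# A minimal sketch of how a test might consume one of these fixtures; the
# helper name and tolerance below are illustrative assumptions, not part of
# the original suite.
def _check_example(example):
    # fit the two-bkpt model on the fixture data
    (min_params,
     min_value) = two_bkpt_segreg.estimate_two_bkpt_segreg(
        example.indep,
        example.dep,
        num_end_to_skip=example.num_end_to_skip,
        num_between_to_skip=example.num_between_to_skip)
    # compare against the known solution and residual sum of squares
    np.testing.assert_allclose(min_params, example.params, atol=1e-8)
    np.testing.assert_allclose(min_value, example.rss, atol=1e-8)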
if __name__ == "__main__":
interior_square_NE(plot=True, multiple_y=False)
|
import sys
import os
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, "../"))
import json
import random
from collections import deque,defaultdict,namedtuple
import numpy as np
from square_puzzle import SquarePuzzle
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder,StandardScaler
from q_learning_agent import QLearningAgent
from q_learning_nn_agent import QLearningNnAgent
from trainer_base import TrainerBase
import pickle
Experience = namedtuple("Experience",["state", "action", "reward", "next_state", "done"])
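# Experience is one replay-buffer transition: the agent observed `state`,
# took `action`, received `reward`, and landed in `next_state`; `done`
# marks the end of an episode.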
class SquarePuzzleTrainer(TrainerBase):
def train_loop(self,env,agent,episode=200):
self.experiences = deque(maxlen=self.buffer_size)
self.reward_log = []
self.training = False
shuffle_count = 3
win_ratio = 0
reward_ary = deque(maxlen=50)
for i in range(episode):
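# curriculum: once the recent win ratio clears 0.9, reset the reward
# window and shuffle the puzzle one step deeper; note win_ratio divides
# by the fixed window size (50), so it stays conservative until the
# deque has filled.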
if win_ratio > 0.9:
reward_ary = deque(maxlen=50)
shuffle_count += 1
state = env.reset(shuffle_count)
done = False
while not done:
action = agent.policy(state)
next_state, reward, done, info = env.step(action)
e = Experience(state, action, reward, next_state, done)
self.experiences.append(e)
if not self.training and len(self.experiences) == self.buffer_size:
agent.initialize(list(self.experiences)[:1])
self.training = True
self.step(agent)
state = next_state
if info["step_count"]>=shuffle_count*2:break
reward_ary.append(reward)
win_ratio = sum(reward_ary)/50
print(i,reward,shuffle_count,info["step_count"],self.training)
#if shuffle_count>=5:return
def step(self,agent):
if self.training :
batch = random.sample(self.experiences, self.batch_size)
agent.update(batch, self.gamma)
def train():
trainer = SquarePuzzleTrainer(buffer_size=1,batch_size=1,gamma=0.95)
env = SquarePuzzle()
#agent = QLearningAgent(epsilon=0.1,actions=env.actions)
agent = QLearningNnAgent(epsilon=0.1,actions=env.actions)
trainer.train_loop(env,agent,episode=100000)
if __name__ == "__main__":
train()
|
import datetime
from django.test import TestCase
from django.urls import reverse
from ..models import EventCodeAccess
TESTCODE = "testcode"
BADCODE = "fakefake"
CREATED_AT = "2019-12-10 23:25"
EXPECTED_CREATED_AT = datetime.datetime.strptime(CREATED_AT, "%Y-%m-%d %H:%M")
class TestViewsWithActiveEvent(TestCase):
def setUp(self):
self.index_url = "mercury:index"
self.login_url = "mercury:EventAccess"
test_code = EventCodeAccess(event_code="testcode", enabled=True)
test_code.save()
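# Helper flow below: GET primes the session (checks whether an event is
# active), POST submits the event code, then the target url is requested.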
def _get_with_event_code(self, url, event_code):
self.client.get(reverse(self.login_url))
self.client.post(reverse(self.login_url), data={"eventcode": event_code})
response = self.client.get(reverse(url))
session = self.client.session
return response, session
def test_HomePageView_GET_fail(self):
response, session = self._get_with_event_code(self.index_url, BADCODE)
self.assertEqual(302, response.status_code)
self.assertEqual("/", response.url)
self.assertEqual(True, session["event_code_active"])
self.assertEqual(False, session["event_code_known"])
def test_HomePageView_GET_success(self):
response, session = self._get_with_event_code(self.index_url, TESTCODE)
self.assertEqual(200, response.status_code)
self.assertEqual(True, session["event_code_active"])
self.assertEqual(True, session["event_code_known"])
class TestViewsWithoutActiveEvent(TestCase):
def setUp(self):
self.index_url = "mercury:index"
self.login_url = "mercury:EventAccess"
# Calling GET against login_url is necessary to check for an event
self.client.get(reverse(self.login_url))
def test_HomePageView_GET(self):
response = self.client.get(reverse(self.index_url))
self.assertEqual(200, response.status_code)
self.assertTemplateUsed("index.html")
class TestLogout(TestCase):
def setUp(self):
self.login_url = "mercury:EventAccess"
self.logout_url = "mercury:logout"
test_code = EventCodeAccess(event_code="testcode", enabled=True)
test_code.save()
def _get_with_event_code(self, url):
self.client.get(reverse(self.login_url))
self.client.post(reverse(self.login_url), data={"eventcode": "testcode"})
response = self.client.get(reverse(url))
session = self.client.session
return response, session
def test_logout_after_login(self):
response, session = self._get_with_event_code(self.logout_url)
self.assertEqual(302, response.status_code)
self.assertEqual("/", response.url)
self.assertNotIn("event_code_active", session)
self.assertNotIn("event_code_known", session)
def test_logout_without_login(self):
response = self.client.get(reverse(self.logout_url))
session = self.client.session
self.assertEqual(302, response.status_code)
self.assertEqual("/", response.url)
self.assertNotIn("event_code_active", session)
self.assertNotIn("event_code_known", session)
class TestEventAccessDisabled(TestCase):
def setUp(self):
self.login_url = "mercury:EventAccess"
test_code = EventCodeAccess(event_code="testcode", enabled=False)
test_code.save()
def test_active_event_get(self):
response = self.client.get(reverse(self.login_url))
session = self.client.session
self.assertEqual(302, response.status_code)
self.assertEqual(False, session["event_code_active"])
class TestEventAlreadyLoggedIn(TestCase):
def setUp(self):
self.login_url = "mercury:EventAccess"
test_code = EventCodeAccess(event_code="testcode", enabled=True)
test_code.save()
def test_bypass_login(self):
self.client.get(reverse(self.login_url))
self.client.post(reverse(self.login_url), data={"eventcode": "testcode"})
response = self.client.get(reverse(self.login_url))
self.assertEqual(302, response.status_code)
self.assertEqual("index", response.url)
class TestViewsWithoutCheckingEvent(TestCase):
def setUp(self):
self.index_url = "mercury:index"
def test_HomePageView_GET(self):
response = self.client.get(reverse(self.index_url))
self.assertEqual(302, response.status_code)
|
import json
from numpy.testing import assert_almost_equal, assert_equal
import numpy as np
from obspy import read, UTCDateTime
import pytest
from geomagio.algorithm.FilterAlgorithm import FilterAlgorithm, get_nearest_time
import geomagio.iaga2002 as i2
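# Each test below reads raw miniSEED input from etc/filter/ and compares the
# filtered output against IAGA2002 reference files; the commented snippets in
# each test record how the input fixtures were generated.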
def test_second():
"""algorithm_test.FilterAlgorithm_test.test_second()
Tests algorithm for 10Hz to second.
"""
f = FilterAlgorithm(input_sample_period=0.1, output_sample_period=1)
# generation of 10HZ_filter_sec.mseed
# starttime = UTCDateTime('2020-01-06T00:00:00Z')
# endtime = UTCDateTime('2020-01-06T04:00:00Z')
# m = MiniSeedFactory(port=2061, host='...',
# convert_channels=['U', 'V', 'W'],
# bin_conv=500, volt_conv=100)
# f = FilterAlgorithm(input_sample_period=0.1,
# output_sample_period=1.0)
# starttime, endtime = f.get_input_interval(starttime,endtime)
# LLO_raw = m.get_timeseries(observatory='LLO',
# starttime=starttime,endtime=endtime,
# channels=['U_Volt', 'U_Bin', 'V_Volt',
# 'V_Bin', 'W_Volt', 'W_Bin'],
# interval='tenhertz', type='variation')
# LLO_raw.write('10HZ_filter_sec.mseed')
llo = read("etc/filter/10HZ_filter_sec.mseed")
filtered = f.process(llo)
with open("etc/filter/LLO20200106vsec.sec", "r") as f:
iaga = i2.StreamIAGA2002Factory(stream=f)
LLO = iaga.get_timeseries(starttime=None, endtime=None, observatory="LLO")
u = LLO.select(channel="U")[0]
v = LLO.select(channel="V")[0]
w = LLO.select(channel="W")[0]
u_filt = filtered.select(channel="U")[0]
v_filt = filtered.select(channel="V")[0]
w_filt = filtered.select(channel="W")[0]
assert_almost_equal(u_filt.data, u.data, 2)
assert_almost_equal(v_filt.data, v.data, 2)
assert_almost_equal(w_filt.data, w.data, 2)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-06T00:00:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-06T04:00:00Z"))
assert_equal(u_filt.stats.data_interval, "second")
assert_equal(u_filt.stats.data_interval_type, "1-second")
def test_minute():
"""algorithm_test.FilterAlgorithm_test.test_minute()
Tests algorithm for 10Hz to minute.
"""
f = FilterAlgorithm(input_sample_period=0.1, output_sample_period=60.0)
# generation of 10HZ_filter_min.mseed
# starttime = UTCDateTime('2020-01-06T00:00:00Z')
# endtime = UTCDateTime('2020-01-06T04:00:00Z')
# m = MiniSeedFactory(port=2061, host='...',
# convert_channels=['U', 'V', 'W'])
# f = FilterAlgorithm(input_sample_period=0.1,
# output_sample_period=60.0)
# starttime, endtime = f.get_input_interval(starttime,endtime)
# LLO = m.get_timeseries(observatory='LLO',
# starttime=starttime,endtime=endtime,
# channels=['U_Volt', 'U_Bin', 'V_Volt',
# 'V_Bin', 'W_Volt', 'W_Bin'],
# interval='tenhertz', type='variation')
# LLO.write('10HZ_filter_min.mseed')
llo = read("etc/filter/10HZ_filter_min.mseed")
filtered = f.process(llo)
with open("etc/filter/LLO20200106vmin.min", "r") as f:
iaga = i2.StreamIAGA2002Factory(stream=f)
LLO = iaga.get_timeseries(starttime=None, endtime=None, observatory="LLO")
u = LLO.select(channel="U")[0]
v = LLO.select(channel="V")[0]
w = LLO.select(channel="W")[0]
u_filt = filtered.select(channel="U")[0]
v_filt = filtered.select(channel="V")[0]
w_filt = filtered.select(channel="W")[0]
assert_almost_equal(u_filt.data, u.data, 2)
assert_almost_equal(v_filt.data, v.data, 2)
assert_almost_equal(w_filt.data, w.data, 2)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-06T00:00:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-06T04:00:00Z"))
assert_equal(filtered[0].stats.data_interval, "minute")
assert_equal(filtered[0].stats.data_interval_type, "1-minute")
def test_hour():
"""algorithm_test.FilterAlgorithm_test.test_hour()
Tests algorithm for 1min to hour.
"""
f = FilterAlgorithm(input_sample_period=60.0, output_sample_period=3600.0)
# generation of hor_filter_min.mseed
# starttime = UTCDateTime("2020-08-31T00:00:00Z")
# endtime = UTCDateTime("2020-08-31T03:00:00Z")
# e = EdgeFactory()
# f = FilterAlgorithm(input_sample_period=60.0,
# output_sample_period=3600.0)
# starttime, endtime = f.get_input_interval(starttime,endtime)
# BOU = e.get_timeseries(observatory='BOU',
# starttime=starttime,endtime=endtime,
# channels=["H", "E", "Z", "F"],
# interval="minute", type='variaton')
# LLO.write('hour_filter_min.mseed')
bou = read("etc/filter/hor_filter_min.mseed")
filtered = f.process(bou)
with open("etc/filter/BOU20200831vhor.hor", "r") as f:
iaga = i2.StreamIAGA2002Factory(stream=f)
BOU = iaga.get_timeseries(starttime=None, endtime=None, observatory="BOU")
h = BOU.select(channel="H")[0]
e = BOU.select(channel="E")[0]
z = BOU.select(channel="Z")[0]
f = BOU.select(channel="F")[0]
h_filt = filtered.select(channel="H")[0]
e_filt = filtered.select(channel="E")[0]
z_filt = filtered.select(channel="Z")[0]
f_filt = filtered.select(channel="F")[0]
assert_almost_equal(h_filt.data, h.data, 2)
assert_almost_equal(e_filt.data, e.data, 2)
assert_almost_equal(z_filt.data, z.data, 2)
assert_almost_equal(f_filt.data, f.data, 2)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-08-31T00:29:30"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-08-31T03:29:30"))
assert_equal(filtered[0].stats.data_interval, "hour")
assert_equal(filtered[0].stats.data_interval_type, "1-hour (00-59)")
def test_day():
"""algorithm_test.FilterAlgorithm_test.test_hour()
Tests algorithm for 1min to day.
"""
f = FilterAlgorithm(input_sample_period=60.0, output_sample_period=86400.0)
# generation of day_filter_min.mseed
# starttime = UTCDateTime("2020-08-27T00:00:00Z")
# endtime = UTCDateTime("2020-08-30T00:00:00Z")
# e = EdgeFactory()
# f = FilterAlgorithm(input_sample_period=60.0,
# output_sample_period=86400.0)
# starttime, endtime = f.get_input_interval(starttime,endtime)
# BOU = e.get_timeseries(observatory='BOU',
# starttime=starttime,endtime=endtime,
# channels=["H", "E", "Z", "F"],
# interval="minute", type='variaton')
# LLO.write('day_filter_min.mseed')
bou = read("etc/filter/day_filter_min.mseed")
filtered = f.process(bou)
with open("etc/filter/BOU20200831vday.day", "r") as f:
iaga = i2.StreamIAGA2002Factory(stream=f)
BOU = iaga.get_timeseries(starttime=None, endtime=None, observatory="BOU")
h = BOU.select(channel="H")[0]
e = BOU.select(channel="E")[0]
z = BOU.select(channel="Z")[0]
f = BOU.select(channel="F")[0]
h_filt = filtered.select(channel="H")[0]
e_filt = filtered.select(channel="E")[0]
z_filt = filtered.select(channel="Z")[0]
f_filt = filtered.select(channel="F")[0]
assert_almost_equal(h_filt.data, h.data, 2)
assert_almost_equal(e_filt.data, e.data, 2)
assert_almost_equal(z_filt.data, z.data, 2)
assert_almost_equal(f_filt.data, f.data, 2)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-08-27T11:59:30"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-08-30T11:59:30"))
assert_equal(filtered[0].stats.data_interval, "day")
assert_equal(filtered[0].stats.data_interval_type, "1-day (00:00-23:59)")
def test_custom():
"""algorithm_test.FilterAlgorithm_test.test_custom()
Tests algorithm for 10Hz to second with custom filter coefficients.
"""
f = FilterAlgorithm(
input_sample_period=0.1,
output_sample_period=1.0,
coeff_filename="etc/filter/coeffs.json",
)
# generation of 10HZ_filter_sec.mseed
# starttime = UTCDateTime('2020-01-06T00:00:00Z')
# endtime = UTCDateTime('2020-01-06T04:00:00Z')
# m = MiniSeedFactory(port=2061, host='...',
# convert_channels=['U', 'V', 'W'])
# f = FilterAlgorithm(input_sample_period=0.1,
# output_sample_period=1.0)
# starttime, endtime = f.get_input_interval(starttime,endtime)
# LLO = m.get_timeseries(observatory='LLO',
# starttime=starttime,endtime=endtime,
# channels=['U_Volt', 'U_Bin', 'V_Volt',
# 'V_Bin', 'W_Volt', 'W_Bin'],
# interval='tenhertz', type='variation')
# LLO.write('10HZ_filter_sec.mseed')
llo = read("etc/filter/10HZ_filter_sec.mseed")
filtered = f.process(llo)
with open("etc/filter/LLO20200106_custom_vsec.sec", "r") as f:
iaga = i2.StreamIAGA2002Factory(stream=f)
LLO = iaga.get_timeseries(starttime=None, endtime=None, observatory="LLO")
u = LLO.select(channel="U")[0]
v = LLO.select(channel="V")[0]
w = LLO.select(channel="W")[0]
u_filt = filtered.select(channel="U")[0]
v_filt = filtered.select(channel="V")[0]
w_filt = filtered.select(channel="W")[0]
assert_almost_equal(u_filt.data, u.data, 2)
assert_almost_equal(v_filt.data, v.data, 2)
assert_almost_equal(w_filt.data, w.data, 2)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-06T00:00:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-06T04:00:00Z"))
assert_equal(filtered[0].stats.data_interval, "second")
assert_equal(filtered[0].stats.data_interval_type, "filtered custom interval")
def test_starttime_shift():
"""algorithm_test.FilterAlgorithm_test.test_starttime_shift()
Tests algorithm for second to minute with misaligned starttime (16 seconds).
"""
f = FilterAlgorithm(input_sample_period=1.0, output_sample_period=60.0)
# generation of BOU20200101vsec.sec
# starttime = UTCDateTime('2020-01-01T00:00:00Z')
# endtime = UTCDateTime('2020-01-01T00:15:00Z')
# bou = e.get_timeseries(observatory='BOU',interval='second',type='variation',starttime=starttime,endtime=endtime,channels=["H","E","Z","F"])
# with open('BOU20200101vsec.sec','wb') as file:
# i2w.write(out=file,timeseries=bou,channels=["H","E","Z","F"])
with open("etc/filter/BOU20200101vsec.sec", "r") as file:
iaga = i2.StreamIAGA2002Factory(stream=file)
bou = iaga.get_timeseries(starttime=None, endtime=None, observatory="BOU")
# check initial assumptions
assert_equal(bou[0].stats.starttime, UTCDateTime("2020-01-01T00:00:00Z"))
assert_equal(bou[0].stats.endtime, UTCDateTime("2020-01-01T00:15:00Z"))
# filter should center on minute
filtered = f.process(bou)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-01T00:01:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-01T00:14:00Z"))
# remove unneeded data, and verify filter works with exactly the right data
precise = bou.trim(
starttime=UTCDateTime("2020-01-01T00:00:15Z"),
endtime=UTCDateTime("2020-01-01T00:14:45Z"),
)
filtered = f.process(precise)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-01T00:01:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-01T00:14:00Z"))
# remove one extra sample (filter no longer has enough to generate first/last)
trimmed = bou.trim(
starttime=UTCDateTime("2020-01-01T00:00:16Z"),
endtime=UTCDateTime("2020-01-01T00:14:44Z"),
)
filtered = f.process(trimmed)
assert_equal(filtered[0].stats.starttime, UTCDateTime("2020-01-01T00:02:00Z"))
assert_equal(filtered[0].stats.endtime, UTCDateTime("2020-01-01T00:13:00Z"))
def test_align_trace():
"""algorithm_test.FilterAlgorithm_test.test_align_trace()
Tests algorithm for minute to hour with expected behavior, trailing samples, and missing samples
"""
f = FilterAlgorithm(input_sample_period=60.0, output_sample_period=3600.0)
bou = read("etc/filter/hor_filter_min.mseed")
step = f.get_filter_steps()[0]
# check initial assumptions
starttime, _ = f.align_trace(step, bou[0])
assert_equal(starttime, UTCDateTime("2020-08-31T00:29:30"))
# check for filtered product producing the correct interval with trailing samples
trimmed = bou.copy().trim(
starttime=UTCDateTime("2020-08-31T01:00:00"),
endtime=UTCDateTime("2020-08-31T02:04:00"),
)
starttime, _ = f.align_trace(step, trimmed[0])
assert_equal(starttime, UTCDateTime("2020-08-31T01:29:30"))
# test for skipped sample when not enough data is given for first interval
trimmed = bou.copy().trim(
starttime=UTCDateTime("2020-08-31T01:30:00"), endtime=bou[0].stats.endtime
)
starttime, _ = f.align_trace(step, trimmed[0])
assert_equal(starttime, UTCDateTime("2020-08-31T02:29:30"))
def test_get_nearest__oneday_average():
"""algorithm_test.FilterAlgorithm_test.test_get_nearest__oneday_average()
Tests get_nearest_time for minute to day
"""
f = FilterAlgorithm(input_sample_period=60.0, output_sample_period=86400.0)
step = f.get_filter_steps()[0]
time = UTCDateTime("2020-08-20T01:00:00")
aligned = get_nearest_time(step=step, output_time=time)
# filter is average for day, should be first/last minute samples of 2020-08-20
assert_equal(aligned["data_start"], UTCDateTime("2020-08-20T00:00:00"))
assert_equal(aligned["time"], UTCDateTime("2020-08-20T11:59:30"))
assert_equal(aligned["data_end"], UTCDateTime("2020-08-20T23:59:00"))
def test_get_nearest__intermagnet_minute():
"""algorithm_test.FilterAlgorithm_test.test_get_nearest__intermagnet_minute()
Tests get_nearest_time for second to minute
"""
f = FilterAlgorithm(input_sample_period=1.0, output_sample_period=60.0)
step = f.get_filter_steps()[0]
time = UTCDateTime("2020-08-20T01:00:13")
aligned = get_nearest_time(step=step, output_time=time)
# filter uses 91 samples, should be 01:00:00 +/- 45 seconds
assert_equal(aligned["data_start"], UTCDateTime("2020-08-20T00:59:15"))
assert_equal(aligned["time"], UTCDateTime("2020-08-20T01:00:00"))
assert_equal(aligned["data_end"], UTCDateTime("2020-08-20T01:00:45"))
def test_validate_step():
"""algorithm_test.FilterAlgorithm_test.test_validate_steps()
Validates algorithm steps 10 Hz to second with custom coefficients.
"""
with open("etc/filter/coeffs.json", "rb") as f:
step = json.loads(f.read())
f = FilterAlgorithm()
numtaps = len(step["window"])
half = numtaps // 2
# check initial assumption
assert_equal(numtaps % 2, 1)
f._validate_step(step)
# expect step to raise a value error when window has an even length
step = {
"window": np.delete(step["window"], numtaps // 2, 0),
"type": "firfilter",
}
assert_equal(len(step["window"]) % 2, 0)
with pytest.raises(ValueError):
f._validate_step(step)
|
__all__ = ['P0DCutUtils',
'Base',
'makePlots',
'submit_analysis',
'highland_configurations',
'efficiency',
'constants']
|
#
# PySNMP MIB module HUAWEI-VO-GK-CLIENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-VO-GK-CLIENT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:49:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
voice, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "voice")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, ModuleIdentity, Unsigned32, MibIdentifier, ObjectIdentity, Gauge32, Counter32, NotificationType, iso, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ModuleIdentity", "Unsigned32", "MibIdentifier", "ObjectIdentity", "Gauge32", "Counter32", "NotificationType", "iso", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hwVoiceGKClientMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8))
hwVoiceGKClientMIB.setRevisions(('2004-04-08 13:45',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hwVoiceGKClientMIB.setRevisionsDescriptions(('',))
if mibBuilder.loadTexts: hwVoiceGKClientMIB.setLastUpdated('200410200000Z')
if mibBuilder.loadTexts: hwVoiceGKClientMIB.setOrganization('Huawei-3COM Technologies Co., Ltd.')
if mibBuilder.loadTexts: hwVoiceGKClientMIB.setContactInfo('PLAT Team Huawei 3Com Technologies co.,Ltd. Shang-Di Information Industry Base, Hai-Dian District Beijing P.R. China http://www.huawei-3com.com Zip:100085')
if mibBuilder.loadTexts: hwVoiceGKClientMIB.setDescription(' ')
hwVoGKClientObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1))
hwVoRasOn = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoRasOn.setStatus('current')
if mibBuilder.loadTexts: hwVoRasOn.setDescription('This object expresses whether or not the GKClient function of this gateway is enabled. Before hwVoGwIPAddress and hwVoH323GKID are set, this object cannot be set to enable.')
hwVoH323InterfaceIndex = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323InterfaceIndex.setStatus('obsolete')
if mibBuilder.loadTexts: hwVoH323InterfaceIndex.setDescription('The index of the interface through which the gateway is connected to the GateKeeper.')
hwVoGwIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoGwIPAddress.setStatus('current')
if mibBuilder.loadTexts: hwVoGwIPAddress.setDescription('This object expresses the IP address of the interface through which the gateway is connected to the GateKeeper.')
hwVoH323GWID = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GWID.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GWID.setDescription('This object expresses the ID of this H323 gateway .')
hwVoH323GWSupportMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("nonstandard-compatible", 1), ("huawei", 2))).clone('nonstandard-compatible')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GWSupportMode.setStatus('obsolete')
if mibBuilder.loadTexts: hwVoH323GWSupportMode.setDescription('This object expresses the type of GateKeeper to which this H323 gateway is connected.')
hwVoH323GWAreaID = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 960))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GWAreaID.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GWAreaID.setDescription("This object expresses the technology prefixes of this gateway so that the GateKeeper can validate the type of gateway. Up to thirty prefixes can be configured on one gateway. In this object every prefix is separated by a ';'. A single prefix may contain only the characters '0' to '9' and '#', and its length is limited to 31.")
hwVoH323GKID = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKID.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKID.setDescription('This object expresses the ID of the GateKeeper.')
hwVoH323GKIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKIPAddress.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKIPAddress.setDescription('This object expresses the IP address of the GateKeeper.')
hwVoH323GKPort = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKPort.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKPort.setDescription('This object expresses the RAS communication port of the GateKeeper.')
hwVoH323GK2ID = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GK2ID.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GK2ID.setDescription('This object expresses the ID of the Slave GateKeeper.')
hwVoH323GK2IPAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 11), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GK2IPAddress.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GK2IPAddress.setDescription('This object expresses the IP address of the Slave GateKeeper.')
hwVoH323GK2Port = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GK2Port.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GK2Port.setDescription('This object expresses the RAS communication port of the Slave GateKeeper.')
hwVoH323GKSecurityCall = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKSecurityCall.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKSecurityCall.setDescription('Enable/disable the call-level security capability.')
hwVoH323GKSecurityPWDType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cipher", 1), ("simple", 2))).clone('simple')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKSecurityPWDType.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKSecurityPWDType.setDescription("Specify the registration-level security password type. cipher: hide the password when it is displayed. simple: do not hide the password when it is displayed.")
hwVoH323GKSecurityPWD = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 1, 8, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwVoH323GKSecurityPWD.setStatus('current')
if mibBuilder.loadTexts: hwVoH323GKSecurityPWD.setDescription('Register password. Length 0: clear the password and disable the GKSecurity capability. Length 1-16: set the password as unencrypted text. Length 24: set the password as encrypted text.')
mibBuilder.exportSymbols("HUAWEI-VO-GK-CLIENT-MIB", hwVoH323GKID=hwVoH323GKID, hwVoH323InterfaceIndex=hwVoH323InterfaceIndex, hwVoH323GKSecurityCall=hwVoH323GKSecurityCall, hwVoH323GK2Port=hwVoH323GK2Port, hwVoiceGKClientMIB=hwVoiceGKClientMIB, hwVoH323GKSecurityPWD=hwVoH323GKSecurityPWD, PYSNMP_MODULE_ID=hwVoiceGKClientMIB, hwVoH323GWID=hwVoH323GWID, hwVoH323GWSupportMode=hwVoH323GWSupportMode, hwVoGKClientObjects=hwVoGKClientObjects, hwVoH323GWAreaID=hwVoH323GWAreaID, hwVoH323GKPort=hwVoH323GKPort, hwVoRasOn=hwVoRasOn, hwVoH323GK2ID=hwVoH323GK2ID, hwVoH323GKIPAddress=hwVoH323GKIPAddress, hwVoH323GKSecurityPWDType=hwVoH323GKSecurityPWDType, hwVoH323GK2IPAddress=hwVoH323GK2IPAddress, hwVoGwIPAddress=hwVoGwIPAddress)
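# Usage sketch (not part of the generated module; assumes this file is on the
# pysnmp MIB search path under the name HUAWEI-VO-GK-CLIENT-MIB):
#
#     from pysnmp.smi import builder
#
#     mib_builder = builder.MibBuilder()
#     mib_builder.loadModules('HUAWEI-VO-GK-CLIENT-MIB')
#     hwVoRasOn, = mib_builder.importSymbols('HUAWEI-VO-GK-CLIENT-MIB', 'hwVoRasOn')
#     print(hwVoRasOn.getName())  # prints the OID tuple of the scalar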
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
import uuid
import mock
import dcm.agent.exceptions as exceptions
import dcm.agent.tests.utils.general as test_utils
import dcm.agent.cloudmetadata as cm
class TestCloudMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.CloudMetaData(self.conf)
def test_base_instance_id(self):
instance_id = self.cm_obj.get_instance_id()
self.assertIsNone(instance_id)
def test_base_is_effective(self):
v = self.cm_obj.is_effective_cloud()
self.assertFalse(v)
def test_base_startup(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_startup_script)
def test_base_get_cloud_type(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_cloud_type)
def test_env_injected_id_no_env(self):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertIsNone(injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
self.conf.get_secure_dir.return_value = tmp_dir
with mock.patch.dict('os.environ',
{cm.ENV_INJECTED_ID_KEY: fake_id}):
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
self.assertTrue(os.path.exists(id_file))
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env_file_exists(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
with open(id_file, "w") as fptr:
fptr.write(fake_id)
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_ipv4_address(self):
addr = self.cm_obj.get_ipv4_addresses()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
def test_handshake_address(self):
addr = self.cm_obj.get_handshake_ip_address()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
class TestUnknownMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.UnknownMetaData(conf)
def test_effective_cloud(self):
self.assertTrue(self.cm_obj.is_effective_cloud())
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.UNKNOWN)
class TestAWSMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.AWSMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.Amazon)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_startup(self, md_server_data):
        startup_data = "some data"
md_server_data.return_value = startup_data
sd = self.cm_obj.get_startup_script()
self.assertEqual(startup_data, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id(self, md_server_data):
fake_id = "somedata"
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertEqual(fake_id, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id_none(self, md_server_data):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
fake_id = None
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertIsNone(sd)
finally:
shutil.rmtree(tmp_dir)
class TestCloudStackMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.CloudStackMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.CloudStack)
class TestJoyentMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.JoyentMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Joyent)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_cached_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_try_both_locations(self, runcmd):
runcmd.return_value = ("", "error", 1)
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
self.conf.system_sudo = "sudo"
x = self.cm_obj.get_injected_id()
call1 = mock.call(
self.conf,
["sudo", "/usr/sbin/mdata-get", "es:dmcm-launch-id"])
call2 = mock.call(
self.conf,
["sudo", "/lib/smartdc/mdata-get", "es:dmcm-launch-id"])
self.assertEqual(runcmd.call_args_list, [call1, call2])
self.assertEqual(runcmd.call_count, 2)
self.assertIsNone(x)
finally:
shutil.rmtree(tmp_dir)
class TestGCEMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.GCEMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Google)
class TestAzureMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.AzureMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Azure)
class TestOpenStackMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.OpenStackMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.OpenStack)
class TestKonamiMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.KonamiMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES._Konami)
class TestDigitalOceanMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.DigitalOceanMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.DigitalOcean)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_startup(self, md_server_data):
        startup_data = "some data"
md_server_data.return_value = startup_data
sd = self.cm_obj.get_startup_script()
self.assertEqual(startup_data, sd)
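# Aside: mock.patch.dict, used throughout these tests, restores the patched
# mapping when the context exits, so os.environ can be mutated safely. A
# self-contained illustration (assuming INJECTED is not already set):
#
#     import os
#     from unittest import mock
#
#     with mock.patch.dict('os.environ', {'INJECTED': 'abc'}):
#         assert os.environ['INJECTED'] == 'abc'
#     assert 'INJECTED' not in os.environ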
|
import atexit
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.log import info,setLogLevel
net = None
def createTopo():
topo=Topo()
#Create Nodes
topo.addHost("h1")
topo.addHost("h2")
topo.addHost("h3")
topo.addHost("h4")
topo.addSwitch('s1')
topo.addSwitch('s2')
topo.addSwitch('s3')
#Create links
topo.addLink('s1','s2')
topo.addLink('s1','s3')
topo.addLink('h1','s2')
topo.addLink('h2','s2')
topo.addLink('h3','s3')
topo.addLink('h4','h3')
return topo
def startNetwork():
topo = createTopo()
global net
net = Mininet(topo=topo, autoSetMacs=True)
net.start()
CLI(net)
def stopNetwork():
if net is not None:
net.stop()
if __name__ == '__main__':
atexit.register(stopNetwork)
setLogLevel('info')
startNetwork()
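# Usage note (a deployment assumption, not stated in this file): Mininet needs
# root privileges to create network namespaces, so the script is typically run
# as `sudo python <script>.py`. Once the CLI prompt appears, `mininet> pingall`
# is a quick way to verify connectivity across the topology.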
|
import pygmaps
from sqlwrap import Database
def draw_map(ssid, coords):
gmap = pygmaps.maps('0', '0', '2')
for location in coords:
lat, lon = location
gmap.addpoint(float(lat), float(lon), '#FF0000')
gmap.draw('../ssid_html/{0}.html'.format(ssid.replace(' ', '_')))
def map_ssid_coords(ssid):
with Database('ssids.db') as db:
coords = db.get_ssid_coords(ssid)
draw_map(ssid, coords)
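# Example (hypothetical SSID): map_ssid_coords('coffee_shop_wifi') looks up the
# SSID's coordinates in ssids.db and writes the rendered map to
# ../ssid_html/coffee_shop_wifi.html, with spaces in the SSID replaced by '_'.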
|
import datetime
import json
from collections import deque
from enum import Enum, unique
from pathlib import Path
import unyt as u
from event_model import compose_resource
from ophyd import Component as Cpt
from ophyd import Device, Signal
from ophyd.sim import NullStatus, SynAxis, new_uid
from .shadow_handler import read_shadow_file
from .sirepo_bluesky import SirepoBluesky
from .srw_handler import read_srw_file
@unique
class SimTypes(Enum):
srw = 'srw'
shadow = 'shadow'
@unique
class SimReportTypes(Enum):
# Single Electron Spectrum
srw_se_spectrum = 'srw_se_spectrum'
shadow_beam_stats = 'shadow_beam_stats'
class ExternalFileReference(Signal):
"""
A pure software Signal that describe()s an image in an external file.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def describe(self):
resource_document_data = super().describe()
resource_document_data[self.name].update(
dict(
external="FILESTORE:",
dtype="array",
)
)
return resource_document_data
class SirepoShadowDetector(Device):
"""
Use SRW code based on the value of the motor.
Units used in plots are directly from sirepo. View the schema at:
https://github.com/radiasoft/sirepo/blob/master/sirepo/package_data/static/json/srw-schema.json
Parameters
----------
name : str
The name of the detector
sim_id : str
The simulation id corresponding to the Sirepo simulation being run on
local server
watch_name : str
The name of the watchpoint viewing the simulation
sirepo_server : str
Address that identifies access to local Sirepo server
source_simulation : bool
States whether user wants to grab source page info instead of beamline
"""
image = Cpt(ExternalFileReference, kind="normal")
shape = Cpt(Signal)
mean = Cpt(Signal)
photon_energy = Cpt(Signal)
horizontal_extent = Cpt(Signal)
vertical_extent = Cpt(Signal)
sirepo_json = Cpt(Signal, kind="normal", value="")
beam_statistics_report = Cpt(Signal, kind="omitted", value="")
def __init__(self, name='sirepo_det', sim_type=None, sim_report_type=None, sim_id=None, watch_name=None,
sirepo_server='http://10.10.10.10:8000', source_simulation=False,
root_dir='/tmp/sirepo_det_data', **kwargs):
super().__init__(name=name, **kwargs)
allowed_sim_types = tuple(SimTypes.__members__.keys())
if sim_type not in allowed_sim_types:
raise ValueError(f"sim_type should be one of {allowed_sim_types}. "
f"Provided value: {sim_type}")
allowed_sim_report_types = tuple(SimReportTypes.__members__.keys())
if sim_report_type not in allowed_sim_report_types:
raise ValueError(f"sim_report_type should be one of {allowed_sim_report_types}. "
f"Provided value: {sim_report_type}")
if sim_id is None:
raise ValueError(f"Simulation ID must be provided. "
f"Currently it is set to {sim_id}")
self._asset_docs_cache = deque()
self._resource_document = None
self._datum_factory = None
self._root_dir = root_dir
self.sirepo_component = None
self.fields = {}
self.field_units = {}
self.parents = {}
self._result = {}
self._sim_type = sim_type
self._sim_report_type = sim_report_type
self._sim_id = sim_id
self.watch_name = watch_name
self.sb = None
self.data = None
self._hints = None
self.sirepo_server = sirepo_server
self.parameters = None
self.source_parameters = None
self.optic_parameters = {}
self.sirepo_components = None
self.source_component = None
self.active_parameters = {}
self.autocompute_params = {}
self.source_simulation = source_simulation
self.one_d_reports = ['intensityReport']
self.two_d_reports = ['watchpointReport']
self.connect(sim_type=self._sim_type, sim_id=self._sim_id)
def update_value(self, value, units):
unyt_obj = u.m
starting_unit = value * unyt_obj
converted_unit = starting_unit.to(units)
return converted_unit
"""
Get new parameter values from Sirepo server
"""
def update_parameters(self):
data, sirepo_schema = self.sb.auth(self._sim_type, self._sim_id)
self.data = data
for key, value in self.sirepo_components.items():
optic_id = self.sb.find_optic_id_by_name(key)
self.parameters = {f'sirepo_{k}': v for k, v in
data['models']['beamline'][optic_id].items()}
for k, v in self.parameters.items():
getattr(value, k).set(v)
def trigger(self):
super().trigger()
date = datetime.datetime.now()
file_name = new_uid()
self._resource_document, self._datum_factory, _ = compose_resource(
start={'uid': 'needed for compose_resource() but will be discarded'},
spec=self._sim_type,
root=self._root_dir,
resource_path=str(Path(date.strftime('%Y/%m/%d')) / Path(f'{file_name}.dat')),
resource_kwargs={}
)
# now discard the start uid, a real one will be added later
self._resource_document.pop('run_start')
self._asset_docs_cache.append(('resource', self._resource_document))
sim_result_file = str(Path(self._resource_document['root']) /
Path(self._resource_document['resource_path']))
if not self.source_simulation:
if self.sirepo_component is not None:
for component in self.data['models']['beamline']:
if 'autocomputeVectors' in component.keys():
self.autocompute_params[component['title']] = component['autocomputeVectors']
for i in range(len(self.active_parameters)):
real_field = self.fields['field' + str(i)].replace('sirepo_', '')
dict_key = self.fields['field' + str(i)].replace('sirepo', self.parents['par' + str(i)])
x = self.active_parameters[dict_key].read()[
f'{self.parents["par" + str(i)]}_{self.fields["field" + str(i)]}']['value']
element = self.sb.find_element(self.data['models']['beamline'],
'title',
self.parents['par' + str(i)])
element[real_field] = x
if self.parents[f'par{i}'] in self.autocompute_params.keys() and 'grazingAngle' in dict_key:
grazing_vecs_dict = {}
autocompute_key = f'{self.parents[f"par{i}"]}_sirepo_autocomputeVectors'
autocompute_type = self.sirepo_components[self.parents[f'par{i}']].read()[
autocompute_key]['value']
grazing_vecs_dict['angle'] = x
grazing_vecs_dict['autocompute_type'] = autocompute_type
optic_id = self.sb.find_optic_id_by_name(self.parents[f'par{i}'])
self.sb.update_grazing_vectors(self.data['models']['beamline'][optic_id],
grazing_vecs_dict)
watch = self.sb.find_element(self.data['models']['beamline'],
'title',
self.watch_name)
if self._sim_report_type == SimReportTypes.srw_se_spectrum.name:
self.data['report'] = 'watchpointReport{}'.format(watch['id'])
elif self._sim_report_type == SimReportTypes.shadow_beam_stats.name:
self.data['report'] = 'beamStatisticsReport'
self.beam_statistics_report.kind = 'normal'
else:
raise ValueError(f"Unknown simulation report type: {self._sim_report_type}")
elif self._sim_report_type == SimReportTypes.srw_se_spectrum.name:
self.data['report'] = "intensityReport"
self.sb.run_simulation()
datafile = self.sb.get_datafile()
if self._sim_report_type == SimReportTypes.shadow_beam_stats.name:
self.beam_statistics_report.put(json.dumps(json.loads(datafile.decode())))
else:
with open(sim_result_file, 'wb') as f:
f.write(datafile)
def update_components(_data):
self.shape.put(_data['shape'])
self.mean.put(_data['mean'])
self.photon_energy.put(_data['photon_energy'])
self.horizontal_extent.put(_data['horizontal_extent'])
self.vertical_extent.put(_data['vertical_extent'])
if self._sim_type == SimTypes.srw.name:
if self.data['report'] in self.one_d_reports:
ndim = 1
else:
ndim = 2
ret = read_srw_file(sim_result_file, ndim=ndim)
self._resource_document["resource_kwargs"]["ndim"] = ndim
update_components(ret)
elif self._sim_type == SimTypes.shadow.name and not \
self._sim_report_type == SimReportTypes.shadow_beam_stats.name:
nbins = self.data['models'][self.data['report']]['histogramBins']
ret = read_shadow_file(sim_result_file, histogram_bins=nbins)
self._resource_document["resource_kwargs"]["histogram_bins"] = nbins
update_components(ret)
# else:
# raise ValueError(f"Unknown simulation type: {self._sim_type}")
datum_document = self._datum_factory(datum_kwargs={})
self._asset_docs_cache.append(("datum", datum_document))
self.image.put(datum_document["datum_id"])
self.sirepo_json.put(json.dumps(self.data))
self._resource_document = None
self._datum_factory = None
return NullStatus()
def describe(self):
res = super().describe()
res[self.image.name].update(dict(external="FILESTORE"))
return res
def unstage(self):
super().unstage()
self._resource_document = None
self._result.clear()
def collect_asset_docs(self):
items = list(self._asset_docs_cache)
self._asset_docs_cache.clear()
for item in items:
yield item
def connect(self, sim_type, sim_id):
sb = SirepoBluesky(self.sirepo_server)
data, sirepo_schema = sb.auth(sim_type, sim_id)
self.data = data
self.sb = sb
if not self.source_simulation:
def class_factory(cls_name):
dd = {k: Cpt(SynAxis) for k in self.parameters}
return type(cls_name, (Device,), dd)
sirepo_components = {}
# Create sirepo component for each optical element, set active element
# to the one selected by the user
for i in range(len(data['models']['beamline'])):
optic = (data['models']['beamline'][i]['title'])
optic_id = self.sb.find_optic_id_by_name(optic)
self.parameters = {f'sirepo_{k}': v for k, v in
data['models']['beamline'][optic_id].items()}
self.optic_parameters[optic] = self.parameters
SirepoComponent = class_factory('SirepoComponent')
sirepo_component = SirepoComponent(name=optic)
for k, v in self.parameters.items():
getattr(sirepo_component, k).set(v)
sirepo_components[sirepo_component.name] = sirepo_component
self.sirepo_components = sirepo_components
else:
# Create source components
self.source_parameters = {f'sirepo_intensityReport_{k}': v for k, v in
data['models']['intensityReport'].items()}
def source_class_factory(cls_name):
dd = {k: Cpt(SynAxis) for k in self.source_parameters}
return type(cls_name, (Device,), dd)
SirepoComponent = source_class_factory('SirepoComponent')
self.source_component = SirepoComponent(name='intensityReport')
for k, v in self.source_parameters.items():
getattr(self.source_component, k).set(v)
for k in self.optic_parameters:
if self.optic_parameters[k]['sirepo_type'] == 'watch':
self.watch_name = self.optic_parameters[k]['sirepo_title']
"""
Get list of available sirepo components / parameters / watchpoints
"""
def view_sirepo_components(self):
watchpoints = []
for k in self.optic_parameters:
print(f'OPTIC: {k}')
print(f'PARAMETERS: {self.optic_parameters[k]}')
if self.optic_parameters[k]['sirepo_type'] == 'watch':
watchpoints.append(k)
print(f'WATCHPOINTS: {watchpoints}')
"""
Selects specific optical component for any scan
- Any parameter selected must be of this component
Parameters
----------
name : str
name of optic
"""
def select_optic(self, name):
self.sirepo_component = self.sirepo_components[name]
"""
Returns a parameter based on Ophyd objects created in connect()
- User can specify any parameter name of the selected component
- No need to put "sirepo_" before the name
Parameters
----------
name : str
name of parameter to create
"""
def create_parameter(self, name):
real_name = f"sirepo_{name}"
ct = 0
while f'field{ct}' in self.fields.keys():
ct += 1
fieldkey = f'field{ct}'
parentkey = f'par{ct}'
self.fields[fieldkey] = real_name
self.parents[parentkey] = self.sirepo_component.name
key = f"{self.parents[parentkey]}_{name}"
param = getattr(self.sirepo_component, real_name)
self.active_parameters[key] = param
return param
"""
Sets active watchpoint for the trigger() method
Parameters
----------
name : str
name of watchpoint
"""
def set_watchpoint(self, name):
self.watch_name = name
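# A minimal usage sketch of the detector above (the sim_id and component names
# are hypothetical; the call sequence follows the methods defined here):
#
#     det = SirepoShadowDetector(sim_type='srw',
#                                sim_report_type='srw_se_spectrum',
#                                sim_id='abc123', watch_name='W60')
#     det.view_sirepo_components()       # list optics and watchpoints
#     det.select_optic('Aperture')       # choose the component to vary
#     size = det.create_parameter('horizontalSize')
#     det.set_watchpoint('W60')          # report is taken at this watchpoint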
|
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file was generated by Djinni from my_flags.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
from enum import IntFlag
class MyFlags(IntFlag):
""" flag comment """
No_Flags = 0
""" flag option comment """
Flag1 = 1
Flag2 = 2
Flag3 = 4
All_Flags = 7
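# Illustration (not part of the generated interface): IntFlag members combine
# with bitwise operators, so the generated values compose as expected.
#
#     flags = MyFlags.Flag1 | MyFlags.Flag2        # value 3
#     assert MyFlags.Flag1 in flags
#     assert (flags | MyFlags.Flag3) == MyFlags.All_Flags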
|
#!/usr/bin/env python
import datetime
import json
import logging
import os
import random
import re
import subprocess
import time
import tempfile
import shutil
import urllib2
import csv
import pytz
import tzlocal
def do_ndt_test(country_code=""):
"""Runs the NDT test as a subprocess and returns the raw results.
Args:
`country_code`: A capitalized, two-letter country code representing the
location of the desired test server. If no country code is supplied,
the script uses the default mlab_ns behavior.
Returns:
The STDOUT of the call to `measurement_kit`.
"""
now = int(subprocess.check_output(["date", "-u", "+%s"]))
if country_code == "":
        # If there is a country code, use it; otherwise fall back to the
        # default mlab_ns behavior. We are using the `-g` flag due to a regex
        # stack-overflow segmentation fault bug in GNU's C++ library; see
        # Measurement-kit issue #1276:
        # https://github.com/measurement-kit/measurement-kit/issues/1276
result_raw = subprocess.check_output(
["/test-runner/measurement_kit", "-g",
"--reportfile=/data/ndt-%d.njson"%now, "ndt"])
else:
result_raw = subprocess.check_output(
["/test-runner/measurement_kit", "-g",
"--reportfile=/data/ndt-%d.njson"%now, "ndt", "-C", country_code])
return result_raw
def summarize_tests():
"""Converts measurement_kit .njson test results into a single .csv file.
This function checks the `/data/` directory for all files, reads the json
into an object and writes the object into a csv file that it stores in
`/share/history.csv` (the `share` directory is shared between this Docker
image and the dashboard image).
"""
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
historywriter = csv.writer(tmpfile)
historywriter.writerow(["Datetime", "Download", "Upload"])
for file in sorted(os.listdir("/data")):
with open("/data/" + file) as json_data:
try:
d = json.load(json_data)
historywriter.writerow([
d["measurement_start_time"],
d["test_keys"]["simple"]["download"],
d["test_keys"]["simple"]["upload"]])
except Exception as e:
logging.error('Failed to write row %s', e)
pass
tmp_loc = tmpfile.name
logging.info("Updating /share/history.csv")
shutil.move(tmp_loc, "/share/history.csv")
def perform_test_loop(expected_sleep_secs=12*60*60):
"""The main loop of the script.
It gathers the computer's location, then loops forever calling
measurement_kit once with the default mlab_ns behavior. It then sleeps
for a random interval (determined by an exponential distribution) that
    will average out to expected_sleep_secs.
    Args:
        `expected_sleep_secs`: The desired average time, in seconds,
            between tests.
"""
while True:
# Run the test once with the default mlab_ns responder.
try:
_ = do_ndt_test("")
except subprocess.CalledProcessError as ex:
logging.error('Error in NDT test: %s', ex)
summarize_tests()
sleeptime = random.expovariate(1.0/expected_sleep_secs)
resume_time = (datetime.datetime.utcnow() +
datetime.timedelta(seconds=sleeptime))
logging.info(
'Sleeping for %u seconds (until %s)', sleeptime, resume_time)
time.sleep(sleeptime)
if __name__ == "__main__":
root_log = logging.getLogger()
root_log.setLevel(logging.INFO)
perform_test_loop()
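# Aside on the sleep schedule: random.expovariate(1.0 / mean) samples an
# exponential distribution whose mean is `mean`, so the gaps between tests
# average out to expected_sleep_secs. A quick sanity check:
#
#     import random
#     samples = [random.expovariate(1.0 / 600) for _ in range(100000)]
#     print(sum(samples) / len(samples))  # close to 600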
|
#from pyspark.sql import SparkSession, functions, types
import sys
import json
import requests
#def join():
# real_game_info = spark.read.json('real_game_info')
# real_game_info = real_game_info.orderBy(real_game_info['count'].desc()).where(real_game_info.guid != '')
# real_game_info.select('guid').coalesce(1).write.json('game_id.txt', mode = 'overwrite')
def load_genre():
guid = []
with open('game_id.txt/game_id.json', 'r') as jf:
for line in jf:
guid.append(json.loads(line))
session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:63.0) Gecko/20100101 Firefox/63.0'})
for i in range(len(guid)):
gid = guid[i]['guid']
req = 'https://www.giantbomb.com/api/game/'+ str(gid) + '/?api_key=3aa85a32d444184830f32a6d51b564a5a9397d41&format=json&field_list=guid,genres'
response = session.get(req)
filename = 'guids/genre_' + str(i)
print(i, gid)
with open(filename, 'w', encoding='utf-8') as outfile:
            json.dump(response.json(), outfile, indent=4, separators=(',', ': '))
if __name__ == '__main__':
#spark = SparkSession.builder.appName('join data').getOrCreate()
#spark.sparkContext.setLogLevel('WARN')
load_genre()
|
import math

def reverseBits(n):
    # f'{n:32b}' left-pads to 32 characters with spaces; turn the padding
    # into leading zero bits.
    binaryNum = list(f'{n:32b}')
    for i in range(len(binaryNum)):
        if (binaryNum[i] == ' '):
            binaryNum[i] = '0'
    print ("binaryNum: " + str(binaryNum))
    # Swap symmetric positions to reverse the bit order in place.
    for i in range(math.floor(len(binaryNum)/2)):
        temp = binaryNum[i]
        binaryNum[i] = binaryNum[len(binaryNum)-i-1]
        binaryNum[len(binaryNum)-i-1] = temp
    # return int("".join(binaryNum), 2) would return the reversed value as an int
    return "".join(binaryNum)

print (reverseBits(43261596))  # expected: '00111001011110000010100101000000'
|
from .base import Kernel, DevicePointer, CUDAStream, round_up
import ctypes
arith_kernel = Kernel(
"arith",
[
"cu_arith_global_scale",
"cu_arith_element_add",
"cu_arith_element_mul",
"cu_arith_batch_add_forward",
"cu_arith_batch_add_backward",
"cu_arith_ln_mul_add",
"cu_arith_ln_add",
"cu_arith_ln_mul",
"cu_arith_ln_div",
"cu_arith_ln_sub_div",
"cu_arith_ln_mul_backward",
"cu_arith_ln_add_backward",
"cu_arith_batch_mul_add",
"cu_arith_batch_mul"
]
)
def arith_global_scale(
n : int,
inp : DevicePointer, # (n,) fp16
scale : float,
out : DevicePointer, # (n,) fp16
stream : CUDAStream
):
threads = min(round_up(n, 32), 1024)
gridDim = (round_up(n, threads) // threads, 1, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_global_scale(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(n),
ctypes.c_void_p(inp),
ctypes.c_float(scale),
ctypes.c_void_p(out)
]
)
def arith_element_add(
batch : int, n : int,
x : DevicePointer, # (batch, n) fp16
y : DevicePointer, # (batch, n) fp16
out : DevicePointer, # (batch, n) fp16
stream : CUDAStream
):
"""
out = x + y
"""
assert n % 2 == 0
n = n // 2
threads = min(round_up(n, 32), 1024)
gridDim = (batch, round_up(n, threads) // threads, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_element_add(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(x),
ctypes.c_void_p(y),
ctypes.c_void_p(out)
]
)
def arith_element_mul(
batch : int, n : int,
x : DevicePointer, # (batch, n) fp16
y : DevicePointer, # (batch, n) fp16
out : DevicePointer, # (batch, n) fp16
stream : CUDAStream
):
"""
out = x * y
"""
assert n % 2 == 0
n = n // 2
threads = min(round_up(n, 32), 1024)
gridDim = (batch, round_up(n, threads) // threads, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_element_mul(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(x),
ctypes.c_void_p(y),
ctypes.c_void_p(out)
]
)
def arith_batch_add_forward(
batch : int, n : int,
x : DevicePointer, # (batch, n) fp16
y : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n) fp16
stream : CUDAStream
):
"""
out = x + y[None, :]
"""
assert n % 2 == 0
n = n // 2
threads = min(round_up(n, 32), 1024)
gridDim = (batch, round_up(n, threads) // threads, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_batch_add_forward(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(x),
ctypes.c_void_p(y),
ctypes.c_void_p(out)
]
)
def arith_batch_add_backward(
batch : int, n : int,
grad_out : DevicePointer, # (batch, n) fp16
grad : DevicePointer, # (n) fp16
stream : CUDAStream
):
gridDim = ( round_up(n, 32) // 32, 1, 1 )
blockDim = (32, 32, 1)
arith_kernel.cu_arith_batch_add_backward(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(grad_out),
ctypes.c_void_p(grad)
]
)
def arith_ln_mul_add(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
alpha : DevicePointer, # (n) fp16
beta : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = x * alpha[None, :, None] + beta[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_mul_add(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(alpha),
ctypes.c_void_p(beta),
ctypes.c_void_p(out)
]
)
def arith_ln_add(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
beta : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = x + beta[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_add(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(beta),
ctypes.c_void_p(out)
]
)
def arith_ln_mul(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
alpha : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = x * alpha[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_mul(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(alpha),
ctypes.c_void_p(out)
]
)
def arith_ln_div(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
alpha : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = x / alpha[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_div(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(alpha),
ctypes.c_void_p(out)
]
)
def arith_ln_sub_div(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
alpha : DevicePointer, # (n) fp16
beta : DevicePointer, # (n) fp16
out : DevicePointer, # (batch, n, m) fp16
stream : CUDAStream
):
"""
out = (x - beta[None, :, None]) / alpha[None, :, None]
"""
assert m % 2 == 0
m = m // 2
threads = min(round_up(m, 32), 1024)
gridDim = (batch, n, round_up(m, threads) // threads)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_ln_sub_div(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(alpha),
ctypes.c_void_p(beta),
ctypes.c_void_p(out)
]
)
def arith_ln_mul_backward(
batch : int, n : int, m : int,
inp : DevicePointer, # (batch, n, m) fp16
grad_out : DevicePointer, # (batch, n, m) fp16
grad : DevicePointer, # (n) fp16
stream : CUDAStream
):
gridDim = (n, 1, 1)
blockDim = (32, 32, 1)
arith_kernel.cu_arith_ln_mul_backward(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(inp),
ctypes.c_void_p(grad_out),
ctypes.c_void_p(grad)
]
)
def arith_ln_add_backward(
batch : int, n : int, m : int,
grad_out : DevicePointer, # (batch, n, m) fp16
grad : DevicePointer, # (n) fp16
stream : CUDAStream
):
gridDim = (n, 1, 1)
blockDim = (32, 32, 1)
arith_kernel.cu_arith_ln_add_backward(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_int64(m),
ctypes.c_void_p(grad_out),
ctypes.c_void_p(grad)
]
)
def arith_batch_mul_add(
batch : int, n : int,
x : DevicePointer, # (batch, n)
alpha : DevicePointer, # (n)
beta : DevicePointer, # (n)
out : DevicePointer, # (batch, n)
stream : CUDAStream
):
assert n % 2 == 0
n = n // 2
threads = min(round_up(n, 32), 1024)
gridDim = (batch, round_up(n, threads) // threads, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_batch_mul_add(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(x),
ctypes.c_void_p(alpha),
ctypes.c_void_p(beta),
ctypes.c_void_p(out)
]
)
def arith_batch_mul(
batch : int, n : int,
x : DevicePointer, # (batch, n)
alpha : DevicePointer, # (n)
out : DevicePointer, # (batch, n)
stream : CUDAStream
):
assert n % 2 == 0
n = n // 2
threads = min(round_up(n, 32), 1024)
gridDim = (batch, round_up(n, threads) // threads, 1)
blockDim = (threads, 1, 1)
arith_kernel.cu_arith_batch_mul(
gridDim, blockDim, 0, stream, [
ctypes.c_int64(batch),
ctypes.c_int64(n),
ctypes.c_void_p(x),
ctypes.c_void_p(alpha),
ctypes.c_void_p(out)
]
)
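# Reference semantics for the layer-norm style kernels above, written in NumPy
# for clarity (an illustration of the documented formulas, not the CUDA code):
#
#     import numpy as np
#
#     batch, n, m = 2, 3, 8
#     x = np.random.rand(batch, n, m).astype(np.float16)
#     alpha = np.random.rand(n).astype(np.float16)
#     beta = np.random.rand(n).astype(np.float16)
#
#     out = x * alpha[None, :, None] + beta[None, :, None]    # arith_ln_mul_add
#     out = (x - beta[None, :, None]) / alpha[None, :, None]  # arith_ln_sub_div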
|
# Check whether your name contains the name Silva
s = str(input('Enter your name: ')).upper()
print('Silva is in your name: {}'.format("SILVA" in s.strip()))
|
from diagnnose.attribute import ContextualDecomposer, Explainer
from diagnnose.config import create_config_dict
from diagnnose.models import LanguageModel, import_model
from diagnnose.tokenizer import create_tokenizer
from diagnnose.utils.misc import profile
if __name__ == "__main__":
config_dict = create_config_dict()
model: LanguageModel = import_model(**config_dict["model"])
tokenizer = create_tokenizer(**config_dict["tokenizer"])
decomposer = ContextualDecomposer(model)
explainer = Explainer(decomposer, tokenizer)
sens = [f"The athletes above Barbara <mask>."]
tokens = ["walk", "walks"]
with profile():
full_probs, contribution_probs = explainer.explain(sens, tokens)
explainer.print_attributions(full_probs, contribution_probs, sens, tokens)
|
from barbell2.lib.xnat.xnatclient import XnatClient
|
#!/usr/bin/env python
from setuptools import setup
try:
    with open('README.md', 'r') as readme:
        long_description = readme.read()
except IOError:
    long_description = ''
setup(
name='samp-client',
version='3.0.1',
packages=['samp_client'],
url='https://github.com/mick88/samp-client',
license='MIT',
author='Michal Dabski',
author_email='[email protected]',
install_requires=[],
description='SA-MP API client for python supporting both query and RCON APIs',
long_description_content_type='text/markdown',
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
],
project_urls={
'Source': 'https://github.com/mick88/samp-client',
'Tracker': 'https://github.com/mick88/samp-client/issues',
},
)
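# Packaging note: `python setup.py sdist bdist_wheel` builds the distributables;
# because long_description_content_type is 'text/markdown', PyPI renders the
# README.md contents as the project page.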
|
import pytest
from test_deployers import InMemoryDeploymentBackend
from ee.models import Application, ApplicationEnvironment, EnvironmentDefinition
from ee.service import EnvironmentService
@pytest.fixture
def env_service(in_memory_store):
deployment_backend = InMemoryDeploymentBackend()
env_service = EnvironmentService(
store=in_memory_store, deployment_backend=deployment_backend
)
return env_service
def test_env_service_roundtrip_persistence_for_env_def(env_service: EnvironmentService):
raw = '{"packages": {"foo": "1.2.3"}}'
env_def = EnvironmentDefinition(raw)
env_service.save_env_def(env_def)
env_def_returned = env_service.get_env_def(env_def.id)
assert env_def_returned == env_def
def test_env_service_multiple_env_defs(env_service: EnvironmentService):
for n in [4, 5, 6]:
_ = EnvironmentDefinition('{"packages": {"foo": "1.2.%s"}}' % n)
env_service.save_env_def(_)
env_id = EnvironmentDefinition('{"packages": {"foo": "1.2.5"}}').id
env_def = env_service.get_env_def(env_id)
assert env_def.id == env_id
assert env_def.packages == {"foo": "1.2.5"}
def test_associate_application_environment(env_service: EnvironmentService):
env_def = EnvironmentDefinition('{"packages": {"foo": "9.8.7"}}')
env_service.save_env_def(env_def)
app = Application(name="my-app")
app_env = ApplicationEnvironment(app=app, env="prod", env_def=env_def)
env_service.save_app_env(app_env)
def test_application_environment_roundtrip(env_service: EnvironmentService):
env_def = EnvironmentDefinition('{"packages": {"foo": "9.8.7"}}')
env_service.save_env_def(env_def)
app = Application(name="my-app")
app_env = ApplicationEnvironment(app=app, env="prod", env_def=env_def)
env_service.save_app_env(app_env)
app_env_returned = env_service.get_app_env("my-app", "prod")
assert app_env_returned.env_def.packages == {"foo": "9.8.7"}
def test_run(env_service: EnvironmentService):
# First we create an environment definition
env_def = EnvironmentDefinition('{"packages": {"foo": "9.8.7"}}')
env_service.save_env_def(env_def)
# Then we associate it to an (application, environment)
app = Application(name="some_app")
app_env = ApplicationEnvironment(app=app, env="uat", env_def=env_def)
env_service.save_app_env(app_env)
# Finally we ask the service to run something on that (app, env)
env_service.run("some_app", "uat", ["hello", "world"])
# We can then run the asserts against the deployment backend inside our service
assert [("b262deb", env_def)] == env_service.deployment_backend.envs
assert [
("b262deb", ["hello", "world"])
] == env_service.deployment_backend.executed_commands
|
from .main import find_and_write_commits
|
# -*- coding: utf-8 -*-
import attr
@attr.s
class BaseConfig:
def validate(self):
raise NotImplementedError
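# A minimal sketch of a concrete config built on BaseConfig (field names are
# hypothetical):
#
#     @attr.s
#     class ServerConfig(BaseConfig):
#         host = attr.ib(default="localhost")
#         port = attr.ib(default=8080)
#
#         def validate(self):
#             if not 0 < self.port < 65536:
#                 raise ValueError("invalid port: %s" % self.port)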
|