{
  "source": "joekohlsdorf/docusign-esign-python-client",
  "score": 2
}
#### File: docusign_esign/models/brand.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Brand(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'brand_company': 'str',
'brand_id': 'str',
'brand_languages': 'list[str]',
'brand_name': 'str',
'colors': 'list[NameValue]',
'default_brand_language': 'str',
'email_content': 'list[BrandEmailContent]',
'error_details': 'ErrorDetails',
'is_overriding_company_name': 'bool',
'is_sending_default': 'bool',
'is_signing_default': 'bool',
'landing_pages': 'list[NameValue]',
'links': 'list[BrandLink]',
'logos': 'BrandLogos',
'resources': 'BrandResourceUrls'
}
attribute_map = {
'brand_company': 'brandCompany',
'brand_id': 'brandId',
'brand_languages': 'brandLanguages',
'brand_name': 'brandName',
'colors': 'colors',
'default_brand_language': 'defaultBrandLanguage',
'email_content': 'emailContent',
'error_details': 'errorDetails',
'is_overriding_company_name': 'isOverridingCompanyName',
'is_sending_default': 'isSendingDefault',
'is_signing_default': 'isSigningDefault',
'landing_pages': 'landingPages',
'links': 'links',
'logos': 'logos',
'resources': 'resources'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Brand - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._brand_company = None
self._brand_id = None
self._brand_languages = None
self._brand_name = None
self._colors = None
self._default_brand_language = None
self._email_content = None
self._error_details = None
self._is_overriding_company_name = None
self._is_sending_default = None
self._is_signing_default = None
self._landing_pages = None
self._links = None
self._logos = None
self._resources = None
self.discriminator = None
setattr(self, "_{}".format('brand_company'), kwargs.get('brand_company', None))
setattr(self, "_{}".format('brand_id'), kwargs.get('brand_id', None))
setattr(self, "_{}".format('brand_languages'), kwargs.get('brand_languages', None))
setattr(self, "_{}".format('brand_name'), kwargs.get('brand_name', None))
setattr(self, "_{}".format('colors'), kwargs.get('colors', None))
setattr(self, "_{}".format('default_brand_language'), kwargs.get('default_brand_language', None))
setattr(self, "_{}".format('email_content'), kwargs.get('email_content', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('is_overriding_company_name'), kwargs.get('is_overriding_company_name', None))
setattr(self, "_{}".format('is_sending_default'), kwargs.get('is_sending_default', None))
setattr(self, "_{}".format('is_signing_default'), kwargs.get('is_signing_default', None))
setattr(self, "_{}".format('landing_pages'), kwargs.get('landing_pages', None))
setattr(self, "_{}".format('links'), kwargs.get('links', None))
setattr(self, "_{}".format('logos'), kwargs.get('logos', None))
setattr(self, "_{}".format('resources'), kwargs.get('resources', None))
@property
def brand_company(self):
"""Gets the brand_company of this Brand. # noqa: E501
The name of the company associated with this brand. # noqa: E501
:return: The brand_company of this Brand. # noqa: E501
:rtype: str
"""
return self._brand_company
@brand_company.setter
def brand_company(self, brand_company):
"""Sets the brand_company of this Brand.
The name of the company associated with this brand. # noqa: E501
:param brand_company: The brand_company of this Brand. # noqa: E501
:type: str
"""
self._brand_company = brand_company
@property
def brand_id(self):
"""Gets the brand_id of this Brand. # noqa: E501
The ID used to identify a specific brand in API calls. # noqa: E501
:return: The brand_id of this Brand. # noqa: E501
:rtype: str
"""
return self._brand_id
@brand_id.setter
def brand_id(self, brand_id):
"""Sets the brand_id of this Brand.
The ID used to identify a specific brand in API calls. # noqa: E501
:param brand_id: The brand_id of this Brand. # noqa: E501
:type: str
"""
self._brand_id = brand_id
@property
def brand_languages(self):
"""Gets the brand_languages of this Brand. # noqa: E501
# noqa: E501
:return: The brand_languages of this Brand. # noqa: E501
:rtype: list[str]
"""
return self._brand_languages
@brand_languages.setter
def brand_languages(self, brand_languages):
"""Sets the brand_languages of this Brand.
# noqa: E501
:param brand_languages: The brand_languages of this Brand. # noqa: E501
:type: list[str]
"""
self._brand_languages = brand_languages
@property
def brand_name(self):
"""Gets the brand_name of this Brand. # noqa: E501
The name of the brand. # noqa: E501
:return: The brand_name of this Brand. # noqa: E501
:rtype: str
"""
return self._brand_name
@brand_name.setter
def brand_name(self, brand_name):
"""Sets the brand_name of this Brand.
The name of the brand. # noqa: E501
:param brand_name: The brand_name of this Brand. # noqa: E501
:type: str
"""
self._brand_name = brand_name
@property
def colors(self):
"""Gets the colors of this Brand. # noqa: E501
# noqa: E501
:return: The colors of this Brand. # noqa: E501
:rtype: list[NameValue]
"""
return self._colors
@colors.setter
def colors(self, colors):
"""Sets the colors of this Brand.
# noqa: E501
:param colors: The colors of this Brand. # noqa: E501
:type: list[NameValue]
"""
self._colors = colors
@property
def default_brand_language(self):
"""Gets the default_brand_language of this Brand. # noqa: E501
# noqa: E501
:return: The default_brand_language of this Brand. # noqa: E501
:rtype: str
"""
return self._default_brand_language
@default_brand_language.setter
def default_brand_language(self, default_brand_language):
"""Sets the default_brand_language of this Brand.
# noqa: E501
:param default_brand_language: The default_brand_language of this Brand. # noqa: E501
:type: str
"""
self._default_brand_language = default_brand_language
@property
def email_content(self):
"""Gets the email_content of this Brand. # noqa: E501
# noqa: E501
:return: The email_content of this Brand. # noqa: E501
:rtype: list[BrandEmailContent]
"""
return self._email_content
@email_content.setter
def email_content(self, email_content):
"""Sets the email_content of this Brand.
# noqa: E501
:param email_content: The email_content of this Brand. # noqa: E501
:type: list[BrandEmailContent]
"""
self._email_content = email_content
@property
def error_details(self):
"""Gets the error_details of this Brand. # noqa: E501
:return: The error_details of this Brand. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this Brand.
:param error_details: The error_details of this Brand. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def is_overriding_company_name(self):
"""Gets the is_overriding_company_name of this Brand. # noqa: E501
# noqa: E501
:return: The is_overriding_company_name of this Brand. # noqa: E501
:rtype: bool
"""
return self._is_overriding_company_name
@is_overriding_company_name.setter
def is_overriding_company_name(self, is_overriding_company_name):
"""Sets the is_overriding_company_name of this Brand.
# noqa: E501
:param is_overriding_company_name: The is_overriding_company_name of this Brand. # noqa: E501
:type: bool
"""
self._is_overriding_company_name = is_overriding_company_name
@property
def is_sending_default(self):
"""Gets the is_sending_default of this Brand. # noqa: E501
# noqa: E501
:return: The is_sending_default of this Brand. # noqa: E501
:rtype: bool
"""
return self._is_sending_default
@is_sending_default.setter
def is_sending_default(self, is_sending_default):
"""Sets the is_sending_default of this Brand.
# noqa: E501
:param is_sending_default: The is_sending_default of this Brand. # noqa: E501
:type: bool
"""
self._is_sending_default = is_sending_default
@property
def is_signing_default(self):
"""Gets the is_signing_default of this Brand. # noqa: E501
# noqa: E501
:return: The is_signing_default of this Brand. # noqa: E501
:rtype: bool
"""
return self._is_signing_default
@is_signing_default.setter
def is_signing_default(self, is_signing_default):
"""Sets the is_signing_default of this Brand.
# noqa: E501
:param is_signing_default: The is_signing_default of this Brand. # noqa: E501
:type: bool
"""
self._is_signing_default = is_signing_default
@property
def landing_pages(self):
"""Gets the landing_pages of this Brand. # noqa: E501
# noqa: E501
:return: The landing_pages of this Brand. # noqa: E501
:rtype: list[NameValue]
"""
return self._landing_pages
@landing_pages.setter
def landing_pages(self, landing_pages):
"""Sets the landing_pages of this Brand.
# noqa: E501
:param landing_pages: The landing_pages of this Brand. # noqa: E501
:type: list[NameValue]
"""
self._landing_pages = landing_pages
@property
def links(self):
"""Gets the links of this Brand. # noqa: E501
# noqa: E501
:return: The links of this Brand. # noqa: E501
:rtype: list[BrandLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Brand.
# noqa: E501
:param links: The links of this Brand. # noqa: E501
:type: list[BrandLink]
"""
self._links = links
@property
def logos(self):
"""Gets the logos of this Brand. # noqa: E501
:return: The logos of this Brand. # noqa: E501
:rtype: BrandLogos
"""
return self._logos
@logos.setter
def logos(self, logos):
"""Sets the logos of this Brand.
:param logos: The logos of this Brand. # noqa: E501
:type: BrandLogos
"""
self._logos = logos
@property
def resources(self):
"""Gets the resources of this Brand. # noqa: E501
:return: The resources of this Brand. # noqa: E501
:rtype: BrandResourceUrls
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this Brand.
:param resources: The resources of this Brand. # noqa: E501
:type: BrandResourceUrls
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Brand, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Brand):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Brand):
return True
return self.to_dict() != other.to_dict()
```
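A minimal usage sketch for the model above (field values are illustrative, not taken from the file): keyword arguments map onto the snake_case names declared in `swagger_types`, and `to_dict()` serializes the instance recursively.

```python
from docusign_esign.models.brand import Brand

# Construct a Brand; any field not supplied stays None.
brand = Brand(
    brand_name="Acme Corporate",   # illustrative values
    brand_company="Acme Inc.",
    is_sending_default=True,
)

# to_dict() walks swagger_types and serializes nested models/lists recursively.
assert brand.to_dict()["brand_name"] == "Acme Corporate"
print(brand)  # __repr__ delegates to to_str(), i.e. pprint of to_dict()
```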
#### File: docusign_esign/models/bulk_send_batch_summaries.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class BulkSendBatchSummaries(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'batch_size_limit': 'str',
'bulk_batch_summaries': 'list[BulkSendBatchSummary]',
'bulk_process_queue_limit': 'str',
'bulk_process_total_queued': 'str',
'end_position': 'str',
'next_uri': 'str',
'previous_uri': 'str',
'queue_limit': 'str',
'result_set_size': 'str',
'start_position': 'str',
'total_queued': 'str',
'total_set_size': 'str'
}
attribute_map = {
'batch_size_limit': 'batchSizeLimit',
'bulk_batch_summaries': 'bulkBatchSummaries',
'bulk_process_queue_limit': 'bulkProcessQueueLimit',
'bulk_process_total_queued': 'bulkProcessTotalQueued',
'end_position': 'endPosition',
'next_uri': 'nextUri',
'previous_uri': 'previousUri',
'queue_limit': 'queueLimit',
'result_set_size': 'resultSetSize',
'start_position': 'startPosition',
'total_queued': 'totalQueued',
'total_set_size': 'totalSetSize'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""BulkSendBatchSummaries - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._batch_size_limit = None
self._bulk_batch_summaries = None
self._bulk_process_queue_limit = None
self._bulk_process_total_queued = None
self._end_position = None
self._next_uri = None
self._previous_uri = None
self._queue_limit = None
self._result_set_size = None
self._start_position = None
self._total_queued = None
self._total_set_size = None
self.discriminator = None
setattr(self, "_{}".format('batch_size_limit'), kwargs.get('batch_size_limit', None))
setattr(self, "_{}".format('bulk_batch_summaries'), kwargs.get('bulk_batch_summaries', None))
setattr(self, "_{}".format('bulk_process_queue_limit'), kwargs.get('bulk_process_queue_limit', None))
setattr(self, "_{}".format('bulk_process_total_queued'), kwargs.get('bulk_process_total_queued', None))
setattr(self, "_{}".format('end_position'), kwargs.get('end_position', None))
setattr(self, "_{}".format('next_uri'), kwargs.get('next_uri', None))
setattr(self, "_{}".format('previous_uri'), kwargs.get('previous_uri', None))
setattr(self, "_{}".format('queue_limit'), kwargs.get('queue_limit', None))
setattr(self, "_{}".format('result_set_size'), kwargs.get('result_set_size', None))
setattr(self, "_{}".format('start_position'), kwargs.get('start_position', None))
setattr(self, "_{}".format('total_queued'), kwargs.get('total_queued', None))
setattr(self, "_{}".format('total_set_size'), kwargs.get('total_set_size', None))
@property
def batch_size_limit(self):
"""Gets the batch_size_limit of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The batch_size_limit of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._batch_size_limit
@batch_size_limit.setter
def batch_size_limit(self, batch_size_limit):
"""Sets the batch_size_limit of this BulkSendBatchSummaries.
# noqa: E501
:param batch_size_limit: The batch_size_limit of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._batch_size_limit = batch_size_limit
@property
def bulk_batch_summaries(self):
"""Gets the bulk_batch_summaries of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The bulk_batch_summaries of this BulkSendBatchSummaries. # noqa: E501
:rtype: list[BulkSendBatchSummary]
"""
return self._bulk_batch_summaries
@bulk_batch_summaries.setter
def bulk_batch_summaries(self, bulk_batch_summaries):
"""Sets the bulk_batch_summaries of this BulkSendBatchSummaries.
# noqa: E501
:param bulk_batch_summaries: The bulk_batch_summaries of this BulkSendBatchSummaries. # noqa: E501
:type: list[BulkSendBatchSummary]
"""
self._bulk_batch_summaries = bulk_batch_summaries
@property
def bulk_process_queue_limit(self):
"""Gets the bulk_process_queue_limit of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The bulk_process_queue_limit of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._bulk_process_queue_limit
@bulk_process_queue_limit.setter
def bulk_process_queue_limit(self, bulk_process_queue_limit):
"""Sets the bulk_process_queue_limit of this BulkSendBatchSummaries.
# noqa: E501
:param bulk_process_queue_limit: The bulk_process_queue_limit of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._bulk_process_queue_limit = bulk_process_queue_limit
@property
def bulk_process_total_queued(self):
"""Gets the bulk_process_total_queued of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The bulk_process_total_queued of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._bulk_process_total_queued
@bulk_process_total_queued.setter
def bulk_process_total_queued(self, bulk_process_total_queued):
"""Sets the bulk_process_total_queued of this BulkSendBatchSummaries.
# noqa: E501
:param bulk_process_total_queued: The bulk_process_total_queued of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._bulk_process_total_queued = bulk_process_total_queued
@property
def end_position(self):
"""Gets the end_position of this BulkSendBatchSummaries. # noqa: E501
The last position in the result set. # noqa: E501
:return: The end_position of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._end_position
@end_position.setter
def end_position(self, end_position):
"""Sets the end_position of this BulkSendBatchSummaries.
The last position in the result set. # noqa: E501
:param end_position: The end_position of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._end_position = end_position
@property
def next_uri(self):
"""Gets the next_uri of this BulkSendBatchSummaries. # noqa: E501
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null. # noqa: E501
:return: The next_uri of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._next_uri
@next_uri.setter
def next_uri(self, next_uri):
"""Sets the next_uri of this BulkSendBatchSummaries.
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null. # noqa: E501
:param next_uri: The next_uri of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._next_uri = next_uri
@property
def previous_uri(self):
"""Gets the previous_uri of this BulkSendBatchSummaries. # noqa: E501
The URI to the previous chunk of records based on the search request. If the startPosition is 0, this is null. # noqa: E501
:return: The previous_uri of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._previous_uri
@previous_uri.setter
def previous_uri(self, previous_uri):
"""Sets the previous_uri of this BulkSendBatchSummaries.
The URI to the previous chunk of records based on the search request. If the startPosition is 0, this is null. # noqa: E501
:param previous_uri: The previous_uri of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._previous_uri = previous_uri
@property
def queue_limit(self):
"""Gets the queue_limit of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The queue_limit of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._queue_limit
@queue_limit.setter
def queue_limit(self, queue_limit):
"""Sets the queue_limit of this BulkSendBatchSummaries.
# noqa: E501
:param queue_limit: The queue_limit of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._queue_limit = queue_limit
@property
def result_set_size(self):
"""Gets the result_set_size of this BulkSendBatchSummaries. # noqa: E501
The number of results returned in this response. # noqa: E501
:return: The result_set_size of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._result_set_size
@result_set_size.setter
def result_set_size(self, result_set_size):
"""Sets the result_set_size of this BulkSendBatchSummaries.
The number of results returned in this response. # noqa: E501
:param result_set_size: The result_set_size of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._result_set_size = result_set_size
@property
def start_position(self):
"""Gets the start_position of this BulkSendBatchSummaries. # noqa: E501
Starting position of the current result set. # noqa: E501
:return: The start_position of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._start_position
@start_position.setter
def start_position(self, start_position):
"""Sets the start_position of this BulkSendBatchSummaries.
Starting position of the current result set. # noqa: E501
:param start_position: The start_position of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._start_position = start_position
@property
def total_queued(self):
"""Gets the total_queued of this BulkSendBatchSummaries. # noqa: E501
# noqa: E501
:return: The total_queued of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._total_queued
@total_queued.setter
def total_queued(self, total_queued):
"""Sets the total_queued of this BulkSendBatchSummaries.
# noqa: E501
:param total_queued: The total_queued of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._total_queued = total_queued
@property
def total_set_size(self):
"""Gets the total_set_size of this BulkSendBatchSummaries. # noqa: E501
The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the response. # noqa: E501
:return: The total_set_size of this BulkSendBatchSummaries. # noqa: E501
:rtype: str
"""
return self._total_set_size
@total_set_size.setter
def total_set_size(self, total_set_size):
"""Sets the total_set_size of this BulkSendBatchSummaries.
The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the response. # noqa: E501
:param total_set_size: The total_set_size of this BulkSendBatchSummaries. # noqa: E501
:type: str
"""
self._total_set_size = total_set_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BulkSendBatchSummaries, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BulkSendBatchSummaries):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BulkSendBatchSummaries):
return True
return self.to_dict() != other.to_dict()
```
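A short sketch of how the paging fields behave (values are made up; note the swagger spec types them as strings): equality is defined via `to_dict()`, so two instances with identical fields compare equal.

```python
from docusign_esign.models.bulk_send_batch_summaries import BulkSendBatchSummaries

page_a = BulkSendBatchSummaries(result_set_size="25", start_position="0", end_position="24")
page_b = BulkSendBatchSummaries(result_set_size="25", start_position="0", end_position="24")

# __eq__/__ne__ compare to_dict() output, not object identity.
assert page_a == page_b
assert page_a.to_dict()["next_uri"] is None  # unset fields serialize as None
```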
#### File: docusign_esign/models/conditional_recipient_rule_filter.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ConditionalRecipientRuleFilter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operator': 'str',
'recipient_id': 'str',
'scope': 'str',
'tab_id': 'str',
'tab_label': 'str',
'tab_type': 'str',
'value': 'str'
}
attribute_map = {
'operator': 'operator',
'recipient_id': 'recipientId',
'scope': 'scope',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_type': 'tabType',
'value': 'value'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ConditionalRecipientRuleFilter - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._operator = None
self._recipient_id = None
self._scope = None
self._tab_id = None
self._tab_label = None
self._tab_type = None
self._value = None
self.discriminator = None
setattr(self, "_{}".format('operator'), kwargs.get('operator', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('scope'), kwargs.get('scope', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
@property
def operator(self):
"""Gets the operator of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this ConditionalRecipientRuleFilter.
# noqa: E501
:param operator: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._operator = operator
@property
def recipient_id(self):
"""Gets the recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:return: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""Sets the recipient_id of this ConditionalRecipientRuleFilter.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:param recipient_id: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._recipient_id = recipient_id
@property
def scope(self):
"""Gets the scope of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this ConditionalRecipientRuleFilter.
# noqa: E501
:param scope: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._scope = scope
@property
def tab_id(self):
"""Gets the tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
The unique identifier for the tab. The tab ID can be retrieved with a GET call. # noqa: E501
:return: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""Sets the tab_id of this ConditionalRecipientRuleFilter.
The unique identifier for the tab. The tab ID can be retrieved with a GET call. # noqa: E501
:param tab_id: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_id = tab_id
@property
def tab_label(self):
"""Gets the tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
The label string associated with the tab. # noqa: E501
:return: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""Sets the tab_label of this ConditionalRecipientRuleFilter.
The label string associated with the tab. # noqa: E501
:param tab_label: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_label = tab_label
@property
def tab_type(self):
"""Gets the tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_type
@tab_type.setter
def tab_type(self, tab_type):
"""Sets the tab_type of this ConditionalRecipientRuleFilter.
# noqa: E501
:param tab_type: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_type = tab_type
@property
def value(self):
"""Gets the value of this ConditionalRecipientRuleFilter. # noqa: E501
Specifies the value of the tab. # noqa: E501
:return: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ConditionalRecipientRuleFilter.
Specifies the value of the tab. # noqa: E501
:param value: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConditionalRecipientRuleFilter, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return True
return self.to_dict() != other.to_dict()
```
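A sketch of a filter that routes on a tab value. The `scope`, `operator`, tab label, and other values below are assumptions for illustration only, not values defined in this file.

```python
from docusign_esign.models.conditional_recipient_rule_filter import ConditionalRecipientRuleFilter

# Hypothetical filter: match when the tab labeled "approved-checkbox" carries the value "true".
rule_filter = ConditionalRecipientRuleFilter(
    scope="tabs",                   # assumed scope value
    recipient_id="1",
    tab_type="checkbox",
    tab_label="approved-checkbox",  # hypothetical tab label
    operator="equals",              # assumed operator value
    value="true",
)
print(rule_filter.to_dict())
```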
#### File: docusign_esign/models/display_appliance_page.py
```python
from pprint import pformat
from six import iteritems
import re
class DisplayAppliancePage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, doc_name=None, document_id=None, external_document_id=None, height=None, is_first_page=None, page_id=None, page_no=None, page_status=None, page_type=None, width=None):
"""
DisplayAppliancePage - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'doc_name': 'str',
'document_id': 'str',
'external_document_id': 'str',
'height': 'int',
'is_first_page': 'bool',
'page_id': 'str',
'page_no': 'int',
'page_status': 'str',
'page_type': 'str',
'width': 'int'
}
self.attribute_map = {
'doc_name': 'docName',
'document_id': 'documentId',
'external_document_id': 'externalDocumentId',
'height': 'height',
'is_first_page': 'isFirstPage',
'page_id': 'pageId',
'page_no': 'pageNo',
'page_status': 'pageStatus',
'page_type': 'pageType',
'width': 'width'
}
self._doc_name = doc_name
self._document_id = document_id
self._external_document_id = external_document_id
self._height = height
self._is_first_page = is_first_page
self._page_id = page_id
self._page_no = page_no
self._page_status = page_status
self._page_type = page_type
self._width = width
@property
def doc_name(self):
"""
Gets the doc_name of this DisplayAppliancePage.
:return: The doc_name of this DisplayAppliancePage.
:rtype: str
"""
return self._doc_name
@doc_name.setter
def doc_name(self, doc_name):
"""
Sets the doc_name of this DisplayAppliancePage.
:param doc_name: The doc_name of this DisplayAppliancePage.
:type: str
"""
self._doc_name = doc_name
@property
def document_id(self):
"""
Gets the document_id of this DisplayAppliancePage.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:return: The document_id of this DisplayAppliancePage.
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""
Sets the document_id of this DisplayAppliancePage.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:param document_id: The document_id of this DisplayAppliancePage.
:type: str
"""
self._document_id = document_id
@property
def external_document_id(self):
"""
Gets the external_document_id of this DisplayAppliancePage.
:return: The external_document_id of this DisplayAppliancePage.
:rtype: str
"""
return self._external_document_id
@external_document_id.setter
def external_document_id(self, external_document_id):
"""
Sets the external_document_id of this DisplayAppliancePage.
:param external_document_id: The external_document_id of this DisplayAppliancePage.
:type: str
"""
self._external_document_id = external_document_id
@property
def height(self):
"""
Gets the height of this DisplayAppliancePage.
Height of the tab in pixels.
:return: The height of this DisplayAppliancePage.
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""
Sets the height of this DisplayAppliancePage.
Height of the tab in pixels.
:param height: The height of this DisplayAppliancePage.
:type: int
"""
self._height = height
@property
def is_first_page(self):
"""
Gets the is_first_page of this DisplayAppliancePage.
:return: The is_first_page of this DisplayAppliancePage.
:rtype: bool
"""
return self._is_first_page
@is_first_page.setter
def is_first_page(self, is_first_page):
"""
Sets the is_first_page of this DisplayAppliancePage.
:param is_first_page: The is_first_page of this DisplayAppliancePage.
:type: bool
"""
self._is_first_page = is_first_page
@property
def page_id(self):
"""
Gets the page_id of this DisplayAppliancePage.
:return: The page_id of this DisplayAppliancePage.
:rtype: str
"""
return self._page_id
@page_id.setter
def page_id(self, page_id):
"""
Sets the page_id of this DisplayAppliancePage.
:param page_id: The page_id of this DisplayAppliancePage.
:type: str
"""
self._page_id = page_id
@property
def page_no(self):
"""
Gets the page_no of this DisplayAppliancePage.
:return: The page_no of this DisplayAppliancePage.
:rtype: int
"""
return self._page_no
@page_no.setter
def page_no(self, page_no):
"""
Sets the page_no of this DisplayAppliancePage.
:param page_no: The page_no of this DisplayAppliancePage.
:type: int
"""
self._page_no = page_no
@property
def page_status(self):
"""
Gets the page_status of this DisplayAppliancePage.
:return: The page_status of this DisplayAppliancePage.
:rtype: str
"""
return self._page_status
@page_status.setter
def page_status(self, page_status):
"""
Sets the page_status of this DisplayAppliancePage.
:param page_status: The page_status of this DisplayAppliancePage.
:type: str
"""
self._page_status = page_status
@property
def page_type(self):
"""
Gets the page_type of this DisplayAppliancePage.
:return: The page_type of this DisplayAppliancePage.
:rtype: str
"""
return self._page_type
@page_type.setter
def page_type(self, page_type):
"""
Sets the page_type of this DisplayAppliancePage.
:param page_type: The page_type of this DisplayAppliancePage.
:type: str
"""
self._page_type = page_type
@property
def width(self):
"""
Gets the width of this DisplayAppliancePage.
Width of the tab in pixels.
:return: The width of this DisplayAppliancePage.
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""
Sets the width of this DisplayAppliancePage.
Width of the tab in pixels.
:param width: The width of this DisplayAppliancePage.
:type: int
"""
self._width = width
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
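Unlike the newer `**kwargs`-based models above, this class takes explicit constructor parameters. A minimal sketch with made-up page data:

```python
from docusign_esign.models.display_appliance_page import DisplayAppliancePage

# 612x792 pt corresponds to US Letter at 72 dpi; all values here are illustrative.
page = DisplayAppliancePage(
    doc_name="contract.pdf",
    document_id="1",
    page_no=1,
    is_first_page=True,
    width=612,
    height=792,
)
assert page.to_dict()["page_no"] == 1
```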
#### File: docusign_esign/models/envelope_template_definition.py
```python
from pprint import pformat
from six import iteritems
import re
class EnvelopeTemplateDefinition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, created=None, description=None, folder_id=None, folder_name=None, folder_uri=None, last_modified=None, last_modified_by=None, name=None, new_password=None, owner=None, page_count=None, parent_folder_uri=None, password=None, shared=None, template_id=None, uri=None):
"""
EnvelopeTemplateDefinition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created': 'str',
'description': 'str',
'folder_id': 'str',
'folder_name': 'str',
'folder_uri': 'str',
'last_modified': 'str',
'last_modified_by': 'UserInfo',
'name': 'str',
'new_password': 'str',
'owner': 'UserInfo',
'page_count': 'int',
'parent_folder_uri': 'str',
'password': 'str',
'shared': 'str',
'template_id': 'str',
'uri': 'str'
}
self.attribute_map = {
'created': 'created',
'description': 'description',
'folder_id': 'folderId',
'folder_name': 'folderName',
'folder_uri': 'folderUri',
'last_modified': 'lastModified',
'last_modified_by': 'lastModifiedBy',
'name': 'name',
'new_password': 'newPassword',
'owner': 'owner',
'page_count': 'pageCount',
'parent_folder_uri': 'parentFolderUri',
'password': 'password',
'shared': 'shared',
'template_id': 'templateId',
'uri': 'uri'
}
self._created = created
self._description = description
self._folder_id = folder_id
self._folder_name = folder_name
self._folder_uri = folder_uri
self._last_modified = last_modified
self._last_modified_by = last_modified_by
self._name = name
self._new_password = new_password
self._owner = owner
self._page_count = page_count
self._parent_folder_uri = parent_folder_uri
self._password = password
self._shared = shared
self._template_id = template_id
self._uri = uri
@property
def created(self):
"""
Gets the created of this EnvelopeTemplateDefinition.
:return: The created of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this EnvelopeTemplateDefinition.
:param created: The created of this EnvelopeTemplateDefinition.
:type: str
"""
self._created = created
@property
def description(self):
"""
Gets the description of this EnvelopeTemplateDefinition.
:return: The description of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this EnvelopeTemplateDefinition.
:param description: The description of this EnvelopeTemplateDefinition.
:type: str
"""
self._description = description
@property
def folder_id(self):
"""
Gets the folder_id of this EnvelopeTemplateDefinition.
The ID for the folder.
:return: The folder_id of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""
Sets the folder_id of this EnvelopeTemplateDefinition.
The ID for the folder.
:param folder_id: The folder_id of this EnvelopeTemplateDefinition.
:type: str
"""
self._folder_id = folder_id
@property
def folder_name(self):
"""
Gets the folder_name of this EnvelopeTemplateDefinition.
The name of the folder in which the template is located.
:return: The folder_name of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._folder_name
@folder_name.setter
def folder_name(self, folder_name):
"""
Sets the folder_name of this EnvelopeTemplateDefinition.
The name of the folder in which the template is located.
:param folder_name: The folder_name of this EnvelopeTemplateDefinition.
:type: str
"""
self._folder_name = folder_name
@property
def folder_uri(self):
"""
Gets the folder_uri of this EnvelopeTemplateDefinition.
The URI of the folder.
:return: The folder_uri of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._folder_uri
@folder_uri.setter
def folder_uri(self, folder_uri):
"""
Sets the folder_uri of this EnvelopeTemplateDefinition.
The URI of the folder.
:param folder_uri: The folder_uri of this EnvelopeTemplateDefinition.
:type: str
"""
self._folder_uri = folder_uri
@property
def last_modified(self):
"""
Gets the last_modified of this EnvelopeTemplateDefinition.
:return: The last_modified of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""
Sets the last_modified of this EnvelopeTemplateDefinition.
:param last_modified: The last_modified of this EnvelopeTemplateDefinition.
:type: str
"""
self._last_modified = last_modified
@property
def last_modified_by(self):
"""
Gets the last_modified_by of this EnvelopeTemplateDefinition.
:return: The last_modified_by of this EnvelopeTemplateDefinition.
:rtype: UserInfo
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""
Sets the last_modified_by of this EnvelopeTemplateDefinition.
:param last_modified_by: The last_modified_by of this EnvelopeTemplateDefinition.
:type: UserInfo
"""
self._last_modified_by = last_modified_by
@property
def name(self):
"""
Gets the name of this EnvelopeTemplateDefinition.
:return: The name of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this EnvelopeTemplateDefinition.
:param name: The name of this EnvelopeTemplateDefinition.
:type: str
"""
self._name = name
@property
def new_password(self):
"""
Gets the new_password of this EnvelopeTemplateDefinition.
:return: The new_password of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._new_password
@new_password.setter
def new_password(self, new_password):
"""
Sets the new_password of this EnvelopeTemplateDefinition.
:param new_password: The new_password of this EnvelopeTemplateDefinition.
:type: str
"""
self._new_password = new_password
@property
def owner(self):
"""
Gets the owner of this EnvelopeTemplateDefinition.
:return: The owner of this EnvelopeTemplateDefinition.
:rtype: UserInfo
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this EnvelopeTemplateDefinition.
:param owner: The owner of this EnvelopeTemplateDefinition.
:type: UserInfo
"""
self._owner = owner
@property
def page_count(self):
"""
Gets the page_count of this EnvelopeTemplateDefinition.
An integer value specifying the number of document pages in the template. Omit this property if not submitting a page count.
:return: The page_count of this EnvelopeTemplateDefinition.
:rtype: int
"""
return self._page_count
@page_count.setter
def page_count(self, page_count):
"""
Sets the page_count of this EnvelopeTemplateDefinition.
An integer value specifying the number of document pages in the template. Omit this property if not submitting a page count.
:param page_count: The page_count of this EnvelopeTemplateDefinition.
:type: int
"""
self._page_count = page_count
@property
def parent_folder_uri(self):
"""
Gets the parent_folder_uri of this EnvelopeTemplateDefinition.
:return: The parent_folder_uri of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._parent_folder_uri
@parent_folder_uri.setter
def parent_folder_uri(self, parent_folder_uri):
"""
Sets the parent_folder_uri of this EnvelopeTemplateDefinition.
:param parent_folder_uri: The parent_folder_uri of this EnvelopeTemplateDefinition.
:type: str
"""
self._parent_folder_uri = parent_folder_uri
@property
def password(self):
"""
Gets the password of this EnvelopeTemplateDefinition.
:return: The password of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""
Sets the password of this EnvelopeTemplateDefinition.
:param password: The password of this EnvelopeTemplateDefinition.
:type: str
"""
self._password = password
@property
def shared(self):
"""
Gets the shared of this EnvelopeTemplateDefinition.
When set to **true**, this template is shared.
:return: The shared of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._shared
@shared.setter
def shared(self, shared):
"""
Sets the shared of this EnvelopeTemplateDefinition.
When set to **true**, this template is shared.
:param shared: The shared of this EnvelopeTemplateDefinition.
:type: str
"""
self._shared = shared
@property
def template_id(self):
"""
Gets the template_id of this EnvelopeTemplateDefinition.
The unique identifier of the template. If this is not provided, DocuSign will generate a value.
:return: The template_id of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""
Sets the template_id of this EnvelopeTemplateDefinition.
The unique identifier of the template. If this is not provided, DocuSign will generate a value.
:param template_id: The template_id of this EnvelopeTemplateDefinition.
:type: str
"""
self._template_id = template_id
@property
def uri(self):
"""
Gets the uri of this EnvelopeTemplateDefinition.
:return: The uri of this EnvelopeTemplateDefinition.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this EnvelopeTemplateDefinition.
:param uri: The uri of this EnvelopeTemplateDefinition.
:type: str
"""
self._uri = uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
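A quick sketch showing that `to_dict()` keys use the snake_case attribute names from `swagger_types` rather than the camelCase JSON keys in `attribute_map` (the template name is made up):

```python
from docusign_esign.models.envelope_template_definition import EnvelopeTemplateDefinition

definition = EnvelopeTemplateDefinition(name="NDA template", shared="false", page_count=3)

# Serialized keys come from swagger_types (snake_case), not attribute_map (camelCase).
d = definition.to_dict()
assert "folder_uri" in d and "folderUri" not in d
```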
#### File: docusign_esign/models/folder.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Folder(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_details': 'ErrorDetails',
'filter': 'Filter',
'folder_id': 'str',
'folder_items': 'list[FolderItemV2]',
'folders': 'list[Folder]',
'has_access': 'str',
'has_sub_folders': 'str',
'item_count': 'str',
'name': 'str',
'owner': 'UserInfo',
'parent_folder_id': 'str',
'parent_folder_uri': 'str',
'sub_folder_count': 'str',
'type': 'str',
'uri': 'str'
}
attribute_map = {
'error_details': 'errorDetails',
'filter': 'filter',
'folder_id': 'folderId',
'folder_items': 'folderItems',
'folders': 'folders',
'has_access': 'hasAccess',
'has_sub_folders': 'hasSubFolders',
'item_count': 'itemCount',
'name': 'name',
'owner': 'owner',
'parent_folder_id': 'parentFolderId',
'parent_folder_uri': 'parentFolderUri',
'sub_folder_count': 'subFolderCount',
'type': 'type',
'uri': 'uri'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Folder - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._error_details = None
self._filter = None
self._folder_id = None
self._folder_items = None
self._folders = None
self._has_access = None
self._has_sub_folders = None
self._item_count = None
self._name = None
self._owner = None
self._parent_folder_id = None
self._parent_folder_uri = None
self._sub_folder_count = None
self._type = None
self._uri = None
self.discriminator = None
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('filter'), kwargs.get('filter', None))
setattr(self, "_{}".format('folder_id'), kwargs.get('folder_id', None))
setattr(self, "_{}".format('folder_items'), kwargs.get('folder_items', None))
setattr(self, "_{}".format('folders'), kwargs.get('folders', None))
setattr(self, "_{}".format('has_access'), kwargs.get('has_access', None))
setattr(self, "_{}".format('has_sub_folders'), kwargs.get('has_sub_folders', None))
setattr(self, "_{}".format('item_count'), kwargs.get('item_count', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('owner'), kwargs.get('owner', None))
setattr(self, "_{}".format('parent_folder_id'), kwargs.get('parent_folder_id', None))
setattr(self, "_{}".format('parent_folder_uri'), kwargs.get('parent_folder_uri', None))
setattr(self, "_{}".format('sub_folder_count'), kwargs.get('sub_folder_count', None))
setattr(self, "_{}".format('type'), kwargs.get('type', None))
setattr(self, "_{}".format('uri'), kwargs.get('uri', None))
@property
def error_details(self):
"""Gets the error_details of this Folder. # noqa: E501
:return: The error_details of this Folder. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this Folder.
:param error_details: The error_details of this Folder. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def filter(self):
"""Gets the filter of this Folder. # noqa: E501
:return: The filter of this Folder. # noqa: E501
:rtype: Filter
"""
return self._filter
@filter.setter
def filter(self, filter):
"""Sets the filter of this Folder.
:param filter: The filter of this Folder. # noqa: E501
:type: Filter
"""
self._filter = filter
@property
def folder_id(self):
"""Gets the folder_id of this Folder. # noqa: E501
# noqa: E501
:return: The folder_id of this Folder. # noqa: E501
:rtype: str
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this Folder.
# noqa: E501
:param folder_id: The folder_id of this Folder. # noqa: E501
:type: str
"""
self._folder_id = folder_id
@property
def folder_items(self):
"""Gets the folder_items of this Folder. # noqa: E501
A list of the envelopes in the specified folder or folders. # noqa: E501
:return: The folder_items of this Folder. # noqa: E501
:rtype: list[FolderItemV2]
"""
return self._folder_items
@folder_items.setter
def folder_items(self, folder_items):
"""Sets the folder_items of this Folder.
A list of the envelopes in the specified folder or folders. # noqa: E501
:param folder_items: The folder_items of this Folder. # noqa: E501
:type: list[FolderItemV2]
"""
self._folder_items = folder_items
@property
def folders(self):
"""Gets the folders of this Folder. # noqa: E501
A collection of folder objects returned in a response. # noqa: E501
:return: The folders of this Folder. # noqa: E501
:rtype: list[Folder]
"""
return self._folders
@folders.setter
def folders(self, folders):
"""Sets the folders of this Folder.
A collection of folder objects returned in a response. # noqa: E501
:param folders: The folders of this Folder. # noqa: E501
:type: list[Folder]
"""
self._folders = folders
@property
def has_access(self):
"""Gets the has_access of this Folder. # noqa: E501
# noqa: E501
:return: The has_access of this Folder. # noqa: E501
:rtype: str
"""
return self._has_access
@has_access.setter
def has_access(self, has_access):
"""Sets the has_access of this Folder.
# noqa: E501
:param has_access: The has_access of this Folder. # noqa: E501
:type: str
"""
self._has_access = has_access
@property
def has_sub_folders(self):
"""Gets the has_sub_folders of this Folder. # noqa: E501
# noqa: E501
:return: The has_sub_folders of this Folder. # noqa: E501
:rtype: str
"""
return self._has_sub_folders
@has_sub_folders.setter
def has_sub_folders(self, has_sub_folders):
"""Sets the has_sub_folders of this Folder.
# noqa: E501
:param has_sub_folders: The has_sub_folders of this Folder. # noqa: E501
:type: str
"""
self._has_sub_folders = has_sub_folders
@property
def item_count(self):
"""Gets the item_count of this Folder. # noqa: E501
# noqa: E501
:return: The item_count of this Folder. # noqa: E501
:rtype: str
"""
return self._item_count
@item_count.setter
def item_count(self, item_count):
"""Sets the item_count of this Folder.
# noqa: E501
:param item_count: The item_count of this Folder. # noqa: E501
:type: str
"""
self._item_count = item_count
@property
def name(self):
"""Gets the name of this Folder. # noqa: E501
# noqa: E501
:return: The name of this Folder. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Folder.
# noqa: E501
:param name: The name of this Folder. # noqa: E501
:type: str
"""
self._name = name
@property
def owner(self):
"""Gets the owner of this Folder. # noqa: E501
:return: The owner of this Folder. # noqa: E501
:rtype: UserInfo
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this Folder.
:param owner: The owner of this Folder. # noqa: E501
:type: UserInfo
"""
self._owner = owner
@property
def parent_folder_id(self):
"""Gets the parent_folder_id of this Folder. # noqa: E501
# noqa: E501
:return: The parent_folder_id of this Folder. # noqa: E501
:rtype: str
"""
return self._parent_folder_id
@parent_folder_id.setter
def parent_folder_id(self, parent_folder_id):
"""Sets the parent_folder_id of this Folder.
# noqa: E501
:param parent_folder_id: The parent_folder_id of this Folder. # noqa: E501
:type: str
"""
self._parent_folder_id = parent_folder_id
@property
def parent_folder_uri(self):
"""Gets the parent_folder_uri of this Folder. # noqa: E501
# noqa: E501
:return: The parent_folder_uri of this Folder. # noqa: E501
:rtype: str
"""
return self._parent_folder_uri
@parent_folder_uri.setter
def parent_folder_uri(self, parent_folder_uri):
"""Sets the parent_folder_uri of this Folder.
# noqa: E501
:param parent_folder_uri: The parent_folder_uri of this Folder. # noqa: E501
:type: str
"""
self._parent_folder_uri = parent_folder_uri
@property
def sub_folder_count(self):
"""Gets the sub_folder_count of this Folder. # noqa: E501
# noqa: E501
:return: The sub_folder_count of this Folder. # noqa: E501
:rtype: str
"""
return self._sub_folder_count
@sub_folder_count.setter
def sub_folder_count(self, sub_folder_count):
"""Sets the sub_folder_count of this Folder.
# noqa: E501
:param sub_folder_count: The sub_folder_count of this Folder. # noqa: E501
:type: str
"""
self._sub_folder_count = sub_folder_count
@property
def type(self):
"""Gets the type of this Folder. # noqa: E501
# noqa: E501
:return: The type of this Folder. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Folder.
# noqa: E501
:param type: The type of this Folder. # noqa: E501
:type: str
"""
self._type = type
@property
def uri(self):
"""Gets the uri of this Folder. # noqa: E501
# noqa: E501
:return: The uri of this Folder. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this Folder.
# noqa: E501
:param uri: The uri of this Folder. # noqa: E501
:type: str
"""
self._uri = uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Folder, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Folder):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Folder):
return True
return self.to_dict() != other.to_dict()
```
#### File: docusign_esign/models/provisioning_information.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ProvisioningInformation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'default_connection_id': 'str',
'default_plan_id': 'str',
'distributor_code': 'str',
        'distributor_password': 'str',
'password_rule_text': 'str',
'plan_promotion_text': 'str',
'purchase_order_or_prom_allowed': 'str'
}
attribute_map = {
'default_connection_id': 'defaultConnectionId',
'default_plan_id': 'defaultPlanId',
'distributor_code': 'distributorCode',
        'distributor_password': 'distributorPassword',
        'password_rule_text': 'passwordRuleText',
'plan_promotion_text': 'planPromotionText',
'purchase_order_or_prom_allowed': 'purchaseOrderOrPromAllowed'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ProvisioningInformation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._default_connection_id = None
self._default_plan_id = None
self._distributor_code = None
self._distributor_password = None
self._password_rule_text = None
self._plan_promotion_text = None
self._purchase_order_or_prom_allowed = None
self.discriminator = None
setattr(self, "_{}".format('default_connection_id'), kwargs.get('default_connection_id', None))
setattr(self, "_{}".format('default_plan_id'), kwargs.get('default_plan_id', None))
setattr(self, "_{}".format('distributor_code'), kwargs.get('distributor_code', None))
setattr(self, "_{}".format('distributor_password'), kwargs.get('distributor_password', None))
setattr(self, "_{}".format('password_rule_text'), kwargs.get('password_rule_text', None))
setattr(self, "_{}".format('plan_promotion_text'), kwargs.get('plan_promotion_text', None))
setattr(self, "_{}".format('purchase_order_or_prom_allowed'), kwargs.get('purchase_order_or_prom_allowed', None))
@property
def default_connection_id(self):
"""Gets the default_connection_id of this ProvisioningInformation. # noqa: E501
# noqa: E501
:return: The default_connection_id of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._default_connection_id
@default_connection_id.setter
def default_connection_id(self, default_connection_id):
"""Sets the default_connection_id of this ProvisioningInformation.
# noqa: E501
:param default_connection_id: The default_connection_id of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._default_connection_id = default_connection_id
@property
def default_plan_id(self):
"""Gets the default_plan_id of this ProvisioningInformation. # noqa: E501
# noqa: E501
:return: The default_plan_id of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._default_plan_id
@default_plan_id.setter
def default_plan_id(self, default_plan_id):
"""Sets the default_plan_id of this ProvisioningInformation.
# noqa: E501
:param default_plan_id: The default_plan_id of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._default_plan_id = default_plan_id
@property
def distributor_code(self):
"""Gets the distributor_code of this ProvisioningInformation. # noqa: E501
The code that identifies the billing plan groups and plans for the new account. # noqa: E501
:return: The distributor_code of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._distributor_code
@distributor_code.setter
def distributor_code(self, distributor_code):
"""Sets the distributor_code of this ProvisioningInformation.
The code that identifies the billing plan groups and plans for the new account. # noqa: E501
:param distributor_code: The distributor_code of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._distributor_code = distributor_code
@property
def distributor_password(self):
"""Gets the distributor_password of this ProvisioningInformation. # noqa: E501
The password for the distributorCode. # noqa: E501
:return: The distributor_password of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._distributor_password
@distributor_password.setter
def distributor_password(self, distributor_password):
"""Sets the distributor_password of this ProvisioningInformation.
The password for the distributorCode. # noqa: E501
:param distributor_password: The distributor_password of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._distributor_password = distributor_password
@property
def password_rule_text(self):
"""Gets the password_rule_text of this ProvisioningInformation. # noqa: E501
# noqa: E501
:return: The password_rule_text of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._password_rule_text
@password_rule_text.setter
def password_rule_text(self, password_rule_text):
"""Sets the password_rule_text of this ProvisioningInformation.
# noqa: E501
:param password_rule_text: The password_rule_text of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._password_rule_text = password_rule_text
@property
def plan_promotion_text(self):
"""Gets the plan_promotion_text of this ProvisioningInformation. # noqa: E501
# noqa: E501
:return: The plan_promotion_text of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._plan_promotion_text
@plan_promotion_text.setter
def plan_promotion_text(self, plan_promotion_text):
"""Sets the plan_promotion_text of this ProvisioningInformation.
# noqa: E501
:param plan_promotion_text: The plan_promotion_text of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._plan_promotion_text = plan_promotion_text
@property
def purchase_order_or_prom_allowed(self):
"""Gets the purchase_order_or_prom_allowed of this ProvisioningInformation. # noqa: E501
# noqa: E501
:return: The purchase_order_or_prom_allowed of this ProvisioningInformation. # noqa: E501
:rtype: str
"""
return self._purchase_order_or_prom_allowed
@purchase_order_or_prom_allowed.setter
def purchase_order_or_prom_allowed(self, purchase_order_or_prom_allowed):
"""Sets the purchase_order_or_prom_allowed of this ProvisioningInformation.
# noqa: E501
:param purchase_order_or_prom_allowed: The purchase_order_or_prom_allowed of this ProvisioningInformation. # noqa: E501
:type: str
"""
self._purchase_order_or_prom_allowed = purchase_order_or_prom_allowed
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProvisioningInformation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProvisioningInformation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ProvisioningInformation):
return True
return self.to_dict() != other.to_dict()
```
#### File: docusign_esign/models/usage_history.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class UsageHistory(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_sent_date_time': 'str',
'last_signed_date_time': 'str',
'sent_count': 'str',
'signed_count': 'str'
}
attribute_map = {
'last_sent_date_time': 'lastSentDateTime',
'last_signed_date_time': 'lastSignedDateTime',
'sent_count': 'sentCount',
'signed_count': 'signedCount'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""UsageHistory - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._last_sent_date_time = None
self._last_signed_date_time = None
self._sent_count = None
self._signed_count = None
self.discriminator = None
setattr(self, "_{}".format('last_sent_date_time'), kwargs.get('last_sent_date_time', None))
setattr(self, "_{}".format('last_signed_date_time'), kwargs.get('last_signed_date_time', None))
setattr(self, "_{}".format('sent_count'), kwargs.get('sent_count', None))
setattr(self, "_{}".format('signed_count'), kwargs.get('signed_count', None))
@property
def last_sent_date_time(self):
"""Gets the last_sent_date_time of this UsageHistory. # noqa: E501
The date and time the user last sent an envelope. # noqa: E501
:return: The last_sent_date_time of this UsageHistory. # noqa: E501
:rtype: str
"""
return self._last_sent_date_time
@last_sent_date_time.setter
def last_sent_date_time(self, last_sent_date_time):
"""Sets the last_sent_date_time of this UsageHistory.
The date and time the user last sent an envelope. # noqa: E501
:param last_sent_date_time: The last_sent_date_time of this UsageHistory. # noqa: E501
:type: str
"""
self._last_sent_date_time = last_sent_date_time
@property
def last_signed_date_time(self):
"""Gets the last_signed_date_time of this UsageHistory. # noqa: E501
The date and time the user last signed an envelope. # noqa: E501
:return: The last_signed_date_time of this UsageHistory. # noqa: E501
:rtype: str
"""
return self._last_signed_date_time
@last_signed_date_time.setter
def last_signed_date_time(self, last_signed_date_time):
"""Sets the last_signed_date_time of this UsageHistory.
The date and time the user last signed an envelope. # noqa: E501
:param last_signed_date_time: The last_signed_date_time of this UsageHistory. # noqa: E501
:type: str
"""
self._last_signed_date_time = last_signed_date_time
@property
def sent_count(self):
"""Gets the sent_count of this UsageHistory. # noqa: E501
The number of envelopes the user has sent. # noqa: E501
:return: The sent_count of this UsageHistory. # noqa: E501
:rtype: str
"""
return self._sent_count
@sent_count.setter
def sent_count(self, sent_count):
"""Sets the sent_count of this UsageHistory.
The number of envelopes the user has sent. # noqa: E501
:param sent_count: The sent_count of this UsageHistory. # noqa: E501
:type: str
"""
self._sent_count = sent_count
@property
def signed_count(self):
"""Gets the signed_count of this UsageHistory. # noqa: E501
The number of envelopes the user has signed. # noqa: E501
:return: The signed_count of this UsageHistory. # noqa: E501
:rtype: str
"""
return self._signed_count
@signed_count.setter
def signed_count(self, signed_count):
"""Sets the signed_count of this UsageHistory.
The number of envelopes the user has signed. # noqa: E501
:param signed_count: The signed_count of this UsageHistory. # noqa: E501
:type: str
"""
self._signed_count = signed_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UsageHistory, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UsageHistory):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UsageHistory):
return True
return self.to_dict() != other.to_dict()
```
#### File: docusign_esign/models/watermark.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Watermark(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'display_angle': 'str',
'enabled': 'str',
'font': 'str',
'font_color': 'str',
'font_size': 'str',
'id': 'str',
'image_base64': 'str',
'transparency': 'str',
'watermark_text': 'str'
}
attribute_map = {
'display_angle': 'displayAngle',
'enabled': 'enabled',
'font': 'font',
'font_color': 'fontColor',
'font_size': 'fontSize',
'id': 'id',
'image_base64': 'imageBase64',
'transparency': 'transparency',
'watermark_text': 'watermarkText'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Watermark - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._display_angle = None
self._enabled = None
self._font = None
self._font_color = None
self._font_size = None
self._id = None
self._image_base64 = None
self._transparency = None
self._watermark_text = None
self.discriminator = None
setattr(self, "_{}".format('display_angle'), kwargs.get('display_angle', None))
setattr(self, "_{}".format('enabled'), kwargs.get('enabled', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('id'), kwargs.get('id', None))
setattr(self, "_{}".format('image_base64'), kwargs.get('image_base64', None))
setattr(self, "_{}".format('transparency'), kwargs.get('transparency', None))
setattr(self, "_{}".format('watermark_text'), kwargs.get('watermark_text', None))
@property
def display_angle(self):
"""Gets the display_angle of this Watermark. # noqa: E501
# noqa: E501
:return: The display_angle of this Watermark. # noqa: E501
:rtype: str
"""
return self._display_angle
@display_angle.setter
def display_angle(self, display_angle):
"""Sets the display_angle of this Watermark.
# noqa: E501
:param display_angle: The display_angle of this Watermark. # noqa: E501
:type: str
"""
self._display_angle = display_angle
@property
def enabled(self):
"""Gets the enabled of this Watermark. # noqa: E501
# noqa: E501
:return: The enabled of this Watermark. # noqa: E501
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this Watermark.
# noqa: E501
:param enabled: The enabled of this Watermark. # noqa: E501
:type: str
"""
self._enabled = enabled
@property
def font(self):
"""Gets the font of this Watermark. # noqa: E501
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:return: The font of this Watermark. # noqa: E501
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""Sets the font of this Watermark.
The font to be used for the tab value. Supported Fonts: Arial, Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:param font: The font of this Watermark. # noqa: E501
:type: str
"""
self._font = font
@property
def font_color(self):
"""Gets the font_color of this Watermark. # noqa: E501
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:return: The font_color of this Watermark. # noqa: E501
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""Sets the font_color of this Watermark.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:param font_color: The font_color of this Watermark. # noqa: E501
:type: str
"""
self._font_color = font_color
@property
def font_size(self):
"""Gets the font_size of this Watermark. # noqa: E501
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:return: The font_size of this Watermark. # noqa: E501
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""Sets the font_size of this Watermark.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:param font_size: The font_size of this Watermark. # noqa: E501
:type: str
"""
self._font_size = font_size
@property
def id(self):
"""Gets the id of this Watermark. # noqa: E501
# noqa: E501
:return: The id of this Watermark. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Watermark.
# noqa: E501
:param id: The id of this Watermark. # noqa: E501
:type: str
"""
self._id = id
@property
def image_base64(self):
"""Gets the image_base64 of this Watermark. # noqa: E501
# noqa: E501
:return: The image_base64 of this Watermark. # noqa: E501
:rtype: str
"""
return self._image_base64
@image_base64.setter
def image_base64(self, image_base64):
"""Sets the image_base64 of this Watermark.
# noqa: E501
:param image_base64: The image_base64 of this Watermark. # noqa: E501
:type: str
"""
self._image_base64 = image_base64
@property
def transparency(self):
"""Gets the transparency of this Watermark. # noqa: E501
# noqa: E501
:return: The transparency of this Watermark. # noqa: E501
:rtype: str
"""
return self._transparency
@transparency.setter
def transparency(self, transparency):
"""Sets the transparency of this Watermark.
# noqa: E501
:param transparency: The transparency of this Watermark. # noqa: E501
:type: str
"""
self._transparency = transparency
@property
def watermark_text(self):
"""Gets the watermark_text of this Watermark. # noqa: E501
# noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
"""
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
"""Sets the watermark_text of this Watermark.
# noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
"""
self._watermark_text = watermark_text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Watermark):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Watermark):
return True
return self.to_dict() != other.to_dict()
```
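A quick illustration of how these generated models are typically used (a minimal sketch; the values are placeholders, and the account-level client call that would actually send the watermark to the API is omitted):

```python
from docusign_esign.models.watermark import Watermark

# The generated __init__ accepts the snake_case attributes as keyword arguments.
watermark = Watermark(
    watermark_text="CONFIDENTIAL",
    enabled="true",        # the generated models expose booleans and numbers as strings
    font="Helvetica",
    font_color="Black",
    font_size="Size24",
    display_angle="45",
    transparency="50",
)

# to_dict() walks swagger_types and returns a plain, JSON-serializable dict.
print(watermark.to_dict()["watermark_text"])  # -> CONFIDENTIAL
```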
#### File: docusign_esign/models/workspace.py
```python
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Workspace(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'billable_account_id': 'str',
'caller_information': 'WorkspaceUser',
'created': 'str',
'created_by_information': 'WorkspaceUser',
'last_modified': 'str',
'last_modified_by_information': 'WorkspaceUser',
'settings': 'WorkspaceSettings',
'status': 'str',
'workspace_base_url': 'str',
'workspace_description': 'str',
'workspace_id': 'str',
'workspace_name': 'str',
'workspace_uri': 'str'
}
attribute_map = {
'billable_account_id': 'billableAccountId',
'caller_information': 'callerInformation',
'created': 'created',
'created_by_information': 'createdByInformation',
'last_modified': 'lastModified',
'last_modified_by_information': 'lastModifiedByInformation',
'settings': 'settings',
'status': 'status',
'workspace_base_url': 'workspaceBaseUrl',
'workspace_description': 'workspaceDescription',
'workspace_id': 'workspaceId',
'workspace_name': 'workspaceName',
'workspace_uri': 'workspaceUri'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Workspace - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._billable_account_id = None
self._caller_information = None
self._created = None
self._created_by_information = None
self._last_modified = None
self._last_modified_by_information = None
self._settings = None
self._status = None
self._workspace_base_url = None
self._workspace_description = None
self._workspace_id = None
self._workspace_name = None
self._workspace_uri = None
self.discriminator = None
setattr(self, "_{}".format('billable_account_id'), kwargs.get('billable_account_id', None))
setattr(self, "_{}".format('caller_information'), kwargs.get('caller_information', None))
setattr(self, "_{}".format('created'), kwargs.get('created', None))
setattr(self, "_{}".format('created_by_information'), kwargs.get('created_by_information', None))
setattr(self, "_{}".format('last_modified'), kwargs.get('last_modified', None))
setattr(self, "_{}".format('last_modified_by_information'), kwargs.get('last_modified_by_information', None))
setattr(self, "_{}".format('settings'), kwargs.get('settings', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('workspace_base_url'), kwargs.get('workspace_base_url', None))
setattr(self, "_{}".format('workspace_description'), kwargs.get('workspace_description', None))
setattr(self, "_{}".format('workspace_id'), kwargs.get('workspace_id', None))
setattr(self, "_{}".format('workspace_name'), kwargs.get('workspace_name', None))
setattr(self, "_{}".format('workspace_uri'), kwargs.get('workspace_uri', None))
@property
def billable_account_id(self):
"""Gets the billable_account_id of this Workspace. # noqa: E501
# noqa: E501
:return: The billable_account_id of this Workspace. # noqa: E501
:rtype: str
"""
return self._billable_account_id
@billable_account_id.setter
def billable_account_id(self, billable_account_id):
"""Sets the billable_account_id of this Workspace.
# noqa: E501
:param billable_account_id: The billable_account_id of this Workspace. # noqa: E501
:type: str
"""
self._billable_account_id = billable_account_id
@property
def caller_information(self):
"""Gets the caller_information of this Workspace. # noqa: E501
:return: The caller_information of this Workspace. # noqa: E501
:rtype: WorkspaceUser
"""
return self._caller_information
@caller_information.setter
def caller_information(self, caller_information):
"""Sets the caller_information of this Workspace.
:param caller_information: The caller_information of this Workspace. # noqa: E501
:type: WorkspaceUser
"""
self._caller_information = caller_information
@property
def created(self):
"""Gets the created of this Workspace. # noqa: E501
# noqa: E501
:return: The created of this Workspace. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Workspace.
# noqa: E501
:param created: The created of this Workspace. # noqa: E501
:type: str
"""
self._created = created
@property
def created_by_information(self):
"""Gets the created_by_information of this Workspace. # noqa: E501
:return: The created_by_information of this Workspace. # noqa: E501
:rtype: WorkspaceUser
"""
return self._created_by_information
@created_by_information.setter
def created_by_information(self, created_by_information):
"""Sets the created_by_information of this Workspace.
:param created_by_information: The created_by_information of this Workspace. # noqa: E501
:type: WorkspaceUser
"""
self._created_by_information = created_by_information
@property
def last_modified(self):
"""Gets the last_modified of this Workspace. # noqa: E501
Utc date and time the comment was last updated (can only be done by creator.) # noqa: E501
:return: The last_modified of this Workspace. # noqa: E501
:rtype: str
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this Workspace.
Utc date and time the comment was last updated (can only be done by creator.) # noqa: E501
:param last_modified: The last_modified of this Workspace. # noqa: E501
:type: str
"""
self._last_modified = last_modified
@property
def last_modified_by_information(self):
"""Gets the last_modified_by_information of this Workspace. # noqa: E501
:return: The last_modified_by_information of this Workspace. # noqa: E501
:rtype: WorkspaceUser
"""
return self._last_modified_by_information
@last_modified_by_information.setter
def last_modified_by_information(self, last_modified_by_information):
"""Sets the last_modified_by_information of this Workspace.
:param last_modified_by_information: The last_modified_by_information of this Workspace. # noqa: E501
:type: WorkspaceUser
"""
self._last_modified_by_information = last_modified_by_information
@property
def settings(self):
"""Gets the settings of this Workspace. # noqa: E501
:return: The settings of this Workspace. # noqa: E501
:rtype: WorkspaceSettings
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this Workspace.
:param settings: The settings of this Workspace. # noqa: E501
:type: WorkspaceSettings
"""
self._settings = settings
@property
def status(self):
"""Gets the status of this Workspace. # noqa: E501
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:return: The status of this Workspace. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Workspace.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:param status: The status of this Workspace. # noqa: E501
:type: str
"""
self._status = status
@property
def workspace_base_url(self):
"""Gets the workspace_base_url of this Workspace. # noqa: E501
The relative URL that may be used to access the workspace. # noqa: E501
:return: The workspace_base_url of this Workspace. # noqa: E501
:rtype: str
"""
return self._workspace_base_url
@workspace_base_url.setter
def workspace_base_url(self, workspace_base_url):
"""Sets the workspace_base_url of this Workspace.
The relative URL that may be used to access the workspace. # noqa: E501
:param workspace_base_url: The workspace_base_url of this Workspace. # noqa: E501
:type: str
"""
self._workspace_base_url = workspace_base_url
@property
def workspace_description(self):
"""Gets the workspace_description of this Workspace. # noqa: E501
Text describing the purpose of the workspace. # noqa: E501
:return: The workspace_description of this Workspace. # noqa: E501
:rtype: str
"""
return self._workspace_description
@workspace_description.setter
def workspace_description(self, workspace_description):
"""Sets the workspace_description of this Workspace.
Text describing the purpose of the workspace. # noqa: E501
:param workspace_description: The workspace_description of this Workspace. # noqa: E501
:type: str
"""
self._workspace_description = workspace_description
@property
def workspace_id(self):
"""Gets the workspace_id of this Workspace. # noqa: E501
The id of the workspace, always populated. # noqa: E501
:return: The workspace_id of this Workspace. # noqa: E501
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this Workspace.
The id of the workspace, always populated. # noqa: E501
:param workspace_id: The workspace_id of this Workspace. # noqa: E501
:type: str
"""
self._workspace_id = workspace_id
@property
def workspace_name(self):
"""Gets the workspace_name of this Workspace. # noqa: E501
The name of the workspace. # noqa: E501
:return: The workspace_name of this Workspace. # noqa: E501
:rtype: str
"""
return self._workspace_name
@workspace_name.setter
def workspace_name(self, workspace_name):
"""Sets the workspace_name of this Workspace.
The name of the workspace. # noqa: E501
:param workspace_name: The workspace_name of this Workspace. # noqa: E501
:type: str
"""
self._workspace_name = workspace_name
@property
def workspace_uri(self):
"""Gets the workspace_uri of this Workspace. # noqa: E501
The relative URI that may be used to access the workspace. # noqa: E501
:return: The workspace_uri of this Workspace. # noqa: E501
:rtype: str
"""
return self._workspace_uri
@workspace_uri.setter
def workspace_uri(self, workspace_uri):
"""Sets the workspace_uri of this Workspace.
The relative URI that may be used to access the workspace. # noqa: E501
:param workspace_uri: The workspace_uri of this Workspace. # noqa: E501
:type: str
"""
self._workspace_uri = workspace_uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Workspace, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Workspace):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Workspace):
return True
return self.to_dict() != other.to_dict()
``` |
{
"source": "joekohlsdorf/Emailage_Python",
"score": 3
} |
#### File: Emailage_Python/emailage/client.py
```python
import json
import ssl
import sys
import urllib
from requests import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from emailage import signature, validation
from emailage.signature import safety_quote
use_urllib_quote = hasattr(urllib, 'quote')
if use_urllib_quote:
def _url_encode_dict(qs_dict):
return '&'.join(map(lambda pair: '='.join([urllib.quote(str(pair[0]), ''), urllib.quote(str(pair[1]), '')]),
sorted(qs_dict.items())))
elif sys.version_info >= (3, 5):
# Python >= 3.5
def _url_encode_dict(qs_dict):
return urllib.parse.urlencode(qs_dict, quote_via=urllib.parse.quote)
else:
# Python 3.x - 3.4
def _url_encode_dict(qs_dict):
return '&'.join(map(lambda pair: '='.join([urllib.parse.quote(str(pair[0]), ''),
urllib.parse.quote(str(pair[1]), '')]),
sorted(qs_dict.items())))
class TlsVersions:
"""An enumeration of the TLS versions supported by the Emailage API"""
TLSv1_1 = ssl.PROTOCOL_TLSv1_1
TLSv1_2 = ssl.PROTOCOL_TLSv1_2
class ApiDomains:
"""API URLs for the specified domains """
sandbox = 'https://sandbox.emailage.com'
production = 'https://api.emailage.com'
class HttpMethods:
"""HttpMethod constants to pass to the client"""
GET = 'GET'
POST = 'POST'
class EmailageClient:
""" Primary proxy to the Emailage API for end-users of the package"""
FRAUD_CODES = {
1: 'Card Not Present Fraud',
2: 'Customer Dispute (Chargeback)',
3: 'First Party Fraud',
4: 'First Payment Default',
        5: 'Identity Theft (Fraud Application)',
        6: 'Identity Theft (Account Take Over)',
7: 'Suspected Fraud (Not Confirmed)',
8: 'Synthetic ID',
9: 'Other'
}
class Adapter(HTTPAdapter):
def __init__(self, tls_version=TlsVersions.TLSv1_2):
self._tls_version = tls_version
self.poolmanager = None
super(EmailageClient.Adapter, self).__init__()
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=self._tls_version)
def __init__(
self,
secret,
token,
sandbox=False,
tls_version=TlsVersions.TLSv1_2,
timeout=None,
http_method='GET'
):
""" Creates an instance of the EmailageClient using the specified credentials and environment
:param secret: Consumer secret, e.g. SID or API key.
:param token: Consumer token.
:param sandbox:
(Optional) Whether to use a sandbox instead of a production server. Uses production by default
:param tls_version: (Optional) Uses TLS version 1.2 by default (TlsVersions.TLSv1_2 | TlsVersions.TLSv1_1)
:param timeout: (Optional) The timeout to be used for sent requests
:param http_method: (Optional) The HTTP method (GET or POST) to be used for sending requests
:type secret: str
:type token: str
:type sandbox: bool
:type tls_version: see :class:`TlsVersions`
:type timeout: float
:type http_method: see :class:`HttpMethods`
:Example:
>>> from emailage.client import EmailageClient
>>> from emailage import protocols
>>> client = EmailageClient('consumer_secret', 'consumer_token', sandbox=True, tls_version=protocols.TLSv1_1)
>>> fraud_report = client.query(('<EMAIL>', '192.168.1.1'), urid='some_unique_identifier')
:Example:
>>> from emailage.client import EmailageClient
>>> from emailage import protocols
>>> client = EmailageClient('consumer_secret',
... 'consumer_token', sandbox=True, timeout=300)
>>> fraud_report = client.query(('<EMAIL>', '192.168.1.1'), urid='some_unique_identifier')
"""
self.secret, self.token, self.sandbox = secret, token, sandbox
self.timeout = timeout
self.hmac_key = token + '&'
self.session = None
self.domain = None
self.set_api_domain((sandbox and ApiDomains.sandbox or ApiDomains.production), tls_version)
self._http_method = http_method.upper()
def set_credentials(self, secret, token):
""" Explicitly set the authentication credentials to be used when generating a request in the current session.
Useful when you want to change credentials after initial creation of the client.
:param secret: Consumer secret, e.g. SID or API key
:param token: Consumer token
:return: None
"""
self.secret = secret
self.token = token
self.hmac_key = token + '&'
def set_api_domain(self, domain, tls_version=TlsVersions.TLSv1_2):
""" Explicitly set the API domain to use for a session of the client, typically used in testing scenarios
:param domain: API domain to use for the session
:param tls_version: (Optional) Uses TLS version 1.2 by default (TlsVersions.TLSv1_2 | TlsVersions.TLSv1_1)
:return: None
:type domain: str see :class: `ApiDomains`
:type tls_version: see :class: `TlsVersions`
:Example:
>>> from emailage.client import EmailageClient
>>> from emailage.client import ApiDomains
>>> client = EmailageClient('consumer_secret', 'consumer_token')
>>> client.set_api_domain(ApiDomains.sandbox)
>>> client.domain
'https://sandbox.emailage.com'
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('consumer_secret', 'consumer_token')
>>> client.set_api_domain('https://testing.emailage.com')
>>> client.domain
'https://testing.emailage.com'
"""
self.session = Session()
self.session.headers.update({
'Content-Type': 'application/json'
})
self.domain = domain
self.session.mount(self.domain, EmailageClient.Adapter(tls_version))
def set_http_method(self, http_method):
""" Explicitly set the Http method (GET or POST) through which you will be sending the request. This method
will be used for any future calls made with this instance of the client until another method is specified
:param http_method: HttpMethod to use for sending requests
:return: None
:type http_method: str see :class: `HttpMethods`
:Example:
>>> from emailage.client import EmailageClient, HttpMethods
>>> client = EmailageClient('consumer_secret', 'consumer_token')
>>> client.set_http_method(HttpMethods.POST)
>>> client.http_method
'POST'
"""
if not http_method:
            raise TypeError('http_method must be a string with the value GET or POST')
if not http_method.upper() == HttpMethods.GET and not http_method.upper() == HttpMethods.POST:
            raise ValueError('http_method must be a string with the value GET or POST')
self._http_method = http_method.upper()
@property
def http_method(self):
return self._http_method
def request(self, endpoint, **params):
""" Base method to generate requests for the Emailage validator and flagging APIs
:param endpoint: API endpoint to send the request ( '' | '/flag' )
:param params: keyword-argument list of parameters to send with the request
:return: JSON dict of the response generated by the API
:type endpoint: str
:type params: kwargs
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('consumer_secret', 'consumer_token')
        >>> response = client.request('/flag', email='user20180830001@domain20180830001.com', flag='good')
>>> response['query']['email']
u'user20180830001%40domain20180830001.com'
"""
url = self.domain + '/emailagevalidator' + endpoint + '/'
api_params = dict(
format='json',
**params
)
request_params = {}
        if self.timeout is not None:
request_params['timeout'] = self.timeout
if self.http_method == HttpMethods.GET:
response = self._perform_get_request(url, api_params, request_params)
else:
response = self._perform_post_request(url, api_params, request_params)
if not response:
raise ValueError('No response received for request')
# Explicit encoding is necessary because the API returns a Byte Order Mark at the beginning of the contents
json_data = response.content.decode(encoding='utf_8_sig')
return json.loads(json_data)
def _perform_get_request(self, url, api_params, request_params=None):
api_params = signature.add_oauth_entries_to_fields_dict(self.secret, api_params)
api_params['oauth_signature'] = signature.create(HttpMethods.GET, url, api_params, self.hmac_key)
params_qs = _url_encode_dict(api_params)
request_params = request_params or {}
res = self.session.get(url, params=params_qs, **request_params)
return res
def _perform_post_request(self, url, api_params, request_params=None):
signature_fields = dict(format='json')
signature_fields = signature.add_oauth_entries_to_fields_dict(self.secret, signature_fields)
signature_fields['oauth_signature'] = signature.create(HttpMethods.POST, url, signature_fields, self.hmac_key)
url = url + '?' + _url_encode_dict(signature_fields)
payload = bytes(self._assemble_quoted_pairs(api_params), encoding='utf_8')
res = self.session.post(url, data=payload, **request_params)
return res
@staticmethod
def _assemble_quoted_pairs(kv_pairs):
return '&'.join(map(lambda pair: '='.join([safety_quote(pair[0]),
safety_quote(pair[1])]),
sorted(kv_pairs.items())))
def query(self, query, **params):
""" Base query method providing support for email, IP address, and optional additional parameters
:param query: RFC2822-compliant Email, RFC791-compliant IP, or both
:param params: keyword-argument form for parameters such as urid, first_name, last_name, etc.
:return: JSON dict of the response generated by the API
:type query: str | (str, str)
:type params: kwargs
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('consumer_secret', 'consumer_token')
>>> response_json = client.query('<EMAIL>')
>>> # Email address only
>>> response_json = client.query('<EMAIL>')
>>> # IP Address only
>>> response_json = client.query('172.16.31.10')
>>> # For a combination. Please note the order
>>> response_json = client.query(('<EMAIL>', '172.16.31.10'))
>>> # Pass a User Defined Record ID (URID) as an optional parameter
>>> response_json = client.query('<EMAIL>', urid='My record ID for <EMAIL>')
"""
if type(query) is tuple:
validation.assert_email(query[0])
validation.assert_ip(query[1])
query = '+'.join(query)
params['query'] = query
return self.request('', **params)
def query_email(self, email, **params):
"""Query a risk score information for the provided email address.
:param email: RFC2822-compliant Email
:param params: (Optional) keyword-argument form for parameters such as urid, first_name, last_name, etc.
:return: JSON dict of the response generated by the API
:type email: str
:type params: kwargs
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.query_email('<EMAIL>')
"""
validation.assert_email(email)
return self.query(email, **params)
def query_ip_address(self, ip, **params):
"""Query a risk score information for the provided IP address.
:param ip: RFC791-compliant IP
:param params: (Optional) keyword-argument form for parameters such as urid, first_name, last_name, etc.
:return: JSON dict of the response generated by the API
:type ip: str
:type params: kwargs
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.query_ip_address('172.16.31.10')
"""
validation.assert_ip(ip)
return self.query(ip, **params)
def query_email_and_ip_address(self, email, ip, **params):
"""Query a risk score information for the provided combination of an Email and IP address
:param email: RFC2822-compliant Email
:param ip: RFC791-compliant IP
:param params: (Optional) keyword-argument form for parameters such as urid, first_name, last_name, etc.
:return: JSON dict of the response generated by the API
:type email: str
:type ip: str
:type params: kwargs
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.query_email_and_ip_address('<EMAIL>', '172.16.31.10')
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.query_email_and_ip_address('<EMAIL>', '172.16.31.10',
... urid='My record ID for <EMAIL> and 172.16.31.10')
"""
validation.assert_email(email)
validation.assert_ip(ip)
return self.query((email, ip), **params)
def flag(self, flag, query, fraud_code=None):
""" Base method used to flag an email address as fraud, good, or neutral
:param flag: type of flag you wish to associate with the identifier ( 'fraud' | 'good' | 'neutral' )
:param query: Email to be flagged
:param fraud_code:
(Optional) Required if flag is 'fraud', one of the IDs in `emailage.client.EmailageClient.FRAUD_CODES`
:return: JSON dict of the confirmation response generated by the API
:type flag: str
:type query: str
:type fraud_code: int
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.flag('good', '<EMAIL>')
>>> response_json = client.flag('fraud', '<EMAIL>', fraud_code=6)
>>> response_json = client.flag('neutral', '<EMAIL>')
"""
flags = ['fraud', 'neutral', 'good']
if flag not in flags:
raise ValueError(validation.Messages.FLAG_NOT_ALLOWED_FORMAT.format(', '.join(flags), flag))
validation.assert_email(query)
params = dict(flag=flag, query=query)
if flag == 'fraud':
codes = self.FRAUD_CODES
if type(fraud_code) is not int:
raise ValueError(
validation.Messages.FRAUD_CODE_RANGE_FORMAT.format(
len(codes), ', '.join(codes.values()), fraud_code)
)
if fraud_code not in range(1, len(codes) + 1):
fraud_code = 9
params['fraudcodeID'] = fraud_code
return self.request('/flag', **params)
def flag_as_fraud(self, query, fraud_code):
"""Mark an email address as fraud.
:param query: Email to be flagged
:param fraud_code: Reason for the email to be marked as fraud; must be one of the IDs in `emailage.client.EmailageClient.FRAUD_CODES`
:return: JSON dict of the confirmation response generated by the API
:type query: str
:type fraud_code: int
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.flag_as_fraud('<EMAIL>', 8)
"""
return self.flag('fraud', query, fraud_code)
def flag_as_good(self, query):
"""Mark an email address as good.
:param query: Email to be flagged
:return: JSON dict of the confirmation response generated by the API
:type query: str
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.flag_as_good('<EMAIL>')
"""
return self.flag('good', query)
def remove_flag(self, query):
"""Unflag an email address that was marked as good or fraud previously.
:param query: Email to be flagged
:return: JSON dict of the confirmation response generated by the API
:type query: str
:Example:
>>> from emailage.client import EmailageClient
>>> client = EmailageClient('My account SID', 'My auth token', sandbox=True)
>>> response_json = client.remove_flag('<EMAIL>')
"""
return self.flag('neutral', query)
``` |
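A short sketch of the flagging workflow built from the methods above (the credentials and the email address are placeholders, so these calls only succeed against a real sandbox account):

```python
from emailage.client import EmailageClient, HttpMethods

client = EmailageClient('my_account_sid', 'my_auth_token', sandbox=True)
client.set_http_method(HttpMethods.POST)  # send requests via POST instead of the default GET

# Look up the fraud code whose description matches the investigated case.
code = next(k for k, v in EmailageClient.FRAUD_CODES.items() if 'Account Take Over' in v)

response = client.flag_as_fraud('someone@example.com', code)  # mark the address as fraudulent
response = client.remove_flag('someone@example.com')          # ...and clear the flag again later
```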
{
"source": "joekraemer/VideoFileDateOrganizing",
"score": 3
} |
#### File: VideoFileDateOrganizing/FindingFilesPackage/fileclustermanager.py
```python
import os
import time
import shutil
from .fileinformation import FileInformation
from .folderfunctions import MakeFolder
class FileClusterManager:
def __init__(self, date, path, maxOnDiskFiles=5, maxOnDiskSizeGB=3):
self.Size = 0
# List of FileInformation Classes
self.Files = []
self.FilesExistOffDisk = False
self.Date = date
self.MaxOnDiskFiles = maxOnDiskFiles
        self.MaxOnDiskSizes = maxOnDiskSizeGB * 1073741824  # convert GB to bytes (1024**3)
self.Path = os.path.join(
path, str(self.Date.year), str(self.Date.month), str(self.Date.day))
self.ClusterName = str(self.Date)
# Name of folder when the FCM is instructed to put the files into a folder
self.ClusterFolderName = str(self.Date.year)
# TODO: Prevent double adding files. Instead should maybe update the last known location.
# Add files to this cluster
def Add(self, file):
# See if this file already exists
for f in self.Files:
if f.Name == file.Name:
# file is already in the FCM list, don't add it
if f.ExistsInCurrentDir:
return
else:
# file is not in the cluster folder, so we should update its lastKnownLocation
f.LastDirectoryFound = file.LastDirectoryFound
# Don't add this file and just return instead
return
# file doesn't exist in this FCM, add it to the list
self.Files.append(file)
self.Size = self.Size + file.Size
return
# How many files in this cluster
def Number(self):
return len(self.Files)
def GetFiles(self):
return self.Files
# Return the Path that files for this cluster should go to
def GetPath(self):
if (self.Number() >= self.MaxOnDiskFiles) or (self.Size >= self.MaxOnDiskSizes):
self.FilesExistOffDisk = True
return None
return self.Path
# Create a folder with the day
def CreateClusterFolder(self):
clusterFolder = os.path.join(self.Path)
# Make sure the folder doesn't already exist
if not (os.path.isdir(clusterFolder)):
MakeFolder(clusterFolder)
self.Path = clusterFolder
return
# Move each file associated with this cluster to a new folder
def MoveFilesToClusterFolder(self):
for file in self.Files:
self.MoveFileToNewFolder(file.LastDirectoryFound, self.Path)
return
# Move file to a new directory
def MoveFileToNewFolder(self, file, dstDir):
shutil.move(str(file), dstDir)
# Make sure the move is complete
historicalSize = -1
while (historicalSize != os.path.getsize(dstDir)):
historicalSize = os.path.getsize(dstDir)
time.sleep(1)
return
``` |
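A rough usage sketch. `FileInformation` is defined elsewhere in the package, so a `SimpleNamespace` stands in for it here with just the attributes the manager reads (`Name`, `Size`, `LastDirectoryFound`, `ExistsInCurrentDir`); the paths are illustrative, so the final move only succeeds against real files:

```python
import datetime
from types import SimpleNamespace

from FindingFilesPackage.fileclustermanager import FileClusterManager

fcm = FileClusterManager(datetime.date(2020, 7, 4), "/srv/video-archive")

# Stand-in for a FileInformation record (illustrative values only).
clip = SimpleNamespace(Name="GOPR0001.MP4",
                       Size=750 * 1024 * 1024,
                       LastDirectoryFound="/mnt/sdcard/DCIM/GOPR0001.MP4",
                       ExistsInCurrentDir=False)

fcm.Add(clip)
if fcm.GetPath() is not None:       # None means the on-disk limits were exceeded
    fcm.CreateClusterFolder()       # creates <path>/2020/7/4 if it does not exist yet
    fcm.MoveFilesToClusterFolder()  # moves every tracked file into the cluster folder
```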
{
"source": "joekroese/math-of-revolution",
"score": 3
} |
#### File: compartmental-models/SEIR/run.py
```python
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Total population, N.
N = 1000
# Initial number of exposed, infective and recovered individuals, E0, I0 and R0.
E0, I0, R0 = 0, 1, 0
# Everyone else, S0, is susceptible to infection initially.
S0 = N - I0 - R0 -E0
# Transmission rate, beta, rate of progression from exposed to infective, gamma, and mean recovery rate, alpha.
beta = 0.0002
alpha = 1./10
gamma = 6./100
# A grid of ticks
t = np.linspace(0, 450, 450)
# The SIR model differential equations.
def SIR_eq(y, t, N, beta, gamma, alpha):
S, E, I, R = y
Sdot = -beta * S * I
Edot = beta * S * I - gamma * E
Idot = gamma * E - alpha * I
Rdot = alpha * I
return Sdot, Edot, Idot, Rdot
# Initial conditions vector
y0 = S0, E0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(SIR_eq, y0, t, args=(N, beta, gamma, alpha))
S, E, I, R = ret.T
# Plot the data on four separate curves for S(t), E(t), I(t) and R(t)
#
s_colour='#ADD694'
e_colour='#FFCD47'
i_colour='#F2728C'
r_colour='#67B8C7'
# s_colour='Green'
# e_colour='Yellow'
# i_colour='Red'
# r_colour='Blue'
fig = plt.figure(facecolor='#dddddd')
ax = fig.add_subplot(111, facecolor='w', axisbelow=True)
ax.plot(t, S, 'b', alpha=0.8, lw=2, label='Susceptible', color=s_colour)
ax.plot(t, E, 'b', alpha=0.8, lw=2, label='Exposed', color=e_colour)
ax.plot(t, I, 'r', alpha=0.8, lw=2, label='Infective', color=i_colour)
ax.plot(t, R, 'g', alpha=0.8, lw=2, label='Removed', color=r_colour)
ax.set_xlabel('Time (Days)')
ax.set_ylabel('Number of Individuals')
ax.set_xlim(0)
ax.set_ylim(0,N*1.1)
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(visible=True, which='major', c='black', lw=1, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.9)
for spine in ('top', 'right'):
ax.spines[spine].set_visible(False)
plt.show()
```
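For reference, the system integrated by `SIR_eq` above is the standard SEIR model without vital dynamics:

```latex
\begin{aligned}
\frac{dS}{dt} &= -\beta S I, \\
\frac{dE}{dt} &= \beta S I - \gamma E, \\
\frac{dI}{dt} &= \gamma E - \alpha I, \\
\frac{dR}{dt} &= \alpha I,
\end{aligned}
```

where beta is the transmission rate, gamma the rate at which exposed individuals become infective (the inverse of the mean incubation period) and alpha the recovery rate; the four derivatives sum to zero, so S + E + I + R = N is conserved.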
#### File: axelrod/tournament-gh-pages/run_probend.py
```python
import axelrod as axl
import os
import utils
from players import players
prob_end = .1
repetitions = 100
processes = 0
seed = 1
filename = "data/strategies_probend_interactions.csv"
def main(players=players):
# Deleting the file if it exists
try:
os.remove(filename)
except OSError:
pass
axl.seed(seed) # Setting a seed
tournament = axl.Tournament(players, prob_end=prob_end,
repetitions=repetitions)
results = tournament.play(filename=filename, processes=processes)
utils.obtain_assets(results, "strategies", "probend", lengthplot=True)
results.write_summary('assets/probend_summary.csv')
if __name__ == "__main__":
main()
``` |
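A note on `prob_end`: assuming the library's documented behaviour, each turn the match ends with probability `prob_end`, so the match length L is geometrically distributed,

```latex
P(L = n) = (1 - p)^{\,n-1} p, \qquad \mathbb{E}[L] = \frac{1}{p} = \frac{1}{0.1} = 10 \text{ turns},
```

which is presumably why the run above passes `lengthplot=True` when generating assets instead of assuming a fixed number of turns.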
{
"source": "Joel103/genre_classification",
"score": 2
} |
#### File: Joel103/genre_classification/callbacks.py
```python
import tensorflow as tf
class IncreaseEpochCustom(tf.keras.callbacks.Callback):
def __init__(self, network):
self.network = network
def on_epoch_end(self, epoch, logs=None):
        # Keras' progress bar counts epochs starting at 1, so add 1 here
self.network.epoch = epoch+1
# Tensorflow Keras ModelCheckpoint argument 'period' is deprecated
# Therefore, I'm doing it on my own
class SaveEveryNthEpochCustom(tf.keras.callbacks.Callback):
def __init__(self, network, save_steps):
self.network = network
self.save_steps = save_steps
def on_epoch_end(self, epoch, logs=None):
if self.network.epoch % self.save_steps == 0:
self.network.save()
class ReconstructImages(tf.keras.callbacks.Callback):
def __init__(self, network, period, dataset, wandb_wrapper):
self.network = network
self.period = period
self.dataset = dataset
self.wandb_wrapper = wandb_wrapper
self.plot_images = 5
def on_epoch_end(self, epoch, logs=None):
if self.network.epoch % self.period == 0:
self.reconstruct_images()
def reconstruct_images(self):
import numpy as np
from matplotlib import pyplot as plt
import wandb
images = []
histograms = []
for elem in self.dataset:
batch_size = elem[0].shape[0]
prediction = self.network.predict_on_batch(elem[0])["decoder"]
indices = np.arange(batch_size)
np.random.shuffle(indices)
for index in indices[:self.plot_images]:
x = elem[0][index][..., 0].numpy().astype(np.float32).T
y = prediction[index][..., 0].astype(np.float32).T
images += [self.wandb_wrapper.post_plt_image(x, y, title="Images", tag="side-by-side-images")]
histograms += [self.wandb_wrapper.post_plt_histogram(x, y, title="Histogram", tag="overlay-histogram", alpha=0.35, bins=50)]
break
wandb.log({"side-by-side-images": images})
wandb.log({"overlay-histogram": histograms})
class CreateEmbedding(tf.keras.callbacks.Callback):
def __init__(self, network, period, dataset, num_classes=10):
self.network = network
self.period = period
self.dataset = dataset
self.num_classes = num_classes
self._plotted_random_samples = 1000
def on_epoch_end(self, epoch, logs=None):
if self.network.epoch % self.period == 0:
self.create_embedding()
def create_embedding(self):
import numpy as np
import wandb
from sklearn.manifold import TSNE
from itertools import cycle
from matplotlib import pyplot as plt
from matplotlib.ticker import NullFormatter
''' Collect Network Embeddings '''
collect_embeddings = []
collect_labels = []
for elem in self.dataset:
for (x, y) in zip(elem[0], elem[1]["classifier"]):
prediction = self.network.predict_embedding_on_batch(x[np.newaxis])
collect_embeddings += [prediction]
collect_labels += [tf.argmax(y, axis=1)]
''' Perform T-SNE '''
embeddings = tf.concat(collect_embeddings, axis=0).numpy()
labels = tf.concat(collect_labels, axis=0).numpy()
X_embedded = TSNE(n_components=2).fit_transform(np.squeeze(embeddings))
''' Some Preparation For Colored Plotting '''
collect_colors_markers = {}
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = cycle(prop_cycle.by_key()['color'])
markers = cycle(('o', ','))
for i in range(self.num_classes):
collect_colors_markers[i] = (next(colors), next(markers))
''' Scatter Plot Embeddings '''
indices = np.random.choice(labels.shape[0], self._plotted_random_samples, replace=False)
# Create figure
fig = plt.figure(figsize=(8, 8))
# Add scatter plot
ax = fig.add_subplot(111)
ax.scatter(X_embedded[:, 0], X_embedded[:, 1], alpha=0.25, s=0.1, c="gray")
for embedding, label in zip(X_embedded[indices], labels[indices]):
ax.scatter(embedding[0], embedding[1], alpha=0.5, c=collect_colors_markers[label][0], marker=collect_colors_markers[label][1])
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
# send to wandb
wandb.log({"test embedding - label colored": wandb.Image(plt)})
plt.close()
return embeddings, labels
```
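A minimal sketch of how the two bookkeeping callbacks above could be wired into training. The `network` wrapper used throughout this repository is not shown in this file, so a small stub with the two members the callbacks rely on (`epoch` and `save()`) stands in for it:

```python
import tensorflow as tf
from callbacks import IncreaseEpochCustom, SaveEveryNthEpochCustom


class NetworkStub:
    """Stand-in for the project's network wrapper (assumed interface: epoch + save())."""
    def __init__(self, model):
        self.model = model
        self.epoch = 0

    def save(self):
        # Placeholder: the real wrapper presumably serializes weights here.
        print(f"saving checkpoint at epoch {self.epoch}")


model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")
network = NetworkStub(model)

# IncreaseEpochCustom runs first and keeps network.epoch in sync with Keras,
# then SaveEveryNthEpochCustom triggers network.save() every 5th epoch.
model.fit(tf.zeros((8, 4)), tf.zeros((8, 1)), epochs=10, verbose=0,
          callbacks=[IncreaseEpochCustom(network),
                     SaveEveryNthEpochCustom(network, save_steps=5)])
```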
#### File: Joel103/genre_classification/prep_utils.py
```python
import tensorflow as tf
import tensorflow_io as tfio
import os
import time
''' Util functions used for the tf.data.Dataset pipeline '''
#============================== PREPROCESSING ==============================
def wrapper_cast(x):
x['audio'] = tf.cast(x['audio'], tf.float32)
try:
x['noise_wav'] = tf.cast(x['noise_wav'], tf.float32)
except KeyError:
x['input'] = x['audio']
return x
def wrapper_cast_offline(x):
try:
x['input'] = tf.cast(x['noise_wav'], tf.float32)
except KeyError:
x['input'] = tf.cast(x['audio'], tf.float32)
return x
def wrapper_normalize(x):
# normalize whole sample in a range between 0 and 1
x['mel'] -= tf.math.reduce_min(x['mel'])
x['mel'] /= tf.math.reduce_max(x['mel'])
return x
def wrapper_rescale(x):
    # rescale the waveform by its maximum absolute value (range [-1, 1])
x['input'] /= tf.math.reduce_max(tf.math.abs(x['input']))
return x
def wrapper_spect(x, nfft, window, stride, logger=None):
x['spectrogram'] = tfio.experimental.audio.spectrogram(x['input'],
nfft=nfft,
window=window,
stride=stride)
x.pop('input')
return x
def wrapper_mel(x, sample_rate, mels, fmin_mels, fmax_mels, top_db, db=False, logger=None):
x['mel'] = tfio.experimental.audio.melscale(x['spectrogram'],
rate=sample_rate,
mels=mels,
fmin=fmin_mels,
fmax=fmax_mels)
if db: #to be implemented with noise
x['db_mel'] = tfio.experimental.audio.dbscale(x['mel'], top_db=top_db)
x.pop('spectrogram')
return x
def wrapper_log_mel(x):
x['mel'] = tf.math.log(1 + x['mel'])
return x
def wrapper_merge_features(ds, ds_noise):
ds.update({"mel_noise": ds_noise["mel"], "label_noise": ds_noise["label"], "shape_noise": ds_noise["shape"]})
return ds
def wrapper_shape_to_proper_length(x, common_divider, clip=True, testing=False):
# TODO: adapt to be dynamic about how long either signal or noise is (current assumption is: signal_length >= noise_length)
# get shapes
signal_shape = x['shape']
if clip:
pad_length = signal_shape[0] - tf.math.mod(signal_shape[0], common_divider)
x["mel"] = x["mel"][:pad_length]
else:
# calc desired sequence length
pad_length = signal_shape[0] + common_divider - tf.math.mod(signal_shape[0], common_divider)
# create padding
signal_zeros = tf.zeros((pad_length - signal_shape[0], signal_shape[1]), tf.float32)
x["mel"] = tf.concat([x["mel"], signal_zeros], axis=0)
if not testing:
# pad
noise_shape = x['shape_noise']
noise_zeros = tf.zeros((pad_length - noise_shape[0], noise_shape[1]), tf.float32)
x["mel_noise"] = tf.concat([x["mel_noise"], noise_zeros], axis=0)
return x
def wrapper_extract_shape(x):
x["shape"] = tf.py_function(lambda x: x.shape, [x["mel"]], tf.int32)
return x
def get_waveform(x, noise_root, desired_channels, desired_samples):
audio_binary = tf.io.read_file(noise_root+os.sep+x['noise'])
audio, sample_rate = tf.audio.decode_wav(audio_binary,
desired_channels=desired_channels,
desired_samples=desired_samples)
audio = tf.squeeze(audio, axis=-1)
return {'audio': audio,
'label': tf.cast(x['label'], tf.int32),
'rate': sample_rate}
def wrapper_dict2tensor(x, features=['mel','label']):
return [tf.convert_to_tensor(x[feature]) for feature in features]
def wrapper_pack(x):
return {"label": tf.cast(x["label"], tf.int32), "mel":x["mel"], "shape":x["shape"]}
#============================== AUGMENTATION ==============================
def wrapper_fade(x, fade):
x['input'] = tfio.experimental.audio.fade(x['input'], fade_in=fade, fade_out=fade, mode="logarithmic")
return x
def wrapper_trim(x, epsilon):
position = tfio.experimental.audio.trim(x['audio'], axis=0, epsilon=epsilon)
start = position[0]
stop = position[1]
x['audio'] = x['audio'][start:stop]
return x
def wrapper_mask(x, freq_mask, time_mask, param_db, db=False):
# freq masking
x['mel'] = tfio.experimental.audio.freq_mask(x['mel'], param=freq_mask)
# Time masking
x['mel'] = tfio.experimental.audio.time_mask(x['mel'], param=time_mask)
if db:
x['db_mel'] = tfio.experimental.audio.freq_mask(x['db_mel'], param=param_db)
x['db_mel'] = tfio.experimental.audio.time_mask(x['db_mel'], param=param_db)
return x
def wrapper_roll(x, roll_val):
roll_tensor = tf.random.uniform((), minval=-roll_val, maxval=roll_val, dtype=tf.dtypes.int32)
x['mel'] = tf.roll(x['mel'], roll_tensor, axis=0)
x['mel_noise'] = tf.roll(x['mel_noise'], roll_tensor, axis=0)
return x
# TODO: The RMS calculation might still be off
def get_noise_from_sound(signal, noise, SNR):
# current RMS of signal
centered_signal = signal-tf.reduce_mean(signal)
RMS_s = tf.sqrt(tf.reduce_mean(tf.square(centered_signal)))
# current RMS of noise
centered_noise = noise-tf.reduce_mean(noise)
RMS_n_current = tf.sqrt(tf.reduce_mean(tf.square(centered_noise)))
# scalar
RMS_n = SNR * (RMS_s / RMS_n_current)
noise /= RMS_n
return noise
def wrapper_mix_noise(x, SNR):
out = get_noise_from_sound(x['mel'], x['mel_noise'], SNR)
x['mel'] += tf.squeeze(out)
return x
def pitch_shift_data(wave_data, shift_val, bins_per_octave, sample_rate):
import librosa
wave_data = wave_data.numpy()
random_shift = np.random.randint(low=-shift_val, high=shift_val)
wave_data = librosa.effects.pitch_shift(wave_data, sample_rate,
random_shift, bins_per_octave=bins_per_octave.numpy())
return wave_data
def wrapper_change_pitch(x, shift_val, bins_per_octave, sample_rate):
out = tf.py_function(pitch_shift_data, [x['audio'], shift_val, bins_per_octave, sample_rate], tf.float32)
x['audio'] = out
return x
```
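These wrappers are designed to be chained with `tf.data.Dataset.map`. A rough sketch of one possible ordering is below; the spectrogram/mel parameters are illustrative rather than this project's actual config values, and `raw_ds` is assumed to yield dicts containing at least `'audio'` and `'label'`:
```python
# Hypothetical sketch of composing the wrappers above into a tf.data pipeline.
import functools
import tensorflow as tf

def build_pipeline(raw_ds):
    ds = raw_ds.map(wrapper_cast_offline, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(wrapper_rescale, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.map(functools.partial(wrapper_spect, nfft=512, window=512, stride=256))
    ds = ds.map(functools.partial(wrapper_mel, sample_rate=22050, mels=128,
                                  fmin_mels=0, fmax_mels=8000, top_db=80))
    ds = ds.map(wrapper_log_mel).map(wrapper_normalize)
    # padding/batching to a common length is handled separately
    # (see wrapper_extract_shape and wrapper_shape_to_proper_length above)
    return ds.prefetch(tf.data.AUTOTUNE)
```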
#### File: Joel103/genre_classification/resnet_decoder.py
```python
import tensorflow as tf
# TODO: adapt to be usable as Decoder
class BasicBlock_Transposed(tf.keras.Model):
expansion = 1
def __init__(self, in_channels, out_channels, strides=1):
super(BasicBlock_Transposed, self).__init__()
self.conv1 = tf.keras.layers.Conv2DTranspose(out_channels, kernel_size=3, strides=strides, use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2DTranspose(out_channels, kernel_size=3, strides=1, use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
"""
Adds a shortcut between input and residual block and merges them with "sum"
"""
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(self.expansion * out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x, _: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
# if training: print("=> training network ... ")
out = self.activation(self.bn1(self.conv1(x), training=training))
out = self.bn2(self.conv2(out), training=training)
out += self.shortcut(x, training)
return self.activation(out)
class Bottleneck_Transposed(tf.keras.Model):
expansion = 4
def __init__(self, in_channels, out_channels, strides=1):
super(Bottleneck_Transposed, self).__init__()
self.conv1 = tf.keras.layers.Conv2DTranspose(out_channels, 1, 1, use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2DTranspose(out_channels, 3, strides, use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
self.conv3 = tf.keras.layers.Conv2DTranspose(out_channels * self.expansion, 1, 1, use_bias=False)
self.bn3 = tf.keras.layers.BatchNormalization()
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(self.expansion * out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x, _: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
out = self.activation(self.bn1(self.conv1(x), training))
out = self.activation(self.bn2(self.conv2(out), training))
out = self.bn3(self.conv3(out), training)
out += self.shortcut(x, training)
return self.activation(out)
class ResNet_Decoder(tf.keras.Model):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet_Decoder, self).__init__()
self.in_channels = 64
self.conv1 = tf.keras.layers.Conv2DTranspose(64, 3, 1, use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.pool = tf.keras.layers.UpSampling2D((4, 4), interpolation="nearest")
self.linear = tf.keras.layers.Dense(units=num_classes, activation="softmax")
self.activation = tf.keras.layers.ReLU()
def _make_layer(self, block, out_channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return tf.keras.Sequential(layers)
def call(self, x, training=False):
out = x
# For classification
out = self.linear(out)
out = tf.reshape(out, (out.shape[0], 1, 1, -1))
        out = self.pool(self.bn1(self.conv1(out), training=training))
out = self.activation(out)
out = self.layer4(out, training=training)
out = self.layer3(out, training=training)
out = self.layer2(out, training=training)
out = self.layer1(out, training=training)
return out
def ResNet18_Decoder():
return ResNet_Decoder(BasicBlock_Transposed, [2, 2, 2, 2])
def ResNet34_Decoder():
return ResNet_Decoder(BasicBlock_Transposed, [3, 4, 6, 3])
def ResNet50_Decoder():
return ResNet_Decoder(Bottleneck_Transposed, [3, 4, 14, 3])
def ResNet101_Decoder():
return ResNet_Decoder(Bottleneck_Transposed, [3, 4, 23, 3])
def ResNet152_Decoder():
return ResNet_Decoder(Bottleneck_Transposed, [3, 8, 36, 3])
def Basic_Decoder():
model = tf.keras.Sequential([
tf.keras.layers.Reshape((1, 1, -1)),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(1024, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(512, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(256, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(128, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(64, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.UpSampling2D((2, 2), interpolation="nearest"),
tf.keras.layers.Conv2DTranspose(32, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.Conv2DTranspose(16, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.Conv2DTranspose(8, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.Conv2DTranspose(4, 3, strides=(1, 1), padding="same"),
tf.keras.layers.LeakyReLU(),
tf.keras.layers.Conv2DTranspose(1, 3, strides=(1, 1), padding="same"),
])
return model
if __name__ == "__main__":
from utils import allow_growth
allow_growth()
model = Basic_Decoder()
model.build(input_shape=[1, 1, 1, 1024])
print(model.summary())
print(model.predict_on_batch(tf.ones([1, 1, 1, 1024], tf.float32)).shape)
```
#### File: Joel103/genre_classification/resnet.py
```python
import tensorflow as tf
class BasicBlock(tf.keras.Model):
expansion = 1
def __init__(self, in_channels, out_channels, strides=1):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(out_channels, kernel_size=3, strides=strides,
padding="same", use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(out_channels, kernel_size=3, strides=1,
padding="same", use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
"""
Adds a shortcut between input and residual block and merges them with "sum"
"""
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2D(self.expansion*out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x,_: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
# if training: print("=> training network ... ")
out = self.activation(self.bn1(self.conv1(x), training=training))
out = self.bn2(self.conv2(out), training=training)
out += self.shortcut(x, training)
return self.activation(out)
class Bottleneck(tf.keras.Model):
expansion = 4
def __init__(self, in_channels, out_channels, strides=1):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(out_channels, 1, 1, use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(out_channels, 3, strides, padding="same", use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
self.conv3 = tf.keras.layers.Conv2D(out_channels*self.expansion, 1, 1, use_bias=False)
self.bn3 = tf.keras.layers.BatchNormalization()
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2D(self.expansion*out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x,_: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
out = self.activation(self.bn1(self.conv1(x), training))
out = self.activation(self.bn2(self.conv2(out), training))
out = self.bn3(self.conv3(out), training)
out += self.shortcut(x, training)
return self.activation(out)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, num_classes=10):
super().__init__()
self.in_channels = 64
self.conv1 = tf.keras.layers.Conv2D(64, 3, 1, padding="same", use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avg_pool2d = tf.keras.layers.GlobalAveragePooling2D()
self.linear = tf.keras.layers.Dense(units=num_classes, activation="sigmoid")
self.activation = tf.keras.layers.ReLU()
def _make_layer(self, block, out_channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return tf.keras.Sequential(layers)
def call(self, x, training=False):
out = x
out = self.activation(self.bn1(self.conv1(x), training))
out = self.layer1(out, training=training)
out = self.layer2(out, training=training)
out = self.layer3(out, training=training)
out = self.layer4(out, training=training)
# For classification
out = self.avg_pool2d(out)
out = self.linear(out)
return out
def ResNet18(num_classes=10):
return ResNet(BasicBlock, [2,2,2,2], num_classes)
def ResNet34(num_classes=10):
return ResNet(BasicBlock, [3,4,6,3], num_classes)
def ResNet50(num_classes=10):
return ResNet(Bottleneck, [3,4,14,3], num_classes)
def ResNet101(num_classes=10):
return ResNet(Bottleneck, [3,4,23,3], num_classes)
def ResNet152(num_classes=10):
return ResNet(Bottleneck, [3,8,36,3], num_classes)
if __name__ == "__main__":
from utils import allow_growth
allow_growth()
model = ResNet18(1024)
model.build(input_shape=[1, 64, 64, 1])
print(model.summary())
print(model.predict_on_batch(tf.ones([1, 64, 64, 1], tf.float32)).shape)
```
#### File: Joel103/genre_classification/utils.py
```python
import tensorflow as tf
import os
# load model config
def load_config(config_path="config.json", verbose=1):
import json
with open(config_path, "r") as config_file:
config_data = json.load(config_file)
# show content of config
if verbose:
print(json.dumps(config_data, indent=2, sort_keys=True))
return config_data
def save_config(data_parameter, model_parameter, training_parameter, network):
# writing config file into model folder
import json
new_config = {"data_parameter": data_parameter,
"model_parameter": model_parameter,
"training_parameter": training_parameter}
with open(network.config_path, 'w+') as config:
config.write(json.dumps(new_config, sort_keys=True, indent=2))
print(f"Model folder created and config saved: {network.config_path}")
def allow_growth():
import tensorflow as tf
# Copied from: https://tensorflow.google.cn/guide/gpu?hl=en#limiting_gpu_memory_growth
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
# Copied from stackoverflow. originally posted by <NAME>, license: CC BY-SA 4.0, link: https://stackoverflow.com/a/3233356
import collections.abc
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
``` |
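For reference, the recursive `update` above merges nested dictionaries key by key instead of replacing whole sub-dicts. A small illustrative example:
```python
defaults = {"learner": {"lr": 1e-4, "gamma": 0.99}, "training": {"cuda": True}}
override = {"learner": {"lr": 3e-4}}
merged = update(defaults, override)
# merged == {"learner": {"lr": 3e-4, "gamma": 0.99}, "training": {"cuda": True}}
```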
{
"source": "joel2411/Simple-Neural-Network",
"score": 3
} |
#### File: Simple-Neural-Network/SimpleNN/Activate.py
```python
import numpy as np
class Activate:
def __init__(self, activation_funtion, alpha):
self.activation_function = activation_funtion
self.alpha = alpha
def activate(self, node_temp, direction):
node_out = 0
if direction != 'forward' and direction != 'backward':
print("direction on activation is wrong")
return 0
if self.activation_function == 'relu':
if direction == 'forward':
node_out = np.clip(node_temp, a_min=0, a_max=np.inf)
elif direction == 'backward':
node_out = np.where(node_temp > 0, 1, 0)
elif self.activation_function == 'lrelu':
if direction == 'forward':
node_out = np.where(node_temp > 0,
node_temp,
np.multiply(node_temp,self.alpha))
elif direction == 'backward':
node_out = np.where(node_temp > 0, 1, self.alpha)
elif self.activation_function == 'elu':
s_out = np.multiply(self.alpha,
np.subtract(np.exp(node_temp),1))
if direction == 'forward':
node_out = np.where(node_temp > 0,
node_temp, s_out)
elif direction == 'backward':
node_out = np.where(node_temp > 0, 1,
np.add(s_out,self.alpha))
elif self.activation_function == 'sigmoid':
            sigmoid = np.divide(1, np.add(1, np.exp(np.negative(node_temp))))  # 1 / (1 + e^-x)
if direction == 'forward':
node_out = sigmoid
elif direction == 'backward':
node_out = np.multiply(sigmoid,
np.subtract(1, sigmoid))
elif self.activation_function == 'tanh':
tanh = np.tanh(node_temp)
if direction == 'forward':
node_out = tanh
elif direction == 'backward':
node_out = np.subtract(1,np.power(tanh, 2))
elif self.activation_function == 'linear':
if direction == 'forward':
node_out = node_temp
elif direction == 'backward':
node_out = np.ones_like(node_temp)
elif self.activation_function == 'maxout':
node_out = np.amax(np.transpose(node_temp), axis=0)
if direction == 'forward':
node_out = node_out
elif direction == 'backward':
node_out = np.where(node_out == np.amax(node_out,
axis=0),
1, 0)
elif self.activation_function == 'softmax':
            # subtract the max before exponentiating for numerical stability
            e_out = np.exp(node_temp - np.max(node_temp))
            softmax = e_out / np.sum(e_out)
if direction == 'forward':
node_out = softmax
elif direction == 'backward':
node_out = np.multiply(softmax,
np.subtract(1, softmax))
elif self.activation_function == 'softplus':
if direction == 'forward':
node_out = np.log(1 + np.exp(node_temp))
elif direction == 'backward':
node_out = np.divide(1, np.add(
1, np.exp(np.negative(node_temp))))
else:
print("activation function is wrong!")
return 0
return node_out
```
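A small usage sketch of the class above, showing the forward pass and its derivative for ReLU (values chosen purely for illustration):
```python
import numpy as np

act = Activate('relu', alpha=0.01)
z = np.array([-1.0, 0.5, 2.0])
print(act.activate(z, 'forward'))   # [0.  0.5 2. ]
print(act.activate(z, 'backward'))  # [0 1 1]
```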
#### File: Simple-Neural-Network/SimpleNN/NeuralNet.py
```python
import numpy as np
from SimpleNN import FCLayer
from SimpleNN import LossFun
class NeuralNet(object):
def __init__(self, num_inputnodes,
loss_function = 'NLL'):
self.layers = []
self.Loss = LossFun.Loss(loss_function)
self.LossVal = 0
self.inputlayer = np.zeros(num_inputnodes, dtype=float)
self.iter = 0
def addlayer(self, num_node, layertype = 'FC',
activation_function = 'relu',
optimizer = 'SGD',
learning_rate = 0.01, alpha = 0.01,
param1 = 0.01, param2 = 0.01):
if len(self.layers) == 0:
num_inputnode = len(self.inputlayer)
else:
num_inputnode = self.layers[len(self.layers) - 1].num_nodes
if layertype == 'FC':
self.layers.append(
FCLayer.Layer(num_node,
num_inputnode,
optimizer = optimizer,
learning_rate = learning_rate,
layertype = layertype,
activation_function = activation_function,
alpha = alpha,
param1=param1,
param2=param2))
def dellayer(self, layerindex):
self.layers.pop(layerindex)
if layerindex <= len(self.layers) - 1:
if len(self.layers) == 0:
num_inputnode = len(self.inputlayer)
else:
num_inputnode = self.layers[layerindex - 1].num_nodes
self.layers[layerindex].reset(num_inputnode)
def getloss(self, input, target):
self.LossVal = self.Loss.compute_loss(
self.test(input), target)
return self.LossVal
def test(self, input):
temp_input = input
temp_output = 0
for layerindex in range(len(self.layers)):
temp_output = self.layers[layerindex].forwardprop(temp_input)
temp_input = np.copy(temp_output)
return temp_output
def train(self, input, target, minibatch = False):
self.iter += 1
if minibatch:
acc_grad = 0
self.LossVal = 0
for s in range (len(input)):
gradient = self.Loss.compute_grad(
self.test(input[s]), target[s])
acc_grad += gradient
for layerindex in reversed(range(len(self.layers))):
gradient = self.layers[layerindex].backprop(
gradient, False, self.iter,
minibatch = minibatch)
gradient = acc_grad / len(input)
for layerindex in reversed(range(len(self.layers))):
gradient = self.layers[layerindex].backprop(
gradient, True, self.iter,
minibatch = minibatch, minibatchsize = len(input))
else:
gradient = self.Loss.compute_grad(
self.test(input), target)
# print(gradient)
for layerindex in reversed(range(len(self.layers))):
gradient = self.layers[layerindex].backprop(
gradient, True, self.iter,
minibatch = minibatch, minibatchsize = len(input))
``` |
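A hypothetical end-to-end sketch of the API above; the layer sizes, input/target shapes, and hyperparameters are illustrative guesses and depend on how `FCLayer` (not shown here) expects its inputs:
```python
import numpy as np
from SimpleNN import NeuralNet

# Hypothetical sketch: a tiny 4-8-3 network trained on a single sample.
net = NeuralNet.NeuralNet(num_inputnodes=4, loss_function='NLL')
net.addlayer(8, activation_function='relu', learning_rate=0.01)
net.addlayer(3, activation_function='softmax', learning_rate=0.01)
x = np.random.rand(4)            # one input sample
t = np.array([1.0, 0.0, 0.0])    # one-hot target
for _ in range(100):
    net.train(x, t)
print(net.getloss(x, t))
```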
{
"source": "Joel301/Project_Euler",
"score": 4
} |
#### File: Project_Euler/scripts/problem0002.py
```python
def fibonacci(limit=89):
    # Collect the Fibonacci terms (starting 1, 2) whose values do not exceed the limit.
    lst = [1, 2]
    n1, n2 = 1, 2
    while n1 + n2 <= limit:
        n = n1 + n2
        n1, n2 = n2, n
        lst.append(n)
    return lst
# main function, same approach as problem0001
def compute(v = 4000000):
ans = sum(x for x in fibonacci(v) if x % 2 == 0)
return ans
if __name__ == "__main__":
print(compute(4000000))
``` |
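As a quick sanity check of `compute`, the even Fibonacci numbers not exceeding 100 are 2, 8, and 34, so the small case should sum to 44:
```python
assert compute(100) == 44
print(compute())  # default limit of 4,000,000, as asked by Project Euler problem 2
```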
{
"source": "joel99/midlevel-reps",
"score": 2
} |
#### File: midlevel-reps/configs/doom.py
```python
@ex.named_config
def cfg_doom_navigation():
uuid = 'doom_visualnavigation'
cfg = {}
cfg['learner'] = {
'algo': 'ppo', # Learning algorithm for RL agent. Currently only PPO
'clip_param': 0.1, # Clip param for trust region in PPO
'entropy_coef': 0.01, # Weighting of the entropy term in PPO
'eps': 1e-5, # Small epsilon to prevent divide-by-zero
'gamma': 0.99, # Gamma to use if env.observation_space.shape = 1
'internal_state_size': 512, # If using a recurrent policy, what state size to use
'lr': 0.0001, # Learning rate for algorithm
'num_steps': 200, # Length of each rollout
'num_mini_batch': 16, # Size of PPO minibatch
'num_stack': 4, # Frames that each cell (CNN) can see
'max_grad_norm': 0.5, # Clip grads
'ppo_epoch': 4, # Number of times PPO goes over the buffer
'recurrent_policy': False, # Use a recurrent version with the cell as the standard model
'tau': 0.95, # When using GAE
'use_gae': True, # Whether to use GAE
'value_loss_coef': 0.0001, # Weighting of value_loss in PPO
'perception_network': 'AtariNet',
'test':False,
'use_replay':False,
'replay_buffer_size': 1000,
'on_policy_epoch': 4,
'off_policy_epoch': 0,
}
image_dim = 84
cfg['env'] = {
'add_timestep': False, # Add timestep to the observation
'env_name': 'Doom_VizdoomMultiGoalWithClutterEnv.room-v0',
"env_specific_args": {
# "episode_timeout": 1000,
"episode_timeout": 100,
"n_clutter_objects": 8,
"n_goal_objects": 1
},
'sensors': {
'rgb_filled': None,
'taskonomy': None,
'map': None,
'target': None
},
'transform_fn_pre_aggregation': None,
'transform_fn_post_aggregation': None,
'num_processes': 1,
'additional_repeat_count': 3,
}
cfg['saving'] = {
'port': 8097,
'log_dir': LOG_DIR,
'log_interval': 1,
'save_interval': 100,
'save_dir': 'checkpoints',
'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'),
'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'),
'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'),
'vis': False,
'vis_interval': 200,
'launcher_script': None,
'visdom_server': 'localhost',
'visdom_port': '8097',
'checkpoint': None,
'checkpoint_configs': False, # copy the metadata of the checkpoint. YMMV.
}
cfg['training'] = {
'cuda': True,
'seed': random.randint(0,1000),
'num_frames': 5e6,
'resumable': True,
}
@ex.named_config
def scratch_doom():
# scratch is not compatible with collate because we need to perform Image operations (resize) to go from
# 256 to 84. This is not implemented with collate code
uuid = 'doom_scratch'
cfg = {}
cfg['learner'] = {
'perception_network': 'AtariNet',
'perception_network_kwargs': {
'n_map_channels': 0,
'use_target': False,
}
}
cfg['env'] = {
'env_specific_kwargs': {
"episode_timeout": 1000,
"n_clutter_objects": 8,
"n_goal_objects": 1
},
'transform_fn_pre_aggregation': """
TransformFactory.splitting(
{
'color': {
'rgb_filled':rescale_centercrop_resize((3,84,84)) }
},
keep_unnamed=False)
""".translate(remove_whitespace),
'transform_fn_post_aggregation': None,
}
@ex.named_config
def cfg_doom_exploration():
uuid = 'doom_myopicexploration'
cfg = {}
cfg['learner'] = {
'algo': 'ppo', # Learning algorithm for RL agent. Currently only PPO
'clip_param': 0.1, # Clip param for trust region in PPO
'entropy_coef': 0.01, # Weighting of the entropy term in PPO
'eps': 1e-5, # Small epsilon to prevent divide-by-zero
'gamma': 0.99, # Gamma to use if env.observation_space.shape = 1
'internal_state_size': 512, # If using a recurrent policy, what state size to use
'lr': 0.0001, # Learning rate for algorithm
'num_steps': 200, # Length of each rollout
'num_mini_batch': 16, # Size of PPO minibatch
'num_stack': 4, # Frames that each cell (CNN) can see
'max_grad_norm': 0.5, # Clip grads
'ppo_epoch': 4, # Number of times PPO goes over the buffer
'recurrent_policy': False, # Use a recurrent version with the cell as the standard model
'tau': 0.95, # When using GAE
'use_gae': True, # Whether to use GAE
'value_loss_coef': 0.0001, # Weighting of value_loss in PPO
'perception_network': 'AtariNet',
'test':False,
'use_replay':False,
'replay_buffer_size': 1000,
'on_policy_epoch': 4,
'off_policy_epoch': 0,
}
image_dim = 84
cfg['env'] = {
'add_timestep': False, # Add timestep to the observation
'env_name': 'Doom_VizdoomExplorationEnv.room-v0',
"env_specific_args": {
"episode_timeout": 2000,
},
'sensors': {
'rgb_filled': None,
'taskonomy': None,
'map': None,
'occupancy': None
},
'transform_fn_pre_aggregation': None,
'transform_fn_post_aggregation': None,
'num_processes': 1,
'additional_repeat_count': 3,
}
cfg['saving'] = {
'port': 8097,
'log_dir': LOG_DIR,
'log_interval': 1,
'save_interval': 100,
'save_dir': 'checkpoints',
'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'),
'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'),
'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'),
'vis': False,
'vis_interval': 200,
'launcher_script': None,
'visdom_server': 'localhost',
'visdom_port': '8097',
'checkpoint': None,
'checkpoint_configs': False, # copy the metadata of the checkpoint. YMMV.
}
cfg['training'] = {
'cuda': True,
'seed': random.randint(0,1000),
'num_frames': 5e5,
'resumable': True,
}
@ex.named_config
def scratch_doom_exploration():
# scratch is not compatible with collate because we need to perform Image operations (resize) to go from
# 256 to 84. This is not implemented with collate code
uuid = 'doom_scratch_exploration'
cfg = {}
cfg['learner'] = {
'perception_network': 'AtariNet',
'perception_network_kwargs': {
'n_map_channels': 1,
'use_target': False,
}
}
cfg['env'] = {
'env_specific_kwargs': {
},
'transform_fn_pre_aggregation': """
TransformFactory.splitting(
{
'color': {
'rgb_filled':rescale_centercrop_resize((3,84,84)) },
'occupancy': {
'map': rescale_centercrop_resize((1,84,84))}
},
keep_unnamed=False)
""".translate(remove_whitespace),
'transform_fn_post_aggregation': None,
}
```
#### File: env/vizdoom/vizdoomenv.py
```python
import gym
from gym import spaces
from vizdoom import *
import numpy as np
import os
# from gym.envs.classic_control import rendering
CONFIGS = [['basic.cfg', 3], # 0
['deadly_corridor.cfg', 7], # 1
['defend_the_center.cfg', 3], # 2
['defend_the_line.cfg', 3], # 3
['health_gathering.cfg', 3], # 4
['my_way_home.cfg', 5], # 5
['predict_position.cfg', 3], # 6
['take_cover.cfg', 2], # 7
['deathmatch.cfg', 20], # 8
['health_gathering_supreme.cfg', 3]] # 9
FPS = 50
class VizdoomEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
def __init__(self, level):
# init game
self.game = DoomGame()
scenarios_dir = os.path.join(os.path.dirname(__file__), 'scenarios')
game_path = os.path.join(os.path.dirname(__file__), 'freedoom2.wad')
assert os.path.isfile(game_path)
self.game.set_doom_game_path(game_path)
self.game.load_config(os.path.join(scenarios_dir, CONFIGS[level][0]))
self.game.set_screen_resolution(ScreenResolution.RES_160X120)
self.game.set_window_visible(False)
self.game.set_sound_enabled(False)
args = []
args.append('+sv_cheats 1')
for arg in args:
self.game.add_game_args(arg)
self.game.init()
self.state = None
self.action_space = spaces.Discrete(CONFIGS[level][1])
self.observation_space = spaces.Box(0, 255, (self.game.get_screen_height(),
self.game.get_screen_width(),
self.game.get_screen_channels()),
dtype=np.uint8)
self.viewer = None
self.done = False
def step(self, action):
# convert action to vizdoom action space (one hot)
act = np.zeros(self.action_space.n)
act[action] = 1
act = np.uint8(act)
act = act.tolist()
reward = self.game.make_action(act)
self.state = self.game.get_state()
self.done = self.game.is_episode_finished()
self.obs = self._get_obs()
info = {'dummy': 0}
return self.obs, reward, self.done, info
def reset(self):
self.game.new_episode()
self.state = self.game.get_state()
self.obs = self._get_obs()
return self._get_obs()
    def render(self, mode='human'):
        # On-screen rendering is effectively disabled: the gym rendering import at the
        # top of this file is commented out, so self.viewer is never created here.
        # To re-enable it, create a viewer lazily, e.g.
        #     if self.viewer is None:
        #         self.viewer = rendering.SimpleImageViewer()
        if self.viewer is not None:
            self.viewer.imshow(self.obs)
        return self.obs
def seed(self, seed):
self.game.set_seed(seed)
def _get_obs(self):
if not self.done:
return np.transpose(self.state.screen_buffer, (1, 2, 0))
else:
return np.zeros(self.observation_space.shape, dtype=np.uint8)
@staticmethod
def get_keys_to_action():
# you can press only one key at a time!
keys = {(): 2,
(ord('a'),): 0,
(ord('d'),): 1,
(ord('w'),): 3,
(ord('s'),): 4,
(ord('q'),): 5,
(ord('e'),): 6}
return keys
```
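A minimal driving sketch for the base environment above; it assumes ViZDoom is installed and that the scenario configs and `freedoom2.wad` sit next to this module, as the constructor requires:
```python
# Hypothetical sketch: a single random-action episode on the 'basic.cfg' scenario.
env = VizdoomEnv(level=0)          # CONFIGS[0] -> basic.cfg, 3 actions
obs = env.reset()
done, total_reward = False, 0.0
while not done:
    obs, reward, done, info = env.step(env.action_space.sample())
    total_reward += reward
print('episode return:', total_reward)
env.close()
```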
#### File: env/vizdoom/vizdoomhealthgatheringsupreme.py
```python
from .vizdoomenv import VizdoomEnv
class VizdoomHealthGatheringSupreme(VizdoomEnv):
def __init__(self):
super(VizdoomHealthGatheringSupreme, self).__init__(9)
```
#### File: env/wrappers/preprocessingwrapper.py
```python
from gym.spaces.box import Box
import gym
import torch
class ProcessObservationWrapper(gym.ObservationWrapper):
''' Wraps an environment so that instead of
obs = env.step(),
obs = transform(env.step())
Args:
transform: a function that transforms obs
obs_shape: the final obs_shape is needed to set the observation space of the env
'''
def __init__(self, env, transform, obs_space):
super().__init__(env)
self.observation_space = obs_space
self.transform = transform
def observation(self, observation):
return self.transform(observation)
```
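A short usage sketch of the wrapper above; `base_env` stands in for any environment whose observations are HxWx3 uint8 frames, and the grayscale transform is purely illustrative:
```python
import numpy as np
from gym.spaces.box import Box

def to_gray(obs):
    # collapse the channel axis to a single grayscale channel
    return obs.mean(axis=-1, keepdims=True).astype(np.uint8)

gray_space = Box(0, 255, shape=(84, 84, 1), dtype=np.uint8)
env = ProcessObservationWrapper(base_env, transform=to_gray, obs_space=gray_space)
obs = env.reset()  # observation now has shape (84, 84, 1)
```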
#### File: evkit/models/__init__.py
```python
import torch.nn as nn
class SingleSensorModule(nn.Module):
def __init__(self, module, sensor_name):
super().__init__()
self.module = module
self.sensor_name = sensor_name
def __call__(self, obs):
# return {self.sensor_name: self.module(obs[self.sensor_name])}
return self.module(obs[self.sensor_name])
```
#### File: evkit/preprocess/transforms.py
```python
from collections import defaultdict
import numpy as np
import skimage
import torchvision as vision
import torch
import torch.nn as nn
import torch.nn.functional as F
import multiprocessing.dummy as mp
import multiprocessing
from gym import spaces
from evkit.sensors import SensorPack
from evkit.models.taskonomy_network import TaskonomyDecoder, TaskonomyEncoder, TaskonomyNetwork
RESCALE_0_1_NEG1_POS1 = vision.transforms.Normalize([0.5,0.5,0.5], [0.5, 0.5, 0.5])
RESCALE_NEG1_POS1_0_1 = vision.transforms.Normalize([-1.,-1.,-1.], [2., 2., 2.])
RESCALE_0_255_NEG1_POS1 = vision.transforms.Normalize([127.5,127.5,127.5], [255, 255, 255])
class Pipeline(object):
def __init__(self, env_or_pipeline):
pass
def forward(self):
pass
# Remember to import these into whomever does the eval(preprocessing_fn) - habitatenv and evaluate_habitat
def identity_transform():
def _thunk(obs_space):
        return (lambda x: x), obs_space  # (pipeline, observation space), matching the other thunks
return _thunk
def fill_like(output_size, fill_value=0.0, dtype=torch.float32):
def _thunk(obs_space):
tensor = torch.ones((1,), dtype=dtype)
def _process(x):
return tensor.new_full(output_size, fill_value).numpy()
return _process, spaces.Box(-1, 1, output_size, tensor.numpy().dtype)
return _thunk
def rescale_centercrop_resize(output_size, dtype=np.float32):
''' rescale_centercrop_resize
Args:
output_size: A tuple CxWxH
dtype: of the output (must be np, not torch)
obs_space: Should be form WxHxC
Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
def _rescale_centercrop_resize_thunk(obs_space):
obs_shape = obs_space.shape
obs_min_wh = min(obs_shape[:2])
output_wh = output_size[-2:] # The out
processed_env_shape = output_size
pipeline = vision.transforms.Compose([
vision.transforms.ToPILImage(),
vision.transforms.CenterCrop([obs_min_wh, obs_min_wh]),
vision.transforms.Resize(output_wh),
vision.transforms.ToTensor(),
RESCALE_0_1_NEG1_POS1,
])
return pipeline, spaces.Box(-1, 1, output_size, dtype)
return _rescale_centercrop_resize_thunk
def rescale_centercrop_resize_collated(output_size, dtype=np.float32):
# WARNING: I will leave this here in case previous models use it, but this is semantically not correct - we do not do any processing
    ''' rescale_centercrop_resize_collated
Args:
output_size: A tuple CxWxH
dtype: of the output (must be np, not torch)
obs_space: Should be form WxHxC
Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
def _rescale_centercrop_resize_thunk(obs_space):
obs_shape = obs_space.shape
obs_min_wh = min(obs_shape[:2])
output_wh = output_size[-2:] # The out
processed_env_shape = output_size
pipeline = vision.transforms.Compose([
vision.transforms.ToPILImage(),
vision.transforms.CenterCrop([obs_min_wh, obs_min_wh]),
vision.transforms.Resize(output_wh),
vision.transforms.ToTensor(),
RESCALE_0_1_NEG1_POS1,
])
def iterative_pipeline(pipeline):
def runner(x):
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x).cuda()
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = 2.0 * x - 1.0
return x
# if isinstance(x, torch.Tensor): # for training
# _, h,w,c = x.shape
# iterative_ret = [pipeline(x_.view(c,h,w).to(torch.uint8)) for x_ in x]
# elif isinstance(x, np.ndarray): # for testing
# iterative_ret = [pipeline(x_) for x_ in x]
# else:
# assert False, f'transform does not like {type(x)}'
# return torch.stack(iterative_ret)
return runner
return iterative_pipeline(pipeline), spaces.Box(-1, 1, output_size, dtype)
return _rescale_centercrop_resize_thunk
def rescale():
    ''' Rescales observations to a new range of values
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
def _rescale_thunk(obs_space):
obs_shape = obs_space.shape
np_pipeline = vision.transforms.Compose([
vision.transforms.ToTensor(),
RESCALE_0_1_NEG1_POS1,
])
def pipeline(im):
if isinstance(im, np.ndarray):
return np_pipeline(im)
else:
return RESCALE_0_255_NEG1_POS1(im)
return pipeline, spaces.Box(-1.0, 1.0, obs_space.shape, np.float32)
return _rescale_thunk
def grayscale_rescale():
    ''' Converts observations to grayscale and rescales them
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
def _grayscale_rescale_thunk(obs_space):
pipeline = vision.transforms.Compose([
vision.transforms.ToPILImage(),
vision.transforms.Grayscale(),
vision.transforms.ToTensor(),
vision.transforms.Normalize([0.5], [0.5])
])
obs_shape = obs_space.shape
return pipeline, spaces.Box(-1.0, 1.0,
(1, obs_shape[0], obs_shape[1]),
dtype=np.float32)
return _grayscale_rescale_thunk
def cross_modal_transform(eval_to_get_net, output_shape=(3,84,84), dtype=np.float32):
    ''' cross_modal_transform
    Args:
        eval_to_get_net: the encoder network used to translate observations
        output_shape: A tuple CxWxH
        dtype: of the output (must be np, not torch)
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
_rescale_thunk = rescale_centercrop_resize((3, 256, 256))
output_size = output_shape[-1]
output_shape = output_shape
net = eval_to_get_net
resize_fn = vision.transforms.Compose([
vision.transforms.ToPILImage(),
vision.transforms.Resize(output_size),
vision.transforms.ToTensor(),
RESCALE_0_1_NEG1_POS1,
])
def encode(x):
with torch.no_grad():
return net(x)
def _transform_thunk(obs_space):
rescale, _ = _rescale_thunk(obs_space)
def pipeline(x):
with torch.no_grad():
if isinstance(x, torch.Tensor):
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x).cuda()
x = encode(x)
y = (x + 1.) / 2
z = torch.stack([resize_fn(y_.cpu()) for y_ in y])
return z
return pipeline, spaces.Box(-1, 1, output_shape, dtype)
return _transform_thunk
def image_to_input_collated(output_size, dtype=np.float32):
def _thunk(obs_space):
def runner(x):
# input: n x h x w x c
# output: n x c x h x w and normalized, ready to pass into net.forward
assert x.shape[2] == x.shape[1], 'Input image must be square, of the form: N,H,W,C'
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x.copy()).cuda()
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = 2.0 * x - 1.0
return x
return runner, spaces.Box(-1, 1, output_size, dtype)
return _thunk
def map_pool(output_size, dtype=np.float32):
def _thunk(obs_space):
def runner(x):
with torch.no_grad():
# input: h x w x c
# output: c x h x w and normalized, ready to pass into net.forward
assert x.shape[0] == x.shape[1], 'we are only using square data, data format: N,H,W,C'
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x.copy()).cuda()
x.unsqueeze_(0)
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
x = torch.rot90(x, k=2, dims=(2,3)) # Face north (this could be computed using a different world2agent transform)
x = 2.0 * x - 1.0
x.squeeze_(0)
# print(x.shape)
return x.cpu()
return runner, spaces.Box(-1, 1, output_size, dtype)
return _thunk
def map_pool_collated(output_size, dtype=np.float32):
def _thunk(obs_space):
def runner(x):
with torch.no_grad():
# input: n x h x w x c
# output: n x c x h x w and normalized, ready to pass into net.forward
assert x.shape[2] == x.shape[1], 'we are only using square data, data format: N,H,W,C'
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x.copy()).cuda()
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
x = torch.rot90(x, k=2, dims=(2,3)) # Face north (this could be computed using a different world2agent transform)
x = 2.0 * x - 1.0
return x
return runner, spaces.Box(-1, 1, output_size, dtype)
return _thunk
def taskonomy_features_transform(task_path, dtype=np.float32):
    ''' taskonomy_features_transform
    Args:
        task_path: path to the taskonomy encoder checkpoint
        dtype: of the output (must be np, not torch)
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
# print(task_path)
net = TaskonomyEncoder().cuda()
net.eval()
checkpoint = torch.load(task_path)
net.load_state_dict(checkpoint['state_dict'])
def encode(x):
with torch.no_grad():
return net(x)
def _taskonomy_features_transform_thunk(obs_space):
def pipeline(x):
x = torch.Tensor(x).cuda()
x = encode(x)
return x.cpu()
return pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
return _taskonomy_features_transform_thunk
def _load_encoder(encoder_path):
net = TaskonomyEncoder() #.cuda()
net.eval()
checkpoint = torch.load(encoder_path)
net.load_state_dict(checkpoint['state_dict'])
for p in net.parameters():
p.requires_grad = False
# net = Compose(nn.GroupNorm(32, 32, affine=False), net)
return net
def _load_encoders_seq(encoder_paths):
experts = [_load_encoder(encoder_path) for encoder_path in encoder_paths]
experts = [e.cuda() for e in experts]
return experts
def _load_encoders_parallel(encoder_paths, n_processes=None):
# This is not working right now for some reason
n_processes = len(encoder_paths) if n_processes is None else min(len(encoder_paths), n_processes)
n_parallel = min(multiprocessing.cpu_count(), n_processes)
pool = multiprocessing.Pool(min(n_parallel, n_processes))
experts = pool.map(_load_encoder, encoder_paths)
pool.close()
pool.join()
experts = [e.cuda() for e in experts]
return experts
def taskonomy_multi_features_transform(task_paths, dtype=np.float32):
    ''' taskonomy_multi_features_transform
    Args:
        task_paths: list of paths to taskonomy encoder checkpoints
        dtype: of the output (must be np, not torch)
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
nets = _load_encoders_seq(task_paths)
def encode(x):
with torch.no_grad():
return torch.cat([net(x) for net in nets], dim=1)
def _taskonomy_features_transform_thunk(obs_space):
def pipeline(x):
x = torch.Tensor(x).cuda()
x = encode(x)
return x.cpu()
return pipeline, spaces.Box(-1, 1, (8*len(nets), 16, 16), dtype)
return _taskonomy_features_transform_thunk
def taskonomy_features_transform_collated(task_path, dtype=np.float32):
    ''' taskonomy_features_transform_collated
    Args:
        task_path: path to the taskonomy encoder checkpoint
        dtype: of the output (must be np, not torch)
    Returns:
        a thunk which takes the input observation space and returns (transform, output observation space)
'''
# print(task_path)
net = TaskonomyEncoder().cuda()
net.eval()
checkpoint = torch.load(task_path)
net.load_state_dict(checkpoint['state_dict'])
def encode(x):
with torch.no_grad():
x = torch.Tensor(x).cuda()
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x).cuda()
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = 2.0 * x - 1.0
return net(x)
def _taskonomy_features_transform_thunk(obs_space):
pipeline = lambda x: encode(x).cpu()
return pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
return _taskonomy_features_transform_thunk
# def taskonomy_features_transform_collated(task_path, encoder_type='taskonomy', dtype=np.float32):
# ''' rescale_centercrop_resize
# Args:
# output_size: A tuple CxWxH
# dtype: of the output (must be np, not torch)
# Returns:
# a function which returns takes 'env' and returns transform, output_size, dtype
# '''
# _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
# _pixels_as_state_thunk = pixels_as_state((8, 16, 16)) # doubt this works... because we need to reshape and that's not impl at collate
# if task_path != 'pixels_as_state' and task_path != 'blind':
# if encoder_type == 'taskonomy':
# net = TaskonomyEncoder(normalize_outputs=False) # Note this change! We do not normalize the encoder on default now
# if task_path != 'None':
# checkpoint = torch.load(task_path)
# if any([isinstance(v, nn.Module) for v in checkpoint.values()]):
# net = [v for v in checkpoint.values() if isinstance(v, nn.Module)][0]
# elif 'state_dict' in checkpoint.keys():
# net.load_state_dict(checkpoint['state_dict'])
# else:
# assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
# net = net.cuda()
# net.eval()
# def encode(x):
# if task_path == 'pixels_as_state' or task_path == 'blind':
# return x
# with torch.no_grad():
# return net(x)
# def _taskonomy_features_transform_thunk(obs_space):
# rescale, _ = _rescale_thunk(obs_space)
# pixels_as_state, _ = _pixels_as_state_thunk(obs_space)
# def pipeline(x):
# with torch.no_grad():
# if isinstance(x, torch.Tensor): # for training
# x = torch.cuda.FloatTensor(x.cuda())
# else: # for testing
# x = torch.cuda.FloatTensor(x).cuda()
# x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
# x = 2.0 * x - 1.0
# x = encode(x)
# return x
# def pixels_as_state_pipeline(x):
# return pixels_as_state(x).cpu()
# def blind_pipeline(x):
# batch_size = x.shape[0]
# return torch.zeros((batch_size, 8, 16, 16))
# if task_path == 'blind':
# return blind_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
# elif task_path == 'pixels_as_state':
# return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
# else:
# return pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
# return _taskonomy_features_transform_thunk
def taskonomy_features_transforms_collated(task_paths, encoder_type='taskonomy', dtype=np.float32):
# handles multiple taskonomy encoders at once
num_tasks = 0
if task_paths != 'pixels_as_state' and task_paths != 'blind':
task_path_list = [tp.strip() for tp in task_paths.split(',')]
num_tasks = len(task_path_list)
assert num_tasks > 0, 'at least need one path'
if encoder_type == 'taskonomy':
nets = [TaskonomyEncoder(normalize_outputs=False) for _ in range(num_tasks)]
else:
assert False, f'do not recongize encoder type {encoder_type}'
for i, task_path in enumerate(task_path_list):
checkpoint = torch.load(task_path)
net_in_ckpt = [v for v in checkpoint.values() if isinstance(v, nn.Module)]
if len(net_in_ckpt) > 0:
nets[i] = net_in_ckpt[0]
elif 'state_dict' in checkpoint.keys():
nets[i].load_state_dict(checkpoint['state_dict'])
else:
assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
nets[i] = nets[i].cuda()
nets[i].eval()
def encode(x):
if task_paths == 'pixels_as_state' or task_paths == 'blind':
return x
with torch.no_grad():
feats = []
for net in nets:
feats.append(net(x))
return torch.cat(feats, dim=1)
def _taskonomy_features_transform_thunk(obs_space):
def pipeline(x):
with torch.no_grad():
if isinstance(x, torch.Tensor): # for training
x = torch.cuda.FloatTensor(x.cuda())
else: # for testing
x = torch.cuda.FloatTensor(x).cuda()
x = x.permute(0, 3, 1, 2) / 255.0 #.view(1, 3, 256, 256)
x = 2.0 * x - 1.0
x = encode(x)
return x
def pixels_as_state_pipeline(x):
return pixels_as_state(x).cpu()
        if task_paths == 'pixels_as_state':
return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
else:
return pipeline, spaces.Box(-1, 1, (8 * num_tasks, 16, 16), dtype)
return _taskonomy_features_transform_thunk
```
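All of these factories follow the same thunk convention: calling the factory returns a function of the observation space, which in turn returns `(pipeline, transformed_observation_space)`. A small sketch with illustrative shapes:
```python
import numpy as np
from gym import spaces

obs_space = spaces.Box(0, 255, (256, 256, 3), dtype=np.uint8)
pipeline, new_space = rescale_centercrop_resize((3, 84, 84))(obs_space)

frame = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
processed = pipeline(frame)  # torch tensor of shape (3, 84, 84), rescaled to [-1, 1]
```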
#### File: evkit/saving/checkpoints.py
```python
import glob
import json
import os
import shutil
import subprocess
import torch
def load_experiment_configs(log_dir, uuid=None):
'''
Loads all experiments in a given directory
Optionally, may be restricted to those with a given uuid
'''
dirs = [f for f in os.listdir(log_dir) if os.path.isdir(os.path.join(log_dir, f))]
results = []
for d in dirs:
cfg_path = os.path.join(log_dir, d, 'config.json')
if not os.path.exists(cfg_path):
continue
with open(os.path.join(log_dir, d, 'config.json'), 'r') as f:
results.append(json.load(f))
if uuid is not None and results[-1]['uuid'] != uuid:
results.pop()
return results
def load_experiment_config_paths(log_dir, uuid=None):
dirs = [f for f in os.listdir(log_dir) if os.path.isdir(os.path.join(log_dir, f))]
results = []
for d in dirs:
cfg_path = os.path.join(log_dir, d, 'config.json')
if not os.path.exists(cfg_path):
continue
with open(cfg_path, 'r') as f:
cfg = json.load(f)
results.append(cfg_path)
if uuid is not None and cfg['uuid'] != uuid:
results.pop()
return results
def checkpoint_name(checkpoint_dir, epoch='latest'):
return os.path.join(checkpoint_dir, 'ckpt-{}.dat'.format(epoch))
def last_archived_run(base_dir, uuid):
''' Returns the name of the last archived run. Of the form:
'UUID_run_K'
'''
archive_dir = os.path.join(base_dir, 'archive')
existing_runs = glob.glob(os.path.join(archive_dir, uuid + "_run_*"))
print(os.path.join(archive_dir, uuid + "_run_*"))
if len(existing_runs) == 0:
return None
run_numbers = [int(run.split("_")[-1]) for run in existing_runs]
current_run_number = max(run_numbers) if len(existing_runs) > 0 else 0
current_run_archive_dir = os.path.join(archive_dir, "{}_run_{}".format(uuid, current_run_number))
return current_run_archive_dir
def archive_current_run(base_dir, uuid):
''' Archives the current run. That is, it moves everything
base_dir/*uuid* -> base_dir/archive/uuid_run_K/
where K is determined automatically.
'''
matching_files = glob.glob(os.path.join(base_dir, "*" + uuid + "*"))
if len(matching_files) == 0:
return
archive_dir = os.path.join(base_dir, 'archive')
os.makedirs(archive_dir, exist_ok=True)
existing_runs = glob.glob(os.path.join(archive_dir, uuid + "_run_*"))
run_numbers = [int(run.split("_")[-1]) for run in existing_runs]
current_run_number = max(run_numbers) + 1 if len(existing_runs) > 0 else 0
current_run_archive_dir = os.path.join(archive_dir, "{}_run_{}".format(uuid, current_run_number))
os.makedirs(current_run_archive_dir)
for f in matching_files:
shutil.move(f, current_run_archive_dir)
return
def save_checkpoint(obj, directory, step_num):
os.makedirs(directory, exist_ok=True)
torch.save(obj, checkpoint_name(directory))
subprocess.call('cp {} {} &'.format(
checkpoint_name(directory),
checkpoint_name(directory, step_num)),
shell=True)
```
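A hypothetical sketch of how `save_checkpoint` might be called from a training loop; `model`, `optimizer`, and the directory name are placeholders, not values from this repository:
```python
state = {'model': model.state_dict(),
         'optimizer': optimizer.state_dict(),
         'epoch': epoch}
save_checkpoint(state, directory='checkpoints/my_run', step_num=epoch)
# writes checkpoints/my_run/ckpt-latest.dat and copies it to ckpt-<epoch>.dat in the background
```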
#### File: evkit/saving/monitor.py
```python
from gym.wrappers import Monitor
import gym
class VisdomMonitor(Monitor):
    def __init__(self, env, directory,
                 video_callable=None, force=False, resume=False,
                 write_upon_reset=False, uid=None, mode=None,
                 server="localhost", visdom_env='main', port=8097):
        super(VisdomMonitor, self).__init__(env, directory,
                             video_callable=video_callable, force=force,
                             resume=resume, write_upon_reset=write_upon_reset,
                             uid=uid, mode=mode)
    def _close_video_recorder(self):
        # Defer to the base Monitor implementation.
        super()._close_video_recorder()
```
#### File: evkit/saving/observers.py
```python
import json
import os
from sacred.observers import FileStorageObserver
class FileStorageObserverWithExUuid(FileStorageObserver):
''' Wraps the FileStorageObserver so that we can pass in the Id.
This allows us to save experiments into subdirectories with
        meaningful names. The standard FileStorageObserver just increments
a counter.'''
UNUSED_VALUE = -1
def started_event(self, ex_info, command, host_info, start_time, config,
meta_info, _id):
_id = config['uuid'] + "_metadata"
super().started_event(ex_info, command, host_info, start_time, config,
meta_info, _id=_id)
def queued_event(self, ex_info, command, host_info, queue_time, config,
meta_info, _id):
assert 'uuid' in config, "The config must contain a key 'uuid'"
_id = config['uuid'] + "_metadata"
super().queued_event(ex_info, command, host_info, queue_time, config,
meta_info, _id=_id)
```
#### File: evkit/utils/misc.py
```python
import collections
import collections.abc
import pprint
import re
import string
import torch
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def is_interactive():
try:
ip = get_ipython()
return ip.has_trait('kernel')
except:
return False
def is_cuda(model):
return next(model.parameters()).is_cuda
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
self._keys, self._vals = zip(*adict.items())
self._keys, self._vals = list(self._keys), list(self._vals)
def keys(self):
return self._keys
def vals(self):
return self._vals
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
def update_dict_deepcopy(d, u): # we need a deep dictionary update
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
d[k] = update_dict_deepcopy(d.get(k, {}), v)
else:
d[k] = v
return d
```
#### File: core/physics/robot_locomotors.py
```python
from gibson.core.physics.robot_bases import BaseRobot, quatToXYZW
import numpy as np
import pybullet as p
import os
import gym, gym.spaces
from transforms3d.euler import euler2quat, euler2mat
from transforms3d.quaternions import quat2mat, qmult
import transforms3d.quaternions as quat
import sys
OBSERVATION_EPS = 0.01
class WalkerBase(BaseRobot):
""" Built on top of BaseRobot
Handles action_dim, sensor_dim, scene
base_position, apply_action, calc_state
reward
"""
def __init__(self,
filename, # robot file name
robot_name, # robot name
action_dim, # action dimension
power,
initial_pos,
target_pos,
scale,
sensor_dim=None,
resolution=512,
control = 'torque',
env = None
):
BaseRobot.__init__(self, filename, robot_name, scale, env)
self.control = control
self.resolution = resolution
self.obs_dim = None
self.obs_dim = [self.resolution, self.resolution, 0]
if "rgb_filled" in self.env.config["output"]:
self.obs_dim[2] += 3
if "depth" in self.env.config["output"]:
self.obs_dim[2] += 1
assert type(sensor_dim) == int, "Sensor dimension must be int, got {}".format(type(sensor_dim))
assert type(action_dim) == int, "Action dimension must be int, got {}".format(type(action_dim))
action_high = np.ones([action_dim])
self.action_space = gym.spaces.Box(-action_high, action_high)
obs_high = np.inf * np.ones(self.obs_dim) + OBSERVATION_EPS
self.observation_space = gym.spaces.Box(-obs_high, obs_high)
sensor_high = np.inf * np.ones([sensor_dim])
self.sensor_space = gym.spaces.Box(-sensor_high, sensor_high)
self.power = power
self.camera_x = 0
self.target_pos = target_pos
self.initial_pos = initial_pos
self.body_xyz=[0, 0, 0]
self.action_dim = action_dim
self.scale = scale
self.angle_to_target = 0
def robot_specific_reset(self):
for j in self.ordered_joints:
j.reset_joint_state(self.np_random.uniform(low=-0.1, high=0.1), 0)
self.feet = [self.parts[f] for f in self.foot_list]
self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
self.scene.actor_introduce(self)
self.initial_z = None
def get_position(self):
'''Get current robot position
'''
return self.robot_body.get_position()
def get_orientation(self):
'''Return robot orientation
'''
return self.robot_body.get_orientation()
def set_position(self, pos):
self.robot_body.reset_position(pos)
def move_by(self, delta):
new_pos = np.array(delta) + self.get_position()
self.robot_body.reset_position(new_pos)
def move_forward(self, forward=0.10):
x, y, z, w = self.robot_body.get_orientation()
self.move_by(quat2mat([w, x, y, z]).dot(np.array([forward, 0, 0])))
yaw = self.robot_body.bp_pose.rpy()[2]
self.robot_body.reset_orientation(quatToXYZW(euler2quat(0, 0, yaw), 'wxyz'))
def move_backward(self, backward=0.14):
x, y, z, w = self.robot_body.get_orientation()
self.move_by(quat2mat([w, x, y, z]).dot(np.array([-backward, 0, 0])))
def turn_left(self, delta=0.24):
orn = self.robot_body.get_orientation()
new_orn = qmult((euler2quat(-delta, 0, 0)), orn)
self.robot_body.set_orientation(new_orn)
def turn_right(self, delta=0.24):
orn = self.robot_body.get_orientation()
new_orn = qmult((euler2quat(delta, 0, 0)), orn)
self.robot_body.set_orientation(new_orn)
def get_rpy(self):
return self.robot_body.bp_pose.rpy()
def apply_action(self, a):
#print(self.ordered_joints)
if self.control == 'torque':
for n, j in enumerate(self.ordered_joints):
j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))
elif self.control == 'velocity':
for n, j in enumerate(self.ordered_joints):
j.set_motor_velocity(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))
elif self.control == 'position':
for n, j in enumerate(self.ordered_joints):
j.set_motor_position(a[n])
elif type(self.control) is list or type(self.control) is tuple: #if control is a tuple, set different control
# type for each joint
for n, j in enumerate(self.ordered_joints):
if self.control[n] == 'torque':
j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))
elif self.control[n] == 'velocity':
j.set_motor_velocity(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))
elif self.control[n] == 'position':
j.set_motor_position(a[n])
else:
pass
def get_target_position(self):
return self.target_pos
def set_target_position(self, pos):
self.target_pos = pos
def calc_state(self):
j = np.array([j.get_joint_relative_state() for j in self.ordered_joints], dtype=np.float32).flatten()
self.joint_speeds = j[1::2]
self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
body_pose = self.robot_body.pose()
parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
self.body_xyz = (
parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]) # torso z is more informative than mean z
self.body_rpy = body_pose.rpy()
z = self.body_xyz[2]
        if self.initial_z is None:
self.initial_z = z
r, p, yaw = self.body_rpy
self.walk_target_theta = np.arctan2(self.target_pos[1] - self.body_xyz[1],
self.target_pos[0] - self.body_xyz[0])
self.walk_target_dist = np.linalg.norm(
[self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0]])
        self.walk_target_dist_xyz = np.linalg.norm(
            [self.target_pos[2] - self.body_xyz[2], self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0]])
angle_to_target = self.walk_target_theta - yaw
self.angle_to_target = angle_to_target
self.walk_height_diff = np.abs(self.target_pos[2] - self.body_xyz[2])
self.dist_to_start = np.linalg.norm(np.array(self.body_xyz) - np.array(self.initial_pos))
        debugmode = 0
        if debugmode:
            print("Robot debug mode: walk_height_diff", self.walk_height_diff)
            print("Robot debug mode: walk_target_z", self.target_pos[2])
            print("Robot debug mode: body_xyz", self.body_xyz[2])
rot_speed = np.array(
[[np.cos(-yaw), -np.sin(-yaw), 0],
[np.sin(-yaw), np.cos(-yaw), 0],
[ 0, 0, 1]]
)
vx, vy, vz = np.dot(rot_speed, self.robot_body.speed()) # rotate speed back to body point of view
debugmode=0
if debugmode:
print("Robot state", self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0])
more = np.array([ z-self.initial_z,
np.sin(angle_to_target), np.cos(angle_to_target),
0.3* vx , 0.3* vy , 0.3* vz , # 0.3 is just scaling typical speed into -1..+1, no physical sense here
r, p], dtype=np.float32)
if debugmode:
print("Robot more", more)
        if 'nonviz_sensor' not in self.env.config["output"]:
j.fill(0)
more.fill(0)
return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
def calc_potential(self):
# progress in potential field is speed*dt, typical speed is about 2-3 meter per second, this potential will change 2-3 per frame (not per second),
# all rewards have rew/frame units and close to 1.0 (hzyjerry) ==> make rewards similar scale
debugmode=0
if (debugmode):
print("calc_potential: self.walk_target_dist x y", self.walk_target_dist)
print("robot position", self.body_xyz, "target position", [self.target_pos[0], self.target_pos[1], self.target_pos[2]])
return - self.walk_target_dist / self.scene.dt
def calc_goalless_potential(self):
return self.dist_to_start / self.scene.dt
def dist_to_target(self):
return np.linalg.norm(np.array(self.body_xyz) - np.array(self.get_target_position()))
def _is_close_to_goal(self):
body_pose = self.robot_body.pose()
parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
self.body_xyz = (
parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]) # torso z is more informative than mean z
dist_to_goal = np.linalg.norm([self.body_xyz[0] - self.target_pos[0], self.body_xyz[1] - self.target_pos[1]])
return dist_to_goal < 2
def _get_scaled_position(self):
'''Private method, please don't use this method outside
Used for downscaling MJCF models
'''
return self.robot_body.get_position() / self.mjcf_scaling
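# Usage sketch (illustrative only): how the per-joint control dispatch in WalkerBase.apply_action
# behaves when `control` is a list/tuple. MockJoint is an assumption for illustration; real joints
# come from the physics backend and expose the same set_motor_* methods.
def _demo_mixed_control_dispatch():
    '''Minimal standalone sketch of mixed per-joint control, mirroring WalkerBase.apply_action.
    '''
    class MockJoint(object):
        power_coef = 1.0
        def set_motor_torque(self, t): print("torque", round(t, 3))
        def set_motor_velocity(self, v): print("velocity", round(v, 3))
        def set_motor_position(self, q): print("position", round(q, 3))
    control = ['torque', 'velocity', 'position']
    joints = [MockJoint() for _ in control]
    action = np.array([0.5, -2.0, 0.1])
    power = 2.5
    for mode, j, a in zip(control, joints, action):
        if mode == 'torque':
            j.set_motor_torque(power * j.power_coef * float(np.clip(a, -1, +1)))
        elif mode == 'velocity':
            j.set_motor_velocity(power * j.power_coef * float(np.clip(a, -1, +1)))
        elif mode == 'position':
            j.set_motor_position(a)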
class Ant(WalkerBase):
foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']
model_type = "MJCF"
default_scale = 0.25
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
self.mjcf_scaling = scale
WalkerBase.__init__(self, "ant.xml", "torso", action_dim=8,
sensor_dim=28, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
env = env)
self.r_f = 0.1
if config["is_discrete"]:
self.action_space = gym.spaces.Discrete(17)
self.torque = 10
## Hip_1, Ankle_1, Hip_2, Ankle_2, Hip_3, Ankle_3, Hip_4, Ankle_4
self.action_list = [[self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],
[0, self.r_f * self.torque, 0, 0, 0, 0, 0, 0],
[0, 0, self.r_f * self.torque, 0, 0, 0, 0, 0],
[0, 0, 0, self.r_f * self.torque, 0, 0, 0, 0],
[0, 0, 0, 0, self.r_f * self.torque, 0, 0, 0],
[0, 0, 0, 0, 0, self.r_f * self.torque, 0, 0],
[0, 0, 0, 0, 0, 0, self.r_f * self.torque, 0],
[0, 0, 0, 0, 0, 0, 0, self.r_f * self.torque],
[-self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],
[0, -self.r_f * self.torque, 0, 0, 0, 0, 0, 0],
[0, 0, -self.r_f * self.torque, 0, 0, 0, 0, 0],
[0, 0, 0, -self.r_f * self.torque, 0, 0, 0, 0],
[0, 0, 0, 0, -self.r_f * self.torque, 0, 0, 0],
[0, 0, 0, 0, 0, -self.r_f * self.torque, 0, 0],
[0, 0, 0, 0, 0, 0, -self.r_f * self.torque, 0],
[0, 0, 0, 0, 0, 0, 0, -self.r_f * self.torque],
[0, 0, 0, 0, 0, 0, 0, 0]]
'''
[[self.r_f * self.torque, 0, 0, -self.r_f * self.torque, 0, 0, 0, 0],
[0, 0, self.r_f * self.torque, self.r_f * self.torque, 0, 0, 0, 0],
[0, 0, 0, 0, self.r_f * self.torque, self.r_f * self.torque, 0, 0],
[0, 0, 0, 0, 0, 0, self.r_f * self.torque, self.r_f * self.torque],
[0, 0, 0, 0, 0, 0, 0, 0]]
'''
self.setup_keys_to_action()
def apply_action(self, action):
if self.config["is_discrete"]:
realaction = self.action_list[action]
else:
realaction = action
WalkerBase.apply_action(self, realaction)
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('1'), ): 0,
(ord('2'), ): 1,
(ord('3'), ): 2,
(ord('4'), ): 3,
(ord('5'), ): 4,
(ord('6'), ): 5,
(ord('7'), ): 6,
(ord('8'), ): 7,
(ord('9'), ): 8,
(ord('0'), ): 9,
(ord('q'), ): 10,
(ord('w'), ): 11,
(ord('e'), ): 12,
(ord('r'), ): 13,
(ord('t'), ): 14,
(ord('y'), ): 15,
(): 4
}
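# Usage sketch (illustrative only): the 17 discrete Ant actions above are +/- a fixed torque on one
# of the 8 joints plus a no-op, so the same table can be built compactly with np.eye. The magnitude
# assumes r_f * torque = 0.1 * 10 = 1.0 as set in Ant.__init__.
def _demo_ant_discrete_actions():
    '''Rebuild the Ant discrete action table programmatically.
    '''
    magnitude = 0.1 * 10
    action_list = np.concatenate([magnitude * np.eye(8),
                                  -magnitude * np.eye(8),
                                  np.zeros((1, 8))]).tolist()
    assert len(action_list) == 17
    return action_list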
class AntClimber(Ant):
def __init__(self, config, env=None):
Ant.__init__(self, config, env=env)
def robot_specific_reset(self):
Ant.robot_specific_reset(self)
amplify = 1
for j in self.jdict.keys():
self.jdict[j].power_coef *= amplify
'''
self.jdict["ankle_1"].power_coef = amplify * self.jdict["ankle_1"].power_coef
self.jdict["ankle_2"].power_coef = amplify * self.jdict["ankle_2"].power_coef
self.jdict["ankle_3"].power_coef = amplify * self.jdict["ankle_3"].power_coef
self.jdict["ankle_4"].power_coef = amplify * self.jdict["ankle_4"].power_coef
'''
debugmode=0
if debugmode:
for k in self.jdict.keys():
print("Power coef", self.jdict[k].power_coef)
def calc_potential(self):
#base_potential = Ant.calc_potential(self)
#height_coeff = 3
#height_potential = - height_coeff * self.walk_height_diff / self.scene.dt
debugmode = 0
if debugmode:
print("Ant xyz potential", self.walk_target_dist_xyz)
return - self.walk_target_dist_xyz / self.scene.dt
def alive_bonus(self, roll, pitch):
"""Alive requires the ant's head to not touch the ground, it's roll
and pitch cannot be too large"""
#return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
        alive = -np.pi / 2 < roll < np.pi / 2 and -np.pi / 2 < pitch < np.pi / 2
debugmode = 0
if debugmode:
print("roll, pitch")
print(roll, pitch)
print("alive")
print(alive)
return +1 if alive else -1
def _is_close_to_goal(self):
body_pose = self.robot_body.pose()
parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]) # torso z is more informative than mean z
dist_to_goal = np.linalg.norm([self.body_xyz[0] - self.target_pos[0], self.body_xyz[1] - self.target_pos[1], self.body_xyz[2] - self.target_pos[2]])
debugmode = 0
if debugmode:
print(np.linalg.norm([self.body_xyz[0] - self.target_pos[0], self.body_xyz[1] - self.target_pos[1], self.body_xyz[2] - self.target_pos[2]]), [self.body_xyz[0], self.body_xyz[1], self.body_xyz[2]], [self.target_pos[0], self.target_pos[1], self.target_pos[2]])
return dist_to_goal < 0.5
class Humanoid(WalkerBase):
self_collision = True
foot_list = ["right_foot", "left_foot"] # "left_hand", "right_hand"
model_type = "MJCF"
default_scale = 0.6
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
self.mjcf_scaling = scale
WalkerBase.__init__(self, "humanoid.xml", "torso", action_dim=17,
sensor_dim=44, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
env = env)
self.glass_id = None
self.is_discrete = config["is_discrete"]
if self.is_discrete:
            self.action_space = gym.spaces.Discrete(2)  # action_list below defines two discrete actions
self.torque = 0.1
self.action_list = np.concatenate((np.ones((1, 17)), np.zeros((1, 17)))).tolist()
self.setup_keys_to_action()
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
humanoidId = -1
numBodies = p.getNumBodies()
        for i in range(numBodies):
bodyInfo = p.getBodyInfo(i)
if bodyInfo[1].decode("ascii") == 'humanoid':
humanoidId = i
## Spherical radiance/glass shield to protect the robot's camera
if self.glass_id is None:
            self.glass_id = p.loadMJCF(os.path.join(self.physics_model_dir, "glass.xml"))[0]
            #print("setting up glass", self.glass_id, humanoidId)
            p.changeVisualShape(self.glass_id, -1, rgbaColor=[0, 0, 0, 0])
            cid = p.createConstraint(humanoidId, -1, self.glass_id, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 1.4], [0, 0, 1])
self.motor_names = ["abdomen_z", "abdomen_y", "abdomen_x"]
self.motor_power = [100, 100, 100]
self.motor_names += ["right_hip_x", "right_hip_z", "right_hip_y", "right_knee"]
self.motor_power += [100, 100, 300, 200]
self.motor_names += ["left_hip_x", "left_hip_z", "left_hip_y", "left_knee"]
self.motor_power += [100, 100, 300, 200]
self.motor_names += ["right_shoulder1", "right_shoulder2", "right_elbow"]
self.motor_power += [75, 75, 75]
self.motor_names += ["left_shoulder1", "left_shoulder2", "left_elbow"]
self.motor_power += [75, 75, 75]
self.motors = [self.jdict[n] for n in self.motor_names]
def apply_action(self, a):
if self.is_discrete:
realaction = self.action_list[a]
else:
force_gain = 1
for i, m, power in zip(range(17), self.motors, self.motor_power):
m.set_motor_torque( float(force_gain * power*self.power*a[i]) )
#m.set_motor_torque(float(force_gain * power * self.power * np.clip(a[i], -1, +1)))
def alive_bonus(self, z, pitch):
return +2 if z > 0.78 else -1 # 2 here because 17 joints produce a lot of electricity cost just from policy noise, living must be better than dying
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'), ): 0,
(): 1
}
class Husky(WalkerBase):
foot_list = ['front_left_wheel_link', 'front_right_wheel_link', 'rear_left_wheel_link', 'rear_right_wheel_link']
mjcf_scaling = 1
model_type = "URDF"
default_scale = 0.6
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
WalkerBase.__init__(self, "husky.urdf", "base_link", action_dim=4,
sensor_dim=23, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
env = env)
self.is_discrete = config["is_discrete"]
self.ideal_position_control = config["ideal_position_control"]
if self.ideal_position_control:
assert(self.is_discrete)
if self.is_discrete:
self.action_space = gym.spaces.Discrete(3)
self.torque = 0.03
if self.ideal_position_control:
self.action_list = [self.move_forward, self.turn_right, self.turn_left, lambda: None]
else:
self.action_list = [[self.torque, self.torque, self.torque, self.torque],
[-self.torque, -self.torque, -self.torque, -self.torque],
[self.torque, -self.torque, self.torque, -self.torque],
[-self.torque, self.torque, -self.torque, self.torque],
[0, 0, 0, 0]]
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
def apply_action(self, action):
if self.ideal_position_control:
action_function = self.action_list[action]
action_function()
else:
if self.is_discrete:
realaction = self.action_list[action]
else:
realaction = action
WalkerBase.apply_action(self, realaction)
def steering_cost(self, action):
if not self.is_discrete:
return 0
if action == 2 or action == 3:
return -0.1
else:
return 0
def angle_cost(self):
angle_const = 0.2
diff_to_half = np.abs(self.angle_to_target - 1.57)
is_forward = self.angle_to_target > 1.57
diff_angle = np.abs(1.57 - diff_to_half) if is_forward else 3.14 - np.abs(1.57 - diff_to_half)
debugmode = 0
if debugmode:
print("is forward", is_forward)
print("diff to half", diff_to_half)
print("angle to target", self.angle_to_target)
print("diff angle", diff_angle)
return -angle_const* diff_angle
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'), ): 0, ## forward
#(ord('s'), ): 1, ## backward
(ord('d'), ): 1, ## turn right
(ord('a'), ): 2, ## turn left
(): 3
}
def calc_state(self):
base_state = WalkerBase.calc_state(self)
angular_speed = self.robot_body.angular_speed()
return np.concatenate((base_state, np.array(angular_speed)))
class HuskyClimber(Husky):
def calc_potential(self):
base_potential = Husky.calc_potential(self)
height_potential = - 4 * self.walk_height_diff / self.scene.dt
print("Husky climber", base_potential, height_potential)
return base_potential + height_potential
def robot_specific_reset(self):
        Husky.robot_specific_reset(self)
for j in self.jdict.keys():
self.jdict[j].power_coef = 1.5 * self.jdict[j].power_coef
debugmode=0
if debugmode:
for k in self.jdict.keys():
print("Power coef", self.jdict[k].power_coef)
class Quadrotor(WalkerBase):
model_type = "URDF"
default_scale=1
mjcf_scaling=1
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
self.is_discrete = config["is_discrete"]
WalkerBase.__init__(self, "quadrotor.urdf", "base_link", action_dim=4,
sensor_dim=20, power=2.5, scale = scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
env = env)
if self.is_discrete:
self.action_space = gym.spaces.Discrete(7)
self.action_list = [[1,0,0,0,0,0],
[-1,0,0,0,0,0],
[0,1,0,0,0,0],
[0,-1,0,0,0,0],
[0,0,1,0,0,0],
[0,0,-1,0,0,0],
[0,0,0,0,0,0]
]
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([6])
self.action_space = gym.spaces.Box(-action_high, action_high)
self.foot_list = []
def apply_action(self, action):
if self.is_discrete:
realaction = self.action_list[action]
else:
realaction = action
p.setGravity(0, 0, 0)
p.resetBaseVelocity(self.robot_ids[0], realaction[:3], realaction[3:])
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'),): 0, ## +x
(ord('s'),): 1, ## -x
(ord('d'),): 2, ## +y
(ord('a'),): 3, ## -y
(ord('z'),): 4, ## +z
(ord('x'),): 5, ## -z
(): 6
}
class Turtlebot(WalkerBase):
foot_list = []
mjcf_scaling = 1
model_type = "URDF"
default_scale = 1
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
WalkerBase.__init__(self, "turtlebot/turtlebot.urdf", "base_link", action_dim=4,
sensor_dim=20, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
control = 'velocity',
env=env)
self.is_discrete = config["is_discrete"]
self.ideal_position_control = config["ideal_position_control"]
if self.is_discrete:
if self.ideal_position_control:
self.action_space = gym.spaces.Discrete(3)
self.action_list = [self.move_forward, self.turn_right, self.turn_left]
else:
self.action_space = gym.spaces.Discrete(5)
self.vel = 0.1
self.action_list = [[self.vel, self.vel],
[-self.vel, -self.vel],
[self.vel, -self.vel],
[-self.vel, self.vel],
[0, 0]]
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
def apply_action(self, action):
if self.ideal_position_control:
action_function = self.action_list[action]
action_function()
else:
if self.is_discrete:
realaction = self.action_list[action]
else:
realaction = action
WalkerBase.apply_action(self, realaction)
def steering_cost(self, action):
if not self.is_discrete:
return 0
if action == 2 or action == 3:
return -0.1
else:
return 0
def angle_cost(self):
angle_const = 0.2
diff_to_half = np.abs(self.angle_to_target - 1.57)
is_forward = self.angle_to_target > 1.57
diff_angle = np.abs(1.57 - diff_to_half) if is_forward else 3.14 - np.abs(1.57 - diff_to_half)
debugmode = 0
if debugmode:
print("is forward", is_forward)
print("diff to half", diff_to_half)
print("angle to target", self.angle_to_target)
print("diff angle", diff_angle)
return -angle_const * diff_angle
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'),): 0, ## forward
#(ord('s'),): 1, ## backward
(ord('d'),): 1, ## turn right
(ord('a'),): 2, ## turn left
(): 3
}
def calc_state(self):
base_state = WalkerBase.calc_state(self)
angular_speed = self.robot_body.angular_speed()
return np.concatenate((base_state, np.array(angular_speed)))
class JR(WalkerBase):
foot_list = []
mjcf_scaling = 1
model_type = "URDF"
default_scale = 0.6
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
WalkerBase.__init__(self, "jr1_urdf/jr1_gibson.urdf", "base_link", action_dim=4,
sensor_dim=20, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
control = 'velocity',
env=env)
self.is_discrete = config["is_discrete"]
if self.is_discrete:
self.action_space = gym.spaces.Discrete(5)
self.vel = 0.1
self.action_list = [[self.vel, self.vel],
[-self.vel, -self.vel],
[self.vel, -self.vel],
[-self.vel, self.vel],
[0, 0]]
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
def apply_action(self, action):
if self.is_discrete:
realaction = self.action_list[action]
else:
realaction = action
WalkerBase.apply_action(self, realaction)
def steering_cost(self, action):
if not self.is_discrete:
return 0
if action == 2 or action == 3:
return -0.1
else:
return 0
def angle_cost(self):
angle_const = 0.2
diff_to_half = np.abs(self.angle_to_target - 1.57)
is_forward = self.angle_to_target > 1.57
diff_angle = np.abs(1.57 - diff_to_half) if is_forward else 3.14 - np.abs(1.57 - diff_to_half)
debugmode = 0
if debugmode:
print("is forward", is_forward)
print("diff to half", diff_to_half)
print("angle to target", self.angle_to_target)
print("diff angle", diff_angle)
return -angle_const * diff_angle
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'),): 0, ## forward
(ord('s'),): 1, ## backward
(ord('d'),): 2, ## turn right
(ord('a'),): 3, ## turn left
(): 4
}
def calc_state(self):
base_state = WalkerBase.calc_state(self)
angular_speed = self.robot_body.angular_speed()
return np.concatenate((base_state, np.array(angular_speed)))
class JR2(WalkerBase):
foot_list = []
mjcf_scaling = 1
model_type = "URDF"
default_scale = 1
def __init__(self, config, env=None):
self.config = config
scale = config["robot_scale"] if "robot_scale" in config.keys() else self.default_scale
WalkerBase.__init__(self, "jr2_urdf/jr2.urdf", "base_link", action_dim=4,
sensor_dim=20, power=2.5, scale=scale,
initial_pos=config['initial_pos'],
target_pos=config["target_pos"],
resolution=config["resolution"],
control=['velocity', 'velocity', 'position', 'position'],
env=env)
self.is_discrete = config["is_discrete"]
if self.is_discrete:
self.action_space = gym.spaces.Discrete(5)
self.vel = 0.01
self.action_list = [[self.vel, self.vel,0,0.2],
[-self.vel, -self.vel,0,-0.2],
[self.vel, -self.vel,-0.5,0],
[-self.vel, self.vel,0.5,0],
[0, 0,0,0]]
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
def apply_action(self, action):
if self.is_discrete:
realaction = self.action_list[action]
else:
realaction = action
WalkerBase.apply_action(self, realaction)
def steering_cost(self, action):
if not self.is_discrete:
return 0
if action == 2 or action == 3:
return -0.1
else:
return 0
def angle_cost(self):
angle_const = 0.2
diff_to_half = np.abs(self.angle_to_target - 1.57)
is_forward = self.angle_to_target > 1.57
diff_angle = np.abs(1.57 - diff_to_half) if is_forward else 3.14 - np.abs(1.57 - diff_to_half)
debugmode = 0
if debugmode:
print("is forward", is_forward)
print("diff to half", diff_to_half)
print("angle to target", self.angle_to_target)
print("diff angle", diff_angle)
return -angle_const * diff_angle
def robot_specific_reset(self):
WalkerBase.robot_specific_reset(self)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
def setup_keys_to_action(self):
self.keys_to_action = {
(ord('w'),): 0, ## forward
(ord('s'),): 1, ## backward
(ord('d'),): 2, ## turn right
(ord('a'),): 3, ## turn left
(): 4
}
def calc_state(self):
base_state = WalkerBase.calc_state(self)
angular_speed = self.robot_body.angular_speed()
return np.concatenate((base_state, np.array(angular_speed)))
```
#### File: gibson/envs/cube_projection.py
```python
from skimage.draw import polygon
import math
import numpy as np
import glob
import itertools
import transforms3d
import matplotlib.pyplot as plt
np.set_printoptions(suppress=True)
class Cube(object):
def __init__(self, origin=(0.0, 0.0, 0.0), scale=1.0, rotation_mat=None):
STANDARD_CUBE_VERTS = []
for x in [0, 1]:
for y in [0, 1]:
for z in [0, 1]:
STANDARD_CUBE_VERTS.append([x, y, z])
STANDARD_CUBE_VERTS = np.array(STANDARD_CUBE_VERTS, dtype=np.float32)
STANDARD_CUBE_VERTS -= 0.5
# Find the faces of the cube
CUBE_FACES = list(itertools.combinations(STANDARD_CUBE_VERTS, 4))
def is_face(verts):
eps = 1e-8
            # We require 2 of the pairwise distances to be sqrt(2) (the diagonals) and 4 to be 1 (the edges)
            edge_lengths = [np.linalg.norm(v0 - v1) for v0, v1 in itertools.combinations(verts, 2)]
return len([e for e in edge_lengths if np.isclose(e, np.sqrt(2.0))]) == 2 and len([e for e in edge_lengths if np.isclose(e, 1.0)]) == 4
def clockwise(f):
for p in itertools.permutations(f, 4):
v1, v2, v3, v4 = p
if np.isclose(np.linalg.norm(v1 - v2), 1.0) and \
np.isclose(np.linalg.norm(v2 - v3), 1.0) and \
np.isclose(np.linalg.norm(v3 - v4), 1.0) and \
np.isclose(np.linalg.norm(v4 - v1), 1.0):
return p
raise ValueError
CUBE_FACES = [clockwise(f) for f in CUBE_FACES if is_face(f)]
# Map these faces to vertex indices
def index_of_vert(query, verts):
for i, v in enumerate(verts):
if np.isclose(np.linalg.norm(v - query), 0):
return i
raise KeyError
self.cube_face_idxs = [[index_of_vert(q, STANDARD_CUBE_VERTS) for q in face] for face in CUBE_FACES]
self.verts = np.copy(STANDARD_CUBE_VERTS)
self.verts *= scale
self.rotation_mat = rotation_mat
if rotation_mat is not None:
self.verts = self.rotation_mat.dot(self.verts.T).T
self.verts += origin
self.homogeneous_verts = np.ones((8, 4))
self.homogeneous_verts[:, :3] = self.verts
def generate_projection_matrix(x_world, y_world, z_world, yaw, pitch, roll, fov_x, fov_y, size_x, size_y):
# Camera Extrinsics
R_camera = transforms3d.euler.euler2mat(roll, pitch, yaw)
t_camera = np.array([x_world, y_world, z_world])
RT = np.eye(4)
RT[:3, :3] = R_camera.T
RT[:3, 3] = -R_camera.T.dot(t_camera)
rotation = np.array([[0,0,-1,0],[0,-1,0,0],[1,0,0,0],[0,0,0,1]]) # So that z-dimension points out
RT = np.dot(rotation, RT)
# Camera Intrinsics
f_x = size_x / math.tan(fov_x / 2)
f_y = size_y / math.tan(fov_y / 2)
K = np.array([
[f_x, 0.0, size_x],
[0.0, f_y, size_y],
[0.0, 0.0, 1.0]
])
world_to_image_mat = K.dot(RT[:3])
return world_to_image_mat
def draw_cube(cube, world_to_image_mat, im_size_x, im_size_y, fast_depth=True, debug=False):
depth_image = world_to_image_mat.dot(cube.homogeneous_verts.T).T
depth_image[:,:2] /= depth_image[:,2][:, np.newaxis]
im = np.full((im_size_x, im_size_y), np.inf)
xx, yy, depth_zz = depth_image.T
xx_in_range = np.logical_and(xx >= 0, xx < im_size_x)
yy_in_range = np.logical_and(yy >= 0, yy < im_size_y)
valid_coords = np.logical_and(xx_in_range, yy_in_range)
valid_coords = np.logical_and(valid_coords, depth_zz > 0)
for i, idxs in enumerate(cube.cube_face_idxs):
if fast_depth:
            depth_to_fill = np.abs(min(depth_zz[idxs]))  # Just use the nearest (min) depth of this face. Not accurate, but probably sufficient
else:
raise NotImplementedError("We'd need to interpolate between the vertices")
if np.any(valid_coords[idxs]):
im[polygon(xx[idxs], yy[idxs], shape=im.shape)] = depth_to_fill
return im, xx, yy
def get_cube_depth_and_faces(cube, world_to_image_mat, im_size_x, im_size_y, fast_depth=True, debug=False):
depth_image = world_to_image_mat.dot(cube.homogeneous_verts.T).T
depth_image[:,:2] /= depth_image[:,2][:, np.newaxis]
xx, yy, depth_zz = depth_image.T
xx_in_range = np.logical_and(xx >= 0, xx < im_size_x)
yy_in_range = np.logical_and(yy >= 0, yy < im_size_y)
valid_coords = np.logical_and(xx_in_range, yy_in_range)
valid_coords = np.logical_and(valid_coords, depth_zz > 0)
xx_faces = []
yy_faces = []
masks = []
for i, idxs in enumerate(cube.cube_face_idxs):
im = np.full((im_size_x, im_size_y), np.inf)
if fast_depth:
depth_to_fill = np.abs(max(depth_zz[idxs])) # Just use the max depth of this face. Not accurate, but probably sufficient
else:
raise NotImplementedError("We'd need to interpolate between the vertices")
if np.any(valid_coords[idxs]):
im[polygon(xx[idxs], yy[idxs], shape=im.shape)] = depth_to_fill
xx_faces.append(xx[idxs])
yy_faces.append(yy[idxs])
masks.append(im)
return masks, xx_faces, yy_faces
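# Usage sketch (illustrative only): composite the per-face depth masks into a single depth image by
# taking the nearest depth at every pixel. The camera placement and image size are arbitrary choices
# made for this example, not values used elsewhere in the module.
def _demo_composite_depth(size_x=128, size_y=128):
    """Project a unit cube in front of the camera and merge its face masks."""
    cube = Cube(origin=(2.0, 0.0, 0.0))
    world_to_image_mat = generate_projection_matrix(0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                                    math.radians(90), math.radians(90),
                                                    size_x // 2, size_y // 2)
    masks, _, _ = get_cube_depth_and_faces(cube, world_to_image_mat, size_x, size_y)
    depth = np.minimum.reduce(masks) if masks else np.full((size_x, size_y), np.inf)
    return depth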
if __name__ == '__main__':
x_world, y_world, z_world = -2.0, -0.9, 0.0
yaw, pitch, roll = 0.0, 0.0, 0.0
# Camera Intrinsics
SIZE_X = 128 // 2
SIZE_Y = 128 // 2
FOV_X = math.radians(90)
FOV_Y = math.radians(90)
f_x = SIZE_X / math.tan(FOV_X / 2)
f_y = SIZE_Y / math.tan(FOV_Y / 2)
cube = Cube()
world_to_image_mat = generate_projection_matrix(x_world, y_world, z_world, yaw, pitch, roll, FOV_X, FOV_Y, SIZE_X, SIZE_Y)
    im, _, _ = draw_cube(cube, world_to_image_mat, SIZE_X*2, SIZE_Y*2)
    plt.imshow(im)
    plt.show()
```
#### File: torchnet/logger/visdommeterlogger.py
```python
import torch
from tnt.torchnet.logger import VisdomPlotLogger, VisdomLogger, VisdomTextLogger
from . import MeterLogger
from .. import meter as Meter
import numpy as np
class VisdomMeterLogger(MeterLogger):
''' A class to package and visualize meters.
Args:
server: The uri of the Visdom server
env: Visdom environment to log to.
port: Port of the visdom server.
title: The title of the MeterLogger. This will be used as a prefix for all plots.
plotstylecombined: Whether to plot train/test curves in the same window.
'''
def __init__(self, server="localhost", env='main', port=8097, title="DNN", nclass=21, plotstylecombined=True, log_to_filename=None, loggers=('train', 'val')):
super(VisdomMeterLogger, self).__init__()
self.server = server
self.env = env
self.port = port
        self.title = title
        self.nclass = nclass
self.logger = {}
for logger in loggers:
self.logger[logger] = {}
self.plotstylecombined = plotstylecombined
self.log_to_filename = log_to_filename
self.metername_to_ptype = {}
def __addlogger(self, meter, ptype):
first_logger = None
for logger_name, logger in self.logger.items():
if ptype == 'stacked_line':
opts = {'title': '{} {} ({})'.format(self.title, meter, logger_name),
'fillarea': True,
'legend': self.meter[logger_name][meter].keys}
logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
elif ptype == 'line':
if self.plotstylecombined:
                    if first_logger is None:
                        first_logger = logger_name
                        opts = {'title': self.title + ' ' + meter}
                        logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server,
                                                         port=self.port, log_to_filename=self.log_to_filename,
                                                         opts=opts)
else:
logger[meter] = self.logger[first_logger][meter]
else:
opts = {'title': self.title + '{} '.format(logger_name) + meter}
logger[meter] = VisdomPlotLogger(ptype, env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
elif ptype == 'heatmap':
names = list(range(self.nclass))
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter), 'columnnames': names, 'rownames': names}
logger[meter] = VisdomLogger('heatmap', env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
# >>> # Image example
# >>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
# >>> image_logger = VisdomLogger('image')
# >>> image_logger.log(img_to_use)
elif ptype == 'image':
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
# >>> # Histogram example
# >>> hist_data = np.random.rand(10000)
# >>> hist_logger = VisdomLogger('histogram', , opts=dict(title='Random!', numbins=20))
# >>> hist_logger.log(hist_data)
elif ptype == 'histogram':
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter), 'numbins': 20}
logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
elif ptype == 'text':
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
logger[meter] = VisdomTextLogger(env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
update_type='APPEND',
opts=opts)
elif ptype =='video':
                opts = {'title': '{} {} {}'.format(self.title, logger_name, meter)}
logger[meter] = VisdomLogger(ptype, env=self.env, server=self.server,
port=self.port, log_to_filename=self.log_to_filename,
opts=opts)
def add_meter(self, meter_name, meter, ptype=None):
super(VisdomMeterLogger, self).add_meter(meter_name, meter)
# for key in self.writer.keys():
# self.metername_to_ptype[meter] = ptype
self.metername_to_ptype[meter_name] = ptype
if ptype: # Use `ptype` for manually selecting the plot type
self.__addlogger(meter_name, ptype)
elif isinstance(meter, Meter.ClassErrorMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.mAPMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.AUCMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.ConfusionMeter):
self.__addlogger(meter_name, 'heatmap')
elif isinstance(meter, Meter.MSEMeter):
self.__addlogger(meter_name, 'line')
elif type(meter) == Meter.ValueSummaryMeter:
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.MultiValueSummaryMeter):
self.__addlogger(meter_name, 'stacked_line')
else:
            raise NotImplementedError("Unknown meter type (and ptype): {} ({})".format(type(meter), ptype))
def reset_meter(self, iepoch, mode='train', meterlist=None):
self.timer.reset()
for meter_name, meter in self.meter[mode].items():
if meterlist is not None and meter_name not in meterlist:
continue
val = self.meter[mode][meter_name].value()
val = val[0] if isinstance(val, (list, tuple)) else val
should_reset_and_continue = False
if isinstance(val, str) or val is None:
should_reset_and_continue = (val is None)
elif isinstance(val, np.ndarray):
should_reset_and_continue = np.isnan(val).any()
elif isinstance(val, torch.Tensor):
should_reset_and_continue = torch.isnan(val).any()
else:
should_reset_and_continue = np.isnan(val)
if should_reset_and_continue:
self.meter[mode][meter_name].reset()
continue
if isinstance(meter, Meter.ConfusionMeter) or self.metername_to_ptype[meter_name] in ['histogram', 'image', 'text']:
self.logger[mode][meter_name].log(val)
elif isinstance(self.meter[mode][meter_name], Meter.MultiValueSummaryMeter):
self.logger[mode][meter_name].log( np.array([iepoch]*len(val)), np.array(np.cumsum(val)), name=mode) # keep mean
elif meter_name in self.metername_to_ptype and self.metername_to_ptype[meter_name] == 'video':
self.logger[mode][meter_name].log(videofile=val) # video takes in a string
else:
self.logger[mode][meter_name].log(iepoch, val, name=mode)
self.meter[mode][meter_name].reset()
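# Usage sketch (illustrative only): building a logger and registering two meters. It assumes a
# Visdom server is reachable on localhost:8097 and uses only methods shown in this class; feeding
# values into the meters goes through the parent MeterLogger API, which is not part of this file.
def _demo_visdom_meter_logger():
    """Create a VisdomMeterLogger with a line plot and a confusion-matrix heatmap."""
    mlog = VisdomMeterLogger(title='demo', nclass=10, loggers=('train', 'val'))
    mlog.add_meter('mse', Meter.MSEMeter(), ptype='line')
    mlog.add_meter('confusion', Meter.ConfusionMeter(10), ptype='heatmap')
    return mlog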
``` |
{
"source": "joel99/visdial-challenge-starter-pytorch",
"score": 2
} |
#### File: visdialch/encoders/__init__.py
```python
from visdialch.encoders.bu_lf import BottomUpLateFusionEncoder
from visdialch.encoders.lf import LateFusionEncoder
def Encoder(model_config, *args):
name_enc_map = {
'lf': LateFusionEncoder,
'bu_lf': BottomUpLateFusionEncoder
}
return name_enc_map[model_config["encoder"]](model_config, *args)
``` |
{
"source": "joeladam518/mqtt-flask-app",
"score": 3
} |
#### File: site/controllers/mqtt.py
```python
import os
from flask import request
from flask_restful import Resource, reqparse
from flask_httpauth import HTTPTokenAuth
import paho.mqtt.publish as publish
auth = HTTPTokenAuth('Bearer')
@auth.verify_token
def verify_token(token):
return (str(os.getenv('MQTT_PUBLISH_SECRET')) == str(token))
class MqttController(Resource):
def __init__(self):
pass
@auth.login_required
def post(self):
if os.getenv('MQTT_ENDPOINT_DISABLED') == 'True':
return {
"status": "error",
"message": {
"general": "Endpoint disabled",
}
}, 418
parser = reqparse.RequestParser()
parser.add_argument('topic', required=True, default='', location='form', help='No topic.')
parser.add_argument('message', required=True, default='', location='form', help='No payload.')
args = parser.parse_args()
topic = str(args['topic'])
message = str(args['message'])
if not topic or not message:
return {
'status': 'error',
'message': {
"general": "No topic and/or no payload.",
},
}, 422
"""
mqtt.publish.single(topic, payload=None, qos=0, retain=False, hostname="localhost",
port=1883, client_id="", keepalive=60, will=None, auth=None,
tls=None, protocol=mqtt.MQTTv311, transport="tcp")
"""
publish.single(topic=topic, payload=message, hostname=os.getenv('MQTT_SERVER_ADDRESS'))
return {
'status': 'success',
'message': {
"topic_submitted": "{}".format(topic),
"payload_submitted": "{}".format(message),
}
}, 200
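# Client-side usage sketch (illustrative only): POST a topic/message pair to this resource with the
# bearer token taken from MQTT_PUBLISH_SECRET. The route path and port below are assumptions; they
# depend on how MqttController is registered with the Api elsewhere in the app.
def _demo_publish_request(base_url="http://localhost:5000"):
    """Publish a message through the API from a separate client process."""
    import requests
    response = requests.post(
        "{}/mqtt".format(base_url),  # assumed route
        data={"topic": "home/lights", "message": "on"},
        headers={"Authorization": "Bearer {}".format(os.getenv("MQTT_PUBLISH_SECRET", ""))},
    )
    return response.status_code, response.json()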
``` |
{
"source": "joeladam518/sutler",
"score": 3
} |
#### File: sutler/app/debian.py
```python
import os
from .posix import PosixSystem
from .os import Sys
class DebianSystem(PosixSystem):
@property
def codename(self):
"""Return the codename for the debian system"""
return Sys.release_info('VERSION_CODENAME')
def install(self, *args: str) -> None:
"""Install a program"""
self.exec('apt install -y', *args, root=True)
def uninstall(self, *args: str) -> None:
"""Uninstall a program"""
# TODO: Which is the better way?
# self.exec("apt-get purge -y", *args, root=True)
# self.exec("apt-get --purge autoremove -y", root=True)
self.exec('apt purge -y', *args, root=True)
self.exec('apt autoremove -y', root=True)
def update(self) -> None:
"""Update the package repository"""
self.exec('apt update', root=True)
def update_and_upgrade(self) -> None:
"""Update the package repository and upgrade the systems packages"""
env = os.environ.copy()
env['DEBIAN_FRONTEND'] = 'noninteractive'
self.exec('apt update', root=True)
self.exec('apt upgrade -y', root=True, env=env)
self.exec('apt autoremove -y', root=True)
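# Standalone sketch (illustrative only) of the pattern used by update_and_upgrade above: copy the
# current environment and set DEBIAN_FRONTEND so apt never prompts. Assumes a Debian/Ubuntu host
# where the current user may run apt via sudo; run deliberately, it modifies the system.
def _demo_noninteractive_upgrade():
    """Run a prompt-free update/upgrade with plain subprocess calls."""
    import subprocess
    env = os.environ.copy()
    env['DEBIAN_FRONTEND'] = 'noninteractive'
    subprocess.run(['sudo', 'apt-get', 'update'], check=True, env=env)
    subprocess.run(['sudo', 'apt-get', 'upgrade', '-y'], check=True, env=env)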
```
#### File: sutler/commands/uninstall.py
```python
import click
from click.core import Context as ClickContext
from ..installers import DotfilesInstaller, FzfInstaller, MariadbInstaller
from ..installers import NodeInstaller, PhpInstaller, RedisInstaller
@click.group()
def uninstall():
pass
@click.command()
@click.pass_context
@click.argument('system', type=click.Choice(('desktop', 'mac', 'server')), required=True)
def dotfiles(ctx: ClickContext, system: str):
installer = DotfilesInstaller(ctx)
installer.uninstall(system)
@click.command()
@click.pass_context
def fzf(ctx: ClickContext):
installer = FzfInstaller(ctx)
installer.uninstall()
@click.command()
@click.pass_context
def mariadb(ctx: ClickContext):
installer = MariadbInstaller(ctx)
installer.uninstall()
@click.command()
@click.pass_context
def nodejs(ctx: ClickContext):
installer = NodeInstaller(ctx)
installer.uninstall()
@click.command()
@click.pass_context
@click.argument('version', type=str, required=True)
def php(ctx: ClickContext, version: str):
installer = PhpInstaller(ctx)
installer.uninstall(version)
@click.command()
@click.pass_context
def redis(ctx: ClickContext):
installer = RedisInstaller(ctx)
installer.uninstall()
uninstall.add_command(dotfiles)
uninstall.add_command(fzf)
uninstall.add_command(mariadb)
uninstall.add_command(nodejs)
uninstall.add_command(php)
uninstall.add_command(redis)
```
#### File: sutler/installers/node.py
```python
import os
from .installer import Installer
class NodeInstaller(Installer):
versions = ('14', '15', '16', '17')
__source_file_path = '/etc/apt/sources.list.d/nodesource.list'
def install(self, version: str) -> None:
if version not in self.versions:
self.ctx.fail('Invalid node version')
os.chdir(self.app.user.home)
self.app.os.exec(f"curl -sL \"https://deb.nodesource.com/setup_{version}.x\" | sudo -E bash -")
self.app.os.install('nodejs')
def uninstall(self) -> None:
os.chdir(self.app.user.home)
self.app.os.uninstall('nodejs')
# TODO: Do I have to remove the apt gpg key?
if os.path.exists(self.__source_file_path):
self.app.os.rm(self.__source_file_path, root=True)
self.app.os.update()
```
#### File: sutler/installers/redis.py
```python
from .installer import Installer
class RedisInstaller(Installer):
def install(self) -> None:
self.app.os.update()
self.app.os.install('redis-server')
def uninstall(self) -> None:
self.app.os.uninstall('redis-server')
```
#### File: sutler/provisioners/desktop.py
```python
import click
import os
from git import Repo
from ..installers import ComposerInstaller, DotfilesInstaller, FzfInstaller
from ..installers import NodeInstaller, PhpInstaller, SublimeInstaller
from .provisioner import Provisioner
class DesktopProvisioner(Provisioner):
def run(self) -> None:
"""
Provision my ubuntu desktop machine
NOTE: This script will probably have to be updated before it is run every single time...
Color for terminal screen (to mimic iTerm):
-------------------------------------------
black dark = #000000 black light = #686868
red dark = #c91b00 red light = #ff6e67
green dark = #00c200 green light = #5ffa68
yellow dark = #C7B700 yellow light = #fffc67
blue dark = #0532e1 blue light = #5075ff #42A5F5
magenta dark = #ca30c7 magenta light = #ff77ff
cyan dark = #00c5c7 cyan light = #60fdff
white dark = #D7D7D7 white light = #ffffff
:return: None
"""
click.echo()
click.echo('Setting up your desktop environment')
click.echo()
os.chdir(self.app.user.home)
repos_path = os.path.join(self.app.user.home, 'repos')
if not os.path.isdir(repos_path):
os.mkdir(repos_path)
ssh_path = os.path.join(self.app.user.home, '.ssh')
if not os.path.isdir(ssh_path):
os.mkdir(ssh_path)
self.app.os.exec(f'cd "{ssh_path}" && ssh-keygen -t rsa')
self.app.os.update_and_upgrade()
# Base stuff
self.app.os.install('apt-transport-https', 'build-essential', 'ca-certificates', 'software-properties-common')
# Install some useful applications
self.app.os.install('curl', 'exfat-utils', 'exfat-fuse', 'git', 'gnome-tweaks', 'htop', 'mosquitto-clients',
'mariadb-client', 'python3-pip', 'ripit', 'tmux', 'tree', 'vim-gtk3', 'virtualenv')
if self.app.os.id == 'ubuntu':
# Install restricted extras
self.app.os.install('ubuntu-restricted-extras', 'ubuntu-restricted-addons')
# Install some snaps
self.app.os.exec('snap refresh')
self.app.os.exec('snap install audacity gimp vlc')
# Install even more
DotfilesInstaller(self.ctx).install('desktop')
FzfInstaller(self.ctx).install()
PhpInstaller(self.ctx).install('8.1', env='desktop')
ComposerInstaller(self.ctx).install()
NodeInstaller(self.ctx).install('16')
SublimeInstaller(self.ctx).install('merge')
# Install bash git prompt
os.chdir(self.app.user.home)
Repo.clone_from("https://github.com/magicmonty/bash-git-prompt.git", ".bash-git-prompt", depth=1)
# TODO: install docker
# Update the command line editor to vim (Has to be done manually)
# self.app.os.exec('update-alternatives --config editor', root=True)
# Clone my public repos
if click.confirm('Install repos?', default=False):
os.chdir(repos_path)
Repo.clone_from('<EMAIL>:joeladam518/arduino-mqtt-led.git', 'arduino-mqtt-led')
Repo.clone_from('<EMAIL>:joeladam518/BackupScripts.git', 'BackupScripts')
Repo.clone_from('<EMAIL>:joeladam518/CurtainCallWP.git', 'CurtainCallWP')
Repo.clone_from('<EMAIL>:joeladam518/colorschemes.git', 'colorschemes')
Repo.clone_from('<EMAIL>:joeladam518/feather-mqtt-rgb-tree.git', 'feather-mqtt-rgb-tree')
Repo.clone_from('<EMAIL>:joeladam518/feather-mqtt-temp-sensor.git', 'feather-mqtt-temp-sensor')
Repo.clone_from('<EMAIL>:joeladam518/sutler.git', 'sutler')
click.echo()
click.echo("Reminder of some other programs you like, but unfortunately their installation can't be automated")
click.echo("yet...")
click.echo()
click.echo("* Arduino - https://www.arduino.cc/en/software")
click.echo("* Chrome - https://www.google.com/chrome/")
click.echo("* dbeaver - https://dbeaver.io/")
click.echo("* Jetbrains IDE - https://www.jetbrains.com/toolbox-app/")
click.echo("* Postman - https://www.postman.com/downloads/?utm_source=postman-home")
click.echo("* Slack - https://slack.com/downloads/linux")
click.echo("* Vagrant - https://www.vagrantup.com/downloads")
click.echo("* Virtual-Box - https://www.virtualbox.org/wiki/Linux_Downloads")
click.echo("* Visual Studio Code - https://code.visualstudio.com/docs/setup/linux")
click.echo()
``` |
{
"source": "joeladdison/libmarks",
"score": 3
} |
#### File: src/libmarks/compile_py.py
```python
import os
import fnmatch
import py_compile
def compile(source_dir, dest_dir):
for root, dirs, files in os.walk(source_dir):
# Get path to file, relative to source_dir
curr_dir = os.path.relpath(root, source_dir)
if curr_dir == '.':
curr_dir = ''
# Ignore hidden directories (starting with .)
if curr_dir.startswith('.'):
continue
# Filter for Python files
py_files = fnmatch.filter(files, '*.py')
if len(py_files) == 0:
continue
# Directory contains Python files, so create in destination
try:
os.mkdir(os.path.join(dest_dir, curr_dir))
except OSError:
# Directory already exists
pass
# Compile all py files and put them in dest_dir
for f in py_files:
py_compile.compile(
os.path.join(root, f),
os.path.join(dest_dir, curr_dir, f + 'c'))
# Create all dirs within dest_dir
# for d in dirs:
# try:
# os.mkdir(os.path.join(dest_dir, curr_dir, d))
# except OSError:
# # Directory already exists
# pass
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
sys.exit("Usage: compile_py.py source_dir dest_dir")
source_dir = sys.argv[1]
dest_dir = sys.argv[2]
compile(source_dir, dest_dir)
``` |
{
"source": "JoeLambourne/SketchGraphs",
"score": 3
} |
#### File: sketchgraphs/data/_plotting.py
```python
import math
import matplotlib as mpl
import matplotlib.patches
import matplotlib.pyplot as plt
from ._entity import Arc, Circle, Line, Point
def _get_linestyle(entity):
return '--' if entity.isConstruction else '-'
def sketch_point(ax, point: Point, color='black', show_subnodes=False):
ax.scatter(point.x, point.y, c=color, marker='.')
def sketch_line(ax, line: Line, color='black', show_subnodes=False):
start_x, start_y = line.start_point
end_x, end_y = line.end_point
if show_subnodes:
marker = '.'
else:
marker = None
ax.plot((start_x, end_x), (start_y, end_y), color, linestyle=_get_linestyle(line), linewidth=1, marker=marker)
def sketch_circle(ax, circle: Circle, color='black', show_subnodes=False):
patch = matplotlib.patches.Circle(
(circle.xCenter, circle.yCenter), circle.radius,
fill=False, linestyle=_get_linestyle(circle), color=color)
if show_subnodes:
ax.scatter(circle.xCenter, circle.yCenter, c=color, marker='.', zorder=20)
ax.add_patch(patch)
def sketch_arc(ax, arc: Arc, color='black', show_subnodes=False):
angle = math.atan2(arc.yDir, arc.xDir) * 180 / math.pi
startParam = arc.startParam * 180 / math.pi
endParam = arc.endParam * 180 / math.pi
if arc.clockwise:
startParam, endParam = -endParam, -startParam
ax.add_patch(
matplotlib.patches.Arc(
(arc.xCenter, arc.yCenter), 2*arc.radius, 2*arc.radius,
angle=angle, theta1=startParam, theta2=endParam,
linestyle=_get_linestyle(arc), color=color))
if show_subnodes:
ax.scatter(arc.xCenter, arc.yCenter, c=color, marker='.')
ax.scatter(*arc.start_point, c=color, marker='.', zorder=40)
ax.scatter(*arc.end_point, c=color, marker='.', zorder=40)
_PLOT_BY_TYPE = {
Arc: sketch_arc,
Circle: sketch_circle,
Line: sketch_line,
Point: sketch_point
}
def render_sketch(sketch, ax=None, show_axes=False, show_origin=False, hand_drawn=False, show_subnodes=False):
"""Renders the given sketch using matplotlib.
Parameters
----------
sketch : Sketch
The sketch instance to render
ax : matplotlib.Axis, optional
Axis object on which to render the sketch. If None, a new figure is created.
show_axes : bool
Indicates whether axis lines should be drawn
show_origin : bool
Indicates whether origin point should be drawn
hand_drawn : bool
Indicates whether to emulate a hand-drawn appearance
show_subnodes : bool
Indicates whether endpoints/centerpoints should be drawn
Returns
-------
matplotlib.Figure
If `ax` is not provided, the newly created figure. Otherwise, `None`.
"""
if hand_drawn:
saved_rc = mpl.rcParams.copy()
plt.xkcd(scale=1, length=100, randomness=3)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
else:
fig = None
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
if not show_axes:
ax.set_yticklabels([])
ax.set_xticklabels([])
_ = [line.set_marker('None') for line in ax.get_xticklines()]
_ = [line.set_marker('None') for line in ax.get_yticklines()]
# Eliminate lower and left axes
ax.spines['left'].set_color('none')
ax.spines['bottom'].set_color('none')
if show_origin:
point_size = mpl.rcParams['lines.markersize'] * 1
ax.scatter(0, 0, s=point_size, c='black')
for ent in sketch.entities.values():
sketch_fn = _PLOT_BY_TYPE.get(type(ent))
if sketch_fn is None:
continue
sketch_fn(ax, ent, show_subnodes=show_subnodes)
# Rescale axis limits
ax.relim()
ax.autoscale_view()
if hand_drawn:
mpl.rcParams.update(saved_rc)
return fig
def render_graph(graph, filename):
"""Renders the given pgv.AGraph to an image file.
Parameters
----------
graph : pgv.AGraph
The graph to render
filename : string
Where to save the image file
Returns
-------
None
"""
graph.layout('dot')
graph.draw(filename)
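# Usage sketch (illustrative only): render a single sketch with hand-drawn styling and subnode
# markers, then save it to disk. Obtaining `sketch` (a sketchgraphs.data Sketch instance, e.g.
# loaded from the SketchGraphs dataset) is assumed to happen elsewhere.
def _demo_render_sketch(sketch, path='sketch.png'):
    """Render and save one Sketch instance."""
    fig = render_sketch(sketch, hand_drawn=True, show_subnodes=True)
    fig.savefig(path, bbox_inches='tight')
    plt.close(fig)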
__all__ = ['render_sketch', 'render_graph']
```
#### File: SketchGraphs/sketchgraphs_models/training.py
```python
import abc
import collections
import dataclasses
import datetime
import functools
import numbers
import time
import typing
import numpy as np
import torch
import torch.utils.tensorboard
from sketchgraphs_models import distributed_utils
_scalar_types = (
torch.Tensor, np.ndarray, numbers.Number, np.int32, np.int64
)
def map_structure_flat(structure, function, scalar_types=None):
"""Utility function for mapping a function over an arbitrary structure,
maintaining the structure.
Parameters
----------
structure : object
An arbitrary nested structure
function : function
A function to apply to each leaf of the structure
scalar_types : Tuple, optional
If not None, a tuple of types considered scalar types over which the
function is directly applied
Returns
-------
object
A structure with each element modified.
Raises
------
ValueError
If the type is not a scalar type and is not decomposable, an exception is raised.
"""
map_structure_fn = functools.partial(map_structure_flat, function=function, scalar_types=scalar_types)
if scalar_types is None:
scalar_types = _scalar_types
if structure is None:
return None
if isinstance(structure, scalar_types):
return function(structure)
if hasattr(structure, '_make'):
return structure._make(map(map_structure_fn, structure))
if dataclasses.is_dataclass(structure):
return dataclasses.replace(structure, **map_structure_fn(vars(structure)))
if isinstance(structure, collections.OrderedDict):
return collections.OrderedDict([(k, map_structure_fn(v)) for k, v in structure.items()])
if isinstance(structure, collections.abc.Mapping):
return type(structure)([(k, map_structure_fn(v)) for k, v in structure.items()])
if isinstance(structure, collections.abc.Sequence):
return type(structure)([map_structure_fn(v) for v in structure])
raise ValueError('Unsupported structure type {0}'.format(type(structure)))
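# Usage sketch (illustrative only): halve every scalar/tensor leaf of a nested structure while
# preserving its shape, mirroring how train_epochs averages the accumulated loss dictionaries.
def _demo_map_structure_flat():
    """Apply a scalar function to every leaf of a nested dict."""
    nested = {'loss': {'total': torch.tensor(4.0), 'edge': torch.tensor(2.0)}, 'count': 10}
    return map_structure_flat(nested, lambda x: x / 2.0)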
def load_cuda_async(batch, device=None):
"""Loads a structured batch recursively onto the given torch device."""
if device is not None and device.type != "cuda":
return batch
load_cuda_async_device = functools.partial(load_cuda_async, device=device)
if batch is None:
return None
    elif isinstance(batch, (torch.Tensor, torch.nn.utils.rnn.PackedSequence)):
        return batch.to(device=device, non_blocking=False)
elif hasattr(batch, '_make'):
# Check for namedtuple
return batch._make(map(load_cuda_async_device, batch))
elif dataclasses.is_dataclass(batch):
# Check for @dataclass
return dataclasses.replace(batch, **load_cuda_async_device(vars(batch)))
elif isinstance(batch, collections.OrderedDict):
return collections.OrderedDict([(k, load_cuda_async_device(v)) for k, v in batch.items()])
elif isinstance(batch, collections.abc.Mapping):
return {k: load_cuda_async_device(v) for k, v in batch.items()}
elif isinstance(batch, collections.abc.Sequence):
return [load_cuda_async_device(v) for v in batch]
elif isinstance(batch, (int, np.int32, np.int64)):
return batch
else:
raise ValueError("Unsupported batch collection type {0}.".format(type(batch)))
def _accumulate(losses, acc):
for k, v in losses.items():
if v is None:
continue
if isinstance(v, dict):
_accumulate(v, acc.setdefault(k, {}))
else:
acc.setdefault(k, v.new_zeros(v.shape)).add_(v.detach())
class TrainingConfig(typing.NamedTuple):
"""Named tuple holding configuration for training a given model."""
dataloader: torch.utils.data.DataLoader
tb_writer: typing.Optional[torch.utils.tensorboard.SummaryWriter]
device: torch.device
batch_size: int
batches_per_epoch: typing.Optional[int] = None
class TrainingHarness(abc.ABC):
"""This class implements the main training loop."""
def __init__(self, model, opt, config_train: TrainingConfig, config_eval: TrainingConfig = None,
dist_config: distributed_utils.DistributedTrainingInfo = None):
"""Creates a new harness for the given model.
Parameters
----------
model : torch.nn.Module
The torch model to train.
opt : torch.optim.Optimizer
The optimizer to use during training
config_train : TrainingConfig
The configuration to use for training
config_eval : TrainingConfig, optional
The configuration to use for evaluation
dist_config : DistributedTrainingInfo, optional
The configuration used for distributed training
"""
self.model = model
self.opt = opt
self.config_train = config_train
self.config_eval = config_eval
self.dist_config = dist_config
@abc.abstractmethod
def single_step(self, batch, global_step):
"""Implements a single step of the model evaluation / training.
Parameters
----------
batch : dict
Input batch from the dataloader
global_step : int
Global step for this batch
Returns
-------
losses : dict
Dictionary of computed losses
accuracy : dict
Dictionary of computed accuracy
"""
def is_leader(self):
return distributed_utils.is_leader(self.dist_config)
def on_epoch_end(self, epoch, global_step):
"""This function is called at the end of each epoch."""
pass
def write_summaries(self, global_step, losses, accuracies, tb_writer):
pass
def print_statistics(self, loss_acc, accuracy_acc):
pass
def reset_statistics(self):
pass
def log(self, *args):
if self.is_leader():
print(*args)
def train_epochs(self, start_epoch=0, global_step=0):
"""Trains the model for a single iteration over the dataloader.
Note that usually, a single iteration over a dataloader represents a single epoch.
However, because starting a new epoch is very expensive for the dataloader, we instead
allow dataloaders to iterate over multiple epochs at a time.
Parameters
----------
start_epoch : int
The current epoch before training
global_step : int
The current global step before training
Returns
-------
epoch : int
The current epoch after training
global_step : int
The current global step after training
"""
last_time = time.perf_counter()
loss_acc = {}
accuracy_acc = {}
batch_idx = 0 # Mini-batch index within each epoch.
epoch = start_epoch
epoch_start_time = None
batches_per_epoch = (self.config_train.batches_per_epoch or len(self.config_train.dataloader.batch_sampler))
log_every_n = min(50, batches_per_epoch)
total_batch_size = self.config_train.batch_size
if self.dist_config:
total_batch_size *= self.dist_config.world_size
self.model.train()
for j, batch in enumerate(self.config_train.dataloader):
if j % batches_per_epoch == 0:
# new epoch initialization
epoch += 1
self.log(f'Starting epoch #{epoch}')
epoch_start_time = time.perf_counter()
# reset batch counters
batch_idx = 0
loss_acc = {}
accuracy_acc = {}
last_time = epoch_start_time
batch = load_cuda_async(batch, device=self.config_train.device)
losses, accuracy = self.single_step(batch, global_step)
_accumulate(losses, loss_acc)
_accumulate(accuracy, accuracy_acc)
global_step += total_batch_size
if (batch_idx + 1) % log_every_n == 0:
if self.is_leader():
self.write_summaries(global_step, losses, accuracy, self.config_train.tb_writer)
current_time = time.perf_counter()
elapsed = current_time - last_time
last_time = current_time
graph_per_second = log_every_n * total_batch_size / elapsed
self.log(f'Epoch {epoch}. Batch {batch_idx + 1}. {graph_per_second:.2f} graphs / s')
if self.is_leader():
loss_acc = map_structure_flat(loss_acc, lambda x: x / float(log_every_n))
accuracy_acc = map_structure_flat(accuracy_acc, lambda x: x / float(log_every_n))
self.print_statistics(loss_acc, accuracy_acc)
self.reset_statistics()
loss_acc = {}
accuracy_acc = {}
if (j + 1) % batches_per_epoch == 0:
# epoch end
self.on_epoch_end(epoch, global_step)
current_time = time.perf_counter()
self.log(f'Done with epoch #{epoch}. '
f'Took {datetime.timedelta(seconds=current_time - epoch_start_time)}\n')
self.run_holdout_eval(epoch, global_step)
batch_idx += 1
if (j + 1) % batches_per_epoch != 0:
print('Warning: incomplete epoch')
return epoch, global_step
def run_holdout_eval(self, epoch, global_step):
"""Runs the holdout evaluation process.
Parameters
----------
epoch : int
The current epoch of training
global_step : int
The current global step of training
"""
if self.config_eval is None:
self.log('Skipping holdout evaluation as no evaluation dataset specified.')
return
self.log('Running holdout eval...')
loss_acc = collections.OrderedDict()
accuracy_acc = collections.OrderedDict()
self.reset_statistics()
self.model.eval()
idx = 0
for idx, batch in enumerate(self.config_eval.dataloader):
batch = load_cuda_async(batch, device=self.config_eval.device)
with torch.no_grad():
losses, accuracy = self.single_step(batch, global_step)
_accumulate(losses, loss_acc)
_accumulate(accuracy, accuracy_acc)
num_batches = idx + 1
loss_acc = map_structure_flat(loss_acc, lambda x: x / num_batches)
accuracy_acc = map_structure_flat(accuracy_acc, lambda x: x / num_batches)
if self.is_leader():
self.log(f'Eval for epoch={epoch}, global_step={global_step}:')
self.print_statistics(loss_acc, accuracy_acc)
self.log()
self.write_summaries(global_step, loss_acc, accuracy_acc, self.config_eval.tb_writer)
```
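`TrainingHarness` is abstract: a concrete model hooks into the loop by subclassing it and overriding `single_step` (and, optionally, the summary and statistics hooks). The sketch below is a minimal such subclass; the `features`/`targets` batch keys, the classifier-style model, and the commented-out wiring are assumptions chosen for illustration, not the batch layout used by the SketchGraphs models.
```python
import torch.nn.functional as F

from sketchgraphs_models.training import TrainingConfig, TrainingHarness


class ClassificationHarness(TrainingHarness):
    """Minimal sketch: cross-entropy loss plus a top-1 accuracy metric per batch."""

    def single_step(self, batch, global_step):
        logits = self.model(batch['features'])            # assumed batch layout
        loss = F.cross_entropy(logits, batch['targets'])
        if self.model.training:
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()
        top1 = (logits.argmax(dim=-1) == batch['targets']).float().mean()
        return {'cross_entropy': loss}, {'top1': top1}


# Wiring (placeholders): harness = ClassificationHarness(model, opt,
#     TrainingConfig(dataloader, tb_writer=None, device=torch.device('cpu'), batch_size=32))
# harness.train_epochs()
```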
#### File: sketchgraphs/onshape/call.py
```python
import argparse
import json
import urllib.parse
from . import Client
TEMPLATE_PATH = 'sketchgraphs/onshape/feature_template.json'
def _parse_resp(resp):
"""Parse the response of a retrieval call.
"""
parsed_resp = json.loads(resp.content.decode('utf8').replace("'", '"'))
return parsed_resp
def _save_or_print_resp(resp_dict, output_path=None, indent=4):
"""Saves or prints the given response dict.
"""
if output_path:
with open(output_path, 'w') as fh:
json.dump(resp_dict, fh, indent=indent)
else:
print(json.dumps(resp_dict, indent=indent))
def _create_client(logging):
"""Creates a `Client` with the given bool value for `logging`.
"""
client = Client(stack='https://cad.onshape.com',
logging=logging)
return client
def _parse_url(url):
"""Extracts doc, workspace, element ids from url.
"""
_, _, docid, _, wid, _, eid = urllib.parse.urlparse(url).path.split('/')
return docid, wid, eid
def update_template(url, logging=False):
"""Updates version identifiers in feature_template.json.
Parameters
----------
url : str
URL of Onshape PartStudio
logging: bool
Whether to log API messages (default False)
Returns
-------
None
"""
# Get PartStudio features (including version IDs)
features = get_features(url, logging)
# Get current feature template
with open(TEMPLATE_PATH, 'r') as fh:
template = json.load(fh)
for version_key in ['serializationVersion', 'sourceMicroversion', 'libraryVersion']:
template[version_key] = features[version_key]
# Save updated feature template
with open(TEMPLATE_PATH, 'w') as fh:
json.dump(template, fh, indent=4)
def add_feature(url, sketch_dict, sketch_name=None, logging=False):
"""Adds a sketch to a part.
Parameters
----------
url : str
URL of Onshape PartStudio
sketch_dict: dict
A dictionary representing a `Sketch` instance with keys `entities` and `constraints`
sketch_name: str
Optional name for the sketch. If none provided, defaults to 'My Sketch'.
logging: bool
Whether to log API messages (default False)
Returns
-------
None
"""
# Get doc ids and create Client
docid, wid, eid = _parse_url(url)
client = _create_client(logging)
# Get feature template
with open(TEMPLATE_PATH, 'r') as fh:
template = json.load(fh)
# Add sketch's entities and constraints to the template
template['feature']['message']['entities'] = sketch_dict['entities']
template['feature']['message']['constraints'] = sketch_dict['constraints']
if not sketch_name:
sketch_name = 'My Sketch'
template['feature']['message']['name'] = sketch_name
# Send to Onshape
client.add_feature(docid, wid, eid, payload=template)
def get_features(url, logging=False):
"""Retrieves features from a part.
Parameters
----------
url : str
URL of Onshape PartStudio
logging : bool
Whether to log API messages (default False)
Returns
-------
features : dict
A dictionary containing the part's features
"""
# Get doc ids and create Client
docid, wid, eid = _parse_url(url)
client = _create_client(logging)
# Get features
resp = client.get_features(docid, wid, eid)
features = _parse_resp(resp)
return features
def get_info(url, sketch_name=None, logging=False):
"""Retrieves possibly updated states of entities in a part's sketches.
Parameters
----------
url : str
URL of Onshape PartStudio
sketch_name : str
If provided, only the entity info for the specified sketch will be returned. Otherwise, the full response is returned.
logging : bool
Whether to log API messages (default False)
Returns
-------
sketch_info : dict
A dictionary containing entity info for sketches
"""
# Get doc ids and create Client
docid, wid, eid = _parse_url(url)
client = _create_client(logging)
# Get features
resp = client.sketch_information(docid, wid, eid)
sketch_info = _parse_resp(resp)
if sketch_name:
sketch_found = False
for sk in sketch_info['sketches']:
if sk['sketch'] == sketch_name:
sketch_info = sk
sketch_found = True
break
if not sketch_found:
raise ValueError("No sketch found with given name.")
return sketch_info
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--url',
help='URL of Onshape PartStudio',required=True)
parser.add_argument('--action',
help='The API call to perform', required=True,
choices=['add_feature', 'get_features', 'get_info', 'update_template'])
parser.add_argument('--payload_path',
help='Path to payload being sent to Onshape', default=None)
parser.add_argument('--output_path',
help='Path to save result of API call', default=None)
parser.add_argument('--enable_logging',
help='Whether to log API messages', action='store_true')
parser.add_argument('--sketch_name',
help='Optional name for sketch', default=None)
args = parser.parse_args()
# Parse the URL
_, _, docid, _, wid, _, eid = urllib.parse.urlparse(args.url).path.split('/')
# Create client
client = Client(stack='https://cad.onshape.com',
logging=args.enable_logging)
# Perform the specified action
if args.action =='add_feature':
# Add a sketch to a part
if not args.payload_path:
raise ValueError("payload_path required when adding a feature")
with open(args.payload_path, 'r') as fh:
sketch_dict = json.load(fh)
add_feature(args.url, sketch_dict, args.sketch_name,
args.enable_logging)
elif args.action == 'get_features':
# Retrieve features from a part
features = get_features(args.url, args.enable_logging)
_save_or_print_resp(features, output_path=args.output_path)
elif args.action == 'get_info':
# Retrieve possibly updated states of entities in a part's sketches
sketch_info = get_info(args.url, args.sketch_name, args.enable_logging)
_save_or_print_resp(sketch_info, output_path=args.output_path)
elif args.action == 'update_template':
# Updates version identifiers in template
update_template(args.url, args.enable_logging)
if __name__ == '__main__':
main()
``` |
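A minimal usage sketch of the helpers above. The document/workspace/element ids in the URL are placeholders, the import path assumes the `sketchgraphs` package layout shown in the file header, and configured Onshape API credentials are assumed for the `Client`.
```python
# Hedged sketch; URL ids are placeholders, API credentials assumed to be configured.
from sketchgraphs.onshape import call

URL = 'https://cad.onshape.com/documents/DOC_ID/w/WORKSPACE_ID/e/ELEMENT_ID'
features = call.get_features(URL, logging=True)     # full feature list, including version ids
info = call.get_info(URL, sketch_name='My Sketch')  # entity states for a single sketch
```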
{
"source": "JoeLanglands/MICE-MagneticFieldMapping",
"score": 2
} |
#### File: MICE-MagneticFieldMapping/micemag/core.py
```python
import os
import pickle
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import utils
from fbutils import applyfb as appFB
from fbutils import fbfit as fitFB
from fieldmanip.readData import readFile
from fieldmanip import fieldManipulation as fm
from fieldmanip import polarMeasurement as rphiz
from plotting import plots3d as p3d
from makefields import mkfieldclass as mkfield
from geofit import geofit
from geofit import coilfit
"""
This core module as the name suggests contains a few functions that could be considered as core
features of this package. Everything that you definitely would want to do is defined as a function
here.
"""
def performFBfit(residField, magnet, coil, zmax=None, rmax=0.15, n=3, l=20, m=10,\
verbose=True, saveAs=None):
if zmax==None:
if coil in ['CC', 'ECE']:
zmax = 1.8
else:
zmax = 1.0
if type(residField) == type('string'):
fb_cls = fitFB.FBfitClass(readFile(os.path.join(utils.resid_field_path, residField)), \
coil, magnet, zmax, rmax, n, l, m, verbose, saveAs)
else:
fb_cls = fitFB.FBfitClass(residField, coil, magnet, zmax, rmax, n, l, m, \
verbose, saveAs)
fb_cls.run()
def showFBfield(_residField, magnet, coil, fitDict=None, nCores=1):
if type(_residField) == type('string'):
residField = readFile(os.path.join(utils.resid_field_path, _residField))
else:
residField = _residField
if fitDict == None:
_fitDict = appFB.getDefaultFitDict(coil, magnet)
else:
        with open(os.path.join(utils.fb_pickle_path, fitDict), 'rb') as _pickle:
_fitDict = pickle.load(_pickle)
fb_field = appFB.applyFB_field(residField, _fitDict, coil, magnet, FBonly=True, nCores=nCores)
p3d.wireFrame(residField, fb_field)
def buildG4BLfield(magDict, gridDict, saveAs=None, FBonly=False, coil=True):
"""Builds a magnetic field of SSU/SSD and prints it out to a .table file in g4blgrid format.
Args:
magDict (dict): Dictionary containing magnet, coil currents and custom fitDict paths.
If fitDict paths are not specified it pulls the default ones.
gridDict (dict): Dictionary containing information about the grid in which to calculate
the field over.
saveAs (str): Name that the user wishes to call the outputted field (no need to supply
full path). If None (default value), the magnet name + todays date is used.
FBonly (bool): When True: calculate only FB terms. When False: calculate geofit+FB terms,
i.e the full model field is output.
coil (bool): When true, the full field is calculated from the coil fit model. If false,
the geometrical fit model is used instead.
Returns:
Doesn't return anything. The outputted field is saved at data/MAUS/saveAs.table.
Todo:
*The scaleList part could change? May need support so that it can be adjusted by the user
"""
print 'Calculating field map for magnet:', magDict['magnet']
print 'With currents:'
print '\n\t M1 -> %.2f A\n\t M2 -> %.2f A\n\t ECE -> %.2f A\n'%(magDict['M1']['I'], \
magDict['M2']['I'], \
magDict['CC']['I'])
if FBonly == False and coil == True:
coilfit_calc = get_coilfit_class(magDict)
print 'This could take a while...'
if saveAs == None:
_date = time.localtime()
saveAs = '%s_%s%02d%02d.table'%(magDict['magnet'], _date.tm_year, \
_date.tm_mon, _date.tm_mday)
xNsteps = int((gridDict['x']['end'] + gridDict['x']['step'])/gridDict['x']['step'])
xARR = np.linspace(gridDict['x']['start'], gridDict['x']['end'], xNsteps)
yNsteps = int((gridDict['y']['end'] + gridDict['y']['step'])/gridDict['y']['step'])
yARR = np.linspace(gridDict['y']['start'], gridDict['y']['end'], yNsteps)
zNsteps = int((gridDict['z']['end'] + gridDict['z']['step'])/gridDict['z']['step'])
zARR = np.linspace(gridDict['z']['start'], gridDict['z']['end'], zNsteps)
scaleList = [' 1 X [1e3]\n', ' 2 Y [1e3]\n', ' 3 Z [1e3]\n', \
' 4 BX [1e-3]\n', ' 5 BY [1e-3]\n', ' 6 BZ [1e-3]\n', ' 0\n']
print 'Writing out %d field points'%(xNsteps*yNsteps*zNsteps)
count = 1
start_time = time.time()
with open(os.path.join(utils.maus_field_path, saveAs), 'w') as _output:
_output.write('\t%d\t%d\t%d\t1\n'%(xNsteps, yNsteps, zNsteps))
for i in scaleList:
_output.write(i)
for _x in xARR:
for _y in yARR:
for _z in zARR:
if FBonly == True:
Bx, By, Bz = appFB.applyFB_grid(magDict, _x, _y, _z, 0, 0, 0)
elif FBonly == False:
_Bx, _By, _Bz = coilfit_calc.calc_full_field_at_point_xyz(_x, _y, _z)
Bx, By, Bz = appFB.applyFB_grid(magDict, _x, _y, _z, _Bx, _By, _Bz)
_output.write('{:.3f}\t{:.3f}\t{:.3f}\t{:.8f}\t{:.8f}\t{:.8f}\n'.format( \
_x, _y,_z, Bx, By, Bz))
utils.progressBar(count, xNsteps*yNsteps*zNsteps, start_time, time.time())
count += 1
print 'Finished! File can be found at %s'%os.path.join(utils.maus_field_path, saveAs)
def perform_coil_fit(magnet, coil, FBfit=False, makeresid=True, save_as=None, verbose=True):
if magnet.upper() not in ['SSU', 'SSD']:
print 'Magnet unrecognised - please use SSU or SSD'
return
if coil.upper() not in ['M1', 'M2', 'CC', 'ECE']:
print 'Coil unrecognised - please use M1, M2, CC or ECE'
print '\tN.B You can not fit to the end coils individually, only to E1-CC-E2'
return
if coil.upper() == 'CC':
coil = 'ECE'
if save_as == None:
save_str = os.path.join(utils.geofit_field_path, magnet.upper() + '_' + coil.upper() \
+ '_coilfit_default.pickle')
else:
save_str = os.path.join(utils.geofit_field_path, save_as)
if coil.upper() in ['M1', 'M2']:
print 'Performing coil fit on', magnet.upper(), coil.upper()
if utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'] == None:
print 'No data to fit to for this magnet!'
return
_centre = utils.centres_dict[magnet.upper()]['mapper'][coil.upper()]
_field = readFile(utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'])
if magnet.upper() == 'SSD':
_field = fm.flip_SSD_data(_field)
coilFitClass = coilfit.CoilFitClass(utils.coil_datacards[magnet.upper()][coil.upper()], \
_field, _centre)
fitDict = coilFitClass.run()
print 'Finished with parameters: '
for key, value in fitDict.iteritems():
print key, value
print 'Saved fit parameters at: ', save_str
with open(save_str, 'wb') as save_pickle:
pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL)
elif coil.upper() in ['CC', 'ECE']:
print 'Performing coil fit on', magnet.upper(), 'ECE'
cc_param = utils.coil_datacards[magnet.upper()]['CC']
e1_param = utils.coil_datacards[magnet.upper()]['E1']
e2_param = utils.coil_datacards[magnet.upper()]['E2']
cc_centre = utils.centres_dict[magnet.upper()]['mapper']['CC']
e1_centre = utils.centres_dict[magnet.upper()]['mapper']['E1']
e2_centre = utils.centres_dict[magnet.upper()]['mapper']['E2']
_field = readFile(utils.coil_datacards[magnet.upper()]['CC']['30A_data'])
if magnet.upper() == 'SSD':
_field = fm.flip_SSD_data(_field)
coilFitClass = coilfit.CoilFitClass_ECE(cc_param, e1_param, e2_param, _field, cc_centre, \
e1_centre, e2_centre)
fitDict = coilFitClass.run()
print 'Finished with parameters: '
for key, value in fitDict.iteritems():
print key
for _k, _v in value.iteritems():
print _k, _v
print 'Saved fit parameters at: ', save_str
with open(save_str, 'wb') as save_pickle:
pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL)
if FBfit == True:
residField = make_resid_field(magnet.upper(), coil.upper())
performFBfit(residField, magnet.upper(), coil.upper())
return fitDict
def perform_geofit(magnet, coil, makeresid=True, save_as=None):
if magnet.upper() not in ['SSU', 'SSD']:
print 'Magnet unrecognised - please use SSU or SSD'
return
if coil.upper() not in ['M1', 'M2', 'CC', 'ECE']:
print 'Coil unrecognised - please use M1, M2, CC or ECE'
print '\tN.B You can not fit to the end coils individually, only to E1-CC-E2'
return
if coil.upper() == 'CC':
coil = 'ECE'
if save_as == None:
save_str = os.path.join(utils.geofit_field_path, magnet.upper() + '_' + coil.upper() \
+ '_geofit_default.pickle')
else:
save_str = os.path.join(utils.geofit_field_path, save_as)
if coil.upper() in ['M1', 'M2']:
print 'Performing geometrical fit on', magnet.upper(), coil.upper()
if utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'] == None:
print 'No data to fit to for this magnet!'
return
_centre = utils.centres_dict[magnet.upper()]['mapper'][coil.upper()]
_field = readFile(utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'])
geoFitClass = geofit.GeoFit(utils.coil_datacards[magnet.upper()][coil.upper()], \
_field, _centre)
fitDict = geoFitClass.run()
print 'Finished with parameters: '
for key, value in fitDict.iteritems():
print key, value
print 'Saved fit parameters at: ', save_str
with open(save_str, 'wb') as save_pickle:
pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL)
return fitDict
elif coil.upper() in ['CC', 'ECE']:
pass
def get_coilfit_class(magDict):
coilFitDicts = []
currentList = []
_magnet = magDict['magnet']
for key, item in magDict.iteritems():
if key == 'CC':
if item['I'] < 0.001 and item['I'] > -0.001:
continue
pickle_str = '%s_ECE_coilfit_default.pickle'%_magnet
with open(os.path.join(utils.geofit_field_path, pickle_str)) as _handle:
ece_dict = pickle.load(_handle)
for _key, _dict in ece_dict.iteritems():
coilFitDicts.append(_dict)
currentList.append(item['I'])
elif key in ['M1', 'M2']:
if item['I'] < 0.001 and item['I'] > -0.001:
continue
pickle_str = '%s_%s_coilfit_default.pickle'%(_magnet, key)
with open(os.path.join(utils.geofit_field_path, pickle_str)) as _handle:
c_dict = pickle.load(_handle)
coilFitDicts.append(c_dict)
currentList.append(item['I'])
coilfit_class = mkfield.CalcFullField(coilFitDicts, currentList)
return coilfit_class
def make_resid_field(magnet, coil, coilfit=True, fitDict=None, saveAs=None, _current=30.0):
#I f*ing hate the mess that I have made this function... NEEDS CLEANING
dataFieldStr = utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data']
if coil.upper() == 'CC':
coil = 'ECE'
if fitDict == None:
if coilfit == True:
fitDictStr = '%s_%s_coilfit_default.pickle'%(magnet.upper(), coil.upper())
else:
fitDictStr = '%s_%s_geofit_default.pickle'%(magnet.upper(), coil.upper())
elif type(fitDict) == type('string!'):
fitDictStr = fitDict
elif type(fitDict) == type({}):
fitDictStr = 'N/A'
pass #Handle passing the actual fitDict here...
with open(os.path.join(utils.geofit_field_path, fitDictStr), 'rb') as _file:
fitDict = pickle.load(_file)
fitDictList, currentList = [], []
if coil == 'ECE':
for key, value in fitDict.iteritems():
fitDictList.append(value)
currentList.append(_current)
else:
fitDictList.append(fitDict)
currentList.append(_current)
if coilfit == True:
print 'Making residual field with coilfit using', fitDictStr, 'with data field', dataFieldStr
calcFieldClass = mkfield.CalcFullField(fitDictList, currentList)
dataField = readFile(dataFieldStr)
if magnet == 'SSD':
dataField = fm.flip_SSD_data(dataField)
residualField = []
for f in dataField:
Br, Bphi, Bz = calcFieldClass.calc_full_field_at_point(f.r, f.phi, f.z)
residualField.append(rphiz.Measurement(f.r, f.phi, f.z, f.Br - Br, f.Bphi - Bphi, \
f.Bz - Bz, f.sensorNumber))
if coilfit == False:
pass #need to implement calcgeofit class
if saveAs == None:
#obvs need to change this so it can handle geofit instead
saveAs = '%s_%s_coilfit_resid.dat'%(magnet.upper(), coil.upper())
saveAsFull = os.path.join(utils.resid_field_path, saveAs)
fm.print_field_from_list(residualField, saveAsFull)
return residualField
```
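The dictionary layouts that `buildG4BLfield` and `get_coilfit_class` expect are only implied by how the code indexes `magDict` and `gridDict`. The sketch below is a hedged reconstruction; the currents, grid bounds and file name are placeholders.
```python
# Hedged reconstruction of the inputs buildG4BLfield indexes; all values are placeholders.
magDict = {
    'magnet': 'SSU',
    'M1': {'I': 30.0, 'fb': None},  # coil currents in amps; 'fb' of None -> default FB pickle
    'M2': {'I': 0.0,  'fb': None},
    'CC': {'I': 30.0, 'fb': None},
}
gridDict = {
    'x': {'start': -0.15, 'end': 0.15, 'step': 0.01},  # metres
    'y': {'start': -0.15, 'end': 0.15, 'step': 0.01},
    'z': {'start': -2.0,  'end': 2.0,  'step': 0.01},
}
# core.buildG4BLfield(magDict, gridDict, saveAs='SSU_test.table', FBonly=True)
```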
#### File: micemag/fbutils/applyfb.py
```python
import multiprocessing as mp
import pickle
import sys
import os
import scipy as sp
import numpy as np
import scipy.special as spec
import fbutils as _fb
from micemag.fieldmanip import polarMeasurement as rphiz
import micemag.utils as utils
#Consolidate all of this into a class to remove need for global values etc..
def getDefaultFitDict(coil, magnet):
if coil.upper() == 'CC':
coil = 'ECE'
picklePath = os.path.join(utils.fb_pickle_path, '%s_%s_3_20_10.pickle'%(magnet, coil))
try:
with open(picklePath, 'rb') as _pickle:
fitDict = pickle.load(_pickle)
return fitDict
except IOError:
print 'Attempted to load pickle:', picklePath
print 'Default FB term pickle not found! Has it been deleted?'
print 'You need to provide the name of the pickle that you wish to use'
sys.exit()
def applyFB_field(field, _fitDict, coil, magnet, FBonly=False, nCores=1):
global fitDict
fitDict = _fitDict
global jZeros
jZeros = _fb.genBesselZeros(fitDict['n'], fitDict['m'])
global _FBonly
_FBonly = FBonly
_fb.centreField(field, coil, magnet)
fieldPool = mp.Pool(nCores)
field = fieldPool.map(calcFB_unpack, field)
field.sort()
return field
def applyFB_grid(magDict, x, y, z, Bx, By, Bz):
global fitDict
global jZeros
_mag = magDict['magnet']
_r, _phi, _z, _Br, _Bphi, _Bz = cartToPolar(x, y, z, Bx, By, Bz)
if _r > 0.15:
return Bx, By, Bz #Can't add fb terms past rmax
for _coil in ['CC', 'M1', 'M2']:
if magDict[_coil]['I'] == 0:
continue
if magDict[_coil]['fb'] == None:
fitDict = getDefaultFitDict(_coil, magDict['magnet'])
else:
pass #need to handle non default dicts here
jZeros = _fb.genBesselZeros(fitDict['n'], fitDict['m'])
current_scale = magDict[_coil]['I']/30.0
coil_centre = utils.centres_dict[_mag]['mapper'][_coil]
Z = _z - coil_centre
if Z < (-1.0)*fitDict['zmax'] or Z > fitDict['zmax']:
continue
BrFB, BphiFB, BzFB = calcBrBphiBz(_r, _phi, Z)
_Br += BrFB*current_scale
_Bphi += BphiFB*current_scale
_Bz += BzFB*current_scale
X, Y, Z, _Bx, _By, _Bz = polarToCart(_r, _phi, _z, _Br, _Bphi, _Bz)
return _Bx, _By, _Bz
def cartToPolar(x, y, z, Bx, By, Bz, deg=False):
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
_matrix = np.array([[np.cos(phi), -1.0*np.sin(phi)], [np.sin(phi), np.cos(phi)]])
_matrix = np.transpose(_matrix)
B = np.array([Bx, By])
R = _matrix.dot(B)
Br = R[0]
Bphi = R[1]
if deg == True:
phi = np.degrees(phi)
if phi < 0.0:
phi = 360.0 + phi
elif phi > 360:
phi = phi - 360.0
return r, phi, z, Br, Bphi, Bz
def polarToCart(r, phi, z, Br, Bphi, Bz):
x = r*np.cos(phi)
y = r*np.sin(phi)
_matrix = np.array([[np.cos(phi), -1.0*np.sin(phi)], [np.sin(phi), np.cos(phi)]])
B = np.array([Br, Bphi])
X = _matrix.dot(B)
Bx = X[0]
By = X[1]
return x, y, z, Bx, By, Bz
def calcFB_unpack(point):
global fitDict
global _FBonly
if point.z >= (-1.0)*fitDict['zmax'] and point.z <= fitDict['zmax'] and point.r <= 0.15:
_Br, _Bphi, _Bz = calcBrBphiBz(point.r, np.radians(point.phi), point.z)
else:
_Br, _Bphi, _Bz = 0, 0, 0
if _FBonly == False:
return rphiz.Measurement(point.r, point.phi, point.z,\
point.Br + _Br, point.Bphi+ _Bphi, point.Bz + _Bz, \
point.sensorNumber)
elif _FBonly == True:
return rphiz.Measurement(point.r, point.phi, point.z,\
_Br, _Bphi, _Bz, point.sensorNumber)
def calcBrBphiBz(r, phi, z):
"""Calculates the Fourier Bessel field components at a point from the fitDict.
phi must be in radians!
"""
global fitDict
Br, Bphi, Bz = 0, 0, 0
for _n in range(fitDict['n']):
_Brl0, _Bphil0, _Bzl0 = _calcl0terms(fitDict['A_%d_0'%_n], \
fitDict['al_%d_0'%_n], \
_n, r, phi, z)
_BrE, _BphiE, _BzE = _calcEterms(fitDict['E_%d'%_n], \
fitDict['ep_%d'%_n], \
_n, r, phi)
Br += _Brl0 + _BrE
Bphi += _Bphil0 + _BphiE
Bz += _Bzl0 #_BzE is *always* 0
for _l in range(1, fitDict['l'] + 1):
_BrA, _BphiA, _BzA = _calcAterms(fitDict['A_%d_%d'%(_n,_l)], \
fitDict['al_%d_%d'%(_n, _l)], \
_n, _l, r, phi, z)
_BrB, _BphiB, _BzB = _calcBterms(fitDict['B_%d_%d'%(_n,_l)], \
fitDict['be_%d_%d'%(_n, _l)], \
_n, _l, r, phi, z)
Br += _BrA + _BrB
Bphi += _BphiA + _BphiB
Bz += _BzA + _BzB
for _m in range(1, fitDict['m'] + 1):
_BrC, _BphiC, _BzC = _calcCterms(fitDict['C_%d_%d'%(_n, _m)], \
fitDict['ga_%d_%d'%(_n, _m)], \
_n, _m, r, phi, z)
_BrD, _BphiD, _BzD = _calcDterms(fitDict['D_%d_%d'%(_n, _m)], \
fitDict['de_%d_%d'%(_n, _m)], \
_n, _m, r, phi, z)
Br += _BrC + _BrD
Bphi += _BphiC + _BphiD
Bz += _BzC + _BzD
return Br, Bphi, Bz
def _calcAterms(A, al, n, l, r, phi, z):
global fitDict
sc = np.pi/(fitDict['zmax']*1.2)
Br = A*spec.ivp(n, l*sc*r)*np.cos(n*phi + al)*np.sin(l*sc*z)
if r == 0:
Bphi = 0
else:
Bphi = (-1)*A*(1/(l*sc*r))*spec.iv(n, l*sc*r)*np.sin(n*phi + al)*np.sin(l*sc*z)
Bz = A*spec.iv(n, l*sc*r)*np.cos(n*phi + al)*np.cos(l*sc*z)
return Br, Bphi, Bz
def _calcBterms(B, be, n, l, r, phi, z):
global fitDict
sc = np.pi/(fitDict['zmax']*1.2)
Br = B*spec.ivp(n, l*sc*r)*np.cos(n*phi + be)*np.cos(l*sc*z)
if r == 0:
Bphi = 0
else:
Bphi = (-1)*B*(1/(l*sc*r))*spec.iv(n, l*sc*r)*np.sin(n*phi + be)*np.cos(l*sc*z)
Bz = (-1)*B*spec.iv(n, l*sc*r)*np.cos(n*phi + be)*np.sin(l*sc*z)
return Br, Bphi, Bz
def _calcl0terms(A, al, n, r, phi, z):
if r == 0 and n == 0:
Br, Bphi = 0, 0
else:
Br = A*n*np.power(r, n-1)*np.cos(n*phi + al)*z
Bphi = (-1)*A*np.power(r, n-1)*np.sin(n*phi + al)*z
Bz = A*np.power(r, n)*np.cos(n*phi + al)
return Br, Bphi, Bz
def _calcCterms(C, ga, n, m, r, phi, z):
global jZeros
global fitDict
sc = jZeros[n][m]/fitDict['rmax']
Br = C*spec.jvp(n, sc*r)*np.cos(n*phi + ga)*np.sinh(sc*z)
if r == 0:
Bphi = 0
else:
Bphi = (-1)*C*(1/(sc*r))*spec.jv(n, sc*r)*np.sin(n*phi + ga)*np.sinh(sc*z)
Bz = C*spec.jv(n, sc*r)*np.cos(n*phi + ga)*np.cosh(sc*z)
return Br, Bphi, Bz
def _calcDterms(D, de, n, m, r, phi, z):
global jZeros
global fitDict
sc = jZeros[n][m]/fitDict['rmax']
Br = D*spec.jvp(n, sc*r)*np.cos(n*phi + de)*np.cosh(sc*z)
if r == 0:
Bphi = 0
else:
Bphi = (-1)*D*(1/(sc*r))*spec.jv(n, sc*r)*np.sin(n*phi + de)*np.cosh(sc*z)
Bz = D*spec.jv(n, sc*r)*np.cos(n*phi + de)*np.sinh(sc*z)
return Br, Bphi, Bz
def _calcEterms(E, ep, n, r, phi):
if r == 0 and n == 0:
Br, Bphi = 0, 0
else:
Br = E*n*np.power(r, n-1)*np.cos(n*phi + ep)
Bphi = (-1)*E*n*np.power(r, n-1)*np.sin(n*phi + ep)
Bz = 0
return Br, Bphi, Bz
```
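A standalone check of the coordinate rotation used by `cartToPolar`/`polarToCart` above: projecting the Cartesian field components into polar components with the transposed rotation matrix and rotating back recovers the original vector.
```python
import numpy as np

# Round-trip check of the field rotation used by cartToPolar/polarToCart.
x, y = 0.1, 0.05
Bx, By = 0.02, -0.01
phi = np.arctan2(y, x)
rot = np.array([[np.cos(phi), -np.sin(phi)],
                [np.sin(phi),  np.cos(phi)]])
Br, Bphi = rot.T.dot([Bx, By])   # Cartesian -> polar components (as in cartToPolar)
Bx2, By2 = rot.dot([Br, Bphi])   # polar -> Cartesian (as in polarToCart)
assert np.allclose([Bx, By], [Bx2, By2])
```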
#### File: micemag/fbutils/fbfit.py
```python
import pickle
import sys
import os
import scipy as sp
import numpy as np
import iminuit as minuit
import fbutils as _fb
import micemag.utils as _paths
class FBfitClass:
def __init__(self, field, coil, magnet, zmax=1.8, rmax=0.15, n=2, l=20, m=10, \
verbose=True, saveAs=None):
self.field = field
_fb.centreField(self.field, coil, magnet)
self.n = n
self.l = l
self.m = m
self.coil = coil
self.magnet = magnet
self.fitDict = {'n': n, 'l': l, 'm': m, 'zmax': zmax, 'rmax': rmax}
self.zmax = zmax
self.rmax = rmax
self.fouData, self.zstart, self.zend = self._getFourierData()
self.FourFitClass = _fb.FourierFitFcn(self.fouData, n, l, zmax=zmax, \
rmax=rmax,\
verbose=verbose)
self.HypFitClass = _fb.HyperbolicFitFcn(None, n, m, zmax=zmax, \
rmax=rmax, \
verbose=verbose)
self.MultFitClass = _fb.MultipoleFitFcn(None, n, rmax=rmax, verbose=verbose)
self.verbose = verbose
if saveAs == None:
self.saveAs = magnet + '_' + coil + '_%d_%d_%d.pickle'%(n, l, m)
else:
self.saveAs = saveAs
def run(self, save=True): #The main function that runs everything
self._runFourierFit()
self._getHyperbolicData()
self._runHyperbolicFit()
self._getMultipoleData()
self._runMultipoleFit()
self.saveDict(self.saveAs)
return self.fitDict
def saveDict(self, saveAs):
if saveAs[-7:] != '.pickle':
saveAs += '.pickle'
with open(os.path.join(_paths.fb_pickle_path, saveAs), 'wb') as _pickle:
pickle.dump(self.fitDict, _pickle, protocol=pickle.HIGHEST_PROTOCOL)
def _getFourierData(self):
_fourierData = []
for point in self.field:
if point.z >= -1*self.zmax and point.z <= self.zmax and point.r == self.rmax:
_fourierData.append((point.r, np.radians(point.phi),\
point.z, point.Bz))
_z = []
for d in _fourierData:
_z.append(d[2])
zstart = min(_z)
zend = max(_z)
return _fourierData, zstart, zend
def _runFourierFit(self):
if self.verbose == True:
_min = minuit.Minuit(self.FourFitClass)
else:
_min = minuit.Minuit(self.FourFitClass, pedantic=False)
_min.migrad()
self.fitDict = _fb.mergeDicts(self.fitDict, _min.values)
def _getHyperbolicData(self):
_hyperbolicData = []
for i in self.field:
if i.z > self.zstart - 0.001 and i.z < self.zstart + 0.001:
fourBz = self.FourFitClass.calcFourierTerms(i.r, np.radians(i.phi), i.z)
_hyperbolicData.append((i.r, np.radians(i.phi), i.z, \
i.Bz - fourBz))
if i.z > self.zend - 0.001 and i.z < self.zend + 0.001:
fourBz = self.FourFitClass.calcFourierTerms(i.r, np.radians(i.phi), i.z)
_hyperbolicData.append((i.r, np.radians(i.phi), i.z, \
i.Bz - fourBz))
self.HypFitClass.setData(_hyperbolicData)
return _hyperbolicData
def _runHyperbolicFit(self):
#This MUST be called AFTER _runFourierFit() and AFTER _getHyperbolicData
if self.verbose == True:
_min = minuit.Minuit(self.HypFitClass)
else:
_min = minuit.Minuit(self.HypFitClass, pedantic=False)
_min.migrad()
self.fitDict = _fb.mergeDicts(self.fitDict, _min.values)
def _getMultipoleData(self):
multipoleData = []
uniquePhis = {} # {phi: sumBr, phi2: sumBr2, ...}
for point in self.field:
if point.r == self.rmax and point.z >= self.zstart and point.z <= self.zend:
_r = point.r
_phi = np.radians(point.phi)
_z = point.z
if point.phi in uniquePhis:
uniquePhis[point.phi] += point.Br \
- self.FourFitClass.calcFourierTerms(_r, _phi, _z, \
comp='Br') \
- self.HypFitClass.calcHypTerms(_r, _phi, _z, \
comp='Br')
else:
uniquePhis[point.phi] = point.Br \
- self.FourFitClass.calcFourierTerms(_r, _phi, _z, \
comp='Br') \
- self.HypFitClass.calcHypTerms(_r, _phi, _z, \
comp='Br')
for key, value in uniquePhis.iteritems():
multipoleData.append((key, np.mean(value)))
self.MultFitClass.setData(multipoleData)
def _runMultipoleFit(self):
if self.verbose == True:
_min = minuit.Minuit(self.MultFitClass)
else:
_min = minuit.Minuit(self.MultFitClass, pedantic=False)
_min.migrad()
self.fitDict = _fb.mergeDicts(self.fitDict, _min.values)
def getFitDict(self):
return self.fitDict
```
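A hedged sketch of driving the fit class above. The residual-field filename is a placeholder, the package imports assume the `micemag` layout from the file headers, and the `pedantic` keyword plus dict-style `.values` imply the iminuit 1.x API.
```python
# Hedged sketch (Python 2 environment, iminuit 1.x assumed); the data path is a placeholder.
from micemag.fbutils.fbfit import FBfitClass
from micemag.fieldmanip.readData import readFile

resid = readFile('data/residuals/SSU_ECE_coilfit_resid.dat')
fit = FBfitClass(resid, coil='ECE', magnet='SSU', zmax=1.8, n=3, l=20, m=10, verbose=False)
fitDict = fit.run()  # Fourier -> hyperbolic -> multipole fits, then pickles the result
```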
#### File: micemag/plotting/plots3d.py
```python
import sys
import matplotlib.pyplot as pyplot
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import scipy.interpolate as sp
from micemag.fieldmanip import polarMeasurement as rphiz
def wireFrame(field, field2=None, comp='Bz', unit='T', _R=(0.15, 0.15), zlim=None, labels=('field1', 'field2'), **kwargs):
"""
This will be a generic version of ContourRPhiplot.py so that these plots can be made easily
"""
if unit == 'T':
_mult = 1
elif unit == 'mT':
_mult = 1000
elif unit == 'G':
_mult = 10000
else: #if it is ballsed up then just leave it as Tesla
unit = 'T'
_mult = 1
field = roundField(field, Z=True)
field.sort()
if zlim == None:
zlim = (field[0].z, field[-1].z)
if 'rphi' in kwargs:
X, Y, Z = _shapeDataRPHI(field, comp=comp)
else:
X, Y, Z = _shapeData(field, _R[0], comp, _mult, zlim)
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
if comp == 'Bphi':
z_label = r'$B_{\phi}$ [%s]'%unit
elif comp == 'B':
z_label = r'|$B$| [%s]'%unit
else:
z_label = r'$\Delta B_{%s}$'%comp[1] + ' [%s]'%unit
ax.set_xlabel('z [m]', fontsize=14)
ax.set_ylabel(r'$\phi$ [deg]',fontsize=14)
ax.set_zlabel(' ' +z_label, fontsize=14)
if 'title' in kwargs:
ax.set_title(kwargs['title'], fontsize=14)
ax.plot_wireframe(X, Y, Z, color='m', alpha=0.5, label=labels[0])
if field2 != None:
field2 = roundField(field2, Z=True)
X2, Y2, Z2 = _shapeData(field2, _R[1], comp, _mult, zlim)
ax.plot_wireframe(X2, Y2, Z2, color='r', alpha=0.5, label=labels[1])
ax.zaxis.set_rotate_label(False)
ax.zaxis._axinfo['label']['space_factor'] = 10
pyplot.tight_layout()
ax.view_init(30,-75)
if 'saveAs' in kwargs:
pyplot.savefig(kwargs['saveAs'])
ax.legend()
pyplot.show()
def _shapeData(field, _R=0.15, comp='Bz', _mult=1, zlim=(-1.0, 1.0)):
field.sort()
#checkGrid(field)
zStart = np.around(field[0].z, 2)
PhiList = []
_phi_skip = None
for f in field:
if np.around(f.r, 2) == _R and np.around(f.phi,0) in PhiList:
_phi_skip = np.around(f.phi,0)
break
if np.around(f.r, 2) == _R:
PhiList.append(np.around(f.phi,0))
if np.around(f.z, 2) != zStart:
break
_z, _phi, _B = [], [], []
skipPhi = False
for f in field:
if np.around(f.phi,0) == _phi_skip and skipPhi == True:
skipPhi = False
continue
if np.around(f.r,2) == _R and np.around(f.z,2) > zlim[0] and np.around(f.z,2) < zlim[1]:
#print f.z, f.phi
_z.append(np.around(f.z, 2))
_phi.append(np.around(f.phi,0))
if comp == 'Bz':
_B.append(f.Bz*_mult)
elif comp == 'Br':
_B.append(f.Br*_mult)
elif comp == 'Bphi':
_B.append(f.Bphi*_mult)
if f.phi == _phi_skip:
skipPhi = True
_z.sort()
_z = np.array(_z)
_phi = np.array(_phi)
_B = np.array(_B)
cols = np.unique(_phi).shape[0]
_X = _z.reshape(-1, cols)
_Y = _phi.reshape(-1, cols)
_Z = _B.reshape(-1, cols)
return _X, _Y, _Z
def _shapeDataRPHI(field, comp='Bz', _mult=1):
field.sort()
_z, _phi, _B = [], [], []
skipPhi = False
for f in field:
_z.append(np.around(f.r,2))
_phi.append(np.around(f.phi, 0))
if comp == 'Bz':
_B.append(f.Bz*_mult)
elif comp == 'Br':
_B.append(f.Br*_mult)
elif comp == 'Bphi':
_B.append(f.Bphi*_mult)
elif comp == 'B':
_B.append(f.B*_mult)
_z.sort()
print len(_z), len(_phi), len(_B)
_z = np.array(_z)
_phi = np.array(_phi)
_B = np.array(_B)
cols = np.unique(_phi).shape[0]
X = _z.reshape(-1, cols)
Y = _phi.reshape(-1, cols)
Z = _B.reshape(-1, cols)
return X, Y, Z
def roundField(field, zAround=3, **kwargs):
"""
This function basically truncates and rounds number in the data to get rid of the 0.000012345 from stuff.
When rotated a point is sometimes r = 0.11999 instead of 0.12 which is annoying.
Also it rounds the z value intelligently so instead of z being 0.1000009876 it is just 0.10.
"""
resultField = []
for f in field:
if f.r < 0.0001:
PHI = 0.0
else:
PHI = np.around(f.phi, 0)
if 'Z' in kwargs:
if kwargs['Z'] == True:
Z = np.around(f.z, zAround)
elif kwargs['Z'] == False:
Z = f.z
else:
Z = f.z
resultField.append(rphiz.Measurement(np.around(f.r, 2), PHI, Z, f.Br, f.Bphi, f.Bz, f.sensorNumber))
return resultField
``` |
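A hedged sketch of comparing a measured field against a model field with `wireFrame`. The field files are placeholders and an interactive matplotlib backend is assumed.
```python
# Hedged sketch; file names are placeholders, display backend assumed.
from micemag.fieldmanip.readData import readFile
from micemag.plotting import plots3d as p3d

measured = readFile('data/SSU_CC_30A.dat')
model = readFile('data/SSU_CC_30A_model.dat')
p3d.wireFrame(measured, model, comp='Bz', unit='mT', labels=('measured', 'model'))
```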
{
"source": "JoeLanska-NealAnalytics/microsoft-bonsai-api",
"score": 3
} |
#### File: plastic-extrusion/sim/extrusion_model.py
```python
from dataclasses import dataclass
import math
from sim import temperature as tm
from sim import units
π = math.pi
@dataclass
class ExtrusionModel:
ω: float # radians / second
Δω: float # radians / second
f_c: float # hertz
T: float # Kelvin
ΔT: float = 0 # Kelvin
Δt: float = 1 # second
# screw parameters
D: float = 2.5 * units.METERS_PER_INCH # meters
L_s: float = 60 * units.METERS_PER_INCH # meters
H: float = 0.24 * units.METERS_PER_INCH # meters
W: float = 0.45 * units.METERS_PER_INCH # meters
φ: float = math.radians(17.5) # radians
# die parameters
R_d: float = 0.5 * units.METERS_PER_INCH
L_d: float = 2 * units.METERS_PER_INCH
# extruder control actions
ω_max: float = (200 / 60) * units.RADIANS_PER_REVOLUTION # radians / second
f_max: float = 10 # Hz
# material parameters for viscosity power law model
# values for rigid PVC @ 180 deg C from [PTPL]
m: float = 1.7e4 # no units given in [PTPL], so I'm assuming Pa * s
n: float = 0.26 # (dimensionless)
# desired part specifications
L0: float = 1 * 12 * units.METERS_PER_INCH
ε: float = 0.1 * units.METERS_PER_INCH
def __post_init__(self):
# channel velocity (from [EH] Eq. 4.4)
self.V_z = π * self.D * math.cos(self.φ) * self.ω
# drag flow (from [EH] Eq. 4.3)
self.Q_D = 0.5 * self.W * self.H * self.V_z
# constant for pressure flow determined from screw geometry
self.β1 = (self.W * self.H ** 3 * math.sin(self.φ)) / (12 * self.L_s)
# Rabinowitsch correction to shear rate for non-Newtonian fluids
self.rabinowitsch_correction = (3 * self.n + 1) / (4 * self.n)
# die constant
self.k = (π * self.R_d ** 4) / (8 * self.L_d)
self.viscosity_screw()
self.viscosity_die()
# operating pressure
self.P_op = self.Q_D / (self.β1 / self.η_s + self.k / self.η_d)
# flow rate (see [WVFR])
self.Q_op = self.k * self.P_op / self.η_d
# cross-sectional area of the die
self.A = π * self.R_d ** 2
# linear fluid velocity
self.v = self.Q_op / self.A
# part length (assuming no extrudate swell)
self.L = self.v / self.f_c
self.production_efficiency()
def shear_rate_screw(self):
"""
Shear rate of the material in the barrel due to the rotation of the screw.
Returns
-------
γdot : float
Shear rate (1 / seconds).
References
----------
.. [EH] Eq. 8.1
"""
return π * self.D * self.ω / self.H
def shear_rate_die(self, Q):
"""
References
----------
.. [EH] Eq. 8.2
.. [D] p. 41
.. [WSR] <https://en.wikipedia.org/wiki/Shear_rate>
"""
return (4 * Q) / (π * self.R_d ** 3)
def viscosity_power_law(self, γdot):
"""
Returns
-------
η : float
Viscosity (Pascal * seconds).
References
----------
.. [D] p. 45
"""
return self.m * γdot ** (self.n - 1)
def temperature_adjustment(self):
"""Temperature adjustment to viscosity"""
self.ΔT = tm.temperature_change(Δω=self.Δω)
return tm.arrhenius(T1=self.T, T2=self.T + self.ΔT)
def viscosity_screw(self):
γdot = self.shear_rate_screw()
γdot *= self.rabinowitsch_correction
η_shear = self.viscosity_power_law(γdot=γdot)
h_temp = self.temperature_adjustment()
self.η_s = η_shear * h_temp
def viscosity_die(self):
γdot = self.shear_rate_die(Q=self.Q_D)
γdot *= self.rabinowitsch_correction
η_shear = self.viscosity_power_law(γdot=γdot)
self.η_d = η_shear
def length_within_tolerance(self):
return abs(self.L - self.L0) < self.ε
def production_efficiency(self):
# NOTE: the cutter frequency is equivalent to the number of parts
# produced per second. Since the simulation time step is also
# 1 second, this is also the number of parts per iteration.
parts_per_iteration = self.f_c * self.Δt
if self.length_within_tolerance():
self.yield_ = parts_per_iteration
else:
self.yield_ = 0
``` |
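A hedged usage sketch of the dataclass above. The import paths follow the `sim/` layout in the file header and are assumptions, as is the availability of `temperature_change`/`arrhenius` in `sim.temperature`.
```python
# Hedged usage sketch; import paths and module contents are assumptions.
from sim import units
from sim.extrusion_model import ExtrusionModel

ω = (100 / 60) * units.RADIANS_PER_REVOLUTION               # 100 RPM screw speed in rad/s
model = ExtrusionModel(ω=ω, Δω=0.0, f_c=2.0, T=190 + 273.15)  # 2 Hz cutter, 190 °C melt
print(model.Q_op)    # volumetric flow rate at the operating point
print(model.L)       # predicted part length per cut
print(model.yield_)  # parts per step that fall within the length tolerance
```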
{
"source": "JoeLanzi/CSCI_5030_TEAM_1",
"score": 3
} |
#### File: CSCI_5030_TEAM_1/webpage/autocorrect.py
```python
import ast
from preprocess import to_n_gram
from grammar_checker import Checker
import pickle
LANGUAGES = ast.literal_eval(open("language_short_names.txt", "r").read())
class Autocorrect:
def __init__(self, language = 'en-US') -> None:
self.language = language
self.tool = self.load_dictionary()
# Detects language
def language_detect(self,input_string = None) -> str:
if input_string != None:
self.input_string = input_string
# Language Identification using multinb
loaded_model = pickle.load(open('../model_training/new_models/multinb.pickle', 'rb'))
predict_lang = loaded_model.predict(to_n_gram(self.input_string))[0]
self.language = [k for k, v in LANGUAGES.items() if v == predict_lang][0]
print("Loading Dictionary")
self.tool = self.load_dictionary()
print(f'Language Detected: {LANGUAGES[self.language]}')
# Loads Dictionary
def load_dictionary(self, language = None):
language = self.language if language == None else language
self.language = language
return Checker(self.language)
# word suggession
def suggestion(self,input_string): # with probability
self.tool.tool(input_string)
return [self.tool.repeated_words,self.tool.correct_grammar]
# Output Grammer + Spelling correction
def correct(self,input_string):
#return self.tool.correct(input_string)
pass
# %% Tests
'''
correct = Autocorrect()
sentence = "an lá go mbeidh meáin na Gaeilge agus an Bhéarla ar comhchéim"
correct.language_detect(sentence.lower())
correct.suggestion(sentence.lower())
'''
#%% Spell check for html
'''
sentence = "this is a a sample sentece"
correct = Autocorrect()
correct.language_detect(sentence)
samplelist = correct.suggestion(sentence)
# %%
newlist = []
corrected = False
for i in range(len(sentence.split())):
try:
if sentence.split()[i] == sentence.split()[i+1]:
newlist.append('<div class="err">'+sentence.split()[i]+'</div>')
continue
elif ' '.join([sentence.split()[i],sentence.split()[i+1]]) in samplelist[1]:
newlist.append('<div class="err">'+' '.join([sentence.split()[i],sentence.split()[i+1]])+'</div>')
corrected = True
continue
newlist.append(sentence.split()[i])
except IndexError:
if not corrected:
newlist.append(sentence.split()[i])
else:
pass
' '.join(newlist)
'''
```
#### File: CSCI_5030_TEAM_1/webpage/main.py
```python
from flask import Flask, render_template, request
from autocorrect import Autocorrect
import ast
app = Flask(__name__)
autocorrect = Autocorrect()
LANGUAGES = ast.literal_eval(open("language_short_names.txt", "r").read())
#,myfunction=test_func
@app.route("/")
@app.route("/home")
def index():
return render_template("index.html")
# highlight grammar error, like repeats, & mispelling
@app.route("/home",methods = ["POST","GET"])
def result():
sentence = request.form['name']
sentence = sentence.lower()
autocorrect.language_detect(sentence)
suggestions = autocorrect.suggestion(sentence.lower())
newlist = []
correctedlist = []
for i in range(len(sentence.split())):
try:
if sentence.split()[i] == sentence.split()[i+1]:
newlist.append('<span class="repeat" style="color: red;"><strike>'+sentence.split()[i]+'</strike></span>')
continue
elif ' '.join([sentence.split()[i],sentence.split()[i+1]]) in suggestions[1] and len(suggestions[1][' '.join([sentence.split()[i],sentence.split()[i+1]])])!=0 and sentence.split()[i] not in correctedlist:
correctedlist.append(sentence.split()[i])
correctedlist.append(sentence.split()[i+1])
newlist.append(sentence.split()[i]) # first word
newlist.append('<span class="err">'+sentence.split()[i+1]+'</span>') # second word
else:
if sentence.split()[i] not in correctedlist:
newlist.append(sentence.split()[i])
except IndexError:
if sentence.split()[i] not in correctedlist:
newlist.append(sentence.split()[i])
language = LANGUAGES[autocorrect.language]
correction = []
for i in correctedlist:
if len(autocorrect.suggestion(i)[1])!=0:
correction.append(autocorrect.suggestion(i)[1])
return render_template("index.html",name = language+': '+' '.join(newlist),sample=sentence,suggestions=correction)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True)
'''#%%%%%%%%%%%%%%% TEST %%%%%%%%%%%%%%%%%#
sentence = 'this is a sample sentence to to see if everuthing is working accordingn to plann'
sentence = sentence.lower()
autocorrect.language_detect(sentence)
suggestions = autocorrect.suggestion(sentence.lower())
print(sentence,':',autocorrect.language,':',suggestions)
#%%
newlist = []
correctedlist = []
for i in range(len(sentence.split())):
print(i,' ',sentence.split()[i])
try:
if sentence.split()[i] == sentence.split()[i+1]:
newlist.append('<span class="repeat" style="color: red;"><strike>'+sentence.split()[i]+'</strike></span>')
print('------------- repeat')
continue
elif ' '.join([sentence.split()[i],sentence.split()[i+1]]) in suggestions[1] and len(suggestions[1][' '.join([sentence.split()[i],sentence.split()[i+1]])])!=0 and sentence.split()[i] not in correctedlist:
correctedlist.append(sentence.split()[i+1])
newlist.append(sentence.split()[i]) # first word
newlist.append('<span class="err">'+sentence.split()[i+1]+'</span>') # second word
print("--------- correction")
else:
if sentence.split()[i] not in correctedlist:
newlist.append(sentence.split()[i])
except IndexError:
if sentence.split()[i] not in correctedlist:
newlist.append(sentence.split()[i])
print(correctedlist)
# %%
print(newlist)
# %%
def sample():
give = []
for i in correctedlist:
give.append((i , autocorrect.suggestion(i)[1][i]))
return(give)
sample()
# %%'''
``` |
{
"source": "joelarmstrong/analysis-purgatory",
"score": 3
} |
#### File: analysis-purgatory/splitting-top-down/simulator_test.py
```python
import unittest
import random
from collections import Counter
from hypothesis import given, assume, settings, note
from hypothesis.strategies import text, builds, floats, sampled_from, composite, random_module, integers
from Bio import Phylo
from Bio.Phylo.BaseTree import Tree, Clade
from StringIO import StringIO
from simulator import GeneralizedReversibleSimulator, BirthDeathSimulator, prune_lineages
# random data strategies
probability = floats(min_value=0.0, max_value=1.0)
non_zero_probability = floats(min_value=0.01, max_value=1.0)
random_DNA = text(alphabet=['A', 'a', 'C', 'c', 'G', 'g', 'T', 't'])
# We need to use only somewhat realistic distances, because the
# matrix exponential is only approximate and becomes
# inaccurate at very high distances.
random_distance = floats(min_value=0.0, max_value=5)
@composite
def random_tree(draw, max_depth=5):
root = draw(random_clade(max_depth=max_depth))
root.branch_length = None
return Tree(root)
@composite
def random_clade(draw, depth=0, max_depth=8):
name = draw(text())
branch_length = draw(random_distance)
children = []
if depth < max_depth:
num_children = draw(integers(min_value=0, max_value=4))
for _ in xrange(num_children):
children.append(draw(random_clade(depth=depth+1, max_depth=max_depth)))
return Clade(name=name, branch_length=branch_length, clades=children)
@composite
def randomGRT(draw):
frac_a = draw(non_zero_probability)
frac_c = draw(non_zero_probability)
frac_g = draw(non_zero_probability)
frac_t = draw(non_zero_probability)
# Normalize the equilibrium frequencies
sum_frac = frac_a + frac_c + frac_g + frac_t
frac_a /= sum_frac
frac_c /= sum_frac
frac_g /= sum_frac
frac_t /= sum_frac
a_c = draw(non_zero_probability)
a_g = draw(non_zero_probability)
a_t = draw(non_zero_probability)
c_g = draw(non_zero_probability)
c_t = draw(non_zero_probability)
g_t = draw(non_zero_probability)
# Normalize the change parameters
sum_change = 2*frac_a*frac_c*a_c + \
2*frac_a*frac_g*a_g + \
2*frac_a*frac_t*a_t + \
2*frac_c*frac_g*c_g + \
2*frac_c*frac_t*c_t + \
2*frac_g*frac_t*g_t
a_c /= sum_change
a_g /= sum_change
a_t /= sum_change
c_g /= sum_change
c_t /= sum_change
g_t /= sum_change
sum_change = 2*frac_a*frac_c*a_c + \
2*frac_a*frac_g*a_g + \
2*frac_a*frac_t*a_t + \
2*frac_c*frac_g*c_g + \
2*frac_c*frac_t*c_t + \
2*frac_g*frac_t*g_t
return GeneralizedReversibleSimulator(frac_a, frac_c, frac_g, a_c, a_g, a_t, c_g, c_t, g_t)
class GRTSimulatorTest(unittest.TestCase):
char_to_frac_param = { 'A': 'frac_a', 'C': 'frac_c', 'G': 'frac_g' }
chars_to_change_param = { ('A', 'C'): 'a_c', ('A', 'G'): 'a_g', ('A', 'T'): 'a_t',
('C', 'G'): 'c_g', ('C', 'T'): 'c_t', ('G', 'T'): 'g_t' }
default_params = { 'frac_a': 0.25, 'frac_c': 0.25, 'frac_g': 0.25,
'a_c': 0.25, 'a_g': 0.25, 'a_t': 0.25, 'c_g': 0.25, 'c_t': 0.25,
'g_t': 0.25 }
default_sim = GeneralizedReversibleSimulator(frac_a=0.25, frac_c=0.25, frac_g=0.25,
a_c=0.25, a_g=0.25, a_t=0.25, c_g=0.25, c_t=0.25,
g_t=0.25)
def test_increasing_change_probability(self):
"""Increasing the change probability should be reflected in the probability."""
initial_probability = self.default_sim.probability('A', 'T', 1.0)
sim = GeneralizedReversibleSimulator(frac_a=0.25, frac_c=0.25, frac_g=0.25,
a_c=0.25, a_g=0.25, a_t=0.4, c_g=0.25, c_t=0.25, g_t=0.25)
self.assertGreater(sim.probability('A', 'T', 1.0), initial_probability)
def test_increasing_proportion(self):
"""Increasing the proportion of a character should be reflected in the probability."""
for char in ['A', 'C', 'G']:
params = self.default_params.copy()
params[self.char_to_frac_param[char]] = 0.4
other_chars = [c for c in ['A', 'C', 'G'] if c != char]
for other_char in other_chars:
params[self.char_to_frac_param[other_char]] = 0.2
sim = GeneralizedReversibleSimulator(**params)
initial_probability = self.default_sim.probability(char, char, 1.0)
self.assertGreater(sim.probability(char, char, 1.0), initial_probability)
@given(randomGRT(), sampled_from(['A', 'C', 'G', 'T']), random_distance)
def test_probability_sums_to_1(self, sim, char, distance):
"""Test that the probability from a character to all characters sums to 1.0."""
assume(distance > 0)
total_probability = sim.probability(char, 'A', distance) + sim.probability(char, 'C', distance) + sim.probability(char, 'G', distance) + sim.probability(char, 'T', distance)
self.assertAlmostEqual(total_probability, 1.0)
@given(randomGRT(), random_DNA, random_distance)
def test_mutate_gives_same_length_sequence(self, sim, sequence, distance):
mutated = sim.mutate(sequence, distance)
self.assertEqual(len(mutated), len(sequence))
@given(randomGRT(), random_DNA)
def test_generate_leaf_sequences_gives_same_length_sequence(self, sim, sequence):
species_tree = Phylo.read(StringIO('((((HUMAN:0.006969, CHIMP:0.009727):0.025291, RHESUS:0.044568):0.11,(MOUSE:0.072818, RAT:0.081244):0.260342):0.023260,((DOG:0.07, CAT:0.07):0.087381,((PIG:0.06, COW:0.06):0.104728,HORSE:0.05):0.05):0.04);'), 'newick')
leaf_sequences = sim.generate_leaf_sequences(species_tree, sequence)
self.assertEqual(len(leaf_sequences), 10)
self.assertTrue(all([leaf in leaf_sequences for leaf in ['HUMAN', 'CHIMP', 'RHESUS', 'MOUSE', 'RAT', 'DOG', 'CAT', 'PIG', 'COW', 'HORSE']]))
self.assertTrue(all([len(leaf_sequence) == len(sequence) for leaf_sequence in leaf_sequences.values()]))
class BirthDeathSimulatorTest(unittest.TestCase):
def test_prune_lineages(self):
tree = Phylo.read(StringIO('((a:0.1,b:0.1):0.1,c:0.1);'), 'newick')
pruned_tree = prune_lineages(tree, tree.find_clades(name='c'))
fake_file = StringIO()
Phylo.write(pruned_tree, fake_file, 'newick')
self.assertEqual(fake_file.getvalue(), '(a:0.10000,b:0.10000):0.10000;\n')
@settings(max_examples=2000, timeout=0)
@given(random_tree(), probability, probability, random_module())
def test_extinct_lineages_are_pruned(self, tree, duplication_rate, loss_rate, random_module):
# Duplication rate should be reasonable (if it is high we get
# an explosion in gene tree size)
assume(duplication_rate < 0.2)
seed = random.random()
sim = BirthDeathSimulator(tree, duplication_rate, loss_rate)
random.seed(seed)
tree_with_extinctions = sim.generate(remove_extinct_lineages=False)
random.seed(seed)
tree_without_extinctions = sim.generate()
note('With extinctions: %s' % tree_with_extinctions)
note('Without extinctions: %s' % tree_without_extinctions)
names_with_extinctions = [node.name for node in tree_with_extinctions.get_terminals() if node != tree_with_extinctions.root]
names_without_extinctions = [node.name for node in tree_without_extinctions.get_terminals() if node != tree_without_extinctions.root]
self.assertEqual(Counter(names_without_extinctions),
Counter(name for name in names_with_extinctions if 'extinct' not in name))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joelarmstrong/cactusBenchmarks",
"score": 3
} |
#### File: cactusBenchmarks/src/bioio.py
```python
import os
import subprocess
import tempfile
import sys
def system(cmd):
"""Run a command or die if it fails"""
sts = subprocess.call(cmd, shell=True, bufsize=-1, stdout=sys.stdout, stderr=sys.stderr)
if sts != 0:
raise RuntimeError("Command: %s exited with non-zero status %i" % (cmd, sts))
def getTempDirectory(rootDir=None):
"""
returns a temporary directory that must be manually deleted
"""
if rootDir is None:
return tempfile.mkdtemp()
else:
while True:
rootDir = os.path.join(rootDir, "tmp_" + getRandomAlphaNumericString())
if not os.path.exists(rootDir):
break
os.mkdir(rootDir)
os.chmod(rootDir, 0777) #Ensure everyone has access to the file.
return rootDir
def nameValue(name, value, valueType=str, quotes=False):
"""Little function to make it easier to make name value strings for commands.
"""
if valueType == bool:
if value:
return "--%s" % name
return ""
if value is None:
return ""
if quotes:
return "--%s '%s'" % (name, valueType(value))
return "--%s %s" % (name, valueType(value))
def popenCatch(command, stdinString=None):
"""Runs a command and return standard out.
"""
if stdinString != None:
process = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
output, nothing = process.communicate(stdinString)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
output, nothing = process.communicate() #process.stdout.read().strip()
sts = process.wait()
if sts != 0:
raise RuntimeError("Command: %s with stdin string '%s' exited with non-zero status %i" % (command, stdinString, sts))
return output
``` |
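A minimal sketch of the command helpers above, assuming a Python 2 interpreter (the `0777` octal literal), a Unix-like shell, and that the module is importable as `bioio`. Note that `getTempDirectory` also calls `getRandomAlphaNumericString`, which is not defined in this file and must be supplied elsewhere.
```python
# Minimal sketch (Python 2, Unix shell assumed; module importable as `bioio`).
from bioio import nameValue, popenCatch

print(nameValue('threads', 4, int))                   # --threads 4
print(nameValue('verbose', True, bool))               # --verbose
print(nameValue('name', 'two words', quotes=True))    # --name 'two words'
print(popenCatch('tr a-z A-Z', stdinString='hello'))  # HELLO
```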
{
"source": "joelarmstrong/cwltest",
"score": 3
} |
#### File: cwltest/tests/test_categories.py
```python
import unittest
import os
from .util import run_with_mock_cwl_runner, get_data
import xml.etree.ElementTree as ET
class TestCategories(unittest.TestCase):
def test_unsupported_with_required_tests(self):
args = ["--test", get_data("tests/test-data/required-unsupported.yml")]
error_code, stdout, stderr = run_with_mock_cwl_runner(args)
self.assertEquals(error_code, 1)
self.assertEquals("Test [1/2] Required test that is unsupported (without tags)\n\n"
"Test [2/2] Required test that is unsupported (with tags)\n\n"
"0 tests passed, 2 failures, 0 unsupported features\n", stderr)
def test_unsupported_with_optional_tests(self):
args = ["--test", get_data("tests/test-data/optional-unsupported.yml")]
error_code, stdout, stderr = run_with_mock_cwl_runner(args)
self.assertEquals(error_code, 0)
self.assertEquals("Test [1/1] Optional test that is unsupported\n\n"
"0 tests passed, 1 unsupported features\n", stderr)
def test_error_with_optional_tests(self):
args = ["--test", get_data("tests/test-data/optional-error.yml")]
error_code, stdout, stderr = run_with_mock_cwl_runner(args)
self.assertEquals(error_code, 1)
self.assertIn("1 failures", stderr)
def test_category_in_junit_xml(self):
junit_xml_report = get_data("tests/test-data/junit-report.xml")
args = ["--test", get_data("tests/test-data/optional-error.yml"), "--junit-xml", junit_xml_report]
run_with_mock_cwl_runner(args)
tree = ET.parse(junit_xml_report)
root = tree.getroot()
category = root.find("testsuite").find("testcase").attrib['class']
self.assertEquals(category, "js, init_work_dir")
os.remove(junit_xml_report)
```
#### File: joelarmstrong/cwltest/gittaggers.py
```python
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '.']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
try:
self.tag_build = self.git_timestamp_tag()
except subprocess.CalledProcessError:
pass
return egg_info.tags(self)
``` |
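A hedged sketch of wiring the tagger above into a `setup.py` via setuptools' `cmdclass` hook; the package metadata is a placeholder.
```python
# Hedged sketch; package name/version are placeholders.
from setuptools import setup
from gittaggers import EggInfoFromGit

setup(
    name='example-package',
    version='1.0',
    packages=[],
    cmdclass={'egg_info': EggInfoFromGit},  # appends a git-commit timestamp to the build tag
)
```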
{
"source": "joelarmstrong/genbank-assembly-stats",
"score": 3
} |
#### File: joelarmstrong/genbank-assembly-stats/dump-tsv.py
```python
import os
from argparse import ArgumentParser
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Boolean, or_
from sqlalchemy.orm import relationship, sessionmaker
from tabulate import tabulate
from update_genbank_assembly_stats import Species, Assembly
Base = declarative_base()
def parse_args():
parser = ArgumentParser()
parser.add_argument('db', help='Path to database')
parser.add_argument('accessions', nargs='+')
return parser.parse_args()
def main():
opts = parse_args()
engine = create_engine('sqlite:///%s' % os.path.abspath(opts.db))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
print "\t".join(["Common Name", "Species Name", "Assembly Name", "Accession", "Scaffold N50", "Contig N50", "Family", "Order"])
for accession in opts.accessions:
assembly = session.query(Assembly).filter(Assembly.accession == accession).one()
print "\t".join([assembly.species.common_name, assembly.species.name, assembly.name, assembly.accession, str(assembly.scaffold_n50), str(assembly.contig_n50), assembly.species.family, str(assembly.species.order)])
session.close()
if __name__ == '__main__':
main()
``` |
{
"source": "joelarmstrong/treeBuildingEvaluation",
"score": 2
} |
#### File: treeBuildingEvaluation/znfTruth/renameNewick.py
```python
import sys
from sonLib.bioio import fastaRead
from sonLib.nxnewick import NXNewick
from sonLib.nxtree import NXTree
import networkx as nx
def induceTreeOnLeaves(nxtree, leaves):
leaves = set(leaves)
dg = nxtree.nxDg
nodesToKeep = []
for node in dg.nodes():
succ = set([nxtree.getName(i) for i in nx.dfs_postorder_nodes(dg, node) if nxtree.hasName(i)])
if len(succ.intersection(leaves)) != 0:
nodesToKeep.append(node)
return NXTree(dg.subgraph(nodesToKeep))
renameFile = open(sys.argv[1])
newickFile = open(sys.argv[2])
translate = {}
curPastaID = None
curRealName = None
for i, line in enumerate(renameFile):
line = line.strip()
if i % 3 == 0:
curPastaID = line
elif i % 3 == 1:
curRealName = line
else:
translate[curPastaID] = curRealName.replace("...", ".-.").replace(".", "_").replace("__", "_")
s = newickFile.read()
for header1, header2 in translate.items():
s = s.replace(header1, header2)
tree = NXNewick().parseString(s)
inducedTree = induceTreeOnLeaves(tree, translate.values())
print NXNewick().writeString(inducedTree)
``` |
{
"source": "JoelAtDeluxe/GolfChallenges",
"score": 3
} |
#### File: GolfChallenges/thanksgiving-feast/full.py
```python
import math
def determine_turkeys(num_white, num_dark, num_any):
min_turkeys = math.ceil(max(num_white / 4, num_dark / 3))
leftover_servings = (min_turkeys * 7) - num_white - num_dark
needs = num_any - leftover_servings
extras = 0 if needs <= 0 else math.ceil(needs/7)
result = int(min_turkeys + extras)
return result
def cook_turkeys(num):
return '\n'.join([' .---. _ ' * num,
" .' './ ) " * num,
' / _ _/ /\ ' * num,
' =(_____) (__/_/==' * num,
'==================' * num]) + '='
if __name__ == '__main__':
# print(cook_turkeys(determine_turkeys(0, 20, 0)))
print(determine_turkeys(5, 4, 10))
```
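A quick worked check of the serving arithmetic in `determine_turkeys`, assuming `full.py` above is importable as a module. Each turkey yields 4 white plus 3 dark servings, 7 in total; for `(5, 4, 10)`, `ceil(max(5/4, 4/3)) = 2` turkeys leave `2*7 - 5 - 4 = 5` spare servings, so the remaining `10 - 5 = 5` "any" requests need `ceil(5/7) = 1` more turkey, giving 3.
```python
from full import determine_turkeys  # assumes full.py above is on the path

assert determine_turkeys(5, 4, 10) == 3
assert determine_turkeys(0, 20, 0) == 7  # ceil(20 / 3) turkeys for dark meat alone
assert determine_turkeys(4, 3, 0) == 1   # one turkey covers exactly 4 white + 3 dark
```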
#### File: GolfChallenges/thanksgiving-feast/golfed.py
```python
def determine_turkeys(w, d, a):
m=max(-(w//-4),-(d//-3))
e=-(max(0,a+w+d-m*7)//-7)
s=m+e
return s
def cook_turkeys(num):
return '\n'.join(map(lambda x: x*num, [' .---. _ '," .' './ ) ",' / _ _/ /\ ',' =(_____) (__/_/==','=================='])) + '='
def G(w, d, a):
m=max(-(w//-4),-(d//-3))
e=-(max(0,a+w+d-m*7)//-7)
return '\n'.join(map(lambda x: x*(m+e), [' .---. _ '," .' './ ) ",' / _ _/ /\ ',' =(_____) (__/_/==','='])) + '='*18
if __name__ == '__main__':
print(G(3, 6, 3))
# print(determine_turkeys(5, 4, 10))
``` |
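The `-(a // -b)` idiom in the golfed version is integer ceiling division: floor-dividing by a negated divisor rounds toward negative infinity, and negating the result turns that into rounding up, which replaces `math.ceil(a / b)` in fewer characters.
```python
import math

# -(a // -b) == ceil(a / b) for non-negative a and positive b.
for a in range(0, 50):
    for b in (3, 4, 7):
        assert -(a // -b) == math.ceil(a / b)
```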
{
"source": "JoelAtDeluxe/service-checker",
"score": 3
} |
#### File: service-checker/ServiceChecker/dnslookup.py
```python
import aiodns
import asyncio
import sys
from collections import namedtuple
from typing import List
from aiodns.error import DNSError
from urllib.parse import urlparse
Service = namedtuple("Service", ['host', 'port'])
async def _lookup(domain, resolver, lookup_type='SRV'):
try:
result = await resolver.query(domain, lookup_type)
except DNSError as e:
if e.args[0] == 4:
return LookupStatus(success=False, reason=f"Cannot find domain: {domain}")
else:
return LookupStatus(success=False, reason=f"Unexpected DNS error: {e}")
except Exception as e:
return LookupStatus(success=False, reason=f"Unexpected error:{sys.exc_info()[0]} -> {e}")
else:
return LookupStatus(success=True, services=[Service(x.host, x.port) for x in result])
async def resolve_service(service, dns_resolver):
"""
resolve_service is the "public" interface to looking up services. it splits the domain, does the srv lookup,
then generates the service domain name for the looked up service. If the lookup fails for some reason, then
the a blank service is returned (i.e. "")
"""
prefix, selected_domain = split_domain(service)
lookup_result = await _lookup(selected_domain, dns_resolver)
if lookup_result.success:
selected_domain = [f"{prefix}{svc.host}:{svc.port}" for svc in lookup_result.services]
else:
selected_domain = []
return selected_domain
def split_domain(addr):
parsed = urlparse(addr)
if parsed.scheme != '':
proto = f"{parsed.scheme}://"
domain = parsed.hostname
else:
# when no scheme is supplied, domain is in the path (see RFC 1808)
idx = parsed.path.find('/')
domain = parsed.path if idx == -1 else parsed.path[:idx]
proto = ""
return proto, domain
class LookupStatus(object):
def __init__(self, success=None, reason=None, services=None):
self.success:bool = success
self.reason:str = reason
self.services:List[Service] = services
``` |
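A hedged usage sketch of `resolve_service`. The SRV name is a placeholder, the import path follows the `ServiceChecker/` layout above, and `aiodns` must be installed and able to reach a DNS server.
```python
# Hedged sketch; the SRV name is a placeholder.
import asyncio
import aiodns
from ServiceChecker.dnslookup import resolve_service

async def main():
    resolver = aiodns.DNSResolver()
    endpoints = await resolve_service('http://_sip._tcp.example.com', resolver)
    print(endpoints)  # e.g. ['http://host:5060', ...], or [] if the lookup failed

asyncio.run(main())
```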
{
"source": "JoelAtDeluxe/simplog",
"score": 3
} |
#### File: simplog/simplog/logger.py
```python
from functools import partial
from datetime import datetime
from typing import Callable, Any
def force_string(s: Any) -> str:
"""converts an Any value into a string by forcing string formatting"""
return f'{s}'
def escape_value(s: Any, to_str_func: Callable[[Any], str]=force_string, force_quotes:bool=False) -> str:
"""Performs successive steps to convert a regular string into one ready to be consumed by the logging function"""
stringified = to_str_func(s)
escape_funcs = [
escape_backslash,
escape_null,
escape_newline,
escape_tab,
escape_quotes,
lambda x: quote_string(x, force_quotes), # Maybe this should be done via partial... maybe passed in to pre-compile it then?
]
for step in escape_funcs:
stringified = step(stringified)
return stringified
def escape_backslash(s: str) -> str:
"""Replaces any \\ character with \\\\"""
return s.replace('\\', '\\\\')
def escape_newline(s: str) -> str:
"""Replaces each new line character (\\n) in the input with \\\\n"""
return s.replace('\n', '\\n') # Todo: replace with re.sub and factor in \r
def escape_tab(s: str) -> str:
"""Replaces each tab character (\\t) in the input with \\\\t"""
return s.replace('\t', '\\t')
def escape_null(s: str) -> str:
"""Replaces each null character (\\0) in the input with \\\\0"""
return s.replace("\0", "\\0")
def escape_quotes(s: str) -> str:
"""Replaces double quotes in the input string with either ' or \\".
Description:
Given a string, returns that string with double quotes escaped in one of two ways.
If the string contains single quotes, then \\" will be used to escape the double quotes.
Otherwise, a single quote (') will be used instead.
Examples:
>>> escape_quotes('one "two" three')
"one 'two' three"
>>> escape_quotes('"He said he didn\\'t know."')
'\\\\"He said he didn\\'t know.\\\\"'
"""
if "'" in s:
return s.replace('"', '\\"')
return s.replace('"', "'")
def quote_string(s: str, force:bool=False) -> str:
"""Sometimes wraps strings in double quotes, depending on the content and force parameter.
Description:
This function provides conditional wrapping of strings inside double quotes.
If the input string contains a space, OR force is set to True, then the input string will
always be quoted.
If the input string does not contain a space, AND force is set to False, then the input
string is returned unmodified.
Args:
s: The string that needs to be wrapped in quotes (maybe)
force (optional): Whether to force quotes around the string even if not needed. Defaults to False.
Examples:
>>> quote_string("nevermore", False)
'nevermore'
>>> quote_string("nevermore")
'nevermore'
>>> quote_string("nevermore", True)
'"nevermore"'
>>> quote_string("never more", False)
'"never more"'
Returns:
The string, maybe wrapped in double quotes
"""
return f'"{s}"' if force or ' ' in s else s
def make_logger(write_func:Callable[[str], None]=print, *, message_label="msg", level_label="level", time_label=None,
to_string_func=force_string, force_quote=False):
"""Generates a logging function with some predefined functionality.
Description:
A function to generate a semi-customizable logging function, specifically targeted for
microservices. The returned function contains basic logging info: Time of the event,
severity level, custom message, plus optional key/value pairs (provided as kwargs). The
general format is:
``<event><severity><provided key/value pairs><generic message>`` As a rough example:
2019-05-13T12:01:27.424242 level=info key1=value1 key2="value 2" msg="Just a test"
Args:
write_func (optional): a function used to "record" the logged output. By default, this is print
message_label (optional): What to call the value containing the generic log message. Defaults to 'msg'
level_label (optional): What to call the severity level field. Defaults to 'level'
time_label (optional): What to call the time/date field. Defaults to None, which means no label
to_string_func (optional): During logging, the function used to convert an input value to a string.
Defaults to force_string
force_quote (optional): If True, forces all values to be wrapped in double quotes. Defaults to False
Returns:
A callable function that takes a message, an optional severity level and any keyword args,
which can then be used to write out a log to the desired location
"""
time_label = '' if time_label is None else f'{time_label}='
esc = lambda m: escape_value(m, to_string_func, force_quote)
def log(message, level='info', **kwargs) -> None:
now = f'{time_label}{datetime.now().isoformat()}'
msg = f'{message_label}={esc(message)}'
lvl = f'{level_label}={esc(level)}'
v_fields = [] if kwargs == {} else (f'{k}={esc(v)}' for k, v in kwargs.items())
line = f'{now} {lvl} {" ".join(v_fields)}{" " if v_fields else ""}{msg}'
write_func(line)
return log
def refine_logger(logger, **kwargs):
"""Allows for the logging function to be amended with constant key/value pairs.
Description:
Returns back a function that will provide the logger function with pre-provided
kwargs values. Useful in situations where you may log the same field multiple times, such
as inside a particular function (e.g. inside=do_work) or when needing to identify a chain of
events through multiple levels (e.g. message_context=abc123)
Returns:
A modified function with pre-provided kwargs. Note that these args can be overwritten later,
but cannot be removed.
"""
return partial(logger, **kwargs)
``` |
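A short usage sketch for `make_logger` and `refine_logger` (import path assumed to be `simplog.logger`):
```python
from simplog.logger import make_logger, refine_logger  # assumed import path

log = make_logger()                    # writes via print() by default
log("service started", port=8080)
# <ISO timestamp> level=info port=8080 msg="service started"

request_log = refine_logger(log, request_id="abc123")
request_log("lookup failed", level="error", host="db-1")
# <ISO timestamp> level=error request_id=abc123 host=db-1 msg="lookup failed"
```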
{
"source": "joelau94/4705-NLP-Graders",
"score": 3
} |
#### File: Neural-Dep-Parsing/scripts/update_gradebook.py
```python
import csv
from collections import defaultdict
import os
import sys
def read_grades(filename):
fgrade = open(filename, 'r')
grades = defaultdict(int)
for line in fgrade:
entry = line.strip().split(',')
grades[entry[0]] = int(entry[13])
return grades
def write_gradebook(grades, in_file, out_file):
os.system('touch {}'.format(out_file))
grade_reader = csv.reader(open(in_file, 'r'))
grade_writer = csv.writer(open(out_file, 'w'))
for row in grade_reader:
if row[2] in grades:
row[11] = str(grades[row[2]])
grade_writer.writerow(row)
def main():
grades = read_grades(sys.argv[1]) # grades.csv
# gradebook.csv, new_gradebook.csv
write_gradebook(grades, sys.argv[2], sys.argv[3])
if __name__ == '__main__':
main()
``` |
{
"source": "joelau94/rumour2019-experiments",
"score": 3
} |
#### File: rumour2019-experiments/analysis/txt2json.py
```python
from collections import defaultdict
import json
import sys
def read_text(prediction_file, id_file):
ftext = open(prediction_file, 'r')
fid = open(id_file, 'r')
sdqc_ref = {}
sdqc_hyp = {}
veracity_ref = {}
veracity_hyp = {}
for line in ftext:
if line.strip() == '':
fid.readline()
continue
else:
line = line.strip().split('|||')
twid = fid.readline().strip()
if len(line) == 6:
if twid not in veracity_hyp or \
veracity_hyp[twid][1] < float(line[5].strip()):
veracity_ref[twid] = line[3].strip()
veracity_hyp[twid] = [line[4].strip(), float(line[5].strip())]
sdqc_ref[twid] = line[1].strip()
sdqc_hyp[twid] = line[2].strip()
elif len(line) == 3:
sdqc_ref[twid] = line[1].strip()
sdqc_hyp[twid] = line[2].strip()
return sdqc_ref, sdqc_hyp, veracity_ref, veracity_hyp
def write_answer(sdqc_hyp, veracity_hyp, answer_file):
ans = {
'subtaskaenglish': sdqc_hyp,
'subtaskbenglish': veracity_hyp
}
json.dump(ans, open(answer_file, 'w'))
def sdqc_confusion(ref, hyp):
matrix = defaultdict(lambda: defaultdict(int))
for k in ref.keys():
matrix[ref[k]][hyp[k]] += 1
total = len(ref.keys())
corr = matrix['support']['support'] + \
matrix['deny']['deny'] + \
matrix['query']['query'] + \
matrix['comment']['comment']
sys.stdout.write('Task A: Acc={}\n'.format(float(corr) / total))
sys.stdout.write('ref | hyp\tsupport\tdeny\tquery\tcomment\n')
for r in ['support', 'deny', 'query', 'comment']:
sys.stdout.write('{}\t{}\t{}\t{}\t{}\n'
.format(r,
matrix[r]['support'],
matrix[r]['deny'],
matrix[r]['query'],
matrix[r]['comment']))
def veracity_confusion(ref, hyp):
matrix = defaultdict(lambda: defaultdict(int))
for k in ref.keys():
matrix[ref[k]][hyp[k][0]] += 1
total = len(ref.keys())
corr = matrix['true']['true'] + \
matrix['false']['false'] + \
matrix['unverified']['unverified']
sys.stdout.write('Task B: Acc={}\n'.format(float(corr) / total))
sys.stdout.write('ref | hyp\ttrue\tfalse\tunverified\n')
for r in ['true', 'false', 'unverified']:
sys.stdout.write('{}\t{}\t{}\t{}\n'
.format(r,
matrix[r]['true'],
matrix[r]['false'],
matrix[r]['unverified']))
def main():
prediction_file, id_file, answer_file = sys.argv[1:4]
sdqc_ref, sdqc_hyp, veracity_ref, veracity_hyp = \
read_text(prediction_file, id_file)
sdqc_confusion(sdqc_ref, sdqc_hyp)
veracity_confusion(veracity_ref, veracity_hyp)
write_answer(sdqc_hyp, veracity_hyp, answer_file)
if __name__ == '__main__':
main()
``` |
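A toy illustration of the confusion-matrix helpers (the tweet IDs and labels below are made up, not RumourEval data; assumes the script is importable as `txt2json`):
```python
from txt2json import sdqc_confusion, veracity_confusion  # assumed import path

sdqc_ref = {"t1": "support", "t2": "deny", "t3": "comment"}
sdqc_hyp = {"t1": "support", "t2": "comment", "t3": "comment"}
sdqc_confusion(sdqc_ref, sdqc_hyp)      # prints Task A accuracy (2/3) and a 4x4 count table

ver_ref = {"t1": "true", "t2": "false"}
ver_hyp = {"t1": ["true", 0.9], "t2": ["true", 0.6]}  # [label, confidence], as read_text() builds
veracity_confusion(ver_ref, ver_hyp)    # prints Task B accuracy (1/2) and a 3x3 count table
```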
{
"source": "Joe-Laue/bread-tools",
"score": 3
} |
#### File: tools/data/feature_encode.py
```python
def one_hot(length, current):
"""
Standard one hot encoding.
>>> one_hot(length=3,current=1)
[0, 1, 0]
"""
assert length > current
assert current > -1
code = [0] * length
code[current] = 1
return code
```
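A small usage sketch for `one_hot` (labels are arbitrary; import path assumed):
```python
from tools.data.feature_encode import one_hot  # assumed import path

labels = ["red", "green", "blue", "green"]
vocab = sorted(set(labels))                              # ['blue', 'green', 'red']
encoded = [one_hot(len(vocab), vocab.index(x)) for x in labels]
# [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 0]]
```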
#### File: tools/file/path.py
```python
import pathlib
import os
from codecs import BOM_UTF8, BOM_LE, BOM_BE
_Parent = pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath
BOM_CODE = {
BOM_UTF8: 'utf_8',
BOM_LE: 'utf_16_le',
BOM_BE: 'utf_16_be',
}
DEFAULT_CODES = 'utf8', 'gbk', 'utf16', 'big5'
class Path(_Parent):
__slots__ = ()
def __new__(cls,
path='.',
*args,
**kwargs):
if isinstance(path, str):
# Support the beginning of user directory.
if path.startswith('~'):
path = os.path.expanduser(path)
# Support environment variable escape.
elif path.startswith('%'):
path = os.path.expandvars(path)
return super().__new__(cls, path, *args, **kwargs)
def read(self,
*args,
**kwargs):
"""
Read the file with the specified parameters.
>>> file=Path('__cases/example.txt')
>>> file.read()[0]
'1'
"""
        with self.open(*args, **kwargs) as fn:
return fn.read()
def ensure(self,
parents=True):
"""
Make sure the directory exists. If the directory does not exist, create it directly.
>>> file=Path('__cases/tmp/')
>>> file.ensure()
True
"""
if not self.exists():
return self.mkdir(parents=parents)
else:
return True
@property
def text(self):
"""
Reads the file and returns a string.
>>> file=Path('__cases/example.txt')
        >>> file.text[0]
'1'
"""
rb = self.read('rb')
for k in BOM_CODE:
if k == rb[:len(k)]:
return rb[len(k):].decode(BOM_CODE[k])
for encoding in DEFAULT_CODES:
try:
return rb.decode(encoding)
            except UnicodeDecodeError:
pass
raise Exception('Decode error.')
@text.setter
def text(self,
text):
"""
Write text into file.
"""
self.write(text=text)
@property
def lines(self):
"""
Read file by line.
>>> file=Path('__cases/example.txt')
>>> len(file.lines) == 9
True
"""
return self.text.splitlines()
@lines.setter
def lines(self,
lines):
"""
Write file by lines.
"""
self.write(*lines)
def write(self,
*lines,
text=None,
data=None,
encoding='utf8',
parents=False):
"""
Write file, you can write by line, by text, or directly write data.
"""
if parents:
self.parent.ensure()
if lines:
text = "\n".join(lines)
if text:
data = text.encode(encoding)
if data:
            with self.open('wb') as fn:
fn.write(data)
@property
def l_suffix(self):
"""
Returns the lowercase extension.
>>> file = Path('__cases/example.txt')
>>> file.l_suffix
'.txt'
"""
return self.suffix.lower()
@property
def p_name(self):
"""
Returns a file name without an extension.
>>> file = Path('__cases/example.txt')
>>> file.p_name
'example'
"""
return self.with_suffix("").name
def rm_tree(self):
"""
Delete entire directory.
"""
import shutil
shutil.rmtree(str(self))
```
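A brief usage sketch for the `Path` subclass above (file names are hypothetical; import path assumed):
```python
from tools.file.path import Path  # assumed import path

Path("~/project/output/").ensure()    # '~' expands; directory is created if missing
cfg = Path("~/project/config.txt")
cfg.text = "retries=3\ntimeout=10"    # write UTF-8 text
print(cfg.lines)                      # ['retries=3', 'timeout=10']
print(cfg.p_name, cfg.l_suffix)       # config .txt
```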
#### File: tools/text/string.py
```python
import re
class String(object):
@staticmethod
def is_number(num):
"""
Check whether it is a number.
>>> String.is_number(num='13569')
True
>>> String.is_number(num='126B1')
False
"""
pattern = re.compile(r'^[-+]?[0-9]*\.?[0-9]+$')
result = pattern.match(num)
if result:
return True
else:
return False
@staticmethod
def get_lines_from_text(text):
"""
Text segmentation based on standard.
>>> String.get_lines_from_text('1。2!3?')
['1', '2', '3', '']
"""
return re.split(r'。|\?|!|?|!', text)
``` |
{
"source": "joelawm/ZFS-Monitoring-Tools",
"score": 3
} |
#### File: ZFS-Monitoring-Tools/Nodes/testing.py
```python
import socket
from datetime import datetime
net = input("Enter the IP address: ")
net1 = net.split('.')
a = '.'
net2 = net1[0] + a + net1[1] + a + net1[2] + a
st1 = int(input("Enter the Starting Number: "))
en1 = int(input("Enter the Last Number: "))
en1 = en1 + 1
t1 = datetime.now()
def scan(addr, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket.setdefaulttimeout(1)
    result = s.connect_ex((addr, port))
    s.close()  # close the socket so repeated scans do not leak file descriptors
    if result == 0:
        return 1
    else:
        return 0
def run1():
# these are a mix of Linux, Mac, and Windows Ports to search through
port = [20, 21, 22, 23, 25, 80, 111, 443, 631, 993, 995, 135, 137, 138, 139, 445, 548]
for ip in range(st1,en1):
addr = net2 + str(ip)
for i in port:
if (scan(addr, i)):
print (str(addr) , "is live at " + str(i))
else:
print (str(addr) , "is not alive and very much dead at " + str(i))
run1()
t2 = datetime.now()
total = t2 - t1
print ("Scanning completed in: " , total)
``` |
{
"source": "joelazar/practice_django",
"score": 2
} |
#### File: practice_django/catalog/tables.py
```python
import django_tables2 as tables
from .models import Presentation
class CheckBoxColumnWithName(tables.CheckBoxColumn):
@property
def header(self):
return self.verbose_name
class PresentationTable(tables.Table):
pagination_style = 'range'
template = '<a href="/catalog/modify/{{record.presentation_id}}" class="btn btn-default">Modify</a>'
modify = tables.TemplateColumn(template, orderable=False)
class Meta:
model = Presentation
``` |
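A hedged sketch of wiring `PresentationTable` into a view (the view and template names are hypothetical, not taken from this repo):
```python
from django.shortcuts import render
from django_tables2 import RequestConfig

from catalog.models import Presentation
from catalog.tables import PresentationTable

def presentation_list(request):  # hypothetical view
    table = PresentationTable(Presentation.objects.all())
    RequestConfig(request, paginate={"per_page": 25}).configure(table)
    return render(request, "catalog/presentation_list.html", {"table": table})
```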
{
"source": "joelb123/alphabetsoup",
"score": 2
} |
#### File: alphabetsoup/tests/__init__.py
```python
import contextlib
import os
from pathlib import Path
@contextlib.contextmanager
def working_directory(path):
"""Change working directory in context."""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
``` |
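A quick usage sketch for the `working_directory` helper (directory contents are hypothetical):
```python
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    with working_directory(tmp):                  # helper defined above
        Path("scratch.txt").write_text("hello")   # created inside tmp
    # back in the original working directory here, even if an exception was raised
```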
{
"source": "joelb123/azulejo",
"score": 3
} |
#### File: azulejo/azulejo/analysis.py
```python
import sys
from pathlib import Path
# third-party imports
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# first-party imports
from loguru import logger
# module imports
from . import cli
from . import click_loguru
from .common import cluster_set_name
from .common import homo_degree_dist_filename
# Global constants
NAME = "azulejo"
STATFILE_SUFFIX = f"-{NAME}_stats.tsv"
ANYFILE_SUFFIX = f"-{NAME}_ids-any.tsv"
ALLFILE_SUFFIX = f"-{NAME}_ids-all.tsv"
IDENT_LOG_MIN = -3
IDENT_LOG_MAX = 0
EPSILON = 0.000001
FILETYPE = "png"
MAX_BINS = 10
def make_histogram(dist, name, log10=False):
"""Do histogram plot with kernel density estimate."""
dist = dist[dist < 10]
mean = dist.mean()
if log10:
dist = np.log10(dist)
# if len(dist) < MAX_BINS:
# bins = len(dist)
# else:
# bins = MAX_BINS
sns.distplot(
dist,
bins=None,
rug=False,
kde=False,
norm_hist=True,
rug_kws={"color": "b"},
kde_kws={"color": "k", "linewidth": 1, "label": "KDE"},
hist_kws={
"histtype": "step",
"linewidth": 2,
"alpha": 1,
"color": "b",
#'range':(0,20)
},
)
plt.title(f"{name} histogram of {len(dist):d} values, mean={mean:.1f}")
if log10:
plt.xlabel("log " + name)
else:
plt.xlabel(name)
plt.ylabel("Frequency")
# for ext in PLOT_TYPES:
# plt.savefig(LOG_PATH / ('%s-histogram.' % (name.rstrip('%')) + ext),
# bbox_inches='tight')
plt.show()
# plt.close('all')
def tick_function(tick_x):
"""Compute ticks."""
tick_x = tick_x * 3.0 - 3
vals = [
(f"{v:f}").rstrip("0").rstrip(".")
for v in (1.0 - 10 ** tick_x) * 100.0
]
ticks = [f"{v}%" for v in vals]
return ticks
def log_deriv(xvals, yvals):
"""Compute the logarithmic derivative."""
log_x = -1.0 * np.log10(xvals + EPSILON)
log_y = np.log10(yvals)
return np.gradient(log_y) / np.gradient(log_x)
@cli.command()
@click_loguru.init_logger(logfile=False)
@click.argument("instemlist")
def analyze_clusters(
dirname, instemlist, label, reference=None, on_id=None, match_type=None
):
"""Statistics of clustering as function of identity."""
if match_type is None:
matches = ["all", "any"]
else:
matches = [match_type]
uniques = {}
divergence = {}
dirpath = Path(dirname)
div_dist = {"all": {"ref": 0.0}, "any": {"ref": 0.0}}
print("ref=", reference)
for stem in instemlist:
print("stem=", stem)
paths = {
"all": dirpath / (stem + ALLFILE_SUFFIX),
"any": dirpath / (stem + ANYFILE_SUFFIX),
"stat": dirpath / (stem + STATFILE_SUFFIX),
}
stats = pd.read_csv(paths["stat"], sep="\t", index_col=0)
uniques[stem] = stats["unique_seqs"].iloc[0]
divergence[stem] = stats["divergence"]
if on_id is None:
div_dist["all"][stem] = log_deriv(
divergence[stem], stats["clusters"]
)
div_dist["any"][stem] = None
if stem == reference:
div_dist["all"]["ref"] = div_dist["all"][stem]
div_dist["any"]["ref"] = None
else:
for match in ["any", "all"]:
data = pd.read_csv(paths[match], sep="\t", index_col=0)
try:
div_dist[match][stem] = log_deriv(
divergence[stem], data.loc[on_id]
)
except KeyError: # this label wasn't found
div_dist[match][stem] = None
if stem == reference:
div_dist[match]["ref"] = div_dist[match][stem]
#
# Make the plots
#
plt.style.use("seaborn-whitegrid")
axis_dict = {}
fig, axes = plt.subplots(len(matches))
try:
        for i, axis in enumerate(axes):
axis_dict[matches[i]] = axis
loweraxis = axes[1]
except TypeError:
axis_dict[matches[0]] = axes
loweraxis = axes
for stem in instemlist:
for match in matches:
if div_dist[match][stem] is None:
continue
axis_dict[match].plot(
divergence[stem],
div_dist[match][stem] - div_dist[match]["ref"],
label=f"{stem.replace(label + '.', '')}",
)
# uniques[stem]/1000.))
if reference is None:
if on_id is None:
title = f"{label} Divergence Distribution"
outfilestem = f"{label}_divergence_dist."
else:
title = f'{label} Divergence Distribution on "{on_id}"'
outfilestem = f"{label}_divergence_dist_{on_id}."
else:
if on_id is None:
title = (
f"{label}_Differential Divergence Distribution vs. {reference}"
)
outfilestem = f"{label}_divergence_dist_vs{reference}."
else:
title = (
f'{label} Differential Divergence Distribution on "{on_id}"'
f" vs. {reference}"
)
outfilestem = f"{label}_divergence_dist_on_{on_id}_vs_ref."
if reference is None:
fig.text(
0.02,
0.5,
"Logarithmic Derivative on Clusters",
ha="center",
va="center",
rotation="vertical",
)
else:
fig.text(
0.02,
0.5,
"Logarithmic Derivative Difference on Clusters",
ha="center",
va="center",
rotation="vertical",
)
if len(matches) == 2:
fig.text(0.5, 0.47, "All in Cluster", ha="center", va="center")
fig.text(0.5, 0.89, "Any in Cluster", ha="center", va="center")
else:
fig.text(
0.5,
0.91,
f"{matches[0].capitalize()} in Cluster",
ha="center",
va="center",
)
loweraxis.set(xlabel="Divergence on Sequence Identity")
loweraxis.legend(loc="upper left")
fig.suptitle(title)
plt.xscale("log")
limits = [0.001, 1.0]
new_tick_locations = np.array([0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0])
    # loweraxis.set_xlim(limits)
axis_dict["second"] = loweraxis.twiny()
axis_dict["second"].set_xlim(limits)
axis_dict["second"].set_xticks(new_tick_locations)
axis_dict["second"].set_xticklabels(tick_function(new_tick_locations))
axis_dict["second"].set_xlabel(" ")
# r'%Identity')
# plt.ylim([-0.002,0.002])
outfilename = outfilestem + f"{FILETYPE}"
print(f"saving plot to {outfilename}")
plt.savefig(dirpath / outfilename, dpi=200)
plt.show()
def do_cuts(obs, high, low, label):
"""Cut at high and low levels."""
if high > 0.0:
hicuts = obs[obs > high]
obs = obs[obs <= high]
        if len(hicuts) > 0:
hifilename = label + "_hicuts.tsv"
logger.info(
"%d observations dropped by high-side cutoff of %.2f written"
" to %s",
len(hicuts),
high,
hifilename,
)
logger.info(hicuts)
if low > 0.0:
        locuts = obs[obs < low]
obs = obs[obs >= low]
logger.info(
"%d observations dropped by low-side cutoff of %.2f",
len(locuts),
low,
)
        if len(locuts) > 0:
lofilename = label + "_locuts.tsv"
logger.info(
"%d observations dropped by low-side cutoff of %.2f written"
" to %s",
len(locuts),
low,
lofilename,
)
logger.info(locuts)
return obs
@cli.command()
@click_loguru.init_logger(logfile=False)
@click.option(
"--hi_cutoff",
default=2.0,
show_default=True,
help="Disregard above this value.",
)
@click.option(
"--lo_cutoff",
default=0.0,
show_default=True,
help="Disregard below this value.",
)
@click.argument("cluster_size")
@click.argument("combinedfile")
def outlier_length_dist(hi_cutoff, lo_cutoff, cluster_size, combinedfile):
"""Plot length distribution of outliers in clusters."""
cluster_size = int(cluster_size)
if cluster_size <= 0:
logger.error("Positive cluster size must be specified")
sys.exit(1)
clusters = pd.read_csv(combinedfile, sep="\t", index_col=0)
norm_lengths = []
for unused_cluster_id, cluster in clusters.groupby(
"hom.cluster"
): # pylint: disable=unused-variable
if cluster["siz"].iloc[0] != cluster_size:
# not the right size
continue
if len(set(cluster["sub"])) != 2:
# not just two subclusters
continue
if 1 not in set(cluster["sub_siz"]):
# no singleton cluster
continue
singleton = cluster[cluster["sub_siz"] == 1]
length = singleton["norm"].iloc[0]
norm_lengths.append(length)
norm_lengths = np.array(norm_lengths)
norm_lengths = do_cuts(norm_lengths, hi_cutoff, lo_cutoff, "len")
logger.info(
"%d singleton outliers in clusters of size %d",
len(norm_lengths),
cluster_size,
)
logger.info("min:\t%.3f", min(norm_lengths))
logger.info("maxes:\t%.3f", max(norm_lengths))
logger.info("mean: %.3f", norm_lengths.mean())
axis = sns.distplot(norm_lengths, bins=100, kde_kws={"label": "KDE"})
axis.set_xlabel("Normalized Length of Singleton")
plt.title(
"Length distribution of %d singleton subclusters" % (len(norm_lengths))
)
outfilename = f"norm_len_dist.{FILETYPE}"
logger.info("saving plot to %s", outfilename)
# plt.yscale('log')
plt.savefig(outfilename, dpi=200)
plt.show()
@cli.command()
@click_loguru.init_logger(logfile=False)
@click.option(
"--hi_cutoff",
default=0.0,
show_default=True,
help="Disregard above this value.",
)
@click.option(
"--lo_cutoff",
default=0.0,
show_default=True,
help="Disregard below this value.",
)
@click.argument("cluster_size")
@click.argument("combinedfile")
def length_std_dist(cluster_size, hi_cutoff, lo_cutoff, combinedfile):
"""Plot length distribution of cluster singletons."""
cluster_size = int(cluster_size)
if cluster_size <= 0:
logger.error("Positive cluster size must be specified")
sys.exit(1)
clusters = pd.read_csv(combinedfile, sep="\t", index_col=0)
stds = []
for unused_cluster_id, cluster in clusters.groupby(
"hom.cluster"
): # pylint: disable=unused-variable
if cluster["siz"].iloc[0] != cluster_size:
# not the right size
continue
if len(set(cluster["sub"])) != 1:
# Only one subcluster
continue
val = cluster["std"].iloc[0]
stds.append(val)
stds = np.array(stds)
pct_zeros = len(stds[stds == 0.0]) * 100 / len(stds)
stds = do_cuts(stds, hi_cutoff, lo_cutoff, "stds")
logger.info(
"%d single-subgroup clusters of size %d", len(stds), cluster_size
)
logger.info("%.1f %% zeroes, max is %.2f", pct_zeros, max(stds))
logger.info("mean is %.3f", stds.mean())
logbins = np.logspace(0.7, 3, 100)
axis = sns.distplot(stds, bins=logbins, kde=False)
axis.set_xlabel("Standard Deviation of Single-Subgroup Clusters")
title = "Length Standard Deviation distribution of %d clusters" % len(stds)
plt.title(title)
outfilename = f"std_dist.{FILETYPE}"
logger.info("saving plot to %s", outfilename)
plt.yscale("log")
plt.xscale("log")
plt.savefig(outfilename, dpi=200)
plt.show()
@cli.command()
@click_loguru.init_logger(logfile=False)
@click.option(
"--hi_cutoff",
default=1000,
show_default=True,
help="Disregard above this value.",
)
@click.option(
"--lo_cutoff",
default=1,
show_default=True,
help="Disregard below this value.",
)
@click.option(
"--identity",
"-i",
default=0.0,
help="Minimum sequence ID (0-1). [default: lowest]",
)
@click.argument("setsize", type=click.IntRange(2, 100))
@click.argument("setname")
def plot_degree_dists(identity, hi_cutoff, lo_cutoff, setname, setsize):
"""Plot homology and synteny degree distributions."""
set_path = Path(setname)
homo_cluster_name = cluster_set_name(setname, identity)
homo_degree = pd.read_csv(
set_path / homo_degree_dist_filename(homo_cluster_name),
index_col=0,
sep="\t",
)
homo_degree.index.name = "size"
homo_degree = homo_degree.rename(
columns={"clusters": "clusts", "pct_total": "%clusts"}
)
homo_clusts = sum(homo_degree["clusts"])
homo_genes = sum(homo_degree["clusts"] * homo_degree.index)
homo_degree["%genes"] = (
homo_degree["clusts"] * homo_degree.index * 100.0 / homo_genes
)
synteny_degree_path = set_path / "dagchainer" / "clusters-sizedist.tsv"
synteny_degree = pd.read_csv(synteny_degree_path, index_col=0, sep="\t")
synteny_clusts = sum(synteny_degree["clusts"])
synteny_genes = sum(synteny_degree["clusts"] * synteny_degree.index)
logger.info(" method clusters genes")
logger.info(f"homology:\t{homo_clusts}\t{homo_genes}")
logger.info(f" synteny:\t{synteny_clusts}\t{synteny_genes}")
# Make plot
plt.plot(homo_degree.index, homo_degree["%genes"], label="homology")
plt.plot(synteny_degree.index, synteny_degree["%genes"], label="synteny")
plt.style.use("seaborn-whitegrid")
plt.xlabel("Cluster Size")
plt.ylabel("% of Genes in Cluster")
plt.title(
f"Cluster size distribution of {homo_genes} genes in {setsize}"
f" {setname} genomes"
)
outfilename = f"cluster_size_dist.{FILETYPE}"
logger.info(f"saving plot to {outfilename}")
plt.xlim([lo_cutoff, hi_cutoff])
plt.legend()
plt.yscale("log")
plt.xscale("log", basex=setsize)
plt.savefig(outfilename, dpi=200)
plt.show()
```
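For reference, `log_deriv` above is the slope on the log-log axes used in the divergence plots; a standalone numeric sketch with made-up values:
```python
import numpy as np

EPSILON = 0.000001
xvals = np.array([0.001, 0.01, 0.1, 1.0])        # divergence values (made up)
yvals = np.array([1000.0, 400.0, 150.0, 60.0])   # e.g. cluster counts (made up)

# Same computation as log_deriv(): d(log10 y) / d(-log10(x + EPSILON))
slope = np.gradient(np.log10(yvals)) / np.gradient(-1.0 * np.log10(xvals + EPSILON))
print(slope)
```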
#### File: azulejo/azulejo/homology.py
```python
import fcntl
import json
import os
import shutil
import sys
from pathlib import Path
# third-party imports
import dask.bag as db
import pandas as pd
from dask.diagnostics import ProgressBar
# first-party imports
import sh
# module imports
from .common import CLUSTER_FILETYPE
from .common import CLUSTERS_FILE
from .common import EXTERNAL_CLUSTERS_FILE
from .common import FRAGMENTS_FILE
from .common import HOMOLOGY_FILE
from .common import PROTEINS_FILE
from .common import PROTEOMES_FILE
from .common import PROTEOMOLOGY_FILE
from .common import SEARCH_PATHS
from .common import SPINNER_UPDATE_PERIOD
from .common import TrimmableMemoryMap
from .common import calculate_adjacency_group
from .common import dotpath_to_path
from .common import group_key_filename
from .common import logger
from .common import read_tsv_or_parquet
from .common import sort_proteome_frame
from .common import write_tsv_or_parquet
from .core import homology_cluster
from .mailboxes import DataMailboxes
# global constants
HOMOLOGY_COLS = ["hom.cluster", "hom.cl_size"]
def cluster_build_trees(
identity, set_name, cluster_file=None, click_loguru=None
):
"""Calculate homology clusters, MSAs, trees."""
options = click_loguru.get_global_options()
user_options = click_loguru.get_user_global_options()
parallel = user_options["parallel"]
set_path = Path(set_name)
# read and possibly update proteomes
proteomes_path = set_path / PROTEOMES_FILE
proteomes_in = read_tsv_or_parquet(proteomes_path)
proteomes = sort_proteome_frame(proteomes_in)
if not proteomes_in.equals(proteomes):
logger.info("proteomes sort order changed, writing new proteomes file")
write_tsv_or_parquet(proteomes, proteomes_path)
n_proteomes = len(proteomes)
# read and update fragment ID's
frags = read_tsv_or_parquet(set_path / FRAGMENTS_FILE)
frags["frag.idx"] = pd.array(frags.index, dtype=pd.UInt32Dtype())
frag_frames = {}
for dotpath, subframe in frags.groupby(by=["path"]):
frag_frames[dotpath] = subframe.copy().set_index("frag.orig_id")
arg_list = []
concat_fasta_path = set_path / "proteins.fa"
for i, row in proteomes.iterrows():
arg_list.append((row, concat_fasta_path, frag_frames[row["path"]]))
file_idx = {}
stem_dict = {}
for i, row in proteomes.iterrows():
stem = row["path"]
file_idx[stem] = i
stem_dict[i] = stem
if cluster_file is None:
if concat_fasta_path.exists():
concat_fasta_path.unlink()
if not options.quiet:
logger.info(
f"Renaming fragments and concatenating sequences for {len(arg_list)}"
" proteomes:"
)
for args in arg_list:
write_protein_fasta(args)
del arg_list
cwd = Path.cwd()
os.chdir(set_path)
n_clusters, run_stats, cluster_hist = homology_cluster(
"proteins.fa",
identity,
write_ids=True,
delete=False,
cluster_stats=False,
outname="homology",
click_loguru=click_loguru,
)
log_path = Path("homology.log")
log_dir_path = Path("logs")
log_dir_path.mkdir(exist_ok=True)
shutil.copy2(log_path, "logs/homology.log")
log_path.unlink()
os.chdir(cwd)
logger.info(f"Number of clusters: {n_clusters}")
del cluster_hist
del run_stats
concat_fasta_path.unlink()
else: # use pre-existing clusters
homology_path = set_path / "homology"
if homology_path.exists():
shutil.rmtree(homology_path)
inclusts = pd.read_csv(cluster_file, sep="\t")
for col in ["cluster_id", "members"]:
if col not in inclusts.columns:
logger.error(
f'Column named "{col}" not found in external homology cluster file'
)
sys.exit(1)
cluster_counts = inclusts["cluster_id"].value_counts()
cluster_map = pd.Series(
range(len(cluster_counts)), index=cluster_counts.index
)
cluster_ids = inclusts["cluster_id"].map(cluster_map)
cluster_sizes = inclusts["cluster_id"].map(cluster_counts)
predef_clusters = pd.DataFrame(
{
"cluster_id": cluster_ids,
"size": cluster_sizes,
"members": inclusts["members"],
}
)
predef_clusters.sort_values(by=["cluster_id"], inplace=True)
predef_clusters.drop(
predef_clusters[predef_clusters["size"] < 2].index,
axis=0,
inplace=True,
)
n_clusters = predef_clusters["cluster_id"].max() + 1
predef_clusters.index = range(len(predef_clusters))
external_cluster_path = set_path / EXTERNAL_CLUSTERS_FILE
logger.info(
f"Writing {external_cluster_path} with {len(predef_clusters)} genes"
+ f" in {n_clusters} homology clusters"
)
predef_clusters.to_csv(external_cluster_path, sep="\t")
del cluster_counts, cluster_map, cluster_sizes, inclusts
homology_path = set_path / "homology"
homology_path.mkdir(exist_ok=True)
if not options.quiet:
logger.info(
f"Creating cluster files for for {len(arg_list)}" " proteomes:"
)
proteome_no = 0
for args in arg_list:
logger.info(f"doing proteome {proteome_no}")
write_protein_fasta(
args, fasta_dir=homology_path, clusters=predef_clusters
)
proteome_no += 1
del arg_list
logger.info(
"Checking that all cluster files are present (gene-id mismatch)"
)
missing_files = False
for i in range(n_clusters):
if not (homology_path / f"{i}.fa").exists():
logger.error(f"External cluster {i} is missing.")
missing_files = True
if missing_files:
sys.exit(1)
#
# Write homology info back into proteomes
#
click_loguru.elapsed_time("Alignment/tree-building")
hom_mb = DataMailboxes(
n_boxes=n_proteomes,
mb_dir_path=(set_path / "mailboxes" / "clusters2proteomes"),
file_extension="tsv",
)
hom_mb.write_tsv_headers(HOMOLOGY_COLS)
cluster_paths = [
set_path / "homology" / f"{i}.fa" for i in range(n_clusters)
]
bag = db.from_sequence(cluster_paths)
cluster_stats = []
if not options.quiet:
logger.info(
f"Calculating MSAs and trees for {len(cluster_paths)} homology"
" clusters:"
)
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
cluster_stats = bag.map(
parse_cluster,
file_dict=file_idx,
file_writer=hom_mb.locked_open_for_write,
)
else:
for clust_fasta in cluster_paths:
cluster_stats.append(
parse_cluster(
clust_fasta,
file_dict=file_idx,
file_writer=hom_mb.locked_open_for_write,
)
)
n_clust_genes = 0
clusters_dict = {}
for cluster_id, cluster_dict in cluster_stats:
n_clust_genes += cluster_dict["size"]
clusters_dict[cluster_id] = cluster_dict
del cluster_stats
clusters = pd.DataFrame.from_dict(clusters_dict).transpose()
del clusters_dict
clusters.sort_index(inplace=True)
grouping_dict = {}
for i in range(n_proteomes): # keep numbering of single-file clusters
grouping_dict[f"[{i}]"] = i
grouping_dict[str(list(range(n_proteomes)))] = 0
for n_members, subframe in clusters.groupby(["n_memb"]):
if n_members == 1:
continue
if n_members == n_proteomes:
continue
member_counts = pd.DataFrame(subframe["n_members"].value_counts())
member_counts["key"] = range(len(member_counts))
for newcol in range(n_members):
member_counts[f"memb{newcol}"] = ""
for member_string, row in member_counts.iterrows():
grouping_dict[member_string] = row["key"]
member_list = json.loads(member_string)
for col in range(n_members):
member_counts.loc[member_string, f"memb{col}"] = stem_dict[
member_list[col]
]
member_counts = member_counts.set_index("key")
write_tsv_or_parquet(
member_counts, set_path / group_key_filename(n_members)
)
clusters["n_members"] = clusters["n_members"].map(grouping_dict)
clusters = clusters.rename(columns={"n_members": "group_key"})
n_adj = clusters["n_adj"].sum()
adj_pct = n_adj * 100.0 / n_clust_genes
n_adj_clust = sum(clusters["adj_groups"] != 0)
adj_clust_pct = n_adj_clust * 100.0 / len(clusters)
logger.info(
f"{n_adj} ({adj_pct:.1f}%) out of {n_clust_genes}"
+ " clustered genes are adjacent"
)
logger.info(
f"{n_adj_clust} ({adj_clust_pct:.1f}%) out of "
+ f"{len(clusters)} clusters contain adjacency"
)
write_tsv_or_parquet(clusters, set_path / CLUSTERS_FILE)
# join homology cluster info to proteome info
click_loguru.elapsed_time("Joining")
arg_list = []
for i, row in proteomes.iterrows():
arg_list.append(
(
i,
dotpath_to_path(row["path"]),
)
)
bag = db.from_sequence(arg_list)
hom_stats = []
if not options.quiet:
logger.info(f"Joining homology info to {n_proteomes} proteomes:")
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
hom_stats = bag.map(
join_homology_to_proteome, mailbox_reader=hom_mb.open_then_delete
).compute()
else:
for args in arg_list:
hom_stats.append(
join_homology_to_proteome(
args, mailbox_reader=hom_mb.open_then_delete
)
)
hom_mb.delete()
hom_frame = pd.DataFrame.from_dict(hom_stats)
hom_frame.set_index(["prot.idx"], inplace=True)
hom_frame.sort_index(inplace=True)
logger.info("Homology cluster coverage:")
with pd.option_context(
"display.max_rows", None, "display.float_format", "{:,.2f}%".format
):
logger.info(hom_frame)
proteomes = pd.concat([proteomes, hom_frame], axis=1)
write_tsv_or_parquet(
proteomes, set_path / PROTEOMOLOGY_FILE, float_format="%5.2f"
)
click_loguru.elapsed_time(None)
def write_protein_fasta(args, clusters=None, fasta_dir=None):
"""Read peptide sequences from info file and write them out."""
row, concat_fasta_path, frags = args
dotpath = row["path"]
phylogeny_dict = {"prot.idx": row.name, "path": dotpath}
for phy_prop in [name for name in row.index if name.startswith("phy.")]:
phylogeny_dict[phy_prop] = row[phy_prop]
inpath = dotpath_to_path(dotpath)
prot_info = read_tsv_or_parquet(inpath / PROTEINS_FILE)
prot_info["frag.idx"] = prot_info["frag.id"].map(
lambda oid: frags.loc[oid]["frag.idx"]
)
prot_info["frag.is_plas"] = prot_info["frag.id"].map(
lambda oid: frags.loc[oid]["frag.is_plas"]
)
prot_info["frag.is_scaf"] = prot_info["frag.id"].map(
lambda oid: frags.loc[oid]["frag.is_scaf"]
)
prot_info["frag.is_chr"] = prot_info["frag.id"].map(
lambda oid: frags.loc[oid]["frag.is_chr"]
)
prot_info["frag.id"] = prot_info["frag.id"].map(
lambda oid: frags.loc[oid]["frag.id"]
)
# Write out updated protein info
write_tsv_or_parquet(prot_info, inpath / HOMOLOGY_FILE)
# include phylogeny info in per-sequence info
for prop in phylogeny_dict:
prot_info[prop] = phylogeny_dict[prop]
# write concatenated sequence info
if clusters is None:
fasta_path = concat_fasta_path
info_to_fasta(None, fasta_path, append=True, infoobj=prot_info)
else:
for cluster_id, subframe in clusters.groupby(by=["cluster_id"]):
cluster_info = prot_info[prot_info.index.isin(subframe["members"])]
fasta_path = fasta_dir / f"{cluster_id}.fa"
info_to_fasta(None, fasta_path, append=True, infoobj=cluster_info)
def parse_cluster(
fasta_path, file_dict=None, file_writer=None, neighbor_joining=False
):
"""Parse cluster FASTA headers to create cluster table.."""
cluster_id = fasta_path.name[:-3]
outdir = fasta_path.parent
clusters = parse_cluster_fasta(fasta_path)
if len(clusters) < 2:
# fasta_path.unlink()
logger.error(f"Singleton Cluster {cluster_id} is size {len(clusters)}")
cluster_dict = {
"size": len(clusters),
"n_memb": None,
"n_members": None,
"n_adj": None,
"adj_groups": None,
}
        return int(cluster_id), cluster_dict
# calculate MSA and return guide tree
muscle_args = [
"-in",
f"{outdir}/{cluster_id}.fa",
"-out",
f"{outdir}/{cluster_id}.faa",
"-diags",
"-sv",
"-maxiters",
"2",
"-quiet",
"-distance1",
"kmer20_4",
]
if len(clusters) >= 4:
muscle_args += [
"-tree2",
f"{outdir}/{cluster_id}.nwk",
]
if neighbor_joining:
muscle_args += ["-cluster2", "neighborjoining"] # adds 20%
try:
muscle = sh.Command("muscle", search_paths=SEARCH_PATHS)
except sh.CommandNotFound:
logger.error("muscle must be installed first.")
sys.exit(1)
muscle(muscle_args)
# fasta_path.unlink()
clusters["prot.idx"] = clusters["path"].map(file_dict)
clusters.sort_values(by=["prot.idx", "frag.id", "frag.pos"], inplace=True)
n_adj, adj_gr_count, unused_adj_group = calculate_adjacency_group(
clusters["frag.pos"], clusters["frag.idx"]
)
idx_values = clusters["prot.idx"].value_counts()
idx_list = list(idx_values.index)
idx_list.sort()
write_tsv_or_parquet(clusters, outdir / f"{cluster_id}.{CLUSTER_FILETYPE}")
cluster_dict = {
"size": len(clusters),
"n_memb": len(idx_values),
"n_members": str(idx_list),
"n_adj": n_adj,
"adj_groups": adj_gr_count,
}
for group_id, subframe in clusters.groupby(by=["prot.idx"]):
proteome_frame = subframe.copy()
proteome_frame["hom.cluster"] = cluster_id
proteome_frame["hom.cl_size"] = len(idx_values)
proteome_frame.drop(
proteome_frame.columns.drop(HOMOLOGY_COLS), # drop EXCEPT these
axis=1,
inplace=True,
)
with file_writer(group_id) as file_handle:
proteome_frame.to_csv(file_handle, header=False, sep="\t")
return int(cluster_id), cluster_dict
def parse_cluster_fasta(filepath, trim_dict=True):
"""Return FASTA headers as a dictionary of properties."""
next_pos = 0
properties_dict = {}
memory_map = TrimmableMemoryMap(filepath)
with memory_map.map() as mem_map:
size = memory_map.size
next_pos = mem_map.find(b">", next_pos)
while next_pos != -1 and next_pos < size:
eol_pos = mem_map.find(b"\n", next_pos)
if eol_pos == -1:
break
space_pos = mem_map.find(b" ", next_pos + 1, eol_pos)
if space_pos == -1:
raise ValueError(
f"Header format is bad in {filepath} header"
f" {len(properties_dict)+1}"
)
cluster_id = mem_map[next_pos + 1 : space_pos].decode("utf-8")
payload = json.loads(mem_map[space_pos + 1 : eol_pos])
properties_dict[cluster_id] = payload
if trim_dict:
size = memory_map.trim(space_pos, eol_pos)
next_pos = mem_map.find(b">", space_pos)
cluster = (
pd.DataFrame.from_dict(properties_dict).transpose().convert_dtypes()
)
return cluster
def join_homology_to_proteome(args, mailbox_reader=None):
"""Read homology info from mailbox and join it to proteome file."""
idx, protein_parent = args
proteins = pd.read_parquet(protein_parent / HOMOLOGY_FILE)
n_proteins = len(proteins)
with mailbox_reader(idx) as file_handle:
homology_frame = pd.read_csv(
file_handle, sep="\t", index_col=0
).convert_dtypes()
clusters_in_proteome = len(homology_frame)
proteome_frame = pd.concat([proteins, homology_frame], axis=1)
write_tsv_or_parquet(proteome_frame, protein_parent / HOMOLOGY_FILE)
return {
"prot.idx": idx,
"hom.clusters": clusters_in_proteome,
"hom.cluster_pct": clusters_in_proteome * 100.0 / n_proteins,
}
def info_to_fasta(infofile, fastafile, append, infoobj=None):
"""Convert infofile to FASTA file."""
if infoobj is None:
infoobj = read_tsv_or_parquet(infofile)
if append:
filemode = "a+"
else:
filemode = "w"
with Path(fastafile).open(filemode) as file_handle:
fcntl.flock(file_handle, fcntl.LOCK_EX)
logger.debug(f"Writing to {fastafile} with mode {filemode}.")
seqs = infoobj["prot.seq"].copy()
del infoobj["prot.seq"]
for gene_id, row in infoobj.iterrows():
file_handle.write(f">{gene_id} {row.to_json()}\n")
file_handle.write(f"{seqs[gene_id]}\n")
fcntl.flock(file_handle, fcntl.LOCK_UN)
```
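A minimal sketch (gene ID and fields are invented) of the FASTA header convention used above: `info_to_fasta` writes the sequence ID followed by a JSON payload of per-protein properties, which `parse_cluster_fasta` later recovers:
```python
import json
import pandas as pd

row = pd.Series({"path": "genus.species", "frag.id": "chr1", "frag.pos": 42})
header = f">gene0001 {row.to_json()}"
print(header)  # >gene0001 {"path":"genus.species","frag.id":"chr1","frag.pos":42}

gene_id, payload = header[1:].split(" ", 1)
props = json.loads(payload)   # {'path': 'genus.species', 'frag.id': 'chr1', 'frag.pos': 42}
```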
#### File: azulejo/azulejo/synteny.py
```python
import sys
from itertools import combinations
# from os.path import commonprefix as prefix
from pathlib import Path
# third-party imports
import dask.bag as db
import networkx as nx
import numpy as np
import pandas as pd
from dask.diagnostics import ProgressBar
# module imports
from .common import AMBIGUOUS_CODE
from .common import ANCHOR_HIST_FILE
from .common import ANCHORS_FILE
from .common import CLUSTERS_FILE
from .common import CLUSTERSYN_FILE
from .common import CODE_DICT
from .common import DISAMBIGUATED_CODE
from .common import HOMOLOGY_FILE
from .common import INDIRECT_CODE
from .common import LOCALLY_UNAMBIGUOUS_CODE
from .common import NON_AMBIGUOUS_CODE
from .common import PROTEOMOLOGY_FILE
from .common import PROTEOSYN_FILE
from .common import SPINNER_UPDATE_PERIOD
from .common import SYNTENY_FILE
from .common import SYNTENY_FILETYPE
from .common import UNAMBIGUOUS_CODE
from .common import calculate_adjacency_group
from .common import dotpath_to_path
from .common import hash_array
from .common import log_and_add_to_stats
from .common import logger
from .common import read_tsv_or_parquet
from .common import write_tsv_or_parquet
from .hash import SyntenyBlockHasher
from .mailboxes import DataMailboxes
from .mailboxes import ExternalMerge
from .merger import AmbiguousMerger
# global constants
__ALL__ = ["synteny_anchors"]
CLUSTER_COLS = ["syn.anchor.id", "syn.anchor.count", "syn.anchor.direction"]
JOIN_COLS = [
"member_ids",
"syn.anchor.sub_id",
"syn.anchor.id",
"syn.anchor.count",
"syn.code",
"frag.idx",
]
ANCHOR_COLS = [
"path",
"syn.code",
"syn.anchor.count",
"hom.cluster",
"frag.id",
"frag.pos",
"hom.cl_size",
"frag.direction",
"frag.idx",
"frag.is_chr",
"frag.is_plas",
"frag.is_scaf",
"frag.prot_count",
"frag.start",
"prot.len",
"prot.m_start",
"prot.n_ambig",
"prot.no_stop",
]
MAILBOX_SUBDIR = "mailboxes"
# CLI function
def synteny_anchors(
k,
peatmer,
setname,
click_loguru=None,
write_ambiguous=True,
thorny=True,
disambig_adj_only=True,
):
"""Calculate synteny anchors."""
#
# Marshal input arguments
#
if k < 2:
logger.error("k must be at least 2.")
sys.exit(1)
options = click_loguru.get_global_options()
user_options = click_loguru.get_user_global_options()
set_path = Path(setname)
file_stats_path = set_path / PROTEOMOLOGY_FILE
proteomes = read_tsv_or_parquet(file_stats_path)
n_proteomes = len(proteomes)
clusters = read_tsv_or_parquet(set_path / CLUSTERS_FILE)
n_clusters = len(clusters)
hasher = SyntenyBlockHasher(
k=k,
peatmer=peatmer,
thorny=thorny,
disambig_adj_only=disambig_adj_only,
)
logger.info(
f"Calculating {hasher.hash_name(no_prefix=True)} synteny anchors"
+ f" for {n_proteomes} proteomes"
)
# durable argument list for passes
arg_list = [
(
idx,
row["path"],
)
for idx, row in proteomes.iterrows()
]
runner = PassRunner(
{
"n_proteomes": n_proteomes,
"set_path": set_path,
"hasher": hasher,
"quiet": options.quiet,
"parallel": user_options["parallel"],
"bag": db.from_sequence(arg_list),
"merge_args": arg_list,
"click_loguru": click_loguru,
}
)
#
# Make first three passes:
# 1. hash and find unambiguous anchors
# 2. disambiguate ambiguous anchors adjacent to unambiguous ones
# 3. find non-ambiguous hashes uncovered by 2)
#
for pass_code in [
UNAMBIGUOUS_CODE,
DISAMBIGUATED_CODE,
NON_AMBIGUOUS_CODE,
]:
proteomes = runner.make_pass(pass_code, proteomes)
runner.add_ambig_to_total_assigned()
n_anchors = runner.get_total_assigned()
#
# Fourth pass -- merge, write anchor and homology info
#
join_mb = DataMailboxes(
n_boxes=n_proteomes,
mb_dir_path=(set_path / MAILBOX_SUBDIR / "join"),
file_extension="tsv",
)
join_mb.write_tsv_headers(JOIN_COLS)
cluster_mb = DataMailboxes(
n_boxes=n_clusters,
mb_dir_path=(set_path / MAILBOX_SUBDIR / "clusters"),
file_extension="tsv",
)
cluster_mb.write_tsv_headers(CLUSTER_COLS)
anchor_mb = DataMailboxes(
n_boxes=n_anchors,
mb_dir_path=(set_path / MAILBOX_SUBDIR / "anchors"),
file_extension="tsv",
)
anchor_mb.write_tsv_headers(ANCHOR_COLS)
proteomes = runner.make_pass(
INDIRECT_CODE,
proteomes,
extra_kwargs={
"join_mb": join_mb,
"cluster_mb": cluster_mb,
"anchor_mb": anchor_mb,
"n_proteomes": n_proteomes,
"write_ambiguous": write_ambiguous,
},
)
write_tsv_or_parquet(
proteomes, set_path / PROTEOSYN_FILE, remove_tmp=False
)
adjacency_stats = anchors_to_adjacency(
set_path, n_proteomes, join_mb.open_then_delete
)
logger.info(f"adjacency_stats: {adjacency_stats}")
return
#
# Write anchors
#
click_loguru.elapsed_time("Anchor writing")
arg_list = [(i,) for i in range(n_anchors)]
anchor_path = set_path / "synteny"
anchor_path.mkdir(exist_ok=True)
logger.info(f"Writing {n_anchors} synteny anchors to {anchor_path}:")
if not options.quiet:
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
bag = db.from_sequence(arg_list)
anchor_stats = bag.map(
write_anchor,
mailbox_reader=anchor_mb.open_then_delete,
synteny_parent=anchor_path,
).compute()
else:
anchor_stats = []
for args in arg_list:
anchor_stats.append(
write_anchor(
args,
mailbox_reader=anchor_mb.open_then_delete,
synteny_parent=anchor_path,
)
)
anchor_mb.delete()
anchor_stat_list = []
for results in anchor_stats:
if results is not None:
anchor_stat_list += results
anchor_frame = pd.DataFrame.from_dict(results)
write_tsv_or_parquet(
anchor_frame,
set_path / ANCHORS_FILE,
sort_cols=False,
)
#
# Merge synteny into clusters
#
arg_list = [(i,) for i in range(n_clusters)]
click_loguru.elapsed_time("Synteny joining")
homology_path = set_path / "homology"
logger.info(
f"Joining synteny info to {n_clusters} clusters in {homology_path}:"
)
if not options.quiet:
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
bag = db.from_sequence(arg_list)
cluster_stats = bag.map(
join_synteny_to_clusters,
mailbox_reader=cluster_mb.open_then_delete,
cluster_parent=homology_path,
).compute()
else:
cluster_stats = []
for args in arg_list:
cluster_stats.append(
join_synteny_to_clusters(
args,
mailbox_reader=cluster_mb.open_then_delete,
cluster_parent=homology_path,
)
)
cluster_mb.delete()
cluster_frame = pd.DataFrame.from_dict(cluster_stats)
cluster_frame.set_index(["clust_id"], inplace=True)
cluster_frame.sort_index(inplace=True)
clusters = _concat_without_overlap(clusters, cluster_frame)
write_tsv_or_parquet(
clusters, set_path / CLUSTERSYN_FILE, float_format="%5.2f"
)
mean_gene_synteny = (
clusters["in_synteny"].sum() * 100.0 / clusters["size"].sum()
)
mean_clust_synteny = clusters["synteny_pct"].mean()
logger.info(
f"Mean anchor coverage: {mean_gene_synteny: .1f}% (on proteins)"
)
logger.info(
f"Mean cluster anchor coverage: {mean_clust_synteny:.1f}% (on clusters)"
)
click_loguru.elapsed_time(None)
class PassRunner:
"""Run a pass over all proteomes."""
def __init__(self, std_kwargs):
"""Save initial pass info"""
self.std_kwargs = std_kwargs
self.last_code = None
self.pass_name = "<PASSWORD>"
self.merger_kw_dict = {
UNAMBIGUOUS_CODE: {
"count_key": "syn.anchor.count",
"ordinal_key": "syn.anchor.id",
"ambig_ordinal_key": "tmp.ambig.id",
},
DISAMBIGUATED_CODE: {
"count_key": "tmp.disambig.anchor.count",
"ordinal_key": "tmp.disambig.anchor.id",
"alt_hash": True,
},
NON_AMBIGUOUS_CODE: {
"count_key": "tmp.nonambig.anchor.count",
"ordinal_key": "tmp.nonambig.anchor.id",
"ambig_count_key": "tmp.ambig.anchor.count",
"ambig_ordinal_key": "tmp.ambig.anchor.id",
},
}
self.merge_function_dict = {
UNAMBIGUOUS_CODE: calculate_synteny_hashes,
DISAMBIGUATED_CODE: merge_unambig_hashes,
NON_AMBIGUOUS_CODE: merge_disambig_hashes,
INDIRECT_CODE: merge_nonambig_hashes,
}
self.n_assigned_list = []
self.ambig = None
self.unambig = None
self.log_ambig = False
def make_pass(
self,
code,
proteomes,
extra_kwargs=None,
):
"""Make a calculate-merge pass over each proteome."""
self.std_kwargs["click_loguru"].elapsed_time(self.pass_name)
if self.unambig is not None:
if self.log_ambig:
ambig_msg = f" and {len(self.ambig)} ambiguous"
else:
ambig_msg = ""
logger.info(
f"Merging {len(self.unambig)} {CODE_DICT[self.last_code]}"
+ f"({self.last_code}){ambig_msg} synteny anchors into proteomes"
)
if extra_kwargs is None:
extra_kwargs = {}
kwargs = {
"unambig": self.unambig,
"ambig": self.ambig,
"hasher": self.std_kwargs["hasher"],
}
if code in self.merger_kw_dict:
mailboxes = DataMailboxes(
n_boxes=self.std_kwargs["n_proteomes"],
mb_dir_path=(
self.std_kwargs["set_path"]
/ MAILBOX_SUBDIR
/ CODE_DICT[code]
),
)
mailboxes.write_headers("hash\n")
kwargs["mailboxes"] = mailboxes
merge_func = self.merge_function_dict[code]
if not self.std_kwargs["quiet"]:
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if self.std_kwargs["parallel"]:
stats_list = (
self.std_kwargs["bag"]
.map(merge_func, **kwargs, **extra_kwargs)
.compute()
)
else:
stats_list = []
for args in self.std_kwargs["merge_args"]:
stats_list.append(merge_func(args, **kwargs, **extra_kwargs))
stats = (
pd.DataFrame.from_dict(stats_list).set_index("idx").sort_index()
)
proteomes = log_and_add_to_stats(proteomes, stats)
if code in self.merger_kw_dict:
merger = ExternalMerge(
file_path_func=mailboxes.path_to_mailbox,
n_merge=self.std_kwargs["n_proteomes"],
)
merger.init("hash")
merge_counter = AmbiguousMerger(
start_base=sum(self.n_assigned_list),
**self.merger_kw_dict[code],
)
self.unambig, self.ambig = merger.merge(merge_counter)
mailboxes.delete()
self.n_assigned_list.append(len(self.unambig))
self.last_code = code
self.pass_name = CODE_DICT[code]
return proteomes
def add_ambig_to_total_assigned(self):
"""Include the most recent ambiguous assignment in the total."""
self.n_assigned_list.append(len(self.ambig))
self.log_ambig = True
def get_total_assigned(self):
"""Return the total assigned anchors."""
return sum(self.n_assigned_list)
def calculate_synteny_hashes(
args, mailboxes=None, hasher=None, unambig=None, ambig=None
):
"""Calculate synteny hashes for proteins per-genome."""
idx, dotpath = args
outpath = dotpath_to_path(dotpath)
hom = read_tsv_or_parquet(outpath / HOMOLOGY_FILE)
hom["tmp.nan_group"] = (
(hom["hom.cluster"].isnull()).astype(int).cumsum() + 1
) * (~hom["hom.cluster"].isnull())
hom.replace(to_replace={"tmp.nan_group": 0}, value=pd.NA, inplace=True)
hash_name = hasher.hash_name()
syn_list = []
if hasher.thorny: # drop rows
hom = hom[hom["hom.cluster"].notna()]
for unused_id_tuple, subframe in hom.groupby(
by=["frag.id", "tmp.nan_group"]
):
syn_list.append(hasher.calculate(subframe["hom.cluster"]))
del hom["tmp.nan_group"]
syn = hom.join(
pd.concat([df for df in syn_list if df is not None], axis=0)
)
del syn_list
write_tsv_or_parquet(syn, outpath / SYNTENY_FILE, remove_tmp=False)
syn["tmp.self_count"] = pd.array(
syn[hash_name].map(syn[hash_name].value_counts()),
dtype=pd.UInt32Dtype(),
)
unique_hashes = (
syn[[hash_name, "tmp.self_count"]]
.drop_duplicates(subset=[hash_name])
.dropna(how="any")
)
unique_hashes = unique_hashes.set_index(hash_name).sort_index()
with mailboxes.locked_open_for_write(idx) as file_handle:
unique_hashes.to_csv(file_handle, header=False, sep="\t")
return {
"idx": idx,
"path": dotpath,
"hom.clusters": syn["hom.cluster"].notna().sum(),
"syn.hashes.n": syn[hash_name].notna().sum(),
}
def merge_unambig_hashes(
args,
unambig=None,
ambig=None,
hasher=None,
mailboxes=None,
):
"""Merge unambiguous synteny hashes into proteomes per-proteome."""
hash_name = hasher.hash_name()
idx, dotpath = args
outpath = dotpath_to_path(dotpath)
syn = read_tsv_or_parquet(outpath / SYNTENY_FILE)
syn = _join_on_col_with_na(syn, unambig, hash_name)
syn = _join_on_col_with_na(syn, ambig, hash_name)
syn["syn.code"] = pd.NA
syn["syn.code"] = _fill_col1_val_where_col2_notna(
syn["syn.code"], syn["syn.anchor.id"], UNAMBIGUOUS_CODE
)
# Calculate disambiguation hashes and write them out for merge
disambig_frame_list = []
for unused_frag, subframe in syn.groupby(by=["frag.id"]):
disambig_frame_list.append(hasher.calculate_disambig_hashes(subframe))
disambig_fr = pd.concat(
[df for df in disambig_frame_list if df is not None]
)
disambig_fr = disambig_fr.dropna(how="all")
syn = syn.join(disambig_fr)
write_tsv_or_parquet(syn, outpath / SYNTENY_FILE, remove_tmp=False)
# Write out unified upstream/downstream hash values
merged_hashes = pd.concat(
[
_rename_and_fill_alt(syn, "tmp.disambig.up", "tmp.disambig.down"),
_rename_and_fill_alt(syn, "tmp.disambig.down", "tmp.disambig.up"),
],
ignore_index=True,
)
merged_hashes["self_count"] = pd.array(
merged_hashes["hash"].map(merged_hashes["hash"].value_counts()),
dtype=pd.UInt32Dtype(),
)
merged_hashes = merged_hashes.reindex(
columns=["hash", "self_count", "alt_hash"]
)
unique_hashes = (
merged_hashes.drop_duplicates(subset=["hash"])
.set_index("hash")
.sort_index()
)
del merged_hashes
with mailboxes.locked_open_for_write(idx) as file_handle:
unique_hashes.to_csv(file_handle, header=False, sep="\t")
return {
"idx": idx,
"path": dotpath,
"syn.anchors.unambiguous": _count_code(
syn["syn.code"], UNAMBIGUOUS_CODE
),
}
def merge_disambig_hashes(
args,
unambig=None,
ambig=None,
hasher=None,
mailboxes=None,
):
"""Merge disambiguated synteny hashes into proteomes per-proteome."""
idx, dotpath = args
plain_hash_name = hasher.hash_name(no_prefix=True)
hash_name = "syn." + plain_hash_name
outpath = dotpath_to_path(dotpath)
syn = read_tsv_or_parquet(outpath / SYNTENY_FILE)
syn = _join_on_col_with_na(syn, unambig, "tmp.disambig.up")
syn = _join_on_col_with_na(syn, unambig, "tmp.disambig.down")
for dup_col in [
"tmp.disambig.anchor.count",
"tmp.disambig.anchor.id",
]:
xcol = dup_col + "_x"
ycol = dup_col + "_y"
syn[dup_col] = syn[xcol].fillna(syn[ycol])
del syn[xcol], syn[ycol]
syn["syn.anchor.id"] = syn["syn.anchor.id"].fillna(
syn["tmp.disambig.anchor.id"]
)
syn["syn.anchor.count"] = syn["syn.anchor.count"].fillna(
syn["tmp.disambig.anchor.count"]
)
syn["syn.code"] = _fill_col1_val_where_col2_notna(
syn["syn.code"], syn["tmp.disambig.anchor.id"], DISAMBIGUATED_CODE
)
# Delete some non-needed tmp columns
non_needed_cols = [
"tmp.disambig.anchor.count",
"tmp.disambig.anchor.id",
"tmp.disambig.up",
"tmp.disambig.down",
]
syn = syn.drop(columns=non_needed_cols)
    # null out hashes that already have an anchor assignment
    syn.loc[syn["syn.anchor.id"].notna(), hash_name] = pd.NA
write_tsv_or_parquet(syn, outpath / SYNTENY_FILE, remove_tmp=False)
# Write out non-ambiguous hashes
syn["tmp.self_count"] = pd.array(
syn[hash_name].map(syn[hash_name].value_counts()),
dtype=pd.UInt32Dtype(),
)
unique_hashes = (
syn[[hash_name, "tmp.self_count"]]
.drop_duplicates(subset=[hash_name])
.dropna(how="any")
)
unique_hashes = unique_hashes.set_index(hash_name).sort_index()
with mailboxes.locked_open_for_write(idx) as file_handle:
unique_hashes.to_csv(file_handle, header=False, sep="\t")
# logger.debug(f"{dotpath} has {syn['syn.anchor.id'].notna().sum()} assignments")
return {
"idx": idx,
"path": dotpath,
"syn.anchors.disambiguated": _count_code(
syn["syn.code"], DISAMBIGUATED_CODE
),
}
def merge_nonambig_hashes(
args,
join_mb=None,
unambig=None,
ambig=None,
hasher=None,
n_proteomes=None,
cluster_mb=None,
anchor_mb=None,
write_ambiguous=True,
):
"""Merge disambiguated synteny hashes into proteomes per-proteome."""
idx, dotpath = args
plain_hash_name = hasher.hash_name(no_prefix=True)
hash_name = "syn." + plain_hash_name
outpath = dotpath_to_path(dotpath)
syn = read_tsv_or_parquet(outpath / SYNTENY_FILE)
syn = _join_on_col_with_na(syn, unambig, hash_name)
#
# Do the indirects (formerly ambig made nonambig)
#
syn["syn.anchor.id"] = syn["syn.anchor.id"].fillna(
syn["tmp.nonambig.anchor.id"]
)
syn["syn.anchor.count"] = syn["syn.anchor.count"].fillna(
syn["tmp.nonambig.anchor.count"]
)
syn["syn.code"] = _fill_col1_val_where_col2_notna(
syn["syn.code"], syn["tmp.nonambig.anchor.id"], INDIRECT_CODE
)
#
# Do the nonambig (w.r.t. this proteome) and ambig, if requested
#
syn = _join_on_col_with_na(syn, ambig, hash_name)
n_proteins = len(syn)
syn["tmp.i"] = range(n_proteins)
ambig_code = syn["syn.code"].copy()
ambig_ids = syn["tmp.ambig.anchor.id"].copy()
ambig_counts = syn["tmp.ambig.anchor.count"].copy()
for unused_ambig_id, subframe in syn.groupby(by=["tmp.ambig.anchor.id"]):
ambig_n = len(subframe)
for unused_i, row in subframe.iterrows():
row_no = row["tmp.i"]
if ambig_n == 1:
ambig_code.iloc[row_no] = LOCALLY_UNAMBIGUOUS_CODE
elif ambig_n > 1:
if write_ambiguous:
ambig_code.iloc[row_no] = AMBIGUOUS_CODE
else:
ambig_ids.iloc[row_no] = pd.NA
ambig_counts.iloc[row_no] = pd.NA
syn["syn.anchor.id"] = syn["syn.anchor.id"].fillna(ambig_ids)
syn["syn.anchor.count"] = syn["syn.anchor.count"].fillna(ambig_counts)
syn["syn.code"] = syn["syn.code"].fillna(ambig_code)
del ambig_code, ambig_ids, ambig_counts
#
# Hsh footprint and direction are anchor properties, where set
#
syn = syn.rename(
columns={
"syn.hash.footprint": "syn.anchor.footprint",
"syn.hash.direction": "syn.anchor.direction",
}
)
n_anchors = syn["syn.anchor.id"].notna().sum() # before shingling
#
# Do shingling
#
# shingle_id = np.array([np.nan] * n_proteins)
# shingle_count = np.array([np.nan] * n_proteins)
# shingle_code = syn["syn.code"].to_numpy()
# shingle_direction = syn["syn.anchor.direction"].to_numpy()
# shingle_sub = np.array([np.nan] * n_proteins)
with join_mb.locked_open_for_write(idx) as file_handle:
for anchor_count, subframe in syn.groupby(by=["syn.anchor.count"]):
for unused_i, row in subframe.iterrows():
anchor_id = row["syn.anchor.id"]
if pd.isnull(anchor_id):
continue
first_row = row["tmp.i"]
last_row = first_row + row["syn.anchor.footprint"]
# shingle_id[first_row:last_row] = anchor_id
# shingle_count[first_row:last_row] = anchor_count
# shingle_code[first_row:last_row] = row["syn.code"]
# shingle_direction[first_row:last_row] = row[
# "syn.anchor.direction"
# ]
# shingle_sub[first_row:last_row] = hasher.shingle(
# syn["hom.cluster"][first_row:last_row],
# row["syn.anchor.direction"],
# row[hash_name],
# )
shingle_fr = pd.DataFrame(
{
"member_ids": syn.iloc[first_row:last_row].index,
"syn.anchor.sub_id": hasher.shingle(
syn["hom.cluster"][first_row:last_row],
row["syn.anchor.direction"],
row[hash_name],
),
}
)
shingle_fr["syn.anchor.id"] = anchor_id
shingle_fr["syn.anchor.count"] = anchor_count
shingle_fr["syn.code"] = row["syn.code"]
shingle_fr["frag.idx"] = row["frag.idx"]
shingle_fr.to_csv(file_handle, header=False, sep="\t")
# syn["syn.anchor.id"] = shingle_id
# syn["syn.anchor.count"] = shingle_count
# syn["syn.code"] = shingle_code
# syn["syn.anchor.direction"] = shingle_direction
# syn["syn.anchor.sub_id"] = shingle_sub
# del shingle_id, shingle_count, shingle_code, shingle_sub
# Delete non-needed (but non-tmp) columns
# non_needed_cols = [hash_name]
# syn = syn.drop(columns=non_needed_cols)
# Write proteome file
# write_tsv_or_parquet(
# syn,
# outpath / SYNTENY_FILE,
# )
# Write anchor info to mailbox
# for anchor_id, subframe in syn.groupby(by=["syn.anchor.id"]):
# anchor_frame = subframe.copy()
# anchor_frame["path"] = dotpath
# with anchor_mb.locked_open_for_write(anchor_id) as file_handle:
# anchor_frame[ANCHOR_COLS].to_csv(
# file_handle, header=False, sep="\t"
# )
# for cluster_id, subframe in syn.groupby(by=["hom.cluster"]):
# with cluster_mb.locked_open_for_write(cluster_id) as file_handle:
# subframe[CLUSTER_COLS].to_csv(file_handle, header=False, sep="\t")
in_synteny = syn["syn.anchor.id"].notna().sum()
n_assigned = syn["hom.cluster"].notna().sum()
avg_ortho = syn["syn.anchor.count"].mean()
synteny_pct = in_synteny * 100.0 / n_assigned
n_ambig = _count_code(syn["syn.code"], AMBIGUOUS_CODE)
n_nonambig = in_synteny - n_ambig
nonambig_pct = n_nonambig * 100.0 / n_assigned
synteny_stats = {
"idx": idx,
"path": dotpath,
"syn.anchors.indirect_unambiguous": _count_code(
syn["syn.code"], INDIRECT_CODE
),
"syn.anchors.locally_unambiguous": _count_code(
syn["syn.code"], LOCALLY_UNAMBIGUOUS_CODE
),
"syn.anchors.ambiguous": n_ambig,
"syn.anchors.nonambiguous": n_nonambig,
"syn.anchors.nonambig_pct": nonambig_pct,
"syn.anchors.base": n_anchors,
"syn.anchors.total": in_synteny,
"syn.anchors.total_pct": synteny_pct,
"syn.orthogenomic_pct": avg_ortho * 100.0 / n_proteomes,
}
return synteny_stats
def join_synteny_to_clusters(args, cluster_parent=None, mailbox_reader=None):
"""Read homology info from mailbox and join it to proteome file."""
idx = args[0]
cluster_path = cluster_parent / f"{idx}.parq"
cluster = pd.read_parquet(cluster_path)
n_cluster = len(cluster)
with mailbox_reader(idx) as file_handle:
synteny_frame = pd.read_csv(
file_handle, sep="\t", index_col=0
).convert_dtypes()
in_synteny = len(synteny_frame)
# cluster files are unusual in that I don't bother to version them,
# so overlapping info has to be deleted each time
clust_syn = _concat_without_overlap(cluster, synteny_frame)
write_tsv_or_parquet(clust_syn, cluster_path)
anchor_count = clust_syn["syn.anchor.id"].value_counts()
anchor_frag_counts = [0]
for unused_id_tuple, subframe in clust_syn.groupby(
by=["syn.anchor.id", "path"]
):
if len(subframe) == 1:
anchor_frag_counts.append(1)
else:
anchor_frag_counts.append(len(subframe["frag.idx"].value_counts()))
return {
"clust_id": idx,
"in_synteny": in_synteny,
"n_anchors": len(anchor_count),
"max_frags_per_anch": max(anchor_frag_counts),
"synteny_pct": in_synteny * 100.0 / n_cluster,
}
def write_anchor(args, synteny_parent=None, mailbox_reader=None):
"""Read synteny anchor info from mailbox and join it to synteny file."""
idx = args[0]
with mailbox_reader(idx) as file_handle:
anchor_frame = pd.read_csv(
file_handle, sep="\t", index_col=0
).convert_dtypes()
in_anchor = len(anchor_frame)
if in_anchor == 0:
return None
# drop any duplicated ID's--normally shouldn't happen
anchor_frame.drop(
anchor_frame[anchor_frame.index.duplicated()].index, inplace=True
)
anchor_frame.sort_values(
by=["syn.anchor.sub_id", "frag.idx", "frag.pos"], inplace=True
)
# Make a dictionary of common anchor properties, order will be kept
anchor_props = {
"anchor.id": idx,
"sub": None,
"code": None,
"count": None,
"n": None,
"n_ambig": None,
"n_adj": None,
"adj_groups": None,
"frag.direction": None,
"syn.anchor.direction": None,
"anchor.subframe.ok": True,
"hash": None,
}
code_set = set(anchor_frame["syn.code"])
for test_code in CODE_DICT.keys():
if test_code in code_set:
anchor_props["code"] = test_code
break
bad_subframe = False
prop_list = []
for sub_no, subframe in anchor_frame.groupby(by=["syn.anchor.sub_id"]):
(subanchor_props, anchor_subframe, bad_subframe) = _subframe_props(
anchor_props, subframe, sub_no
)
if bad_subframe:
break
write_tsv_or_parquet(
anchor_subframe,
synteny_parent / f"{idx}.{sub_no}.{SYNTENY_FILETYPE}",
sort_cols=False,
)
prop_list.append(subanchor_props)
if bad_subframe: # Probably means a hash collision
logger.error(f"bad anchor set {idx}")
prop_list = []
sub_no = 0
anchor_props["anchor.subframe.ok"] = False
for cluster_id, subframe in anchor_frame.groupby(by=["hom.cluster"]):
(
subanchor_props,
anchor_subframe,
unused_bad_subframe,
) = _subframe_props(anchor_props, subframe, sub_no)
write_tsv_or_parquet(
anchor_subframe,
synteny_parent / f"{idx}.{sub_no}.{SYNTENY_FILETYPE}",
sort_cols=False,
)
sub_no += 1
prop_list.append(subanchor_props)
return prop_list
def _subframe_props(anchor_props, subframe, sub_no):
"""Calculate subframe properties and write subframe"""
anchor_subframe = subframe.copy()
    subanchor_props = anchor_props.copy()
    bad_subframe = False  # set True below if the subframe mixes homology clusters
    subanchor_props["sub"] = sub_no
anchor_dir_set = set(anchor_subframe["syn.anchor.direction"])
if len(anchor_dir_set) == 1:
subanchor_props["syn.anchor.direction"] = list(anchor_dir_set)[0]
frag_dir_set = set(anchor_subframe["frag.direction"])
if len(frag_dir_set) == 1:
subanchor_props["frag.direction"] = list(frag_dir_set)[0]
subanchor_props["count"] = anchor_subframe["syn.anchor.count"].iloc[0]
subanchor_props["n_ambig"] = _count_code(
anchor_subframe["syn.code"], AMBIGUOUS_CODE
)
hom_clust_set = set(anchor_subframe["hom.cluster"])
if len(hom_clust_set) == 1:
subanchor_props[f"anchor.{sub_no}.cluster"] = list(hom_clust_set)[0]
else:
bad_subframe = True
del (
anchor_subframe["syn.anchor.count"],
anchor_subframe["syn.anchor.sub_id"],
)
subanchor_props["n"] = len(anchor_subframe)
subanchor_props["hash"] = hash_array(
np.sort(anchor_subframe.index.to_numpy())
)
(
subanchor_props["n_adj"],
subanchor_props["adj_groups"],
unused_adj_group,
) = calculate_adjacency_group(
anchor_subframe["frag.pos"], anchor_subframe["frag.idx"]
)
return subanchor_props, anchor_subframe, bad_subframe
def _concat_without_overlap(df1, df2):
"""Concatenate df2 on top of df1."""
overlapping = set(df1.columns).intersection(df2.columns)
if len(overlapping) > 0:
df1 = df1.drop(columns=overlapping)
return pd.concat([df1, df2], axis=1)
def _rename_and_fill_alt(df1, key, alt_key):
"""Rename columns and zero-fill alternate."""
df2 = df1[[key]].rename(columns={key: "hash"})
df2["alt_hash"] = df1[alt_key].fillna(0)
return df2.dropna(how="any")
def _join_on_col_with_na(left, right, col_name):
"""Join on a temporary column of type 'O'."""
tmp_col_name = "tmp." + col_name
left[tmp_col_name] = left[col_name].astype("O")
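    # object ("O") dtype presumably avoids dtype/NA coercion problems when the
    # key column contains missing values; the temporary column is dropped below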
merged = pd.merge(
left, right, left_on=tmp_col_name, right_index=True, how="left"
)
del merged[tmp_col_name]
return merged
def _fill_col1_val_where_col2_notna(col1, col2, val):
"""Set col1 to val where col2 is not NA if col1 is not set."""
fill_ser = col1.copy()
fill_ser[col2.notna()] = val
return col1.fillna(fill_ser)
def _count_code(code_ser, code):
"""Counts number of occurrances of code in code_ser."""
return (code_ser == code).sum()
def anchors_to_adjacency(set_path, n_proteomes, mailbox_reader):
"""Merge adjacencies and produce and adjacency graph."""
frame_list = []
for idx in range(n_proteomes):
with mailbox_reader(idx) as file_handle:
frame_list.append(
pd.read_csv(
file_handle, sep="\t", index_col=0
).convert_dtypes()
)
nodes = pd.concat(
frame_list,
ignore_index=True,
)
del frame_list
graph = nx.Graph()
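    # nodes are anchor member ids; every pair of members sharing the same
    # (anchor id, sub-anchor id) is connected, with edge weight = group size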
for unused_tuple, subframe in nodes.groupby(
by=["syn.anchor.id", "syn.anchor.sub_id"]
):
ids = subframe["member_ids"]
n_ids = len(ids)
graph.add_nodes_from(ids)
if n_ids > 1:
edges = combinations(ids, 2)
graph.add_edges_from(edges, weight=n_ids)
outpath = set_path / ANCHORS_FILE
summarypath = outpath.parent / (
outpath.name[: -len(outpath.suffix)] + "_summary.tsv"
)
histpath = outpath.parent / (
outpath.name[: -len(outpath.suffix)] + "_hist.tsv"
)
components = [
c
for c in sorted(nx.connected_components(graph), key=len, reverse=True)
if len(c) > 1
]
fh = outpath.open("w")
fh.write("idx\tcluster_id\tsize\tmembers\n")
n_items = 0
count_list = []
hash_list = []
id_list = []
for i, comp in enumerate(components):
component = np.sort(pd.Index(list(comp)).to_numpy())
id_list.append(i)
size = len(comp)
count_list.append(size)
hash_list.append(hash_array(component))
for node in component:
fh.write(f"{n_items}\t{i}\t{size}\t{node}\n")
n_items += 1
fh.close()
n_clusts = len(count_list)
del graph, components
cluster_counts = pd.DataFrame({"size": count_list})
largest_cluster = cluster_counts["size"].max()
cluster_hist = (
pd.DataFrame(cluster_counts.value_counts()).sort_index().reset_index()
)
cluster_hist = cluster_hist.set_index("size")
cluster_hist = cluster_hist.rename(columns={0: "n"})
cluster_hist["item_pct"] = (
cluster_hist["n"] * cluster_hist.index * 100.0 / n_items
)
    cluster_hist["cluster_pct"] = cluster_hist["n"] * 100.0 / n_clusts
    cluster_hist.to_csv(histpath, sep="\t", float_format="%5.2f")
clusters = pd.DataFrame(
{"anchor.id": id_list, "count": count_list, "hash": hash_list}
)
clusters.to_csv(summarypath, sep="\t")
stats_dict = {
"in_anchor": n_items,
"syn.anchors.n": n_clusts,
"syn.anchors.largest": largest_cluster,
}
return stats_dict
def intersect_anchors(set1_file, set2_file):
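    """Compare two anchor cluster files and log identity, subset, and incongruence counts."""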
set1_path = Path(set1_file)
set2_path = Path(set2_file)
set1_fr = pd.read_csv(set1_path, sep="\t", index_col=0)
set2_fr = pd.read_csv(set2_path, sep="\t", index_col=0)
set1_dict = {}
for cluster_id, subframe in set1_fr.groupby(by=["cluster_id"]):
set1_dict[cluster_id] = set(subframe["members"].to_numpy())
set2_dict = {}
for cluster_id, subframe in set2_fr.groupby(by=["cluster_id"]):
set2_dict[cluster_id] = set(subframe["members"].to_numpy())
identity_sets = []
s1_subset = []
s2_subset = []
incongruent = []
match_keys = list(set2_dict.keys())
for key1 in set1_dict:
s1 = set1_dict[key1]
for i, key2 in enumerate(match_keys):
s2 = set2_dict[key2]
if len(s1.intersection(s2)) == 0:
continue
elif s1 == s2:
identity_sets.append(
(
key1,
key2,
)
)
match_keys.pop(i)
break
elif s1.issubset(s2):
s1_subset.append(
(
key1,
key2,
)
)
match_keys.pop(i)
break
elif s2.issubset(s1):
s2_subset.append(
(
key1,
key2,
)
)
match_keys.pop(i)
break
else:
incongruent.append(
(
key1,
key2,
)
)
logger.info(f"set 1 ({set1_file}): {len(set1_dict)}")
logger.info(f"set 2 ({set2_file}): {len(set2_dict)}")
min_sets = min(len(set1_dict), len(set2_dict))
id_len = len(identity_sets)
id_pct = id_len * 100.0 / min_sets
logger.info(f"identity: {id_len} ({id_pct:.1f}%)")
s1_len = len(s1_subset)
s1_pct = s1_len * 100.0 / min_sets
logger.info(f"set 1 is subset: {s1_len} ({s1_pct:.1f}%)")
s2_len = len(s2_subset)
s2_pct = s2_len * 100.0 / min_sets
logger.info(f"set 2 is subset: {s2_len} ({s2_pct:.1f}%)")
incon_len = len(incongruent)
incon_pct = incon_len * 100.0 / min_sets
logger.info(f"incongruent: {incon_len}({incon_pct}%)")
```
#### File: azulejo/tests/4homology_test.py
```python
import sys
from pathlib import Path
# third-party imports
import pytest
import sh
# module imports
from . import INGEST_OUTPUTS
from . import find_homology_files
from . import help_check
from . import print_docstring
# global constants
azulejo = sh.Command("azulejo")
SUBCOMMAND = "homology"
def test_subcommand_help():
"""Test subcommand help message."""
help_check(SUBCOMMAND)
@print_docstring()
def test_homology(datadir_mgr, capsys):
"""Test homology clustering, MSA, and tree building."""
with capsys.disabled():
with datadir_mgr.in_tmp_dir(
inpathlist=INGEST_OUTPUTS,
save_outputs=True,
outscope="global",
excludepaths=["logs/"],
progressbar=True,
):
args = ["-e", SUBCOMMAND, "glycines"]
print(f"azulejo {' '.join(args)}")
try:
azulejo(
args,
_out=sys.stderr,
)
except sh.ErrorReturnCode as errors:
print(errors)
pytest.fail("Homology clustering failed")
for filestring in find_homology_files():
assert Path(filestring).stat().st_size > 0
``` |
{
"source": "joelb123/gffpandas-jb",
"score": 2
} |
#### File: gffpandas-jb/tests/test_gffpandas.py
```python
import shutil
import time
from pathlib import Path
# first-party imports
import gffpandas.gffpandas as gff3pd
# third-party imports
import pandas as pd
# module imports
from . import print_docstring
# global constants
REFSEQ_URL = (
"https://ftp.ncbi.nih.gov/genomes/refseq/vertebrate_mammalian"
+ "/Homo_sapiens/annotation_releases/109.20191205/GCF_000001405.39_GRCh38.p13/"
)
HUMAN_GFF = "GCF_000001405.39_GRCh38.p13_genomic.gff"
TESTFILELIST = ["test_file.gff"]
written_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type="
"genomic DNA;serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
)
written_header = "##gff-version 3\n" "##sequence-region NC_016810.1 1 20\n"
written_csv = (
"seq_id,source,type,start,end,score,strand,phase,attributes\n"
"NC_016810.1,RefSeq,region,1,4000,.,+,.,Dbxref=taxon:216597;ID="
"id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;serovar="
"Typhimurium;strain=SL1344\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene1;Name=thrL;gbkey="
"Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1,RefSeq,CDS,13,235,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene2;Name=thrA;gbkey="
"Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1,RefSeq,CDS,341,523,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,1,600,.,-,.,ID=gene3;Name=thrX;gbkey="
"Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1,RefSeq,CDS,21,345,.,-,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,41,255,.,+,.,ID=gene4;Name=thrB;gbkey="
"Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1,RefSeq,CDS,61,195,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1,RefSeq,gene,170,546,.,+,.,ID=gene5;Name=thrC;gbkey"
"=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1,RefSeq,CDS,34,335,.,+,0,Dbxref=UniProtKB%252FTr"
"EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799"
"41.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide;"
"protein_id=YP_005179941.1;transl_table=11\n"
)
written_tsv = (
"seq_id\tsource\ttype\tstart\tend\tscore\tstrand\tphase\t"
"attributes\n"
"NC_016810.1\tRefSeq\tregion\t1\t4000\t.\t+\t.\tDbxref=taxon:21"
"6597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;"
"serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene1;Name=thrL;"
"gbkey=Gene;gene=thrL;locus_tag=SL1344_0001\n"
"NC_016810.1\tRefSeq\tCDS\t13\t235\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene1;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene2;Name=thrA;"
"gbkey=Gene;gene=thrA;locus_tag=SL1344_0002\n"
"NC_016810.1\tRefSeq\tCDS\t341\t523\t.\t+\t0\tDbxref=UniProtKB%"
"252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_005"
"179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t1\t600\t.\t-\t.\tID=gene3;Name=thrX"
";gbkey=Gene;gene=thrX;locus_tag=SL1344_0003\n"
"NC_016810.1\tRefSeq\tCDS\t21\t345\t.\t-\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene3;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t41\t255\t.\t+\t.\tID=gene4;Name="
"thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004\n"
"NC_016810.1\tRefSeq\tCDS\t61\t195\t.\t+\t0\tDbxref=UniProtKB%2"
"52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051"
"79941.1;Parent=gene4;gbkey=CDS;product=thr operon leader "
"peptide;protein_id=YP_005179941.1;transl_table=11\n"
"NC_016810.1\tRefSeq\tgene\t170\t546\t.\t+\t.\tID=gene5;Name="
"thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005\n"
"NC_016810.1\tRefSeq\tCDS\t34\t335\t.\t+\t0\tDbxref=UniProt"
"KB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name="
"YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon "
"leader peptide;protein_id=YP_005179941.1;transl_table=11\n"
)
written_gff = (
"##gff-version 3\n"
"##sequence-region NC_016810.1 1 20\n"
"NC_016810.1 RefSeq region 1 4000 . +"
" . Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=ge"
"nomic;mol_type=genomic DNA;serovar=Typhimurium;strain=SL1344\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_"
"tag=SL1344_0001\n"
"NC_016810.1 RefSeq CDS 13 235 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene1;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 20 . +"
" . ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_"
"tag=SL1344_0002\n"
"NC_016810.1 RefSeq CDS 341 523 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene2;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 1 600 . -"
" . ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_"
"tag=SL1344_0003\n"
"NC_016810.1 RefSeq CDS 21 345 . -"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene3;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 41 255 . +"
" . ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_"
"tag=SL1344_0004\n"
"NC_016810.1 RefSeq CDS 61 195 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene4;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
"NC_016810.1 RefSeq gene 170 546 . +"
" . ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_"
"tag=SL1344_0005\n"
"NC_016810.1 RefSeq CDS 34 335 . +"
" 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y"
"P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene5;gbkey=C"
"DS;product=thr operon leader peptide;protein_id=YP_005179941.1"
";transl_table=11\n"
)
written_filtered_length = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 2, 3, 4, 7, 8],
)
compare_get_feature_by_attribute = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 5, 7, 9],
)
compare_get_feature_by_attribute2 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID="
"cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon"
" leader peptide;protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[4, 6, 8],
)
written_attribute_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic"
" DNA;serovar=Typhimurium;strain=SL1344",
"taxon:216597",
"id0",
None,
None,
"Src",
None,
"genomic",
None,
"genomic DNA",
None,
None,
"Typhimurium",
"SL1344",
None,
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
None,
"gene1",
"thrL",
None,
"Gene",
"thrL",
None,
"SL1344_0001",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene1",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
None,
"gene2",
"thrA",
None,
"Gene",
"thrA",
None,
"SL1344_0002",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene2",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
None,
"gene3",
"thrX",
None,
"Gene",
"thrX",
None,
"SL1344_0003",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene3",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
None,
"gene4",
"thrB",
None,
"Gene",
"thrB",
None,
"SL1344_0004",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene4",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
None,
"gene5",
"thrC",
None,
"Gene",
"thrC",
None,
"SL1344_0005",
None,
None,
None,
None,
None,
None,
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader"
" peptide;protein_id=YP_005179941.1;transl_table=11",
"UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1",
"cds0",
"YP_005179941.1",
"gene5",
"CDS",
None,
None,
None,
None,
"thr operon leader peptide",
"YP_005179941.1",
None,
None,
"11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
"Dbxref",
"ID",
"Name",
"Parent",
"gbkey",
"gene",
"genome",
"locus_tag",
"mol_type",
"product",
"protein_id",
"serovar",
"strain",
"transl_table",
],
)
strand_counts = pd.value_counts(written_df["strand"]).to_dict()
type_counts = pd.value_counts(written_df["type"]).to_dict()
compare_stats_dic = {
"Maximal_bp_length": 599,
"Minimal_bp_length": 19,
"Counted_strands": strand_counts,
"Counted_feature_types": type_counts,
}
df_empty = pd.DataFrame(
{},
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[],
)
redundant_entry = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[3],
)
compare_filter_feature_df = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 5, 7, 9],
)
compare_overlap_gene_1_40 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3],
)
compare_overlap_40_300 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
13,
235,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
41,
255,
".",
"+",
".",
"ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
61,
195,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
34,
335,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 2, 7, 8, 9, 10],
)
compare_overlap_170_171 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
600,
".",
"-",
".",
"ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
21,
345,
".",
"-",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[5, 6],
)
compare_overlap_525_545 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 9],
)
compare_overlap_341_500 = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"region",
1,
4000,
".",
"+",
".",
"Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA"
";serovar=Typhimurium;strain=SL1344",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;"
"Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader pep"
"tide;protein_id=YP_005179941.1;transl_table=11",
],
[
"NC_016810.1",
"RefSeq",
"gene",
170,
546,
".",
"+",
".",
"ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[0, 4, 9],
)
compare_complement = pd.DataFrame(
[
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001",
],
[
"NC_016810.1",
"RefSeq",
"gene",
1,
20,
".",
"+",
".",
"ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002",
],
[
"NC_016810.1",
"RefSeq",
"CDS",
341,
523,
".",
"+",
"0",
"Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name"
"=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide"
";protein_id=YP_005179941.1;transl_table=11",
],
],
columns=[
"seq_id",
"source",
"type",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
],
index=[1, 3, 4],
)
def generate_gff3_df():
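    """Read the test GFF3 file into a gff3pd.Gff3DataFrame."""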
read_in_file = gff3pd.read_gff3("test_file.gff")
return read_in_file
@print_docstring()
def test_clean_datadir(request):
"""Clean up datadir."""
testdir = Path(request.fspath.dirpath())
datadir = testdir / "data"
if datadir.exists():
shutil.rmtree(datadir) # remove anything left in data directory
@print_docstring()
def test_setup_datadir(request, datadir_mgr, capsys):
"""Copy in and download static data."""
testdir = Path(request.fspath.dirpath())
datadir = testdir / "data"
filesdir = testdir / "testdata"
shutil.copytree(filesdir, datadir)
with capsys.disabled():
datadir_mgr.download(
download_url=REFSEQ_URL,
files=[HUMAN_GFF],
scope="global",
md5_check=False,
gunzip=True,
progressbar=True,
)
@print_docstring()
def test_read_gff3_if_df_type(datadir_mgr):
"""Test basic gff3dataframe creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
assert type(gff3_df) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(gff3_df.df, written_df)
@print_docstring()
def test_generate_gff_header(datadir_mgr):
"""Test GFF header generation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
object_header = generate_gff3_df()
generate_header = object_header._read_gff_header()
assert type(object_header) == gff3pd.Gff3DataFrame
assert object_header.header == written_header
assert generate_header == written_header
@print_docstring()
def test_if_df_values_equal_gff_values(datadir_mgr):
"""Testing whether dataframe values equal input GFF values."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
test_df_object = generate_gff3_df()
test_df = test_df_object._read_gff3_to_df()
assert type(test_df_object) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(test_df, written_df)
@print_docstring()
def test_to_csv(datadir_mgr):
"""Test CSV file creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_csv("temp.csv")
csv_content = open("temp.csv").read()
assert csv_content == written_csv
@print_docstring()
def test_to_tsv(datadir_mgr):
"""Test TSV file creation."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_tsv("temp.tsv")
tsv_content = open("temp.tsv").read()
assert tsv_content == written_tsv
@print_docstring()
def test_to_gff3(datadir_mgr):
"""Test GFF file creation and rereading."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df.to_gff3("temp.gff")
gff_content = open("temp.gff").read()
assert gff_content == written_gff
read_gff_output = gff3pd.read_gff3("temp.gff")
read_in_file = gff3pd.read_gff3("test_file.gff")
pd.testing.assert_frame_equal(read_in_file.df, read_gff_output.df)
@print_docstring()
def test_filter_feature_of_type(datadir_mgr):
"""Test feature filtering."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
object_type_df = gff3_df.filter_feature_of_type(["gene"])
assert type(object_type_df) == gff3pd.Gff3DataFrame
assert object_type_df.df.empty == compare_filter_feature_df.empty
pd.testing.assert_frame_equal(object_type_df.df, compare_filter_feature_df)
assert object_type_df.header == written_header
@print_docstring()
def test_filter_by_length(datadir_mgr):
"""Test filtering by length."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
filtered_length = gff3_df.filter_by_length(min_length=10, max_length=300)
assert type(filtered_length) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(filtered_length.df, written_filtered_length)
assert filtered_length.header == written_header
@print_docstring()
def test_get_feature_by_attribute(datadir_mgr):
"""Test get feature by attibute."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
filtered_gff3_df = gff3_df.get_feature_by_attribute("gbkey", ["Gene"])
filtered_gff3_df2 = gff3_df.get_feature_by_attribute(
"Parent", ["gene2", "gene3", "gene4"]
)
filtered_gff3_df3 = gff3_df.get_feature_by_attribute(
"locus_tag", ["SL1344_0006"]
)
assert type(filtered_gff3_df) == gff3pd.Gff3DataFrame
assert type(filtered_gff3_df2) == gff3pd.Gff3DataFrame
assert type(filtered_gff3_df3) == gff3pd.Gff3DataFrame
assert filtered_gff3_df.df.shape == (5, 9)
pd.testing.assert_frame_equal(
filtered_gff3_df.df, compare_get_feature_by_attribute
)
pd.testing.assert_frame_equal(
filtered_gff3_df2.df, compare_get_feature_by_attribute2
)
assert filtered_gff3_df3.df.shape == df_empty.shape
@print_docstring()
def test_attributes_to_columns(datadir_mgr):
"""Test attributes to columns."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
gff3_df_with_attr_columns = gff3_df.attributes_to_columns()
assert gff3_df_with_attr_columns.shape == (11, 23)
assert gff3_df_with_attr_columns.shape == written_attribute_df.shape
assert type(gff3_df_with_attr_columns) == type(written_attribute_df)
pd.testing.assert_frame_equal(gff3_df_with_attr_columns, written_attribute_df)
@print_docstring()
def test_stats_dic(datadir_mgr):
"""Test stats dictionary."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
stats_dict = gff3_df.stats_dic()
assert type(stats_dict) == type(compare_stats_dic)
assert stats_dict.keys() == compare_stats_dic.keys()
assert stats_dict["Maximal_bp_length"] == compare_stats_dic["Maximal_bp_length"]
assert stats_dict["Minimal_bp_length"] == compare_stats_dic["Minimal_bp_length"]
assert stats_dict["Counted_strands"] == compare_stats_dic["Counted_strands"]
assert (
stats_dict["Counted_feature_types"]
== compare_stats_dic["Counted_feature_types"]
)
@print_docstring()
def test_overlaps_with(datadir_mgr):
"""Test finding overlaps."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
overlap_gene_1_40 = gff3_df.overlaps_with(
seq_id="NC_016810.1", type="gene", start=1, end=40, strand="+"
)
overlap_40_300 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=40, end=300, strand="+"
)
overlap_170_171 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=170, end=171, strand="-"
)
overlap_525_545 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=525, end=545, strand="+"
)
overlap_341_500 = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=341, end=500, strand="+"
)
complement_test = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=40, end=300, strand="+", complement=True
)
out_of_region = gff3_df.overlaps_with(
seq_id="NC_016810.1", start=1, end=4000, strand="+", complement=True
)
assert type(overlap_gene_1_40) == gff3pd.Gff3DataFrame
assert type(overlap_40_300) == gff3pd.Gff3DataFrame
assert type(overlap_170_171) == gff3pd.Gff3DataFrame
assert type(overlap_525_545) == gff3pd.Gff3DataFrame
assert type(overlap_341_500) == gff3pd.Gff3DataFrame
assert type(complement_test) == gff3pd.Gff3DataFrame
assert type(out_of_region) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(overlap_gene_1_40.df, compare_overlap_gene_1_40)
pd.testing.assert_frame_equal(overlap_40_300.df, compare_overlap_40_300)
pd.testing.assert_frame_equal(overlap_170_171.df, compare_overlap_170_171)
pd.testing.assert_frame_equal(overlap_525_545.df, compare_overlap_525_545)
pd.testing.assert_frame_equal(overlap_341_500.df, compare_overlap_341_500)
pd.testing.assert_frame_equal(complement_test.df, compare_complement)
assert out_of_region.df.shape == df_empty.shape
@print_docstring()
def test_find_duplicated_entries(datadir_mgr):
"""Test finding duplicated entries."""
with datadir_mgr.in_tmp_dir(inpathlist=TESTFILELIST):
gff3_df = generate_gff3_df()
redundant_df = gff3_df.find_duplicated_entries(
seq_id="NC_016810.1", type="gene"
)
redundant_df2 = gff3_df.find_duplicated_entries(
seq_id="NC_016810.1", type="CDS"
)
assert type(redundant_df) == gff3pd.Gff3DataFrame
assert type(redundant_df2) == gff3pd.Gff3DataFrame
pd.testing.assert_frame_equal(redundant_df.df, redundant_entry)
assert redundant_df2.df.shape == df_empty.shape
assert redundant_df.df.empty == redundant_entry.empty
def test_read_human_genome_gff(datadir_mgr):
"""Test reading the human genome GFF."""
with datadir_mgr.in_tmp_dir(inpathlist=[HUMAN_GFF]):
start = time.time()
human_gff = gff3pd.read_gff3(HUMAN_GFF)
delta = time.time() - start
print(f"Test reading the human genome GFF ({delta:.1f} s).")
assert len(human_gff.df) == 3706805
``` |
{
"source": "joelb349/truebayes",
"score": 2
} |
#### File: truebayes/truebayes/loss.py
```python
import math
import numpy as np
import torch
from truebayes.geometry import qdim
from truebayes.utils import numpy2cuda, cuda2numpy
def lossfunction(o, l: 'indicator'):
"""MSE loss for DNN histogram output, labels represented as indicator arrays."""
return torch.mean(torch.sum(o**2,dim=1) - 2*torch.sum(o*l,dim=1))
def kllossfunction(o, l: 'indicator'):
"""KL loss for DNN histogram output, labels represented as indicator arrays."""
return -torch.mean(2*torch.sum(torch.log(o)*l, dim=1))
def lossG1(o, l: 'xtrue'):
"""MSE loss for normal-PDF output (represented as a mean/variance pair)."""
# since int N^2(x;x0,s) dx = 1/(2 sqrt(pi) s)
# the sqerr loss is 1/(2 sqrt(pi) s) - 2 * e^{-(x_tr - x0)^2/2 s^2} / sqrt(2 pi s^2)
# multiplying by 2 sqrt(pi)
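    # i.e. (1 - 2*sqrt(2)*exp(-(x_tr - x0)^2 / (2 s^2))) / s, which is the
    # expression returned below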
return torch.mean((1 - 2*math.sqrt(2)*torch.exp(-0.5*(l - o[:,0])**2/o[:,1]**2)) / o[:,1])
def kllossGn(o, l: 'xtrue'):
"""KL loss for Gaussian-mixture output (represented as a vector of concatenated mean/variance/weight triples)."""
x0 = o[:,0::3]
std = o[:,1::3]
weight = torch.softmax(o[:,2::3], dim=1)
# numerically unstable
# return -torch.mean(2*torch.log(torch.sum(weight * torch.exp(-0.5*(x0 - l[:,np.newaxis])**2/std**2) / torch.sqrt(2 * math.pi * std**2),dim=1)))
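    # logsumexp evaluates log(sum_i w_i N(l; x0_i, s_i)) without underflow in the exponentials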
return -torch.mean(torch.logsumexp(torch.log(weight) - 0.5*(x0 - l[:,np.newaxis])**2/std**2 - 0.5*torch.log(2 * math.pi * std**2), dim=1))
def netmeanGn(inputs, net=None, single=True):
if isinstance(inputs, np.ndarray):
inputs = numpy2cuda(inputs, single)
pars = cuda2numpy(net(inputs))
dx = pars[:,0::3]
std = pars[:,1::3]
pweight = torch.softmax(torch.from_numpy(pars[:,2::3]),dim=1).numpy()
# see https://en.wikipedia.org/wiki/Mixture_distribution
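    # mixture mean:      E[x]   = sum_i w_i * mu_i
    # mixture variance:  Var[x] = sum_i w_i * (mu_i^2 + s_i^2) - E[x]^2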
xmean = np.sum(pweight * dx, axis=1)
xerr = np.sqrt(np.sum(pweight * (dx**2 + std**2), axis=1) - xmean**2)
return xmean, xerr
def kllossfunction2(o, l: 'indicator'):
"""KL loss over 2-D histogram."""
q = o.reshape((o.shape[0], qdim, qdim))
return torch.mean(-torch.sum(torch.log(q)*l, dim=[1,2]))
def kllossGn2(o, l: 'xtrue'):
"""KL loss for Gaussian-mixture output, 2D, precision-matrix parameters."""
dx = o[:,0::6] - l[:,0,np.newaxis]
dy = o[:,2::6] - l[:,1,np.newaxis]
# precision matrix is positive definite, so has positive diagonal terms
Fxx = o[:,1::6]**2
Fyy = o[:,3::6]**2
    # positive definiteness also requires |Fxy| < sqrt(Fxx*Fyy); the atan map
    # below keeps the off-diagonal term inside that bound
    Fxy = torch.atan(o[:,4::6]) / (0.5*math.pi) * o[:,1::6] * o[:,3::6]
weight = torch.softmax(o[:,5::6], dim=1)
    # omitting the sqrt(4*math.pi) factor since it's common to all templates
return -torch.mean(torch.logsumexp(torch.log(weight) - 0.5*(Fxx*dx*dx + Fyy*dy*dy + 2*Fxy*dx*dy) + 0.5*torch.log(Fxx*Fyy - Fxy*Fxy), dim=1)), dx, dy
def netmeanGn2(inputs, net=None, single=True):
if isinstance(inputs, np.ndarray):
inputs = numpy2cuda(inputs, single)
pars = cuda2numpy(net(inputs))
dx, dy = pars[:,0::6], pars[:,2::6]
Fxx, Fyy = pars[:,1::6]**2, pars[:,3::6]**2
Fxy = np.arctan(pars[:,4::6]) / (0.5*math.pi) * pars[:,1::6] * pars[:,3::6]
det = Fxx*Fyy - Fxy*Fxy
Cxx, Cyy, Cxy = Fyy/det, Fxx/det, -Fxy/det
pweight = torch.softmax(torch.from_numpy(pars[:,5::6]),dim=1).numpy()
xmean, ymean = np.sum(pweight * dx, axis=1), np.sum(pweight * dy, axis=1)
xerr, yerr = np.sqrt(np.sum(pweight * (dx**2 + Cxx), axis=1) - xmean**2), np.sqrt(np.sum(pweight * (dy**2 + Cyy), axis=1) - ymean**2)
xycov = np.sum(pweight * (dx*dy + Cxy), axis=1) - xmean*ymean
return np.vstack((xmean, ymean)).T, np.vstack((xerr, yerr)).T, xycov
def sqerr(o, l: 'xtrue'):
"""Squared error loss for estimator output."""
return torch.mean((o - l)**2)
```
#### File: truebayes/truebayes/utils.py
```python
import torch
def numpy2cuda(array, single=True):
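    """Convert a NumPy array to a torch tensor (float32 if single), moving it to the GPU when available."""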
array = torch.from_numpy(array)
if single:
array = array.float()
if torch.cuda.is_available():
array = array.cuda()
return array
def cuda2numpy(tensor):
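    """Detach a torch tensor and return it as a NumPy array on the CPU."""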
return tensor.detach().cpu().numpy()
``` |
{
"source": "joelbader/regulon-enrichment",
"score": 2
} |
#### File: joelbader/regulon-enrichment/lit_microarray_enrichment_analysis.py
```python
import pandas as pd
import numpy as np
import mhyp_enrich as mh
import pdb
import time
import math
import statsmodels.stats.multitest as mt
import random
from scipy import stats as st
from scipy.stats import beta
def main():
num_MC_samp = 1000000 # Number of Monte-Carlo samples to use
alt = 'two-sided'
random.seed(525601)
if 1:
# Create Pickle for fast loading of the data
tfoe_FC_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(0,210)))
tfoe_FC_df.to_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(210,420)))
tfoe_pval_df.to_pickle('Analysis_Output/tfoe_pval.pkl')
else:
# Load Pickles (much faster than reading excel files)
tfoe_FC_df = pd.read_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_pickle('Analysis_Output/tfoe_pval.pkl')
# Remove TFs (from both dfs) with less than 0.5 l2FC up.
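    # (the diagonal entry tfoe_FC_df.loc[name, name] is the TF's own fold change
    # in its overexpression strain)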
to_keep = [tfoe_FC_df.loc[name,name] > 0.5 for name in list(tfoe_FC_df.columns.values)]
tfoe_FC_df = tfoe_FC_df.loc[:, to_keep]
tfoe_pval_df = tfoe_pval_df.loc[:, to_keep]
# Create new df with 1 = UP, -1 = DOWN, 0 = NOCALL for each TF
col_up_down_ls = list()
for i,c in enumerate(tfoe_FC_df.columns.values):
new_col = pd.DataFrame({'Rv': tfoe_FC_df.index, c: 0}).set_index('Rv')
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] > 1.0))] = 1 #called upregulated
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] < -1.0))] = -1 #called downregulated
col_up_down_ls.append(new_col)
tfoe_call_df = pd.concat(col_up_down_ls,axis=1)
# Read in RNA-seq data to get NCBI Descriptions
hyp_rnaseq = pd.read_csv("Analysis_Output/7H9vshyp_low-read-rm.csv").rename(columns={"Rv.Homologs..NCBI.":"Rv#","Annotations..NCBI.":"Description"})
ncbi_desc = hyp_rnaseq[["Rv#","Description"]]
# Read in and format Voskuil Hypoxia data
hyp_rna_arr = pd.read_excel('Downloads/1-s2.0-S147297920400023X-mmc1.xls',sheetname='Sheet1', header=3, skip_footer = 0, parse_cols = [0,63])
hyp_rna_arr['Ave.'] = pd.to_numeric(hyp_rna_arr['Ave.'], errors='coerce')
hyp_rna_arr = hyp_rna_arr.dropna(how = 'any',axis=0) #Remove genes where data is missing.
def RV_to_Rv(x):
# Converts the format of the Rv numbers so that merge will work.
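        # e.g. "RV0001C" -> "Rv0001c"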
x = x[0] + x[1].lower() + x[2:]
x = x[0:-1] + x[-1].lower()
return x
hyp_rna_arr['Rv#'] = hyp_rna_arr['Rv#'].apply(RV_to_Rv)
hyp_rna_arr['log2FC_hyp'] = hyp_rna_arr['Ave.'].apply(lambda x: math.log2(x))
hyp_rna_arr = hyp_rna_arr.merge(ncbi_desc,how='left',on='Rv#')
# Read in a format Betts PBS data
pbs_rna_arr_up = pd.read_excel('Downloads/MMI_2779_sm_sup.xlsx',sheetname='RESUP',header=0, skip_footer = 0, parse_cols = [0,1,3,6])
pbs_rna_arr_down = pd.read_excel('Downloads/MMI_2779_sm_sup.xlsx',sheetname='RESDOWN',header=0, skip_footer = 0, parse_cols = [0,1,3,6])
pbs_rna_arr = pd.concat([pbs_rna_arr_up,pbs_rna_arr_down])
pbs_rna_arr = pbs_rna_arr[pbs_rna_arr['Time'] == 't3'].drop(['Time'],axis=1)
pbs_rna_arr = pbs_rna_arr.rename(columns = {'Gene':'Rv#', 'P-value':'pval', 'Log ratio':'log2FC_pbs'})
pbs_rna_arr['log2FC_pbs'] = pbs_rna_arr['log2FC_pbs'].apply(lambda x: x*(math.log(10,2))) #Convert to base 2.
    pbs_rna_arr.loc[pbs_rna_arr['pval'] == '<.000001', 'pval'] = '0.000001'  # replace censored p-values before numeric conversion (avoids chained-assignment warning)
pbs_rna_arr['pval'] = pd.to_numeric(pbs_rna_arr['pval'])
pbs_rna_arr = pbs_rna_arr.merge(ncbi_desc,how='left',on='Rv#')
# Call each gene from microarray data as UP = 1, DOWN = -1, NOCALL = 0.
hyp_rna_arr['rna_arr_data'] = 0
    hyp_rna_arr.loc[hyp_rna_arr['Ave.'] > 1.6, 'rna_arr_data'] = 1  # upregulated
    hyp_rna_arr.loc[hyp_rna_arr['Ave.'] < 1/1.6, 'rna_arr_data'] = -1  # downregulated
hyp_rna_arr = hyp_rna_arr.set_index('Rv#')[['rna_arr_data','log2FC_hyp','Description']]
pbs_rna_arr['rna_arr_data'] = 0
    pbs_rna_arr.loc[(pbs_rna_arr['log2FC_pbs'] > 1) & (pbs_rna_arr['pval'] < .001), 'rna_arr_data'] = 1  # upregulated
    pbs_rna_arr.loc[(pbs_rna_arr['log2FC_pbs'] < -1) & (pbs_rna_arr['pval'] < .001), 'rna_arr_data'] = -1  # downregulated
pbs_rna_arr = pbs_rna_arr.set_index('Rv#')[['rna_arr_data','log2FC_pbs','Description']]
both_rna_arr = hyp_rna_arr.merge(pbs_rna_arr.drop(['Description'],axis=1),how='outer',left_index=True,right_index=True) #Note: This puts nans for any gene not appearing in both datasets. Betts only included ~3000 genes in the published dataset. The reason for the missing genes is unknown - it could be that they failed QC.
both_rna_arr['rna_arr_data'] = 0
both_rna_arr.loc[(both_rna_arr['rna_arr_data_x'] > 0) & (both_rna_arr['rna_arr_data_y'] > 0), 'rna_arr_data'] = 1
both_rna_arr.loc[(both_rna_arr['rna_arr_data_x'] < 0) & (both_rna_arr['rna_arr_data_y'] < 0), 'rna_arr_data'] = -1
both_rna_arr = both_rna_arr[['rna_arr_data','log2FC_hyp','log2FC_pbs','Description']]
# scores_df,cont_tables_ls = mh.find_enriched_regs(tfoe_call_df,both_rna_arr,num_MC_samp,alt)
scores_hyp_df,cont_hyp_ls = mh.find_enriched_regs(tfoe_call_df,hyp_rna_arr,num_MC_samp,alt)
scores_pbs_df,cont_pbs_ls = mh.find_enriched_regs(tfoe_call_df,pbs_rna_arr,num_MC_samp,alt)
if 1:
#Write individual tf scores (and p-values) to file
# with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp+pbs.csv', 'w') as fp:
# scores_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','log2FC_pbs','Description']].to_csv(fp)
#For hyp and pbs individually:
with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp.csv', 'w') as fp:
scores_hyp_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','Description']].to_csv(fp)
with open('Analysis_Output/lit_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_pbs.csv', 'w') as fp:
scores_pbs_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_pbs','Description']].to_csv(fp)
if 1:
#Write confusion matrices for TFs out to file
# writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_hyp+pbs.xlsx')
# for x in cont_tables_ls:
# if isinstance(x[0],pd.DataFrame):
# x[0].to_excel(writer, sheet_name=x[1])
# writer.save()
# Write out confusion matrices for hyp, pbs individually.
writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_hyp_only.xlsx')
for x in cont_hyp_ls:
if isinstance(x[0],pd.DataFrame):
x[0].to_excel(writer, sheet_name=x[1])
writer.save()
writer = pd.ExcelWriter('Analysis_Output/lit_confusion_matrices_tf_pbs_only.xlsx')
for x in cont_pbs_ls:
if isinstance(x[0],pd.DataFrame):
x[0].to_excel(writer, sheet_name=x[1])
writer.save()
return(0)
if __name__ == "__main__":
main()
```
#### File: joelbader/regulon-enrichment/make_dict_V735_Rv_MT_annot.py
```python
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast import NCBIXML
from Bio import SeqIO
from Bio import Seq
import subprocess
import fileinput
def main():
map_file = open("V735_to_Rv_MT_annot_mapping.csv",'w') # File to store the mapping
#return_code = subprocess.call("blast_bin/blastp -out blast_results.xml -outfmt 5 -query CDC1551_proteins.fa -db old_CDC1551_blast_db/old_CDC1551 -evalue .001 -max_hsps 1 -max_target_seqs 50", shell=True)
V735_records = SeqIO.parse("CDC1551_genes.fa","fasta")
result_handle = open("blast_results.xml")
blast_records = NCBIXML.parse(result_handle)
V735_to_MT_dict = {} # Initialize dictionary to hold mapping
V735_to_Rv_dict = {} # Initialize dictionary to hold mapping
V735_to_annotation_dict = {} # Initialize dictionary to hold mapping
i = -1
for blast_record in blast_records:
V735_record = next(V735_records)
i = i+1
#print(i)
V735_num, Rv_num, annotation = V735_record.description.split(sep = '|', maxsplit=3)[0:3]
annotation = annotation[1:].replace(',',';')
V735_to_Rv_dict[V735_num] = Rv_num
V735_to_annotation_dict[V735_num] = annotation
if blast_record.alignments:
evalue = ""
MT_num = ""
for alignment in blast_record.alignments:
evalue = evalue + str(alignment.hsps[0].expect) + ' '
MT_num = MT_num + alignment.title.split(sep = '|', maxsplit=2)[0] + ' '
map_file.write(V735_num + ',' + Rv_num + ',' + MT_num + ',' + str(evalue) + ',' + annotation + '\n')
V735_to_MT_dict[V735_num] = MT_num
else:
map_file.write(V735_num + ',' + Rv_num + ',' + 'None' + ',' + 'NA' + ',' + annotation + '\n')
if __name__ == "__main__":
main()
```
#### File: joelbader/regulon-enrichment/rnaseq_enrichment_analysis.py
```python
import pandas as pd
import numpy as np
import pdb
import time
import mhyp_enrich as mh
import statsmodels.stats.multitest as mt
import random
from scipy import stats as st
from scipy.stats import beta
def main():
num_MC_samp = 1000000 # Number of Monte-Carlo samples to use
alt = 'two-sided' # Should the Monte-Carlo test be one or two-sided?
random.seed(525600)
## Consider functionalizing this section:
if 1:
# Create Pickle for fast loading of the data
tfoe_FC_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(0,210)))
tfoe_FC_df.to_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_excel('Downloads/tfoe.searchable_130115.xlsx',sheetname='TFOE.data', header=9, skip_footer = 3, index_col = 0, parse_cols = list(range(210,420)))
tfoe_pval_df.to_pickle('Analysis_Output/tfoe_pval.pkl')
else:
# Load Pickles (much faster than reading excel files)
tfoe_FC_df = pd.read_pickle('Analysis_Output/tfoe_FC.pkl')
tfoe_pval_df = pd.read_pickle('Analysis_Output/tfoe_pval.pkl')
# Remove TFs (from both dfs) with less than 0.5 l2FC up.
to_keep = [tfoe_FC_df.loc[name,name] > 0.5 for name in list(tfoe_FC_df.columns.values)]
tfoe_FC_df = tfoe_FC_df.loc[:, to_keep]
tfoe_pval_df = tfoe_pval_df.loc[:, to_keep]
# Create new df with 1 = UP, -1 = DOWN, 0 = NOCALL for each TF
col_up_down_ls = list()
for i,c in enumerate(tfoe_FC_df.columns.values):
new_col = pd.DataFrame({'Rv#': tfoe_FC_df.index, c: 0}).set_index('Rv#')
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] > 1.0))] = 1 #called upregulated
new_col[((tfoe_pval_df[c] < .01) & (tfoe_FC_df[c] < -1.0))] = -1 #called downregulated
col_up_down_ls.append(new_col)
tfoe_call_df = pd.concat(col_up_down_ls,axis=1)
##
# Read in Analysis_Output/7H9vshyp_low-read.csv and 7H9vsPBS_low-read.csv into pandas array.
hyp_rnaseq = pd.read_csv("Analysis_Output/7H9vshyp_low-read-rm.csv")
pbs_rnaseq = pd.read_csv("Analysis_Output/7H9vsPBS_low-read-rm.csv")
hyp_rnaseq = hyp_rnaseq.drop(["Unnamed: 0","Evalues.of.MT.Homologs..BLAST.","MT.Homologs..BLAST.","Functional.Category","baseMean","lfcSE","Expression Direction (Hypoxia/7H9)"], axis=1)
pbs_rnaseq = pbs_rnaseq.drop(["Unnamed: 0","Evalues.of.MT.Homologs..BLAST.","MT.Homologs..BLAST.","Functional.Category","baseMean","lfcSE","Expression Direction (PBS/7H9)"], axis=1)
# Call each gene from RNA-seq data as UP = 1, DOWN = -1, NOCALL = 0.
#hyp_rnaseq['rnaseq_data'] = 0
hyp_rnaseq.insert(0,'rnaseq_data',0)
pbs_rnaseq.insert(0,'rnaseq_data',0)
hyp_rnaseq.loc[((hyp_rnaseq.padj < .05) & (hyp_rnaseq.log2FoldChange > 1.0)),'rnaseq_data'] = 1 #upregulated
hyp_rnaseq.loc[((hyp_rnaseq.padj < .05) & (hyp_rnaseq.log2FoldChange < -1.0)),'rnaseq_data'] = -1 #downregulated
pbs_rnaseq.loc[((pbs_rnaseq.padj < .05) & (pbs_rnaseq.log2FoldChange > 1.0)),'rnaseq_data'] = 1 #upregulated
pbs_rnaseq.loc[((pbs_rnaseq.padj < .05) & (pbs_rnaseq.log2FoldChange < -1.0)),'rnaseq_data'] = -1 #downregulated
hyp_rnaseq = hyp_rnaseq.rename(columns={'Rv.Homologs..NCBI.':'Rv#','Annotations..NCBI.':'Description','log2FoldChange':'log2FC_hyp'}).set_index('Rv#',drop=True)
pbs_rnaseq = pbs_rnaseq.rename(columns={'Rv.Homologs..NCBI.':'Rv#','Annotations..NCBI.':'Description','log2FoldChange':'log2FC_pbs'}).set_index('Rv#',drop=True)
# Combine hyp_rnaseq$rnaseq_data with pbs_rnaseq
both_rnaseq = hyp_rnaseq[['rnaseq_data']].copy()
both_rnaseq['log2FC_hyp'] = hyp_rnaseq['log2FC_hyp']
both_rnaseq['log2FC_pbs'] = pbs_rnaseq['log2FC_pbs']
both_rnaseq['Description'] = pbs_rnaseq['Description']
# both_rnaseq$rnaseq_data = 0 if genes go in opposite directions, otherwise (1, -1) for (UP, DOWN) relative to 7H9-log phase.
both_rnaseq['rnaseq_data'] = 0
both_rnaseq.loc[(hyp_rnaseq['rnaseq_data'] > 0) & (pbs_rnaseq['rnaseq_data'] > 0), 'rnaseq_data'] = 1
both_rnaseq.loc[(hyp_rnaseq['rnaseq_data'] < 0) & (pbs_rnaseq['rnaseq_data'] < 0), 'rnaseq_data'] = -1
#scores_df,cont_tables_ls = mh.find_enriched_regs(tfoe_call_df,both_rnaseq,num_MC_samp,alt)
scores_hyp_df,cont_hyp_ls = mh.find_enriched_regs(tfoe_call_df,hyp_rnaseq,num_MC_samp,alt)
scores_pbs_df,cont_pbs_ls = mh.find_enriched_regs(tfoe_call_df,pbs_rnaseq,num_MC_samp,alt)
if 1:
#Write individual tf scores (and p-values) to file
# with open('Analysis_Output/rnaseq_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp+pbs.csv', 'w') as fp:
# scores_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','log2FC_pbs','Description']].to_csv(fp)
with open('Analysis_Output/rnaseq_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_hyp.csv', 'w') as fp:
scores_hyp_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_hyp','Description']].to_csv(fp)
with open('Analysis_Output/rnaseq_tf_scores'+'_'+str(num_MC_samp)+'_'+alt+'_pbs.csv', 'w') as fp:
scores_pbs_df[['Pvalue','mu-score','FET Pvalue','BY corrected Pvalue','log2FC_pbs','Description']].to_csv(fp)
if 1:
#Write confusion matrices for TFs out to file
# writer = pd.ExcelWriter('Analysis_Output/rnaseq_confusion_matrices_tf_hyp+pbs.xlsx')
# for x in cont_tables_ls:
# if isinstance(x[0],pd.DataFrame):
# x[0].to_excel(writer, sheet_name=x[1])
# writer.save()
# Write out confusion matrices for hyp, pbs individually.
writer = pd.ExcelWriter('Analysis_Output/rnaseq_confusion_matrices_tf_hyp_only.xlsx')
for x in cont_hyp_ls:
if isinstance(x[0],pd.DataFrame):
x[0].to_excel(writer, sheet_name=x[1])
writer.save()
writer = pd.ExcelWriter('Analysis_Output/rnaseq_confusion_matrices_tf_pbs_only.xlsx')
for x in cont_pbs_ls:
if isinstance(x[0],pd.DataFrame):
x[0].to_excel(writer, sheet_name=x[1])
writer.save()
return(0)
if __name__ == "__main__":
main()
``` |
{
"source": "joelbarmettlerUZH/AnySound_Recorder",
"score": 3
} |
#### File: joelbarmettlerUZH/AnySound_Recorder/Application.py
```python
from Screenshot import ScreenInfo
from Soundrecorder import Recorder
import time
import threading
import os
import random
class ScreenSound():
def __init__(self):
self._info = self.setNameFinder()
self._recorder = Recorder()
self._songs = []
# Ask user to hover over top-left and bottom-right corner of screen part that holds the song name
def setNameFinder(self):
print("Hover still over top-left location where name is displayed for 1 second")
x1, y1 = ScreenInfo.pickLocation()
print("Hover still over bottom-right location where name is displayed for 1 second")
x2, y2 = ScreenInfo.pickLocation()
# Return a ScreenInfo built from the 4 corner coordinates
return ScreenInfo((x1, y1, x2, y2))
def recordSong(self, folder=""):
# Identify song name
songname = self._info.getText()
# Rename the song if it already exists. Comment out these two lines if you do not wish that behaviour.
if os.path.isfile(songname+".mp3"):
songname += " - Copy "+str(random.randint(10000,100000))
# Start new thread that records the music
print("Recording: "+songname)
threading._start_new_thread(self._recorder.start, ())
# Wait for a change in the songname to happen
while(True):
self._info.update()
if songname != self._info.getText():
break
time.sleep(0.5)
# Stop recording if change is detected
self._recorder.stop()
# Force entering other thread to stop the song
time.sleep(0.01)
# Song stopped, now save it to folder
self._recorder.save(folder+songname)
self._songs.append(songname)
def recordFor(self, seconds, folder=""):
#Record all songs into the folder for X seconds.
if not os.path.exists(folder):
os.makedirs(folder)
start = time.time()
# Check whether X seconds have passed.
while(time.time() - start < seconds):
self.recordSong(folder)
return self._songs
if __name__ == "__main__":
s = ScreenSound()
output_folder = "./records/"
#Record songs
print(s.recordFor(60*10, folder=output_folder))
# Convert all songs from wav to mp3
for song in os.listdir(output_folder):
if song.endswith(".wav"):
Recorder.wavTomp3(output_folder+song)
``` |
{
"source": "joelbarmettlerUZH/AudioRec",
"score": 3
} |
#### File: AudioRec/AudioRec/Recorder.py
```python
import pyaudio
import wave
import subprocess
import os
import time
import threading
class Recorder():
#Defines sound properties like frequency and channels
def __init__(self, chunk=1024, channels=2, rate=44100):
self.CHUNK = chunk
self.FORMAT = pyaudio.paInt16
self.CHANNELS = channels
self.RATE = rate
self._running = True
self._frames = []
#Start recording sound
def start(self):
threading._start_new_thread(self.__recording, ())
def __recording(self):
#Set running to True and reset previously recorded frames
self._running = True
self._frames = []
#Create pyaudio instance
p = pyaudio.PyAudio()
#Open stream
stream = p.open(format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
input=True,
frames_per_buffer=self.CHUNK)
# To stop the streaming, new thread has to set self._running to false
# append frames array while recording
while(self._running):
data = stream.read(self.CHUNK)
self._frames.append(data)
# Interrupted, stop the stream and close it. Terminate the pyaudio process.
stream.stop_stream()
stream.close()
p.terminate()
# Sets boolean to false. New thread needed.
def stop(self):
self._running = False
#Save the recorded frames to the filename location as a WAV (waveform) file.
def save(self, filename):
print("Saving")
p = pyaudio.PyAudio()
if not filename.endswith(".wav"):
filename = filename + ".wav"
wf = wave.open(filename, 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(p.get_sample_size(self.FORMAT))
wf.setframerate(self.RATE)
wf.writeframes(b''.join(self._frames))
wf.close()
print("Saved")
# Delete a file
@staticmethod
def delete(filename):
os.remove(filename)
# Convert wav to mp3 with same name using ffmpeg.exe
@staticmethod
def wavTomp3(wav):
mp3 = wav[:-3] + "mp3"
# Remove file if existent
if os.path.isfile(mp3):
Recorder.delete(mp3)
# Call CMD command
subprocess.call('ffmpeg -i "'+wav+'" "'+mp3+'"')
if __name__ == "__main__":
rec = Recorder()
print("Start recording")
rec.start()
time.sleep(5)
print("Stop recording")
rec.stop()
print("Saving")
rec.save("test.wav")
print("Converting wav to mp3")
Recorder.wavTomp3("test.wav")
print("deleting wav")
Recorder.delete("test.wav")
``` |
{
"source": "joelbarmettlerUZH/Hurricane_data_Visualization",
"score": 3
} |
#### File: joelbarmettlerUZH/Hurricane_data_Visualization/DataSet.py
```python
import requests
import gzip
import numpy as np
import os.path
import shutil
class DataSet(object):
def __init__(self, category="temperature", timestep=1):
self._category = category
self._timestep = timestep
# defined values for a data record
self.XDIM = 500
self.YDIM = 500
self.ZDIM = 100
self.TDIM = 1
self.ZMIN = 35
self.ZMAX = 19835
self.FROMLAT = 23.7
self.TOLAT = 41.7
self.FROMLON = -83
self.TOLON = -62
self.__data = self.getPackage(timestep, category)
#downloads a certain package, unzips it and returns the data as a numpy array
def getPackage(self, packagenumber, variable):
#convert names to the pseudonames used online
code = {
"moisture": "QCLOUD",
"graupel": "QGRAUP",
"ice": "QICE",
"snow": "QSNOW",
"vapor": "QVAPOR",
"cloud": "CLOUD",
"precipitation": "PRECIP",
"presure": "P",
"temperature": "TC",
"wind-x": "U",
"wind-y": "V",
"wind-z": "W"
}
var = code[variable]
#bring the package number into the form "0X" when it is only one digit long
if packagenumber < 10:
number = "0"+str(packagenumber)
else:
number = str(packagenumber)
#if the unzipped file already exists under its original name, rename it accordingly
if os.path.isfile(var + "f" + number + ".bin"):
os.rename(var + "f" + number + ".bin", variable + number + ".bin")
#if the unzipped file already exists, read it and return its content as a numpy array
if os.path.isfile(variable + number + ".bin"):
print("-File already downloaded and unzipped, skipping.")
return np.fromfile(variable + number + ".bin", dtype='>f')
#If package .gz already exists but with old / original name, rename it
if os.path.isfile(var+"f"+number+".bin.gz"):
os.rename(var+"f"+number+".bin.gz", variable+number+".bin.gz")
#if the .gz package exists, check whether it has the expected file size. This is not the case when a download was interrupted or the file is corrupted
if os.path.isfile(variable + number + ".bin.gz") and (os.path.getsize(variable + number + ".bin.gz")
!= int(requests.head("http://www.vets.ucar.edu/vg/isabeldata/"+var+"f"+number+".bin.gz",headers={'Accept-Encoding': 'identity'}).headers['content-length'])):
print("-Similar file exists, but seems to be corrupted. Downloading it again to be sure to have valid data")
#remove the file when it is corrupted so it will be downloaded again
os.remove(variable + number + ".bin.gz")
#if the .gz file still exists at this point, unzip it and do not download it again
if os.path.isfile(variable+number+".bin.gz"):
print("-File already downloaded, skipping.")
return self.unzip(variable + number + ".bin.gz")
#get the bin.gz file from ucar.edu via a request because it does not exist yet
print("-Downloading " + var + number + ". This may take a few minutes")
#Try to establish a download connection
try:
request = requests.get("http://www.vets.ucar.edu/vg/isabeldata/"+var+"f"+number+".bin.gz", stream=True)
except:
print("File could not be downloaded. Please download file manually and place in folder, then restart the software. \nHere ist the link: http://www.vets.ucar.edu/vg/isabeldata/"+var+"f"+number+".bin.gz")
exit()
print("-Saving File to Disk")
#save the request content to a file on the local disk
with open(variable+number+".bin.gz", "wb") as file:
shutil.copyfileobj(request.raw, file)
#unzip file and return the unzipped values
return self.unzip(variable+number+".bin.gz")
#unzips a .gz file and returns its content
def unzip(self, name):
new_filename = name.replace(".gz","")
#only unzip file when it is not already unzipped
if os.path.isfile(new_filename):
print("-File already unzipped, skipping.")
else:
print("-Unzipping file")
#open file as gzip file
with gzip.open(name, "rb") as readfile:
#read the content and save it back to disk
fileContent = readfile.read()
with open(new_filename, "wb") as writefile:
writefile.write(fileContent)
#open unzipped file and return its content as np array
return np.fromfile(new_filename, dtype='>f')
#returns a datarecord for x,y,z and t values
def getRecord(self, x, y, z, t=1, treshold=1000000):
#find the datarecord in the datalist
record = self.__data[x + self.XDIM * (y + self.YDIM * z)]
#return the record directly if its magnitude is within the threshold range
if abs(record) < treshold:
return record
#otherwise we suspect an outlier and fall back to the neighbouring record in the x direction
return self.getRecord(x+1, y, z, t, treshold)
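# A minimal usage sketch (illustrative only): each variable/timestep is a 500x500x100
# grid of big-endian float32 values, i.e. roughly 100 MB uncompressed once downloaded.
#   ds = DataSet(category="temperature", timestep=1)
#   print(ds.getRecord(250, 250, 50))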
``` |
{
"source": "joelbarmettlerUZH/Image_Manipulation_Bokeh_Server",
"score": 3
} |
#### File: joelbarmettlerUZH/Image_Manipulation_Bokeh_Server/dashboard.py
```python
from bokeh.io import output_file, show
from bokeh.layouts import layout, column, row
from bokeh.models.widgets import Panel, Tabs, Slider
from bokeh.plotting import curdoc
from bokeh.models import ColumnDataSource
from images_class import *
from bokeh.client import push_session
from bokeh.driving import cosine
#The dashboard .py file serves to call the different image transformations out of the images_class
#The output file is dashboard.html when no server is started. See the last lines for further explanation.
output_file("dashboard.html", title="Processed Photo - Dashboard")
#Open the image "photo.jpeg" as an original image, do not transform it and assign its figure to p1_img for later plotting
p1 = Original_Image("photo.jpeg")
p1_img = p1.create_img()
#Open the image "photo.jpeg", separate its color value "RED" and assign its figure to p2_img for later plotting
p2 = Colorchannel_Image("photo.jpeg","R")
p2.transform_img()
p2_img = p2.create_img()
#Open the image "photo.jpeg", separate its color value "GREEN" and assign its figure to p3_img for later plotting
p3 = Colorchannel_Image("photo.jpeg","G")
p3.transform_img()
p3_img = p3.create_img()
#Open the image "photo.jpeg", separate its color value "BLUE" and assign its figure to p4_img for later plotting
p4 = Colorchannel_Image("photo.jpeg","B")
p4.transform_img()
p4_img = p4.create_img()
#Open the image "photo.jpeg", convert it to greyscale in the images_class and assign its figure to p5_t1_img for later plotting, assign it to a tab
p5_t1 = Greyscale_Image("photo.jpeg", [0.3, 0.59, .11])
p5_t1.transform_img()
p5_t1_img = p5_t1.create_img()
tab1 = Panel(child=p5_t1_img, title="Greyscale")
#Open the image "photo.jpeg", calculate the color cubes, redefine each color to its nearest color-cube value and assign its figure to p5_t2_img for later plotting, assign it to a tab
bits = 16
p5_t2 = Mediancut_Image("photo.jpeg", bits, threading=False)
p5_t2.transform_img()
p5_t2_img = p5_t2.create_img()
tab2 = Panel(child=p5_t2_img, title="{}-Bit".format(str(bits)))
#Open the image "photo.jpeg", add random noise to it and assign its figure to p6_img. Add a slider to it to let the user input values
p6 = Noise_Image("photo.jpeg")
image = p6.transform_img(.1)
source = ColumnDataSource({'image': [image]})
p6_img = figure(title="Photo, Noise", x_range=(0, len(image[0])), y_range=(0, len(image)), plot_width=len(image[0]) // size, plot_height=len(image) // size, toolbar_location=None)
p6_img.image_rgba(image='image', x=0, y=0, dw=len(image[0]), dh=len(image), source=source)
slider = Slider(start=0, end=.5, value=.1, step=.01, title="Noise Level")
#Open the image "photo.jpeg", apply a gaussian filter to it and assign its figure to p7_img for later plotting
p7 = Gaussian_Image("photo.jpeg",2, threading=False)
p7.transform_img()
p7_img = p7.create_img()
#Function that recalculates the noise in an image on slider change
def update_img(attr, old, new):
new_img = p6.transform_img(new)
source.data = {'image': [new_img]}
slider.on_change("value", update_img)
#Combine the tabs inside a Tag-Object
tabs = Tabs(tabs=[tab1, tab2])
#START BOKEH SERVER FIRST IN SAME DIRECTORY: CMD: --> CD "THIS-DIRECTORY" --> BOKEH SERVE
session = push_session(curdoc())
session.show(layout([row(column(p1_img, row(p2_img, p3_img, p4_img)), column(row(tabs), p6_img, slider, p7_img))]))
session.loop_until_closed()
#To make the program work without the bokeh server, uncomment the line underneath and comment out the three lines above
#show(layout([row(column(p1_img, row(p2_img, p3_img, p4_img)), column(row(tabs), p6_img, slider, p7_img))]))
``` |
{
"source": "joelbarmettlerUZH/url_image_downloader",
"score": 3
} |
#### File: url_image_downloader/scrapeasy/Page.py
```python
import requests
from bs4 import BeautifulSoup
import validators
import time
from scrapeasy.WebData import OnlineData
#Abstract page class with base functionality
class abstractPage(object):
def __init__(self, url, verify=True):
# Define verify behaviour and extract domain from url
self._verify = verify
url = url.replace("%2F", "/")
self._domain = self.findDomain(url)
# Normalize URL to not contain anything before the domain / subdomain
try:
self._url = url[url.index(self._domain):]
except ValueError as ve:
self._url = url
if not validators.url("http://"+self._url):
raise ValueError("Not valid URL: "+url+"!")
# Try getting the header via http request.head
try:
self._header = requests.head("http://www."+self._url, verify=self._verify).headers
except requests.exceptions.ConnectionError as ce:
self._header = "Unknown"
# Add request headers (a browser User-Agent) so websites serve the scraper like a regular browser
self._headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'}
self._html = None
self.update()
# Categorize links into intern - extern and domain
self._links = {"intern":[], "extern":[], "domain":[]}
self.findLinks()
# Empty dict in which media will be inserted
self._media = {}
def __str__(self):
return self._url
# Getters for private Page content
def getURL(self):
return self._url
def getHTML(self):
return self._html
def getDomain(self):
return self._domain
def getHeader(self):
return self._header
# Return links according to type parameter
def getLinks(self, intern=True, extern=True, domain=False):
linklist = []
if intern:
linklist += self._links["intern"]
if extern:
linklist += self._links["extern"]
if domain:
linklist += self._links["domain"]
return linklist
# Extracts the domain out of a url according to the first slash occurrence that marks the start of the path
@staticmethod
def findDomain(url):
url = url.replace("https://", "")
url = url.replace("http://", "")
url = url.replace("www.", "")
if "/" in url:
url = url[:url.index("/")]
return url.lower()
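# Example (hypothetical URL, for illustration):
#   findDomain("https://www.example.com/path/index.html") -> "example.com"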
# Folder is the part of a url without the file, so without the part after the last slash
@staticmethod
def findFolder(url):
return url[:url.rindex("/")]
@staticmethod
def normalize(string):
return string.replace("https://", "").replace("http://","").replace("www.","")
# Try scraping the site. If it does not work out, wait some time and try again
def update(self, tries=5):
try:
self._html = requests.get("http://www."+self._url, headers=self._headers, allow_redirects=True, verify=self._verify).text
except requests.exceptions.ConnectionError as ce:
if tries > 0:
time.sleep(1)
self.update(tries=tries-1)
# Extract links from all urls that do not end in well-known filetypes that for sure do not contain any html text (though .txt or .md could, in theory, contain such links)
def findLinks(self):
# print("Finding links of "+self._url)
# Defined filetypes that are to ignore
endings = [".jpg", ".jpeg", ".png", ".tiff", ".gif", ".pdf", ".svc", ".ics", ".docx", ".doc", ".mp4", ".mov",
".webm", ".zip", ".ogg"]
for end in endings:
if self._url.lower().endswith(end):
return
# Parse request as lxml and extract a-tags
soup = BeautifulSoup(self._html, "lxml")
links = soup.findAll("a")
for link in links:
# Filter out the href link
link = str(link.get("href")).replace("../", "")
# Skip the link when it is None or consists of some javascript that could not be read out
if link == "None" or "JavaScript:" in link:
continue
# Categorize link according to its form
if validators.url(link) and "mailto:" not in link:
if self._domain in link.lower():
self.addInternal(self._domain + link[link.lower().index(self._domain)+len(self._domain):])
else:
self.addExternal((Page.normalize(link)))
else:
if validators.url("http://www."+self._domain+"/"+link) and "mailto:" not in link:
self.addInternal((self._domain + "/" + link))
# Add a link to the appropriate list with removing everything before the domain first
def add(self, list, link):
link = Page.normalize(link)
if link[-1] == "/":
link = link[:-1]
if "#" in link:
link = link[:link.index("#")]
if link not in list:
list.append(link)
# Add link to internal links
def addInternal(self, link):
self.add(self._links["intern"], link)
# Add link to external links
def addExternal(self, link):
self.add(self._links["extern"], link)
self.add(self._links["domain"], self.findDomain(link))
# Filter all internal and external links down to certain file endings and return them
def filterFiles(self, endlist):
links = []
# normalize the endings to lowercase with a leading dot
endings = []
for ending in endlist:
ending = ending.lower()
if not ending.startswith("."):
ending = "." + ending
endings.append(ending)
for link in self._links["intern"] + self._links["extern"]:
if any(link.lower().endswith(ending) for ending in endings):
links.append(link)
return links
# PageMedia extends the abstract page with media scraping support
class PageMedia(abstractPage):
def __init__(self, url,verify=True):
abstractPage.__init__(self, url, verify)
# Find all images in a page by filtering its links and finding img src tags
def updateImages(self):
data = ["jpg","jpeg","png","tiff","svg","webm","gif", ".ico"]
links = self.findSrc("img")
new = self.filterFiles(data)
for link in new:
if link not in links:
links.append(link)
self._media["img"] = links
# Find all videos in a page by filtering its links and finding video src tags
def updateVideos(self):
data = ["avi", "mp4", "mpeg4", "asf", "mov", "qt", "flv", "swf", "wmv"]
links = self.findSrc("video", "source")
new = self.filterFiles(data)
for link in new:
if link not in links:
links.append(link)
self._media["video"] = links
# Return list of all image links
def getImages(self):
if not "img" in self._media.keys() or self._media["img"] == None:
self.updateImages()
return self._media["img"]
# Return list of all video links
def getVideos(self):
if not "video" in self._media.keys() or self._media["video"] == None:
self.updateVideos()
return self._media["video"]
# Filter for some specific file types in all links and return the list of all these links
def get(self, filetype):
self._media[filetype] = self.filterFiles([filetype])
return self._media[filetype]
# Download a file to specified folder
def download(self, filetype, folder):
if filetype not in self._media.keys():
self.get(filetype)
for link in self._media[filetype]:
data = OnlineData(link)
data.download(folder)
# Find some source that is nested in *tags, like tags("video"->"source"), then "src"!
def findSrc(self, *tags):
links = []
# Sometimes strange Not-implemented error occurs
try:
soup = BeautifulSoup(self._html, "html.parser")
except NotImplementedError as nie:
print("Not implemented error occurred!")
print(nie.args)
return []
# Filter as long as there are tags left, in the right order
filter = soup.find_all(tags[0])
tags = tags[1:]
for t in range(len(tags)):
filter_new = []
for f in range(len(filter)):
filter_new += filter[f].find_all(tags[t])
filter = filter_new.copy()
#Find source in tag and add link according to its structure
for link in filter:
img_url = str(link.get("src")).lower()
if not self._domain in img_url:
if img_url[0] == "/":
self.add(links, self.findFolder(self._url) + img_url)
elif validators.url(img_url):
self.add(links, img_url)
else:
self.add(links, self.findFolder(self._url) + "/" + img_url)
else:
self.add(links, img_url)
return links
# PageMedia is the version of Page that always includes all functionality; multiple inheritance will be used here later on
class Page(PageMedia):
def __init__(self, url, verify=True):
PageMedia.__init__(self, url, verify=verify)
# Testing
if __name__=="__main__":
web = Page("http://mathcourses.ch/mat182.html")
print(web)
#web.download("pdf", "mathcourses/pdf-files")
```
#### File: url_image_downloader/scrapeasy/WebData.py
```python
import requests
import shutil
import os
# Class representing an online Image with an url and a name
class OnlineData(object):
def __init__(self, url):
self._url = url
self._name = self.findName()
self._headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'}
def __str__(self):
return "Name: {}. Url: {}".format(self._name, self._url)
# find the name as the string part behind the last slash
def findName(self):
if "/" in self._url:
last_slash = self._url.rfind("/")
return self._url[last_slash+1:]
return self._url
# download the file to the provided folder
def download(self, folder):
try:
img = requests.get("http://www."+self._url, headers=self._headers, stream=True, allow_redirects=True)
if not os.path.exists(folder):
os.makedirs(folder)
if folder and not folder.endswith("/"):
folder = folder + "/"
with open(folder + self._name, "wb") as file:
# print("Downloading {}".format(self._name))
shutil.copyfileobj(img.raw, file)
except:
print("Invalid URL: {}".format(self._url))
``` |
{
"source": "joelbb24/PyCharmProjects",
"score": 4
} |
#### File: joelbb24/PyCharmProjects/Calc.py
```python
def Calculator():
while True:
print("\t\t\t\t WELCOME TO JOE'S CALCULATOR")
print("1. Addition")
print("2. Subtraction")
print("3. Multiplication")
print("4. Division")
print("5. Modulo")
choice = int(input("Enter your choice: "))
var1 = int(input("Enter variable one: "))
var2 = int(input("Enter variable two: "))
var3 = 0
if choice == 1:
var3 = var1 + var2
print("The sum is: " + str(var3))
elif choice == 2:
var3 = var1 - var2
print("The difference is: " + str(var3))
elif choice == 3:
var3 = var1 * var2
print("The product is: " + str(var3))
elif choice == 4:
var3 = var1 / var2
print("The dividend is: " + str(var3))
elif choice == 5:
var3 = var1 % var2
print("The remainder is: " + str(var3))
else:
print("You've entered incorrect choice.")
print("Do you want to calculate again?")
recalc = input("Y/N")
if recalc == "Y" or recalc == "y":
Calculator()
else:
exit()
Calculator()
``` |
{
"source": "joelbcastillo/CS6903-Project-One",
"score": 3
} |
#### File: src/castillo_chan_zhou_decrypt_binary/cli.py
```python
import click
from castillo_chan_zhou_decrypt_binary import __version__, vigenere_hack
@click.group()
@click.version_option(__version__)
def cli() -> int:
"""CS6903 Project One - Cryptanalysis of a Class of Ciphers using Kasiski Examination"""
return 0
@cli.command()
def test_one() -> int:
"""Decrypt ciphertext using a chosen-message attack.
This method uses a known dictionary of possible plaintexts (e.g. chosen messages)
to attempt to decrypt ciphertext. The key is determined by using the Kasiski Examination
method of Cryptanalysis combined with an optimized check of the decrypted text using the
prefix of the known plaintexts to return a plaintext guess.
Prints the plaintext guess to the CLI.
Returns:
int: Return code
"""
ciphertext = click.prompt("Enter the ciphertext")
plaintext = vigenere_hack.hack_vigenere(ciphertext, "test_one")
click.echo(f"My plaintext guess is: {plaintext}")
return 0
@cli.command()
def test_two() -> int:
"""Decrypt ciphertext using a chosen-message attack.
This method uses a known dictionary of possible plaintext words to
attempt to decrypt ciphertext. The key is determined by decrypting
the first (x) ciphertext characters (where x is the length of each
word in the dictionary). We then use the same key to decrypt
the rest of the ciphertext. The key with the most words
decrypted is returned as the guess of the plaintext.
Prints the plaintext guess to the CLI.
Returns:
int: Return code
"""
ciphertext = click.prompt("Enter the ciphertext")
plaintext = vigenere_hack.hack_vigenere(ciphertext, "test_two")
click.echo(f"My plaintext guess is: {plaintext}")
return 0
if __name__ == "__main__":
cli() # pragma: no cover
```
#### File: CS6903-Project-One/tests/test_cli.py
```python
from typing import List
import pytest
from click.testing import CliRunner
import castillo_chan_zhou_decrypt_binary
from castillo_chan_zhou_decrypt_binary import cli
USAGE = """Usage: cli [OPTIONS] COMMAND [ARGS]...
CS6903 Project One - CLI.
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
cs6903-encrypt Encrypt a string passed in on the CLI using the CS6903...
decrypt Decrypt a string passed in on the CLI.
encrypt Encrypt a string passed in on the CLI."""
@pytest.mark.parametrize(
"options,expected",
[
([], USAGE),
(["--help"], USAGE),
(["--version"], f"cli, version { castillo_chan_zhou_decrypt_binary.__version__ }\n"),
],
)
def test_command_line_interface(options: List[str], expected: str) -> None:
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.cli, options)
assert result.exit_code == 0
assert expected in result.output
@pytest.mark.parametrize(
"command, expected",
[
("encrypt --text 'nancy' --key 'mykey'", " zyhw"),
],
)
def test_encrypt_command_line_interface(command: str, expected: str) -> None:
"""Test the encrypt CLI."""
runner = CliRunner()
result = runner.invoke(cli.cli, command)
assert result.exit_code == 0
assert expected in result.output
@pytest.mark.parametrize(
"command, expected",
[
("decrypt --text ' zyhw' --key 'mykey'", "nancy"),
],
)
def test_decrypt_command_line_interface(command: str, expected: str) -> None:
"""Test the decrypt CLI."""
runner = CliRunner()
result = runner.invoke(cli.cli, command)
print(result)
assert result.exit_code == 0
assert expected in result.output
```
#### File: CS6903-Project-One/tests/test_vigenere.py
```python
import pytest
from castillo_chan_zhou_decrypt_binary import vigenere
from castillo_chan_zhou_decrypt_binary.exceptions import InvalidModeException
@pytest.mark.parametrize(
"message, key, ciphertext",
[("nancy", "mykey", " zyhw"), ("nancy!", "mykey", " zyhw!")],
)
def test_encrypt(
message: str,
key: str,
ciphertext: str,
) -> None:
"""Ensure encryption works properly."""
result = vigenere.encrypt(message, key)
assert result == ciphertext
@pytest.mark.parametrize(
"ciphertext, key, message",
[(" zyhw", "mykey", "nancy"), (" zyhw!", "mykey", "nancy!")],
)
def test_decrypt(ciphertext: str, key: str, message: str) -> None:
"""Ensure decryption works properly."""
result = vigenere.decrypt(ciphertext, key)
assert result == message
def test_shift_method_invalid_mode() -> None:
"""Ensure shift method works properly."""
message = "nancy"
key = "mykey"
mode = ""
with pytest.raises(InvalidModeException):
vigenere.shift_message(message, key, mode)
``` |
{
"source": "joelbcastillo/github-service-desk",
"score": 3
} |
#### File: functional/public/teset_registration.py
```python
from flask import url_for
from servicedesk.user.models import User
from tests.factories import UserFactory
def test_can_register(user, testapp):
"""Register a new user."""
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form["username"] = "foobar"
form["email"] = "<EMAIL>"
form["password"] = "<PASSWORD>"
form["confirm"] = "<PASSWORD>"
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(user, testapp):
"""Show error if passwords don't match."""
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form["username"] = "foobar"
form["email"] = "<EMAIL>"
form["password"] = "<PASSWORD>"
form["confirm"] = "secrets"
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(user, testapp):
"""Show error if user already registered."""
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form["username"] = user.username
form["email"] = "<EMAIL>"
form["password"] = "<PASSWORD>"
form["confirm"] = "secret"
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
```
#### File: tests/functional/test_commands.py
```python
import pytest
from servicedesk.commands import blacken, lint
def test_lint(app):
runner = app.test_cli_runner()
result = runner.invoke(lint)
assert (
"Checking code style: black --check autoapp.py assets docs servicedesk tests tmp __pycache__\n"
in result.output
)
def test_format(app):
runner = app.test_cli_runner()
result = runner.invoke(blacken)
assert (
"Formatting project: black autoapp.py assets docs servicedesk tests tmp __pycache__\n"
in result.output
)
def test_format_fix_imports(app):
runner = app.test_cli_runner()
result = runner.invoke(blacken, ["-f"])
assert (
"Fixing import order: isort -rc autoapp.py assets docs servicedesk tests tmp __pycache__\n"
in result.output
)
result = runner.invoke(blacken, ["--fix-imports"])
assert (
"Fixing import order: isort -rc autoapp.py assets docs servicedesk tests tmp __pycache__\n"
in result.output
)
``` |
{
"source": "joelbcastillo/NYCOpenRecords",
"score": 2
} |
#### File: app/lib/db_utils.py
```python
from flask import current_app
from app import db, sentry
from app.models import Agencies, Requests
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.attributes import flag_modified
def create_object(obj):
"""
Add a database record and its elasticsearch counterpart.
If 'obj' is a Requests object, nothing will be added to
the es index since a UserRequests record is created after
its associated request and the es doc requires a
requester id. 'es_create' is called explicitly for a
Requests object in app.request.utils.
:param obj: object (instance of sqlalchemy model) to create
:return: string representation of created object
or None if creation failed
"""
try:
db.session.add(obj)
db.session.commit()
except SQLAlchemyError:
sentry.captureException()
db.session.rollback()
current_app.logger.exception("Failed to CREATE {}".format(obj))
return None
else:
# create elasticsearch doc
if (not isinstance(obj, Requests)
and hasattr(obj, 'es_create')
and current_app.config['ELASTICSEARCH_ENABLED']):
obj.es_create()
return str(obj)
def update_object(data, obj_type, obj_id, es_update=True):
"""
Update a database record and its elasticsearch counterpart.
:param data: a dictionary of attribute-value pairs
:param obj_type: sqlalchemy model
:param obj_id: id of record
:param es_update: update the elasticsearch index
:return: was the record updated successfully?
"""
obj = get_object(obj_type, obj_id)
if obj:
for attr, value in data.items():
if isinstance(value, dict):
# update json values
attr_json = getattr(obj, attr) or {}
for key, val in value.items():
attr_json[key] = val
setattr(obj, attr, attr_json)
flag_modified(obj, attr)
else:
setattr(obj, attr, value)
try:
db.session.commit()
except SQLAlchemyError:
sentry.captureException()
db.session.rollback()
current_app.logger.exception("Failed to UPDATE {}".format(obj))
else:
# update elasticsearch
if hasattr(obj, 'es_update') and current_app.config['ELASTICSEARCH_ENABLED'] and es_update:
obj.es_update()
return True
return False
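# A hypothetical usage sketch (the JSON key and values are for illustration only):
# updating a JSON column merges the given keys into the existing value and marks the
# attribute as modified so SQLAlchemy persists it, while plain columns are overwritten.
#   update_object({"agency_features": {"custom_request_forms": True}}, Agencies, agency_ein)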
def delete_object(obj):
"""
Delete a database record.
:param obj: object (instance of sqlalchemy model) to delete
:return: was the record deleted successfully?
"""
try:
db.session.delete(obj)
db.session.commit()
return True
except SQLAlchemyError:
sentry.captureException()
db.session.rollback()
current_app.logger.exception("Failed to DELETE {}".format(obj))
return False
def bulk_delete(query):
"""
Delete multiple database records via a bulk delete query.
http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.delete
:param query: Query object
:return: the number of records deleted
"""
try:
num_deleted = query.delete()
db.session.commit()
return num_deleted
except SQLAlchemyError:
sentry.captureException()
db.session.rollback()
current_app.logger.exception("Failed to BULK DELETE {}".format(query))
return 0
def get_object(obj_type, obj_id):
"""
Safely retrieve a database record by its id
and its sqlalchemy object type.
"""
if not obj_id:
return None
try:
return obj_type.query.get(obj_id)
except SQLAlchemyError:
sentry.captureException()
db.session.rollback()
current_app.logger.exception('Error searching "{}" table for id {}'.format(
obj_type.__tablename__, obj_id))
return None
def get_agency_choices():
choices = sorted([(agencies.ein, agencies.name)
for agencies in db.session.query(Agencies).all()],
key=lambda x: x[1])
return choices
```
#### File: app/report/views.py
```python
from app.report import report
from flask import (
render_template,
jsonify,
request
)
from flask_login import current_user
from app.models import (
Agencies,
Requests,
UserRequests
)
from app.constants import (
request_status,
user_type_auth
)
from app.report.forms import ReportFilterForm
@report.route('/show', methods=['GET'])
def show_report():
"""
This function handles the rendering of the reports page
:return: redirect to reports page
"""
return render_template('report/reports.html',
report_filter_form=ReportFilterForm())
@report.route('/', methods=['GET'])
def get():
"""
This function handles the retrieval of report data to generate the chart on the frontend.
Takes in agency_ein or user_guid from the frontend and filters for the number of requests closed and requests
opened.
:return: json object({"labels": ["Opened", "Closed"],
"values": [150, 135],
"active_users": [('', ''), ('o8pj0k', '<NAME>')]}), 200
"""
agency_ein = request.args.get('agency_ein')
user_guid = request.args.get('user_guid')
requests_opened = 0
requests_closed = 0
active_users = []
is_visible = False
results = False
if agency_ein:
if agency_ein == 'all':
active_requests = Requests.query.with_entities(Requests.status).join(
Agencies, Requests.agency_ein == Agencies.ein).filter(
Agencies.is_active).all()
requests_closed = len([r for r in active_requests if r[0] == request_status.CLOSED])
requests_opened = len(active_requests) - requests_closed
else:
active_requests = Requests.query.with_entities(Requests.status).join(
Agencies, Requests.agency_ein == Agencies.ein).filter(
Agencies.ein == agency_ein, Agencies.is_active).all()
requests_closed = len([r for r in active_requests if r[0] == request_status.CLOSED])
requests_opened = len(active_requests) - requests_closed
if not (current_user.is_anonymous or current_user.is_public):
if (current_user.is_agency and current_user.is_agency_admin(agency_ein)) or current_user.is_super:
is_visible = True
if current_user.is_agency_admin(agency_ein) or current_user.is_super:
active_users = sorted(
[(user.guid, user.name)
for user in Agencies.query.filter_by(ein=agency_ein).one().active_users],
key=lambda x: x[1])
elif current_user.is_agency_active(agency_ein):
active_users = [(current_user.guid, current_user.name)]
if active_users:
active_users.insert(0, ('', ''))
results = True
elif user_guid and (current_user.is_agency_active(agency_ein) or
current_user.is_agency_admin(agency_ein) or
current_user.is_super):
is_visible = True
ureqs = UserRequests.query.filter(UserRequests.user_guid == user_guid,
UserRequests.auth_user_type.in_(user_type_auth.AGENCY_USER_TYPES)
).all()
requests_closed = len([u for u in ureqs if u.request.status == request_status.CLOSED])
requests_opened = len([u for u in ureqs if u.request.status != request_status.CLOSED])
return jsonify({"labels": ["Opened", "Closed"],
"values": [requests_opened, requests_closed],
"active_users": active_users,
"is_visible": is_visible,
"results": results
}), 200
```
#### File: request/api/utils.py
```python
from flask_login import current_user
from app.lib.db_utils import create_object
from app.models import Events
def create_request_info_event(request_id, type_, previous_value, new_value):
"""
Create and store events object for updating the request information into database.
:param request_id: request ID
:param type_: event type
:param previous_value: previous value
:param new_value: new value
"""
event = Events(request_id=request_id,
user_guid=current_user.guid,
auth_user_type=current_user.auth_user_type,
type_=type_,
previous_value=previous_value,
new_value=new_value)
create_object(event)
```
#### File: app/upload/views.py
```python
import os
import app.lib.file_utils as fu
from flask import (
request,
jsonify,
current_app,
)
from flask_login import (
current_user,
login_required
)
from werkzeug.utils import secure_filename
from app import upload_redis as redis, sentry
from app.constants import permission
from app.lib.utils import (
b64decode_lenient,
eval_request_bool,
)
from app.lib.permission_utils import is_allowed
from app.models import (
Responses,
Requests
)
from app.constants import UPDATED_FILE_DIRNAME
from app.upload import upload
from app.upload.constants import (
CONTENT_RANGE_HEADER,
upload_status
)
from app.upload.utils import (
parse_content_range,
is_valid_file_type,
scan_and_complete_upload,
get_upload_key,
upload_exists,
)
@upload.route('/<request_id>', methods=['POST'])
@login_required
def post(request_id):
"""
Create a new upload.
Handles chunked files through the Content-Range header.
For filesize validation and more upload logic, see:
/static/js/upload/fileupload.js
Optional request body parameters:
- update (bool)
save the uploaded file to the 'updated' directory
(this indicates the file is meant to replace
a previously uploaded file)
- response_id (int)
the id of a response associated with the file
this upload is replacing
- REQUIRED if 'update' is 'true'
- ignored if 'update' is 'false'
:returns: {
"name": file name,
"size": file size
}
"""
files = request.files
file_ = files[next(files.keys())]
filename = secure_filename(file_.filename)
is_update = eval_request_bool(request.form.get('update'))
agency_ein = Requests.query.filter_by(id=request_id).one().agency.ein
if is_allowed(user=current_user, request_id=request_id, permission=permission.ADD_FILE) or \
is_allowed(user=current_user, request_id=request_id, permission=permission.EDIT_FILE):
response_id = request.form.get('response_id') if is_update else None
if upload_exists(request_id, filename, response_id):
response = {
"files": [{
"name": filename,
"error": "A file with this name has already "
"been uploaded for this request."
# TODO: "link": <link-to-existing-file> ? would be nice
}]
}
else:
upload_path = os.path.join(
current_app.config['UPLOAD_QUARANTINE_DIRECTORY'],
request_id)
if not os.path.exists(upload_path):
os.mkdir(upload_path)
filepath = os.path.join(upload_path, filename)
key = get_upload_key(request_id, filename, is_update)
try:
if CONTENT_RANGE_HEADER in request.headers:
start, size = parse_content_range(
request.headers[CONTENT_RANGE_HEADER])
# Only validate mime type on first chunk
valid_file_type = True
file_type = None
if start == 0:
valid_file_type, file_type = is_valid_file_type(file_)
if current_user.is_agency_active(agency_ein):
valid_file_type = True
if os.path.exists(filepath):
# remove existing file (upload 'restarted' for same file)
os.remove(filepath)
if valid_file_type:
redis.set(key, upload_status.PROCESSING)
with open(filepath, 'ab') as fp:
fp.seek(start)
fp.write(file_.stream.read())
# scan if last chunk written
if os.path.getsize(filepath) == size:
scan_and_complete_upload.delay(request_id, filepath, is_update, response_id)
else:
valid_file_type, file_type = is_valid_file_type(file_)
if current_user.is_agency_active(agency_ein):
valid_file_type = True
if valid_file_type:
redis.set(key, upload_status.PROCESSING)
file_.save(filepath)
scan_and_complete_upload.delay(request_id, filepath, is_update, response_id)
if not valid_file_type:
response = {
"files": [{
"name": filename,
"error": "The file type '{}' is not allowed.".format(
file_type)
}]
}
else:
response = {
"files": [{
"name": filename,
"original_name": file_.filename,
"size": os.path.getsize(filepath),
}]
}
except Exception as e:
sentry.captureException()
redis.set(key, upload_status.ERROR)
current_app.logger.exception("Upload for file '{}' failed: {}".format(filename, e))
response = {
"files": [{
"name": filename,
"error": "There was a problem uploading this file."
}]
}
return jsonify(response), 200
@upload.route('/<r_id_type>/<r_id>/<filecode>', methods=['DELETE'])
@login_required
def delete(r_id_type, r_id, filecode):
"""
Removes an uploaded file.
:param r_id_type: "response" or "request"
:param r_id: the Response or Request identifier
:param filecode: the encoded name of the uploaded file
(base64 without padding)
Optional request body parameters:
- quarantined_only (bool)
only delete the file if it is quarantined
(beware: takes precedence over 'updated_only')
- updated_only (bool)
only delete the file if it is in the 'updated' directory
:returns:
On success:
{ "deleted": filename }
On failure:
{ "error": error message }
"""
filename = secure_filename(b64decode_lenient(filecode))
if r_id_type not in ["request", "response"]:
response = {"error": "Invalid ID type."}
else:
try:
if r_id_type == "response":
response = Responses.query.filter_by(id=r_id, deleted=False)
r_id = response.request_id
path = ''
quarantined_only = eval_request_bool(request.form.get('quarantined_only'))
has_add_edit = (is_allowed(user=current_user, request_id=r_id, permission=permission.ADD_FILE) or
is_allowed(user=current_user, request_id=r_id, permission=permission.EDIT_FILE))
if quarantined_only and has_add_edit:
path = os.path.join(
current_app.config['UPLOAD_QUARANTINE_DIRECTORY'],
r_id
)
elif eval_request_bool(request.form.get('updated_only')) and \
is_allowed(user=current_user, request_id=r_id, permission=permission.EDIT_FILE):
path = os.path.join(
current_app.config['UPLOAD_DIRECTORY'],
r_id,
UPDATED_FILE_DIRNAME
)
else:
path_for_status = {
upload_status.PROCESSING: current_app.config['UPLOAD_QUARANTINE_DIRECTORY'],
upload_status.SCANNING: current_app.config['UPLOAD_QUARANTINE_DIRECTORY'],
upload_status.READY: current_app.config['UPLOAD_DIRECTORY']
}
status = redis.get(get_upload_key(r_id, filename))
if status is not None:
dest_path = path_for_status[status.decode("utf-8")]
if (dest_path == current_app.config['UPLOAD_QUARANTINE_DIRECTORY'] and has_add_edit) or (
dest_path == current_app.config['UPLOAD_DIRECTORY'] and
is_allowed(user=current_user, request_id=r_id, permission=permission.ADD_FILE)
):
path = os.path.join(
dest_path,
r_id
)
filepath = os.path.join(path, filename)
found = False
if path != '':
if quarantined_only:
if os.path.exists(filepath):
os.remove(filepath)
found = True
else:
if fu.exists(filepath):
fu.remove(filepath)
found = True
if found:
response = {"deleted": filename}
else:
response = {"error": "Upload not found."}
except Exception as e:
sentry.captureException()
current_app.logger.exception("Error on DELETE /upload/: {}".format(e))
response = {"error": "Failed to delete '{}'".format(filename)}
return jsonify(response), 200
@upload.route('/status', methods=['GET'])
def status():
"""
Check the status of an upload.
Request Parameters:
- request_id
- filename
- for_update (bool, optional)
:returns: {
"status": upload status
}
"""
try:
status = redis.get(
get_upload_key(
request.args['request_id'],
secure_filename(request.args['filename']),
eval_request_bool(request.args.get('for_update'))
)
)
if status is not None:
response = {"status": status.decode("utf-8")}
else:
response = {"error": "Upload status not found."}
status_code = 200
except KeyError:
sentry.captureException()
response = {}
status_code = 422
return jsonify(response), status_code
```
#### File: migrations/versions/971f341c0204_adding_agency_features_json_column_to_.py
```python
revision = '<KEY>'
down_revision = '6ab8af7a1347'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('agencies', sa.Column('agency_features', postgresql.JSON(astext_type=sa.Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('agencies', 'agency_features')
### end Alembic commands ###
```
#### File: tests/helpers/utils.py
```python
import pytest
from flask_sqlalchemy import SQLAlchemy
from app.models import Requests
def clear_data(db: SQLAlchemy):
"""Clear the data in the database after a test.
Args:
db (SQLAlchemy): Instance of the database.
Returns:
"""
meta = db.metadata
for table in reversed(meta.sorted_tables):
# print('Clear table %s' % table)
db.session.execute(table.delete())
db.session.commit()
def create_request():
"""
Returns:
"""
pass
``` |
{
"source": "joel-becker/modularizationandtesting",
"score": 3
} |
#### File: paddleboat/OLS/OLS_casey.py
```python
import numpy as np
import pandas as pd
def deg_freedom(dependent_variable_data):
if isinstance(dependent_variable_data, np.ndarray):
dimensions = np.shape(dependent_variable_data)
deg_freedom = dimensions[0] - dimensions[1]
return deg_freedom
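# Illustrative check (hypothetical data): a 100-row, 3-column design matrix gives
# 100 - 3 = 97 residual degrees of freedom.
#   deg_freedom(np.zeros((100, 3)))  # -> 97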
``` |
{
"source": "JoelBender/bacpypes-extension",
"score": 3
} |
#### File: bacpypes-extension/udpserver/udpserver.py
```python
import os
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.core import run, stop
from bacpypes.comm import Client, bind
from bacpypes.udp import UDPDirector
from custom import Custom
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# settings
SERVER_HOST = os.getenv('SERVER_HOST', 'any')
SERVER_PORT = int(os.getenv('SERVER_PORT', 9000))
#
# CustomClient
#
@bacpypes_debugging
class CustomClient(Custom, Client):
def __init__(self):
if _debug: CustomClient._debug("__init__")
#
# __main__
#
def main():
# parse the command line arguments
parser = ArgumentParser(usage=__doc__)
parser.add_argument(
"host", nargs='?',
help="listening address of server or 'any' (default %r)" % (SERVER_HOST,),
default=SERVER_HOST,
)
parser.add_argument(
"port", nargs='?', type=int,
help="server port (default %r)" % (SERVER_PORT,),
default=SERVER_PORT,
)
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
host = args.host
if host == "any":
host = ''
server_address = (host, args.port)
if _debug: _log.debug(" - server_address: %r", server_address)
udp_director = UDPDirector(server_address)
bind(CustomClient(), udp_director)
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
``` |
{
"source": "JoelBender/bacpypes-pcap",
"score": 3
} |
#### File: JoelBender/bacpypes-pcap/AddressFilter.py
```python
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.pdu import Address
from bacpypes.analysis import trace, strftimestamp, Tracer
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
filterSource = None
filterDestination = None
filterHost = None
#
# Match
#
@bacpypes_debugging
def Match(addr1, addr2):
"""Return true iff addr1 matches addr2."""
if _debug:
Match._debug("Match %r %r", addr1, addr2)
if addr2.addrType == Address.localBroadcastAddr:
# match any local station
return (addr1.addrType == Address.localStationAddr) or (
addr1.addrType == Address.localBroadcastAddr
)
elif addr2.addrType == Address.localStationAddr:
# match a specific local station
return (addr1.addrType == Address.localStationAddr) and (
addr1.addrAddr == addr2.addrAddr
)
elif addr2.addrType == Address.remoteBroadcastAddr:
# match any remote station or remote broadcast on a matching network
return (
(addr1.addrType == Address.remoteStationAddr)
or (addr1.addrType == Address.remoteBroadcastAddr)
) and (addr1.addrNet == addr2.addrNet)
elif addr2.addrType == Address.remoteStationAddr:
# match a specific remote station
return (
(addr1.addrType == Address.remoteStationAddr)
and (addr1.addrNet == addr2.addrNet)
and (addr1.addrAddr == addr2.addrAddr)
)
elif addr2.addrType == Address.globalBroadcastAddr:
# match a global broadcast address
return addr1.addrType == Address.globalBroadcastAddr
else:
raise RuntimeError("invalid match combination")
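# Illustrative matches (addresses are hypothetical, using bacpypes Address notation):
#   Match(Address("2:5"), Address("2:*")) -> True (remote station 5 on net 2 vs remote broadcast on net 2)
#   Match(Address(5), Address("*")) -> True (any local station matches a local broadcast filter)
#   Match(Address("3:5"), Address("2:*")) -> False (the network numbers differ)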
#
# AddressFilterTracer
#
@bacpypes_debugging
class AddressFilterTracer(Tracer):
def __init__(self):
if _debug:
AddressFilterTracer._debug("__init__")
Tracer.__init__(self, self.Filter)
def Filter(self, pkt):
if _debug:
AddressFilterTracer._debug("Filter %r", pkt)
# apply the filters
if filterSource:
if not Match(pkt.pduSource, filterSource):
if _debug:
AddressFilterTracer._debug(" - source filter fail")
return
if filterDestination:
if not Match(pkt.pduDestination, filterDestination):
if _debug:
AddressFilterTracer._debug(" - destination filter fail")
return
if filterHost:
if (not Match(pkt.pduSource, filterHost)) and (
not Match(pkt.pduDestination, filterHost)
):
if _debug:
AddressFilterTracer._debug(" - host filter fail")
return
# passed all the filter tests
print(strftimestamp(pkt._timestamp), pkt.__class__.__name__)
pkt.debug_contents()
print("")
#
# __main__
#
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument("-s", "--source", nargs="?", type=str, help="source address")
parser.add_argument(
"-d", "--destination", nargs="?", type=str, help="destination address"
)
parser.add_argument("--host", nargs="?", type=str, help="source or destination")
parser.add_argument("pcap", nargs="+", type=str, help="pcap file(s)")
args = parser.parse_args()
if _debug:
_log.debug("initialization")
if _debug:
_log.debug(" - args: %r", args)
# interpret the arguments
if args.source:
filterSource = Address(args.source)
if _debug:
_log.debug(" - filterSource: %r", filterSource)
if args.destination:
filterDestination = Address(args.destination)
if _debug:
_log.debug(" - filterDestination: %r", filterDestination)
if args.host:
filterHost = Address(args.host)
if _debug:
_log.debug(" - filterHost: %r", filterHost)
# trace the file(s)
for fname in args.pcap:
trace(fname, [AddressFilterTracer])
```
#### File: JoelBender/bacpypes-pcap/WhoIsIAmSummaryFilter.py
```python
from collections import defaultdict
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.pdu import Address
from bacpypes.analysis import trace, Tracer
from bacpypes.apdu import WhoIsRequest, IAmRequest
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
filterSource = None
filterDestination = None
filterHost = None
# dictionaries of requests
whoIsTraffic = defaultdict(int)
iAmTraffic = defaultdict(int)
#
# Match
#
@bacpypes_debugging
def Match(addr1, addr2):
"""Return true iff addr1 matches addr2."""
if _debug:
Match._debug("Match %r %r", addr1, addr2)
if addr2.addrType == Address.localBroadcastAddr:
# match any local station
return (addr1.addrType == Address.localStationAddr) or (
addr1.addrType == Address.localBroadcastAddr
)
elif addr2.addrType == Address.localStationAddr:
# match a specific local station
return (addr1.addrType == Address.localStationAddr) and (
addr1.addrAddr == addr2.addrAddr
)
elif addr2.addrType == Address.remoteBroadcastAddr:
# match any remote station or remote broadcast on a matching network
return (
(addr1.addrType == Address.remoteStationAddr)
or (addr1.addrType == Address.remoteBroadcastAddr)
) and (addr1.addrNet == addr2.addrNet)
elif addr2.addrType == Address.remoteStationAddr:
# match a specific remote station
return (
(addr1.addrType == Address.remoteStationAddr)
and (addr1.addrNet == addr2.addrNet)
and (addr1.addrAddr == addr2.addrAddr)
)
elif addr2.addrType == Address.globalBroadcastAddr:
# match a global broadcast address
return addr1.addrType == Address.globalBroadcastAddr
else:
raise RuntimeError("invalid match combination")
#
# WhoIsIAmSummary
#
@bacpypes_debugging
class WhoIsIAmSummary(Tracer):
def __init__(self):
if _debug:
WhoIsIAmSummary._debug("__init__")
Tracer.__init__(self, self.Filter)
def Filter(self, pkt):
if _debug:
WhoIsIAmSummary._debug("Filter %r", pkt)
global requests
# apply the filters
if filterSource:
if not Match(pkt.pduSource, filterSource):
if _debug:
WhoIsIAmSummary._debug(" - source filter fail")
return
if filterDestination:
if not Match(pkt.pduDestination, filterDestination):
if _debug:
WhoIsIAmSummary._debug(" - destination filter fail")
return
if filterHost:
if (not Match(pkt.pduSource, filterHost)) and (
not Match(pkt.pduDestination, filterHost)
):
if _debug:
WhoIsIAmSummary._debug(" - host filter fail")
return
# check for Who-Is
if isinstance(pkt, WhoIsRequest):
key = (
pkt.pduSource,
pkt.deviceInstanceRangeLowLimit,
pkt.deviceInstanceRangeHighLimit,
)
whoIsTraffic[key] += 1
# check for I-Am
elif isinstance(pkt, IAmRequest):
key = (pkt.pduSource, pkt.iAmDeviceIdentifier[1])
iAmTraffic[key] += 1
#
# __main__
#
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument("-s", "--source", nargs="?", type=str, help="source address")
parser.add_argument(
"-d", "--destination", nargs="?", type=str, help="destination address"
)
parser.add_argument("--host", nargs="?", type=str, help="source or destination")
parser.add_argument("pcap", nargs="+", type=str, help="pcap file(s)")
args = parser.parse_args()
if _debug:
_log.debug("initialization")
if _debug:
_log.debug(" - args: %r", args)
# interpret the arguments
if args.source:
filterSource = Address(args.source)
if _debug:
_log.debug(" - filterSource: %r", filterSource)
if args.destination:
filterDestination = Address(args.destination)
if _debug:
_log.debug(" - filterDestination: %r", filterDestination)
if args.host:
filterHost = Address(args.host)
if _debug:
_log.debug(" - filterHost: %r", filterHost)
# trace the file(s)
for fname in args.pcap:
trace(fname, [WhoIsIAmSummary])
# dump request counts
print("----- Top 20 Who-Is -----")
print("")
items = sorted(whoIsTraffic.items(), key=lambda x: (x[1], str(x[0][0])), reverse=True)
for item in items[:20]:
print("%-20s %8s %8s %5d" % (item[0][0], item[0][1], item[0][2], item[1]))
print("")
print("----- Top 20 I-Am -----")
print("")
items = sorted(iAmTraffic.items(), key=lambda x: (x[1], str(x[0][0])), reverse=True)
for item in items[:20]:
print("%-20s %8s %5d" % (item[0][0], item[0][1], item[1]))
print("")
``` |
{
"source": "JoelBender/modpypes",
"score": 2
} |
#### File: modpypes/modpypes/pdu.py
```python
import struct
from bacpypes.debugging import bacpypes_debugging, DebugContents, ModuleLogger
from bacpypes.comm import PDUData, PCI
from bacpypes.errors import DecodingError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# a dictionary of functions and classes
request_types = {}
response_types = {}
def register_request_type(klass):
request_types[klass.functionCode] = klass
def register_response_type(klass):
response_types[klass.functionCode] = klass
#
# Packing and Unpacking Functions
#
def _packBitsToString(bits):
barry = []
i = packed = 0
for bit in bits:
if bit:
packed += 128
i += 1
if i == 8:
barry.append(packed)
i = packed = 0
else:
packed >>= 1
if i > 0 and i < 8:
packed >>= 7 - i
barry.append(packed)
return struct.pack("B" * len(barry), *barry)
def _unpackBitsFromString(string):
barry = struct.unpack("B" * len(string), string)
bits = []
for byte in barry:
for bit in range(8):
bits.append((byte & 1) == 1)
byte >>= 1
return bits
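# Round-trip illustration (comment only): _packBitsToString([True, False, True])
# yields the single byte 0x05 (the first coil maps to bit 0), and unpacking that
# byte returns eight booleans whose first three values are True, False, True.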
#
# _Struct
#
class _Struct:
"""
This is an abstract class for functions that pack and unpack the
variably encoded portion of a PDU. Each of the derived classes
produces or consumes a fixed number of 16-bit registers.
"""
registerLength = None
def pack(self, value):
raise NotImplementedError("pack is not implemented in %s" % (self.__class__.__name__,))
def unpack(self, registers):
raise NotImplementedError("unpack is not implemented in %s" % (self.__class__.__name__,))
@bacpypes_debugging
class Byte(_Struct):
"""
This class packs and unpacks a register as an unsigned octet.
"""
registerLength = 1
def pack(self, value):
if _debug: Byte._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Byte._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFF]
def unpack(self, registers):
if _debug: Byte._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class Int(_Struct):
"""
This class packs and unpacks a register as a 16-bit signed integer.
"""
registerLength = 1
def pack(self, value):
if _debug: Int._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Int._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: Int._debug("unpack %r", registers)
value = registers[0]
if (value & 0x8000):
value = (-1 << 16) | value
return value
@bacpypes_debugging
class UnsignedInt(_Struct):
"""
This class packs and unpacks a register as a 16-bit unsigned integer.
"""
registerLength = 1
def pack(self, value):
if _debug: UnsignedInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedInt._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class DoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: DoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
DoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: DoubleInt._debug("unpack %r", registers)
value = (registers[0] << 16) | registers[1]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
@bacpypes_debugging
class UnsignedDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: UnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedDoubleInt._debug("unpack %r", registers)
return (registers[0] << 16) | registers[1]
@bacpypes_debugging
class Real(_Struct):
registerLength = 2
def pack(self, value):
if _debug: Real._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
BigEndianReal._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[1], registers[0]]
def unpack(self, registers):
if _debug: Real._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[1], registers[0]))
return value
@bacpypes_debugging
class ROCReal(_Struct):
registerLength = 1
def pack(self, value):
if _debug: ROCReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
ROCReal._error("coercion error: %r not a float", value)
value = 0.0
raise NotImplementedError("packing ROCReal is not supported")
def unpack(self, registers):
if _debug: ROCReal._debug("unpack %r", registers)
# byte-swap the registers
r0, r1 = registers
r0 = ((r0 & 0xFF00) >> 8) | ((r0 & 0x00FF) << 8)
r1 = ((r1 & 0xFF00) >> 8) | ((r1 & 0x00FF) << 8)
value, = struct.unpack(">f", struct.pack(">HH", r1, r0))
return value
@bacpypes_debugging
class BigEndianDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a big-endian 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianDoubleInt._debug("unpack %r", registers)
value = (registers[1] << 16) | registers[0]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
@bacpypes_debugging
class BigEndianUnsignedDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a big-endian 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianUnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianUnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianUnsignedDoubleInt._debug("unpack %r", registers)
return (registers[1] << 16) | registers[0]
@bacpypes_debugging
class BigEndianReal(_Struct):
registerLength = 2
def pack(self, value):
if _debug: BigEndianReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
BigEndianReal._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[0], registers[1]]
def unpack(self, registers):
if _debug: BigEndianReal._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[0], registers[1]))
return value
@bacpypes_debugging
class String(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg >> 8)
octets.append(reg & 0xFF)
value = ''.join(chr(c) for c in octets)
value = value[:value.find('\x00')]
return value
@bacpypes_debugging
class BigEndianString(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg & 0xFF)
octets.append(reg >> 8)
value = ''.join(chr(c) for c in octets)
value = value[:value.find('\x00')]
return value
#
# ModbusStruct
#
ModbusStruct = {
'byte': Byte(),
'int': Int(),
'uint': UnsignedInt(),
'dint': DoubleInt(),
'udint': UnsignedDoubleInt(),
'real': Real(),
'roc-real': ROCReal(),
'be-dint': BigEndianDoubleInt(),
'be-udint': BigEndianUnsignedDoubleInt(),
'be-real': BigEndianReal(),
'str': String(),
'be-str': BigEndianString(),
}
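# Usage illustration (comments only): point definitions refer to these codecs by
# name, e.g. ModbusStruct['real'].unpack([0x0000, 0x41C8]) returns 25.0 (low word
# first, as handled by Real), and ModbusStruct['uint'].pack(300) returns [300].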
#
# MPCI
#
@bacpypes_debugging
class MPCI(PCI, DebugContents):
"""
This class contains the MODBUS protocol control information which
is the 8 octet header at the front of all MODBUS PDUs.
"""
_debug_contents = (
'mpduTransactionID',
'mpduProtocolID',
'mpduLength',
'mpduUnitID',
'mpduFunctionCode',
)
readCoils = 1
readDiscreteInputs = 2
readMultipleRegisters = 3
readInputRegisters = 4
writeSingleCoil = 5
writeSingleRegister = 6
writeMultipleCoils = 15
writeMultipleRegisters = 16
readWriteMultipleRegisters = 23
announceMaster = 100
registerSlave = 105
def __init__(self, *args, **kwargs):
if _debug: MPCI._debug("__init__ %r %r", args, kwargs)
PCI.__init__(self, *args, **kwargs)
self.mpduTransactionID = 0
self.mpduProtocolID = 0
self.mpduLength = None
self.mpduUnitID = 0
self.mpduFunctionCode = None
def update(self, mpci):
if _debug: MPCI._debug("update %r", mpci)
PCI.update(self, mpci)
self.mpduTransactionID = mpci.mpduTransactionID
self.mpduProtocolID = mpci.mpduProtocolID
self.mpduLength = mpci.mpduLength
self.mpduUnitID = mpci.mpduUnitID
self.mpduFunctionCode = mpci.mpduFunctionCode
def encode(self, pdu):
"""Encode the contents into the PDU."""
if _debug: MPCI._debug("encode %r", pdu)
PCI.update(pdu, self)
pdu.put_short(self.mpduTransactionID)
pdu.put_short(self.mpduProtocolID)
pdu.put_short(self.mpduLength)
pdu.put(self.mpduUnitID)
pdu.put(self.mpduFunctionCode)
def decode(self, pdu):
"""Decode the contents of the PDU."""
if _debug: MPCI._debug("decode %r", pdu)
PCI.update(self, pdu)
self.mpduTransactionID = pdu.get_short()
self.mpduProtocolID = pdu.get_short()
self.mpduLength = pdu.get_short()
self.mpduUnitID = pdu.get()
self.mpduFunctionCode = pdu.get()
# check the length
if self.mpduLength != len(pdu.pduData) + 2:
raise DecodingError("invalid length")
#
# MPDU
#
@bacpypes_debugging
class MPDU(MPCI, PDUData):
"""
This class is a generic MODBUS PDU. It inherits the :class:`MPCI`
layer and the more generic PDU data functions.
"""
def __init__(self, *args, **kwargs):
if _debug: MPDU._debug("__init__ %r %r", args, kwargs)
MPCI.__init__(self, **kwargs)
PDUData.__init__(self, *args)
def encode(self, pdu):
if _debug: MPDU._debug("encode %r", pdu)
MPCI.encode(self, pdu)
pdu.put_data(self.pduData)
def decode(self, pdu):
if _debug: MPDU._debug("decode %r", pdu)
MPCI.decode(self, pdu)
self.pduData = pdu.get_data(len(pdu.pduData))
#------------------------------
@bacpypes_debugging
class ReadBitsRequestBase(MPCI, DebugContents):
"""
Base class for messages requesting bit values. This is inherited by
both :class:`ReadCoilsRequest` and :class:`ReadDiscreteInputsRequest`.
"""
_debug_contents = ('address', 'count')
def __init__(self, address, count, **kwargs):
if _debug: ReadBitsRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self, pdu):
if _debug: ReadBitsRequestBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadBitsRequestBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
@bacpypes_debugging
class ReadBitsResponseBase(MPCI, DebugContents):
"""
Base class for messages that are responses to reading bit values.
This is inherited by both :class:`ReadCoilsResponse` and
:class:`ReadDiscreteInputsResponse`.
"""
_debug_contents = ('bits',)
def __init__(self, values=None, **kwargs):
if _debug: ReadBitsResponseBase._debug("__init__ %r %r", values, kwargs)
MPCI.__init__(self, **kwargs)
if values is not None:
self.bits = values
else:
self.bits = []
def encode(self, pdu):
if _debug: ReadBitsResponseBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
stringbits = _packBitsToString(self.bits)
if _debug: ReadBitsResponseBase._debug(" - stringbits: %r", stringbits)
pdu.put(len(stringbits))
pdu.put_data(stringbits)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadBitsResponseBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.bits = _unpackBitsFromString(pdu.get_data(datalen))
@bacpypes_debugging
class ReadRegistersRequestBase(MPCI, DebugContents):
"""
Base class for messages requesting register values.
This is inherited by both :class:`ReadMultipleRegistersRequest` and
:class:`ReadInputRegistersRequest`.
"""
_debug_contents = ('address', 'count')
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadRegistersRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self, pdu):
if _debug: ReadRegistersRequestBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadRegistersRequestBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
@bacpypes_debugging
class ReadRegistersResponseBase(MPCI, DebugContents):
"""
Base class for messages that are responses to reading register values.
This is inherited by both :class:`ReadMultipleRegistersResponse` and
:class:`ReadInputRegistersResponse`.
"""
_debug_contents = ('registers',)
def __init__(self, values=None, **kwargs):
if _debug: ReadRegistersResponseBase._debug("__init__ %r %r", values, kwargs)
MPCI.__init__(self, **kwargs)
if values is not None:
self.registers = values
else:
self.registers = []
def encode(self, pdu):
if _debug: ReadRegistersResponseBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadRegistersResponseBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
@bacpypes_debugging
class ReadWriteValueBase(MPCI, DebugContents):
"""
Base class for messages reading and writing values. This class is
inherited by :class:`WriteSingleCoilRequest`, :class:`WriteSingleCoilResponse`,
:class:`WriteSingleRegisterRequest`, and :class:`WriteSingleRegisterResponse`.
"""
_debug_contents = ('address', 'value')
def __init__(self, address=None, value=None, **kwargs):
if _debug: ReadWriteValueBase._debug("__init__ %r %r %r", address, value, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.value = value
def encode(self, pdu):
if _debug: ReadWriteValueBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.value)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteValueBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.value = pdu.get_short()
#------------------------------
#
# ReadCoils
#
@bacpypes_debugging
class ReadCoilsRequest(ReadBitsRequestBase):
"""
Read Coils Request
"""
functionCode = MPCI.readCoils
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadCoilsRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadCoilsRequest.functionCode
register_request_type(ReadCoilsRequest)
@bacpypes_debugging
class ReadCoilsResponse(ReadBitsResponseBase):
"""
Read Coils Response
"""
functionCode = MPCI.readCoils
def __init__(self, values=None, **kwargs):
if _debug: ReadCoilsResponse._debug("__init__ %r %r", values, kwargs)
ReadBitsResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadCoilsResponse.functionCode
register_response_type(ReadCoilsResponse)
#
# ReadDiscreteInputs
#
@bacpypes_debugging
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
"""
Read Discrete Inputs Request
"""
functionCode = MPCI.readDiscreteInputs
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadDiscreteInputsRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadDiscreteInputsRequest.functionCode
register_request_type(ReadDiscreteInputsRequest)
@bacpypes_debugging
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
"""
Read Discrete Inputs Response
"""
functionCode = MPCI.readDiscreteInputs
def __init__(self, values=None, **kwargs):
if _debug: ReadDiscreteInputsResponse._debug("__init__ %r %r", values, kwargs)
ReadBitsResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadDiscreteInputsResponse.functionCode
register_response_type(ReadDiscreteInputsResponse)
#
# ReadMultipleRegisters
#
@bacpypes_debugging
class ReadMultipleRegistersRequest(ReadRegistersRequestBase):
"""
Read Multiple Registers Request
"""
functionCode = MPCI.readMultipleRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadMultipleRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadMultipleRegistersRequest.functionCode
register_request_type(ReadMultipleRegistersRequest)
@bacpypes_debugging
class ReadMultipleRegistersResponse(ReadRegistersResponseBase):
"""
Read Multiple Registers Response
"""
functionCode = MPCI.readMultipleRegisters
def __init__(self, values=None, **kwargs):
if _debug: ReadMultipleRegistersResponse._debug("__init__ %r %r", values, kwargs)
ReadRegistersResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadMultipleRegistersResponse.functionCode
register_response_type(ReadMultipleRegistersResponse)
#
# ReadInputRegisters
#
@bacpypes_debugging
class ReadInputRegistersRequest(ReadRegistersRequestBase):
"""
Read Input Registers Request
"""
functionCode = MPCI.readInputRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadInputRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadInputRegistersRequest.functionCode
register_request_type(ReadInputRegistersRequest)
@bacpypes_debugging
class ReadInputRegistersResponse(ReadRegistersResponseBase):
"""
Read Input Registers Response
"""
functionCode = MPCI.readInputRegisters
def __init__(self, values=None, **kwargs):
if _debug: ReadInputRegistersResponse._debug("__init__ %r %r", values, kwargs)
ReadRegistersResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadInputRegistersResponse.functionCode
register_response_type(ReadInputRegistersResponse)
#
# WriteSingleCoil
#
@bacpypes_debugging
class WriteSingleCoilRequest(ReadWriteValueBase):
"""
Write Single Coil Request
"""
functionCode = MPCI.writeSingleCoil
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleCoilRequest._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleCoilRequest.functionCode
register_request_type(WriteSingleCoilRequest)
@bacpypes_debugging
class WriteSingleCoilResponse(ReadWriteValueBase):
"""
Write Single Coil Response
"""
functionCode = MPCI.writeSingleCoil
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleCoilResponse._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleCoilResponse.functionCode
register_response_type(WriteSingleCoilResponse)
#
# WriteSingleRegister
#
@bacpypes_debugging
class WriteSingleRegisterRequest(ReadWriteValueBase):
"""
Write Single Register Request
"""
functionCode = MPCI.writeSingleRegister
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleRegisterRequest._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleRegisterRequest.functionCode
register_request_type(WriteSingleRegisterRequest)
@bacpypes_debugging
class WriteSingleRegisterResponse(ReadWriteValueBase):
"""
Write Single Register Response
"""
functionCode = MPCI.writeSingleRegister
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleRegisterResponse._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleRegisterResponse.functionCode
register_response_type(WriteSingleRegisterResponse)
#
# WriteMultipleCoils
#
@bacpypes_debugging
class WriteMultipleCoilsRequest(MPCI, DebugContents):
"""
Write Multiple Coils Request
"""
_debug_contents = ('address', 'count', 'coils')
functionCode = MPCI.writeMultipleCoils
def __init__(self, address=None, count=None, coils=None, **kwargs):
if _debug: WriteMultipleCoilsRequest._debug("__init__ %r %r %r %r", address, count, coils, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleCoilsRequest.functionCode
self.address = address
self.count = count
if coils is not None:
self.coils = coils
elif count is not None:
self.coils = [False] * count
else:
self.coils = None
def encode(self, pdu):
if _debug: WriteMultipleCoilsRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
stringbits = _packBitsToString(self.coils)
pdu.put(len(stringbits))
pdu.put_data(stringbits)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleCoilsRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
datalen = pdu.get()
coils = _unpackBitsFromString(pdu.get_data(datalen))
self.coils = coils[:self.count]
register_request_type(WriteMultipleCoilsRequest)
@bacpypes_debugging
class WriteMultipleCoilsResponse(MPCI, DebugContents):
"""
Write Multiple Coils Response
"""
_debug_contents = ('address', 'count')
functionCode = MPCI.writeMultipleCoils
def __init__(self, address=None, count=None, **kwargs):
if _debug: WriteMultipleCoilsResponse._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleCoilsResponse.functionCode
self.address = address
self.count = count
def encode(self, pdu):
if _debug: WriteMultipleCoilsResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleCoilsResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
register_response_type(WriteMultipleCoilsResponse)
#
# WriteMultipleRegisters
#
@bacpypes_debugging
class WriteMultipleRegistersRequest(MPCI, DebugContents):
"""
Write Multiple Registers Request
"""
_debug_contents = ('address', 'count', 'registers')
functionCode = MPCI.writeMultipleRegisters
def __init__(self, address=None, count=None, registers=None, **kwargs):
if _debug: WriteMultipleRegistersRequest._debug("__init__ %r %r %r %r", address, count, registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleRegistersRequest.functionCode
self.address = address
self.count = count
if registers is not None:
self.registers = registers
elif count is not None:
self.registers = [0] * self.count
else:
self.registers = None
def encode(self, pdu):
if _debug: WriteMultipleRegistersRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleRegistersRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_request_type(WriteMultipleRegistersRequest)
@bacpypes_debugging
class WriteMultipleRegistersResponse(MPCI, DebugContents):
"""
Write Multiple Registers Response
"""
_debug_contents = ('address', 'count')
functionCode = MPCI.writeMultipleRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: WriteMultipleRegistersResponse._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleRegistersResponse.functionCode
self.address = address
self.count = count
def encode(self, pdu):
if _debug: WriteMultipleRegistersResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleRegistersResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
register_response_type(WriteMultipleRegistersResponse)
#
# ReadWriteMultipleRegistersRequest
#
@bacpypes_debugging
class ReadWriteMultipleRegistersRequest(MPCI, DebugContents):
"""
Read Write Multiple Registers Request
"""
_debug_contents = ('raddress', 'rcount', 'waddress', 'wcount', 'registers')
functionCode = MPCI.readWriteMultipleRegisters
def __init__(self, raddress=None, rcount=None, waddress=None, wcount=None, registers=None, **kwargs):
if _debug: ReadWriteMultipleRegistersRequest._debug("__init__ %r %r %r %r %r %r", raddress, rcount, waddress, wcount, registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = ReadWriteMultipleRegistersRequest.functionCode
self.raddress = raddress
self.rcount = rcount
self.waddress = waddress
self.wcount = wcount
if registers is not None:
self.registers = registers
elif wcount is not None:
self.registers = [0] * wcount
else:
self.registers = None
def encode(self, pdu):
if _debug: ReadWriteMultipleRegistersRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.raddress)
pdu.put_short(self.rcount)
pdu.put_short(self.waddress)
pdu.put_short(self.wcount)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteMultipleRegistersRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.raddress = pdu.get_short()
self.rcount = pdu.get_short()
self.waddress = pdu.get_short()
self.wcount = pdu.get_short()
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_request_type(ReadWriteMultipleRegistersRequest)
@bacpypes_debugging
class ReadWriteMultipleRegistersResponse(MPCI, DebugContents):
"""
Read Write Multiple Registers Response
"""
_debug_contents = ('registers',)
functionCode = MPCI.readWriteMultipleRegisters
def __init__(self, registers=None, **kwargs):
if _debug: ReadWriteMultipleRegistersResponse._debug("__init__ %r %r", registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = ReadWriteMultipleRegistersResponse.functionCode
if registers is not None:
self.registers = registers
else:
self.registers = []
def encode(self, pdu):
if _debug: ReadWriteMultipleRegistersResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteMultipleRegistersResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_response_type(ReadWriteMultipleRegistersResponse)
#
# ExceptionResponse
#
@bacpypes_debugging
class ExceptionResponse(MPCI, DebugContents):
"""
Exception Response
"""
_debug_contents = ('exceptionCode',)
ILLEGAL_FUNCTION = 0x01
ILLEGAL_DATA_ADDRESS = 0x02
ILLEGAL_DATA_VALUE = 0x03
ILLEGAL_RESPONSE_LENGTH = 0x04
ACKNOWLEDGE = 0x05
SLAVE_DEVICE_BUSY = 0x06
NEGATIVE_ACKNOWLEDGE = 0x07
MEMORY_PARITY_ERROR = 0x08
GATEWAY_PATH_UNAVAILABLE = 0x0A
GATEWAY_TARGET_DEVICE_FAILED_TO_RESPOND = 0x0B
def __init__(self, function=None, exceptionCode=None, **kwargs):
if _debug: ExceptionResponse._debug("__init__ %r %r %r", function, exceptionCode, kwargs)
MPCI.__init__(self, **kwargs)
if function is not None:
self.mpduFunctionCode = function + 128
else:
self.mpduFunctionCode = None
self.exceptionCode = exceptionCode
def encode(self, pdu):
if _debug: ExceptionResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(self.exceptionCode)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ExceptionResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.exceptionCode = pdu.get()
```
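The request and response classes above share a small contract: `encode` writes the address/count/value fields into a PDU-style buffer and fixes up `mpduLength`, `decode` reads them back, and `request_types`/`response_types` map function codes to classes. The following is a minimal round-trip sketch under the assumption that the package is importable as `modpypes.pdu`; on the wire, framing is handled by the codec in `modpypes.app`, so this only illustrates the class-level contract.
```python
from modpypes.pdu import MPDU, ReadMultipleRegistersRequest, request_types

# build a request for four holding registers starting at address 0
req = ReadMultipleRegistersRequest(address=0, count=4)
req.mpduTransactionID = 1
req.mpduUnitID = 1

# encode the variable portion into a generic MPDU buffer
buffer = MPDU()
req.encode(buffer)

# a receiver selects the class by function code and decodes the same buffer
parsed = request_types[buffer.mpduFunctionCode]()
parsed.decode(buffer)
assert (parsed.address, parsed.count) == (0, 4)
```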
#### File: modpypes/modpypes/server.py
```python
import os
import logging
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.consolelogging import ArgumentParser
from bacpypes.comm import Client, bind
from bacpypes.core import run
from .pdu import ExceptionResponse, \
ReadCoilsResponse, ReadDiscreteInputsResponse, ReadMultipleRegistersResponse, ReadInputRegistersResponse, \
WriteSingleCoilResponse, WriteSingleRegisterResponse, WriteMultipleRegistersResponse
from .app import ModbusServer, ModbusException
# some debugging
_debug = 0
_log = ModuleLogger(globals())
_commlog = logging.getLogger(__name__ + "._commlog")
# settings
SERVER_HOST = os.getenv("SERVER_HOST", "")
SERVER_PORT = int(os.getenv("SERVER_PORT", 502))
IDLE_TIMEOUT = int(os.getenv('IDLE_TIMEOUT', 0)) or None
#
# SimpleServer
#
@bacpypes_debugging
class SimpleServer(Client):
"""
Simple Server
"""
def __init__(self, unitNumber=1):
if _debug: SimpleServer._debug("__init__")
Client.__init__(self)
# save the unit number
self.unitNumber = unitNumber
# create some coils and registers
self.coils = [False] * 10
self.registers = [0] * 10
def confirmation(self, req):
"""Got a request from a client."""
if _debug: SimpleServer._debug("confirmation %r", req)
_commlog.debug(">>> %r %r", req.pduSource, req)
# if it's an exception, punt
if isinstance(req, Exception):
if _debug: SimpleServer._debug(" - punt exceptions")
return
# if it's not for us, dump it
if req.mpduUnitID != self.unitNumber:
if _debug: SimpleServer._debug(" - not for us")
return
try:
# look up a matching function
try:
fn = getattr(self, "do_" + req.__class__.__name__)
except AttributeError:
raise ModbusException(ExceptionResponse.ILLEGAL_FUNCTION)
# try to execute it
resp = fn(req)
except ModbusException as err:
# create an exception response
resp = ExceptionResponse(req.mpduFunctionCode, err.errCode)
# match the transaction information
resp.pduDestination = req.pduSource
resp.mpduTransactionID = req.mpduTransactionID
resp.mpduUnitID = req.mpduUnitID
_commlog.debug("<<< %r %r", resp.pduDestination, resp)
# send the response back
self.request(resp)
def pull_coils(self, address, count):
"""Called when there is a request for the current value of a coil."""
if _debug: SimpleServer._debug("pull_coils %r %r", address, count)
def push_coils(self, address, count):
"""Called when a MODBUS service has changed the value of one or more coils."""
if _debug: SimpleServer._debug("push_coils %r %r", address, count)
def pull_registers(self, address, count):
"""Called when a MODBUS client is requesting the current value of one
or more registers."""
if _debug: SimpleServer._debug("pull_registers %r %r", address, count)
def push_registers(self, address, count):
"""Called when a MODBUS service has changed the value of one or more
registers."""
if _debug: SimpleServer._debug("push_registers %r %r", address, count)
# ---------- Coils ----------
def do_ReadCoilsRequest(self, req):
SimpleServer._debug('do_ReadCoilsRequest %r', req)
if (req.address + req.count) > len(self.coils):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
self.pull_coils(req.address, req.count)
return ReadCoilsResponse(self.coils[req.address:req.address+req.count])
def do_WriteSingleCoilRequest(self, req):
SimpleServer._debug('do_WriteSingleCoilRequest %r', req)
if req.address >= len(self.coils):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
# check the value and save it
if (req.value == 0x0000):
self.coils[req.address] = 0
elif (req.value == 0xFF00):
self.coils[req.address] = 1
else:
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_VALUE)
self.push_coils(req.address, 1)
# return the new value
return WriteSingleCoilResponse(req.address, req.value)
# ---------- Discrete Inputs (mapped as a coil) ----------
def do_ReadDiscreteInputsRequest(self, req):
SimpleServer._debug('do_ReadDiscreteInputsRequest %r', req)
if (req.address + req.count) > len(self.coils):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
self.pull_coils(req.address, req.count)
return ReadDiscreteInputsResponse(self.coils[req.address:req.address+req.count])
# ---------- Registers ----------
def do_ReadMultipleRegistersRequest(self, req):
SimpleServer._debug('do_ReadMultipleRegistersRequest %r', req)
if (req.address + req.count) > len(self.registers):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
self.pull_registers(req.address, req.count)
return ReadMultipleRegistersResponse(self.registers[req.address:req.address+req.count])
def do_WriteSingleRegisterRequest(self, req):
SimpleServer._debug('do_WriteSingleRegisterRequest %r', req)
if req.address >= len(self.registers):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
# save the value
self.registers[req.address] = req.value
self.push_registers(req.address, 1)
# return the new value
return WriteSingleRegisterResponse(req.address, req.value)
def do_WriteMultipleRegistersRequest(self, req):
SimpleServer._debug('do_WriteMultipleRegistersRequest %r', req)
if (req.address + req.count) > len(self.registers):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
# save the values
for i in range(req.count):
self.registers[req.address + i] = req.registers[i]
self.push_registers(req.address, req.count)
return WriteMultipleRegistersResponse(req.address, req.count)
# ---------- Input Registers (mapped as a register) ----------
def do_ReadInputRegistersRequest(self, req):
SimpleServer._debug('do_ReadInputRegistersRequest %r', req)
if (req.address + req.count) > len(self.registers):
raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)
self.pull_registers(req.address, req.count)
return ReadInputRegistersResponse(self.registers[req.address:req.address+req.count])
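# Extension sketch (comments only, not part of the original module): a server
# backed by real I/O would subclass SimpleServer and override the pull_*/push_*
# hooks above, e.g. refreshing self.registers from hardware in pull_registers()
# and writing changes out in push_registers(); the do_* handlers already call
# these hooks before reads and after writes.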
#
# main
#
def main():
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
# listener arguments
parser.add_argument(
"--host", type=str,
help="address of host (default {!r})".format(SERVER_HOST),
default=SERVER_HOST,
)
parser.add_argument(
"--port", type=int,
help="server port (default {!r})".format(SERVER_PORT),
default=SERVER_PORT,
)
# connection timeout arguments
parser.add_argument(
"--idle-timeout", nargs='?', type=int,
help="idle connection timeout",
default=IDLE_TIMEOUT,
)
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# local IO functions
bind(SimpleServer(), ModbusServer(port=args.port, idle_timeout=args.idle_timeout))
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
``` |
{
"source": "joelberkeley/GPflow",
"score": 2
} |
#### File: conditionals/multioutput/conditionals.py
```python
from typing import Optional
import tensorflow as tf
from ...base import MeanAndVariance
from ...inducing_variables import (
FallbackSeparateIndependentInducingVariables,
FallbackSharedIndependentInducingVariables,
InducingPoints,
MultioutputInducingVariables,
SeparateIndependentInducingVariables,
SharedIndependentInducingVariables,
)
from ...kernels import (
IndependentLatent,
LinearCoregionalization,
MultioutputKernel,
SeparateIndependent,
SharedIndependent,
)
from ...posteriors import (
FallbackIndependentLatentPosterior,
FullyCorrelatedPosterior,
IndependentPosteriorMultiOutput,
LinearCoregionalizationPosterior,
)
from ..dispatch import conditional
@conditional._gpflow_internal_register(
object, SharedIndependentInducingVariables, SharedIndependent, object
)
def shared_independent_conditional(
Xnew: tf.Tensor,
inducing_variable: SharedIndependentInducingVariables,
kernel: SharedIndependent,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
) -> MeanAndVariance:
"""Multioutput conditional for an independent kernel and shared inducing inducing.
Same behaviour as conditional with non-multioutput kernels.
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: [M, M]
- Kuf: [M, N]
- Kff: N or [N, N]
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
- See the multioutput notebook for more information about the multioutput framework.
Parameters
----------
:param Xnew: data matrix, size [N, D].
:param f: data matrix, [M, P]
:param full_cov: return the covariance between the datapoints
:param full_output_cov: return the covariance between the outputs.
Note: as we are using an independent kernel these covariances will be zero.
:param q_sqrt: matrix of standard-deviations or Cholesky matrices,
size [M, P] or [P, M, M].
:param white: boolean of whether to use the whitened representation
:return:
- mean: [N, P]
- variance: [N, P], [P, N, N], [N, P, P] or [N, P, N, P]
Please see `gpflow.conditional._expand_independent_outputs` for more information
about the shape of the variance, depending on `full_cov` and `full_output_cov`.
"""
posterior = IndependentPosteriorMultiOutput(
kernel,
inducing_variable,
f,
q_sqrt,
whiten=white,
mean_function=None,
precompute_cache=None,
)
return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
@conditional._gpflow_internal_register(
object, SeparateIndependentInducingVariables, SeparateIndependent, object
)
@conditional._gpflow_internal_register(
object, SharedIndependentInducingVariables, SeparateIndependent, object
)
@conditional._gpflow_internal_register(
object, SeparateIndependentInducingVariables, SharedIndependent, object
)
def separate_independent_conditional(
Xnew: tf.Tensor,
inducing_variable: MultioutputInducingVariables,
kernel: MultioutputKernel,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
) -> MeanAndVariance:
posterior = IndependentPosteriorMultiOutput(
kernel,
inducing_variable,
f,
q_sqrt,
whiten=white,
mean_function=None,
precompute_cache=None,
)
return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
@conditional._gpflow_internal_register(
object,
(FallbackSharedIndependentInducingVariables, FallbackSeparateIndependentInducingVariables),
IndependentLatent,
object,
)
def fallback_independent_latent_conditional(
Xnew: tf.Tensor,
inducing_variable: MultioutputInducingVariables,
kernel: IndependentLatent,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
) -> MeanAndVariance:
"""Interdomain conditional with independent latents.
In this case the number of latent GPs (L) will be different from the number of outputs (P).
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: [L, M, M]
- Kuf: [M, L, N, P]
- Kff: [N, P, N, P], [N, P, P], [N, P]
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
- See the multioutput notebook for more information about the multioutput framework.
- See above for the parameters and the return value.
"""
posterior = FallbackIndependentLatentPosterior(
kernel,
inducing_variable,
f,
q_sqrt,
whiten=white,
mean_function=None,
precompute_cache=None,
)
return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
@conditional._gpflow_internal_register(object, InducingPoints, MultioutputKernel, object)
def inducing_point_conditional(
Xnew: tf.Tensor,
inducing_variable: InducingPoints,
kernel: MultioutputKernel,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
) -> MeanAndVariance:
"""Multi-output GP with fully correlated inducing variables.
The inducing variables are shaped in the same way as evaluations of K, to allow a default
inducing point scheme for multi-output kernels.
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: [M, L, M, L]
- Kuf: [M, L, N, P]
- Kff: [N, P, N, P], [N, P, P], [N, P]
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
- See the multioutput notebook for more information about the multioutput framework.
Parameters
----------
:param f: variational mean, [L, 1]
:param q_sqrt: standard-deviations or cholesky, [L, 1] or [1, L, L]
"""
posterior = FullyCorrelatedPosterior(
kernel,
inducing_variable,
f,
q_sqrt,
whiten=white,
mean_function=None,
precompute_cache=None,
)
return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
@conditional._gpflow_internal_register(
object,
(SharedIndependentInducingVariables, SeparateIndependentInducingVariables),
LinearCoregionalization,
object,
)
def coregionalization_conditional(
Xnew: tf.Tensor,
inducing_variable: MultioutputInducingVariables,
kernel: LinearCoregionalization,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
) -> MeanAndVariance:
"""Most efficient routine to project L independent latent gps through a mixing matrix W.
The mixing matrix is a member of the `LinearCoregionalization` and has shape [P, L].
The covariance matrices used to calculate the conditional have the following shape:
- Kuu: [L, M, M]
- Kuf: [L, M, N]
- Kff: [L, N] or [L, N, N]
Further reference
-----------------
- See `gpflow.conditionals._conditional` for a detailed explanation of
conditional in the single-output case.
- See the multioutput notebook for more information about the multioutput framework.
"""
posterior = LinearCoregionalizationPosterior(
kernel,
inducing_variable,
f,
q_sqrt,
whiten=white,
mean_function=None,
precompute_cache=None,
)
return posterior.fused_predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
```
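Each registered conditional above simply builds the matching posterior object and delegates to `fused_predict_f`. Below is a minimal sketch of reaching the shared-independent case through the public `conditional` dispatcher; the shapes and values are illustrative assumptions, not taken from the file itself.
```python
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.conditionals import conditional

M, N, D, P = 8, 5, 2, 3  # inducing points, test points, input dim, outputs

kernel = gpflow.kernels.SharedIndependent(
    gpflow.kernels.SquaredExponential(), output_dim=P
)
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
    gpflow.inducing_variables.InducingPoints(np.random.randn(M, D))
)

q_mu = tf.zeros((M, P), dtype=gpflow.default_float())                # f: [M, P]
q_sqrt = tf.eye(M, batch_shape=(P,), dtype=gpflow.default_float())   # [P, M, M]

Xnew = np.random.randn(N, D)
mean, var = conditional(Xnew, iv, kernel, q_mu, q_sqrt=q_sqrt, white=True)
# mean: [N, P], var: [N, P] with full_cov=False, full_output_cov=False
```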
#### File: gpflow/conditionals/sample_conditionals.py
```python
from typing import Optional
import tensorflow as tf
from ..base import SamplesMeanAndVariance
from ..inducing_variables import InducingVariables
from ..kernels import Kernel
from .dispatch import conditional, sample_conditional
from .util import sample_mvn
@sample_conditional.register(object, object, Kernel, object)
@sample_conditional.register(object, InducingVariables, Kernel, object)
def _sample_conditional(
Xnew: tf.Tensor,
inducing_variable: InducingVariables,
kernel: Kernel,
f: tf.Tensor,
*,
full_cov: bool = False,
full_output_cov: bool = False,
q_sqrt: Optional[tf.Tensor] = None,
white: bool = False,
num_samples: Optional[int] = None,
) -> SamplesMeanAndVariance:
"""
`sample_conditional` will return a sample from the conditional distribution.
In most cases this means calculating the conditional mean m and variance v and then
returning m + sqrt(v) * eps, with eps ~ N(0, 1).
However, for some combinations of Mok and Mof more efficient sampling routines exist.
The dispatcher will make sure that we use the most efficient one.
:return: samples, mean, cov
samples has shape [num_samples, N, P] or [N, P] if num_samples is None
mean and cov as for conditional()
"""
if full_cov and full_output_cov:
msg = "The combination of both `full_cov` and `full_output_cov` is not permitted."
raise NotImplementedError(msg)
mean, cov = conditional(
Xnew,
inducing_variable,
kernel,
f,
q_sqrt=q_sqrt,
white=white,
full_cov=full_cov,
full_output_cov=full_output_cov,
)
if full_cov:
# mean: [..., N, P]
# cov: [..., P, N, N]
mean_for_sample = tf.linalg.adjoint(mean) # [..., P, N]
samples = sample_mvn(
mean_for_sample, cov, full_cov=True, num_samples=num_samples
) # [..., (S), P, N]
samples = tf.linalg.adjoint(samples) # [..., (S), N, P]
else:
# mean: [..., N, P]
# cov: [..., N, P] or [..., N, P, P]
samples = sample_mvn(
mean, cov, full_cov=full_output_cov, num_samples=num_samples
) # [..., (S), N, P]
return samples, mean, cov
```
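A usage sketch for the dispatcher above, reusing the shared-independent objects from the previous sketch (`Xnew`, `iv`, `kernel`, `q_mu`, `q_sqrt` are the same assumed variables):
```python
from gpflow.conditionals import sample_conditional

samples, mean, cov = sample_conditional(
    Xnew, iv, kernel, q_mu, q_sqrt=q_sqrt, white=True, num_samples=10
)
# samples: [10, N, P]; mean and cov are as returned by conditional()
```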
#### File: gpflow/config/__config__.py
```python
import contextlib
import enum
import os
from dataclasses import dataclass, field, replace
from typing import Any, Dict, Generator, List, Mapping, Optional, Union
import numpy as np
import tabulate
import tensorflow as tf
import tensorflow_probability as tfp
__all__ = [
"Config",
"as_context",
"config",
"set_config",
"default_float",
"set_default_float",
"default_int",
"set_default_int",
"default_jitter",
"set_default_jitter",
"default_positive_bijector",
"set_default_positive_bijector",
"default_positive_minimum",
"set_default_positive_minimum",
"default_summary_fmt",
"set_default_summary_fmt",
"positive_bijector_type_map",
]
__config: Optional["Config"] = None
class _Values(enum.Enum):
"""Setting's names collection with default values. The `name` method returns name
of the environment variable. E.g. for `SUMMARY_FMT` field the environment variable
will be `GPFLOW_SUMMARY_FMT`."""
INT = np.int32
FLOAT = np.float64
POSITIVE_BIJECTOR = "softplus"
POSITIVE_MINIMUM = 0.0
SUMMARY_FMT = "fancy_grid"
JITTER = 1e-6
@property
def name(self) -> str: # type: ignore # name is generated and has weird typing.
return f"GPFLOW_{super().name}"
def _default(value: _Values) -> Any:
"""Checks if value is set in the environment."""
return os.getenv(value.name, default=value.value)
def _default_numeric_type_factory(
valid_types: Mapping[str, type], enum_key: _Values, type_name: str
) -> type:
value: Union[str, type] = _default(enum_key)
if isinstance(value, type) and (value in valid_types.values()):
return value
assert isinstance(value, str) # Hint for mypy
if value not in valid_types:
raise TypeError(f"Config cannot recognize {type_name} type.")
return valid_types[value]
def _default_int_factory() -> type:
valid_types = dict(int16=np.int16, int32=np.int32, int64=np.int64)
return _default_numeric_type_factory(valid_types, _Values.INT, "int")
def _default_float_factory() -> type:
valid_types = dict(float16=np.float16, float32=np.float32, float64=np.float64)
return _default_numeric_type_factory(valid_types, _Values.FLOAT, "float")
def _default_jitter_factory() -> float:
value = _default(_Values.JITTER)
try:
return float(value)
except ValueError:
raise TypeError("Config cannot set the jitter value with non float type.")
def _default_positive_bijector_factory() -> str:
bijector_type: str = _default(_Values.POSITIVE_BIJECTOR)
if bijector_type not in positive_bijector_type_map().keys():
raise TypeError(
"Config cannot set the passed value as a default positive bijector."
f"Available options: {set(positive_bijector_type_map().keys())}"
)
return bijector_type
def _default_positive_minimum_factory() -> float:
value = _default(_Values.POSITIVE_MINIMUM)
try:
return float(value)
except ValueError:
raise TypeError("Config cannot set the positive_minimum value with non float type.")
def _default_summary_fmt_factory() -> Optional[str]:
result: Optional[str] = _default(_Values.SUMMARY_FMT)
return result
# The following type alias is for the Config class, to help a static analyser distinguish
# between the built-in 'float' type and the 'float' type defined in that class.
Float = Union[float]
@dataclass(frozen=True)
class Config:
"""
Immutable object for storing global GPflow settings
Args:
int: Integer data type, int32 or int64.
float: Float data type, float32 or float64
jitter: Jitter value. Mainly used for making badly conditioned matrices more stable.
Default value is `1e-6`.
positive_bijector: Method for positive bijector, either "softplus" or "exp".
Default is "softplus".
positive_minimum: Lower bound for the positive transformation.
summary_fmt: Summary format for module printing.
"""
int: type = field(default_factory=_default_int_factory)
float: type = field(default_factory=_default_float_factory)
jitter: Float = field(default_factory=_default_jitter_factory)
positive_bijector: str = field(default_factory=_default_positive_bijector_factory)
positive_minimum: Float = field(default_factory=_default_positive_minimum_factory)
summary_fmt: Optional[str] = field(default_factory=_default_summary_fmt_factory)
def config() -> Config:
"""Returns current active config."""
assert __config is not None, "__config is None. This should never happen."
return __config
def default_int() -> type:
"""Returns default integer type"""
return config().int
def default_float() -> type:
"""Returns default float type"""
return config().float
def default_jitter() -> float:
"""
The jitter is a constant that GPflow adds to the diagonal of matrices
to achieve numerical stability of the system when the condition number
of the associated matrices is large, and the matrices are therefore nearly singular.
"""
return config().jitter
def default_positive_bijector() -> str:
"""Type of bijector used for positive constraints: exp or softplus."""
return config().positive_bijector
def default_positive_minimum() -> float:
"""Shift constant that GPflow adds to all positive constraints."""
return config().positive_minimum
def default_summary_fmt() -> Optional[str]:
"""Summary printing format as understood by :mod:`tabulate` or a special case "notebook"."""
return config().summary_fmt
def set_config(new_config: Config) -> None:
"""Update GPflow config with new settings from `new_config`."""
global __config
__config = new_config
def set_default_int(value_type: type) -> None:
"""
Sets default integer type. Available options are ``np.int16``, ``np.int32``,
or ``np.int64``.
"""
try:
tf_dtype = tf.as_dtype(value_type) # Test that it's a tensorflow-valid dtype
except TypeError:
raise TypeError(f"{value_type} is not a valid tf or np dtype")
if not tf_dtype.is_integer:
raise TypeError(f"{value_type} is not an integer dtype")
set_config(replace(config(), int=tf_dtype.as_numpy_dtype))
def set_default_float(value_type: type) -> None:
"""
Sets default float type. Available options are `np.float16`, `np.float32`,
or `np.float64`.
"""
try:
tf_dtype = tf.as_dtype(value_type) # Test that it's a tensorflow-valid dtype
except TypeError:
raise TypeError(f"{value_type} is not a valid tf or np dtype")
if not tf_dtype.is_floating:
raise TypeError(f"{value_type} is not a float dtype")
set_config(replace(config(), float=tf_dtype.as_numpy_dtype))
def set_default_jitter(value: float) -> None:
"""
Sets constant jitter value.
The jitter is a constant that GPflow adds to the diagonal of matrices
to achieve numerical stability of the system when the condition number
of the associated matrices is large, and the matrices are therefore nearly singular.
"""
if not (
isinstance(value, (tf.Tensor, np.ndarray)) and len(value.shape) == 0
) and not isinstance(value, float):
raise TypeError("Expected float32 or float64 scalar value")
if value < 0:
raise ValueError("Jitter must be non-negative")
set_config(replace(config(), jitter=value))
def set_default_positive_bijector(value: str) -> None:
"""
Sets positive bijector type.
There are currently two options implemented: "exp" and "softplus".
"""
type_map = positive_bijector_type_map()
if isinstance(value, str):
value = value.lower()
if value not in type_map:
raise ValueError(f"`{value}` not in set of valid bijectors: {sorted(type_map)}")
set_config(replace(config(), positive_bijector=value))
def set_default_positive_minimum(value: float) -> None:
"""Sets shift constant for positive transformation."""
if not (
isinstance(value, (tf.Tensor, np.ndarray)) and len(value.shape) == 0
) and not isinstance(value, float):
raise TypeError("Expected float32 or float64 scalar value")
if value < 0:
raise ValueError("Positive minimum must be non-negative")
set_config(replace(config(), positive_minimum=value))
def set_default_summary_fmt(value: Optional[str]) -> None:
formats: List[Optional[str]] = list(tabulate.tabulate_formats)
formats.extend(["notebook", None])
if value not in formats:
raise ValueError(f"Summary does not support '{value}' format")
set_config(replace(config(), summary_fmt=value))
def positive_bijector_type_map() -> Dict[str, type]:
return {
"exp": tfp.bijectors.Exp,
"softplus": tfp.bijectors.Softplus,
}
@contextlib.contextmanager
def as_context(temporary_config: Optional[Config] = None) -> Generator[None, None, None]:
"""Ensure that global configs defaults, with a context manager. Useful for testing."""
current_config = config()
temporary_config = replace(current_config) if temporary_config is None else temporary_config
try:
set_config(temporary_config)
yield
finally:
set_config(current_config)
# Set global config.
set_config(Config())
```
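A short usage sketch of the setters and the context manager defined above; the particular values are arbitrary examples.
```python
import numpy as np
import gpflow

gpflow.config.set_default_float(np.float32)   # affects subsequently created parameters
print(gpflow.config.default_float())          # <class 'numpy.float32'>

# temporarily raise the jitter inside a block; the previous config is restored on exit
with gpflow.config.as_context(gpflow.config.Config(jitter=1e-4)):
    assert gpflow.config.default_jitter() == 1e-4
```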
#### File: covariances/multioutput/kuus.py
```python
from typing import Union
import tensorflow as tf
from ...inducing_variables import (
FallbackSeparateIndependentInducingVariables,
FallbackSharedIndependentInducingVariables,
InducingPoints,
)
from ...kernels import (
IndependentLatent,
LinearCoregionalization,
MultioutputKernel,
SeparateIndependent,
SharedIndependent,
)
from ..dispatch import Kuu
@Kuu.register(InducingPoints, MultioutputKernel)
def Kuu_generic(
inducing_variable: InducingPoints, kernel: MultioutputKernel, *, jitter: float = 0.0
) -> tf.Tensor:
Kmm = kernel(inducing_variable.Z, full_cov=True, full_output_cov=True) # [M, P, M, P]
M = tf.shape(Kmm)[0] * tf.shape(Kmm)[1]
jittermat = jitter * tf.reshape(tf.eye(M, dtype=Kmm.dtype), tf.shape(Kmm))
return Kmm + jittermat
@Kuu.register(FallbackSharedIndependentInducingVariables, SharedIndependent)
def Kuu_shared_shared(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: SharedIndependent,
*,
jitter: float = 0.0,
) -> tf.Tensor:
Kmm = Kuu(inducing_variable.inducing_variable, kernel.kernel) # [M, M]
jittermat = tf.eye(inducing_variable.num_inducing, dtype=Kmm.dtype) * jitter
return Kmm + jittermat
@Kuu.register(FallbackSharedIndependentInducingVariables, (SeparateIndependent, IndependentLatent))
def Kuu_fallback_shared(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: Union[SeparateIndependent, IndependentLatent],
*,
jitter: float = 0.0,
) -> tf.Tensor:
Kmm = tf.stack(
[Kuu(inducing_variable.inducing_variable, k) for k in kernel.kernels], axis=0
) # [L, M, M]
jittermat = tf.eye(inducing_variable.num_inducing, dtype=Kmm.dtype)[None, :, :] * jitter
return Kmm + jittermat
@Kuu.register(FallbackSeparateIndependentInducingVariables, SharedIndependent)
def Kuu_fallback_separate_shared(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: SharedIndependent,
*,
jitter: float = 0.0,
) -> tf.Tensor:
Kmm = tf.stack(
[Kuu(f, kernel.kernel) for f in inducing_variable.inducing_variable_list], axis=0
) # [L, M, M]
jittermat = tf.eye(inducing_variable.num_inducing, dtype=Kmm.dtype)[None, :, :] * jitter
return Kmm + jittermat
@Kuu.register(
FallbackSeparateIndependentInducingVariables, (SeparateIndependent, LinearCoregionalization)
)
def Kuu_fallback_separate(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: Union[SeparateIndependent, LinearCoregionalization],
*,
jitter: float = 0.0,
) -> tf.Tensor:
Kmms = [Kuu(f, k) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)]
Kmm = tf.stack(Kmms, axis=0) # [L, M, M]
jittermat = tf.eye(inducing_variable.num_inducing, dtype=Kmm.dtype)[None, :, :] * jitter
return Kmm + jittermat
```
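A sketch of how the registered covariances above are reached through the `Kuu` dispatcher. This is illustrative only; the inducing-variable and kernel classes are assumed to live in their usual GPflow locations.
```python
import numpy as np
import gpflow
from gpflow.covariances import Kuu

M, D, P = 7, 2, 3
Z = np.random.randn(M, D)
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
    gpflow.inducing_variables.InducingPoints(Z)
)
kernel = gpflow.kernels.SharedIndependent(gpflow.kernels.SquaredExponential(), output_dim=P)

# Dispatches to `Kuu_shared_shared` above and returns an [M, M] matrix.
Kmm = Kuu(iv, kernel, jitter=1e-6)
assert Kmm.shape == (M, M)
```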
#### File: experimental/check_shapes/inheritance.py
```python
import inspect
from typing import Callable, Optional, cast
from ..utils import experimental
from .base_types import C
@experimental
def inherit_check_shapes(func: C) -> C:
"""
Decorator that inherits the :func:`check_shapes` decoration from any overridden method in a
super-class.
See: `Class inheritance`_.
"""
return cast(C, _InheritCheckShapes(func))
class _InheritCheckShapes:
"""
Implementation of inherit_check_shapes.
The ``__set_name__`` hack is to get access to the class the method was declared on.
See: https://stackoverflow.com/a/54316392 .
"""
def __init__(self, func: C) -> None:
self._func = func
def __set_name__(self, owner: type, name: str) -> None:
overridden_check_shapes: Optional[Callable[[C], C]] = None
for parent in inspect.getmro(owner)[1:]:
overridden_method = getattr(parent, name, None)
if overridden_method is None:
continue
overridden_check_shapes = getattr(overridden_method, "__check_shapes__", None)
if overridden_check_shapes is None:
continue
break
assert overridden_check_shapes is not None, (
f"@inherit_check_shapes did not find any overridden method of name '{name}'"
f" on class '{owner.__name__}'."
)
self._func.class_name = owner.__name__ # type: ignore
wrapped = overridden_check_shapes(self._func)
setattr(owner, name, wrapped)
```
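A sketch of the intended usage, assuming the companion `check_shapes` decorator from the same experimental package and its string-based shape specifications; treat the exact spec syntax here as an assumption.
```python
import tensorflow as tf
from gpflow.experimental.check_shapes import check_shapes, inherit_check_shapes


class Model:
    @check_shapes(
        "x: [batch, n_features]",
        "return: [batch, 1]",
    )
    def predict(self, x: tf.Tensor) -> tf.Tensor:
        raise NotImplementedError


class MeanModel(Model):
    # Re-uses the shape checks declared on Model.predict.
    @inherit_check_shapes
    def predict(self, x: tf.Tensor) -> tf.Tensor:
        return tf.reduce_mean(x, axis=-1, keepdims=True)
```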
#### File: gpflow/kernels/periodic.py
```python
from typing import Optional
import numpy as np
import tensorflow as tf
from ..base import Parameter, TensorType
from ..utilities import positive
from ..utilities.ops import difference_matrix
from .base import ActiveDims, Kernel, NormalizedActiveDims
from .stationaries import IsotropicStationary
class Periodic(Kernel):
"""
The periodic family of kernels. Can be used to wrap any Stationary kernel
to transform it into a periodic version. The canonical form (based on the
SquaredExponential kernel) can be found in Equation (47) of
D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
Neural Networks and Machine Learning, pages 133--165. Springer, 1998.
The derivation can be achieved by mapping the original inputs through the
transformation u = (cos(x), sin(x)).
For the SquaredExponential base kernel, the result can be expressed as:
k(r) = σ² exp{ -0.5 sin²(π r / γ) / ℓ²}
where:
r is the Euclidean distance between the input points
ℓ is the lengthscales parameter,
σ² is the variance parameter,
γ is the period parameter.
NOTE: usually we have a factor of 4 instead of 0.5 in front but this
is absorbed into the lengthscales hyperparameter.
NOTE: periodic kernel uses `active_dims` of a base kernel, therefore
the constructor doesn't have it as an argument.
"""
def __init__(self, base_kernel: IsotropicStationary, period: TensorType = 1.0) -> None:
"""
:param base_kernel: the base kernel to make periodic; must inherit from Stationary
Note that `active_dims` should be specified in the base kernel.
:param period: the period; to induce a different period per active dimension
this must be initialized with an array the same length as the number
of active dimensions e.g. [1., 1., 1.]
"""
if not isinstance(base_kernel, IsotropicStationary):
raise TypeError("Periodic requires an IsotropicStationary kernel as the `base_kernel`")
super().__init__()
self.base_kernel = base_kernel
self.period = Parameter(period, transform=positive())
self.base_kernel._validate_ard_active_dims(self.period)
@property
def active_dims(self) -> NormalizedActiveDims:
return self.base_kernel.active_dims
@active_dims.setter
def active_dims(self, value: ActiveDims) -> None:
# type-ignore below is because mypy doesn't understand that getter and the setter of
# `active_dims` have different types.
self.base_kernel.active_dims = value # type: ignore
def K_diag(self, X: TensorType) -> tf.Tensor:
return self.base_kernel.K_diag(X)
def K(self, X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
r = np.pi * (difference_matrix(X, X2)) / self.period
scaled_sine = tf.sin(r) / self.base_kernel.lengthscales
if hasattr(self.base_kernel, "K_r"):
sine_r = tf.reduce_sum(tf.abs(scaled_sine), -1)
K = self.base_kernel.K_r(sine_r)
else:
sine_r2 = tf.reduce_sum(tf.square(scaled_sine), -1)
K = self.base_kernel.K_r2(sine_r2)
return K
```
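For reference, a minimal example of wrapping an isotropic stationary kernel with the `Periodic` class above (assuming the standard `gpflow.kernels` namespace).
```python
import numpy as np
import gpflow

base = gpflow.kernels.SquaredExponential(lengthscales=0.5)
kernel = gpflow.kernels.Periodic(base, period=2.0)

X = np.linspace(0.0, 10.0, 50).reshape(-1, 1)
K_full = kernel(X)                   # [50, 50] covariance matrix
K_diag = kernel(X, full_cov=False)   # [50] diagonal, delegated to the base kernel
```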
#### File: gpflow/models/gplvm.py
```python
from typing import Optional
import numpy as np
import tensorflow as tf
from .. import covariances, kernels, likelihoods
from ..base import Parameter, RegressionData, TensorType
from ..config import default_float, default_jitter
from ..expectations import expectation
from ..inducing_variables import InducingPoints
from ..kernels import Kernel
from ..mean_functions import MeanFunction, Zero
from ..probability_distributions import DiagonalGaussian
from ..utilities import positive, to_default_float
from ..utilities.ops import pca_reduce
from .gpr import GPR
from .model import GPModel, MeanAndVariance
from .training_mixins import InputData, InternalDataTrainingLossMixin, OutputData
from .util import InducingVariablesLike, data_input_to_tensor, inducingpoint_wrapper
class GPLVM(GPR):
"""
Standard GPLVM where the likelihood can be optimised with respect to the latent X.
"""
def __init__(
self,
data: OutputData,
latent_dim: int,
X_data_mean: Optional[tf.Tensor] = None,
kernel: Optional[Kernel] = None,
mean_function: Optional[MeanFunction] = None,
):
"""
Initialise GPLVM object. This method only works with a Gaussian likelihood.
:param data: y data matrix, size N (number of points) x D (dimensions)
:param latent_dim: the number of latent dimensions (Q)
:param X_data_mean: latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param mean_function: mean function, by default None.
"""
if X_data_mean is None:
X_data_mean = pca_reduce(data, latent_dim)
num_latent_gps = X_data_mean.shape[1]
if num_latent_gps != latent_dim:
msg = "Passed in number of latent {0} does not match initial X {1}."
raise ValueError(msg.format(latent_dim, num_latent_gps))
if mean_function is None:
mean_function = Zero()
if kernel is None:
kernel = kernels.SquaredExponential(lengthscales=tf.ones((latent_dim,)))
if data.shape[1] < num_latent_gps:
raise ValueError("More latent dimensions than observed.")
gpr_data = (Parameter(X_data_mean), data_input_to_tensor(data))
super().__init__(gpr_data, kernel, mean_function=mean_function)
class BayesianGPLVM(GPModel, InternalDataTrainingLossMixin):
def __init__(
self,
data: OutputData,
X_data_mean: tf.Tensor,
X_data_var: tf.Tensor,
kernel: Kernel,
num_inducing_variables: Optional[int] = None,
inducing_variable: Optional[InducingVariablesLike] = None,
X_prior_mean: Optional[TensorType] = None,
X_prior_var: Optional[TensorType] = None,
):
"""
Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.
:param data: data matrix, size N (number of points) x D (dimensions)
:param X_data_mean: initial latent positions, size N (number of points) x
Q (latent dimensions).
:param X_data_var: variance of latent positions ([N, Q]), for the initialisation of the
latent space.
:param kernel: kernel specification, by default Squared Exponential
:param num_inducing_variables: number of inducing points, M
:param inducing_variable: matrix of inducing points, size M (inducing points) x
Q (latent dimensions). By default random permutation of X_data_mean.
:param X_prior_mean: prior mean used in KL term of bound. By default 0.
Same size as X_data_mean.
:param X_prior_var: prior variance used in KL term of bound. By default 1.
"""
num_data, num_latent_gps = X_data_mean.shape
super().__init__(kernel, likelihoods.Gaussian(), num_latent_gps=num_latent_gps)
self.data = data_input_to_tensor(data)
assert X_data_var.ndim == 2
self.X_data_mean = Parameter(X_data_mean)
self.X_data_var = Parameter(X_data_var, transform=positive())
self.num_data = num_data
self.output_dim = self.data.shape[-1]
assert np.all(X_data_mean.shape == X_data_var.shape)
assert X_data_mean.shape[0] == self.data.shape[0], "X mean and Y must be same size."
assert X_data_var.shape[0] == self.data.shape[0], "X var and Y must be same size."
if (inducing_variable is None) == (num_inducing_variables is None):
raise ValueError(
"BayesianGPLVM needs exactly one of `inducing_variable` and"
" `num_inducing_variables`"
)
if inducing_variable is None:
# By default we initialize by subset of initial latent points
# Note that tf.random.shuffle returns a copy, it does not shuffle in-place
Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
inducing_variable = InducingPoints(Z)
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
assert X_data_mean.shape[1] == self.num_latent_gps
# deal with parameters for the prior mean variance of X
if X_prior_mean is None:
X_prior_mean = tf.zeros((self.num_data, self.num_latent_gps), dtype=default_float())
if X_prior_var is None:
X_prior_var = tf.ones((self.num_data, self.num_latent_gps))
self.X_prior_mean = tf.convert_to_tensor(np.atleast_1d(X_prior_mean), dtype=default_float())
self.X_prior_var = tf.convert_to_tensor(np.atleast_1d(X_prior_var), dtype=default_float())
assert self.X_prior_mean.shape[0] == self.num_data
assert self.X_prior_mean.shape[1] == self.num_latent_gps
assert self.X_prior_var.shape[0] == self.num_data
assert self.X_prior_var.shape[1] == self.num_latent_gps
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.elbo()
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
Y_data = self.data
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
num_inducing = self.inducing_variable.num_inducing
psi0 = tf.reduce_sum(expectation(pX, self.kernel))
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX, (self.kernel, self.inducing_variable), (self.kernel, self.inducing_variable)
),
axis=0,
)
cov_uu = covariances.Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
L = tf.linalg.cholesky(cov_uu)
sigma2 = self.likelihood.variance
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True)
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
log_det_B = 2.0 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma2
# KL[q(x) || p(x)]
dX_data_var = (
self.X_data_var
if self.X_data_var.shape.ndims == 2
else tf.linalg.diag_part(self.X_data_var)
)
NQ = to_default_float(tf.size(self.X_data_mean))
D = to_default_float(tf.shape(Y_data)[1])
KL = -0.5 * tf.reduce_sum(tf.math.log(dX_data_var))
KL += 0.5 * tf.reduce_sum(tf.math.log(self.X_prior_var))
KL -= 0.5 * NQ
KL += 0.5 * tf.reduce_sum(
(tf.square(self.X_data_mean - self.X_prior_mean) + dX_data_var) / self.X_prior_var
)
# compute log marginal bound
ND = to_default_float(tf.size(Y_data))
bound = -0.5 * ND * tf.math.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(Y_data)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.linalg.diag_part(AAT)))
bound -= KL
return bound
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
Compute the mean and variance of the latent function at some new points.
Note that this is very similar to the SGPR prediction, for which
there are notes in the SGPR notebook.
Note: This model does not allow full output covariances.
:param Xnew: points at which to predict
"""
if full_output_cov:
raise NotImplementedError
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX, (self.kernel, self.inducing_variable), (self.kernel, self.inducing_variable)
),
axis=0,
)
jitter = default_jitter()
Kus = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
sigma2 = self.likelihood.variance
L = tf.linalg.cholesky(covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter))
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True)
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma2
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
shape = tf.stack([1, 1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), axis=0)
- tf.reduce_sum(tf.square(tmp1), axis=0)
)
shape = tf.stack([1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
def predict_log_density(
self, data: RegressionData, full_cov: bool = False, full_output_cov: bool = False
) -> tf.Tensor:
raise NotImplementedError
```
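A minimal construction sketch for `BayesianGPLVM`, using only the constructor arguments shown above; the toy data and sizes are arbitrary.
```python
import numpy as np
import gpflow
from gpflow.utilities.ops import pca_reduce

Y = np.random.randn(20, 5)  # N = 20 observations, D = 5 output dimensions
latent_dim = 2

model = gpflow.models.BayesianGPLVM(
    Y,
    X_data_mean=pca_reduce(Y, latent_dim),    # [N, Q] initial latent means
    X_data_var=np.ones((20, latent_dim)),     # [N, Q] initial latent variances
    kernel=gpflow.kernels.SquaredExponential(lengthscales=np.ones(latent_dim)),
    num_inducing_variables=10,
)
elbo = model.elbo()  # scalar lower bound to be maximised during training
```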
#### File: gpflow/models/vgp.py
```python
from typing import Optional
import numpy as np
import tensorflow as tf
import gpflow
from .. import posteriors
from ..base import InputData, MeanAndVariance, Parameter, RegressionData
from ..conditionals import conditional
from ..config import default_float, default_jitter
from ..kernels import Kernel
from ..kullback_leiblers import gauss_kl
from ..likelihoods import Likelihood
from ..mean_functions import MeanFunction
from ..utilities import is_variable, triangular, triangular_size
from .model import GPModel
from .training_mixins import InternalDataTrainingLossMixin
from .util import data_input_to_tensor
class VGP_deprecated(GPModel, InternalDataTrainingLossMixin):
r"""
This method approximates the Gaussian process posterior using a multivariate Gaussian.
The idea is that the posterior over the function-value vector F is
approximated by a Gaussian, and the KL divergence is minimised between
the approximation and the posterior.
This implementation is equivalent to SVGP with X=Z, but is more efficient.
The whitened representation is used to aid optimization.
The posterior approximation is
.. math::
q(\mathbf f) = N(\mathbf f \,|\, \boldsymbol \mu, \boldsymbol \Sigma)
"""
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
):
"""
data = (X, Y) contains the input points [N, D] and the observations [N, P]
kernel, likelihood, mean_function are appropriate GPflow objects
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.data = data_input_to_tensor(data)
X_data, _Y_data = self.data
static_num_data = X_data.shape[0]
if static_num_data is None:
q_sqrt_unconstrained_shape = (self.num_latent_gps, None)
else:
q_sqrt_unconstrained_shape = (self.num_latent_gps, triangular_size(static_num_data))
self.num_data = Parameter(tf.shape(X_data)[0], shape=[], dtype=tf.int32, trainable=False)
# Many functions below don't like `Parameter`s:
dynamic_num_data = tf.convert_to_tensor(self.num_data)
self.q_mu = Parameter(
tf.zeros((dynamic_num_data, self.num_latent_gps)),
shape=(static_num_data, num_latent_gps),
)
q_sqrt = tf.eye(dynamic_num_data, batch_shape=[self.num_latent_gps])
self.q_sqrt = Parameter(
q_sqrt,
transform=triangular(),
unconstrained_shape=q_sqrt_unconstrained_shape,
constrained_shape=(num_latent_gps, static_num_data, static_num_data),
)
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.elbo()
def elbo(self) -> tf.Tensor:
r"""
This method computes the variational lower bound on the likelihood,
which is:
E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
with
q(\mathbf f) = N(\mathbf f \,|\, \boldsymbol \mu, \boldsymbol \Sigma)
"""
X_data, Y_data = self.data
num_data = tf.convert_to_tensor(self.num_data)
# Get prior KL.
KL = gauss_kl(self.q_mu, self.q_sqrt)
# Get conditionals
K = self.kernel(X_data) + tf.eye(num_data, dtype=default_float()) * default_jitter()
L = tf.linalg.cholesky(K)
fmean = tf.linalg.matmul(L, self.q_mu) + self.mean_function(X_data) # [NN, ND] -> ND
q_sqrt_dnn = tf.linalg.band_part(self.q_sqrt, -1, 0) # [D, N, N]
L_tiled = tf.tile(tf.expand_dims(L, 0), tf.stack([self.num_latent_gps, 1, 1]))
LTA = tf.linalg.matmul(L_tiled, q_sqrt_dnn) # [D, N, N]
fvar = tf.reduce_sum(tf.square(LTA), 2)
fvar = tf.transpose(fvar)
# Get variational expectations.
var_exp = self.likelihood.variational_expectations(fmean, fvar, Y_data)
return tf.reduce_sum(var_exp) - KL
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
X_data, _Y_data = self.data
mu, var = conditional(
Xnew,
X_data,
self.kernel,
self.q_mu,
q_sqrt=self.q_sqrt,
full_cov=full_cov,
white=True,
)
return mu + self.mean_function(Xnew), var
class VGP_with_posterior(VGP_deprecated):
"""
This is an implementation of VGP that provides a posterior() method that
enables caching for faster subsequent predictions.
"""
def posterior(
self,
precompute_cache: posteriors.PrecomputeCacheType = posteriors.PrecomputeCacheType.TENSOR,
) -> posteriors.VGPPosterior:
"""
Create the Posterior object which contains precomputed matrices for
faster prediction.
precompute_cache has three settings:
- `PrecomputeCacheType.TENSOR` (or `"tensor"`): Precomputes the cached
quantities and stores them as tensors (which allows differentiating
through the prediction). This is the default.
- `PrecomputeCacheType.VARIABLE` (or `"variable"`): Precomputes the cached
quantities and stores them as variables, which allows for updating
their values without changing the compute graph (relevant for AOT
compilation).
- `PrecomputeCacheType.NOCACHE` (or `"nocache"` or `None`): Avoids
immediate cache computation. This is useful for avoiding extraneous
computations when you only want to call the posterior's
`fused_predict_f` method.
"""
X_data, _Y_data = self.data
return posteriors.VGPPosterior(
self.kernel,
X_data,
self.q_mu,
self.q_sqrt,
mean_function=self.mean_function,
precompute_cache=precompute_cache,
)
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
For backwards compatibility, VGP's predict_f uses the fused (no-cache)
computation, which is more efficient during training.
For faster (cached) prediction, predict directly from the posterior object, i.e.,:
model.posterior().predict_f(Xnew, ...)
"""
return self.posterior(posteriors.PrecomputeCacheType.NOCACHE).fused_predict_f(
Xnew, full_cov=full_cov, full_output_cov=full_output_cov
)
class VGP(VGP_with_posterior):
# subclassed to ensure __class__ == "VGP"
pass
def update_vgp_data(vgp: VGP_deprecated, new_data: RegressionData) -> None:
"""
Set the data on the given VGP model, and update its variational parameters.
As opposed to many of the other models the VGP has internal parameters whose shape depends on
the shape of the data. This function updates the internal data of the given vgp and updates
the variational parameters to fit.
This function requires that the input :param:`vgp` was created with :class:`tf.Variable`s for
:param:`data`.
"""
old_X_data, old_Y_data = vgp.data
assert is_variable(old_X_data) and is_variable(
old_Y_data
), "update_vgp_data requires the model to have been created with variable data."
new_X_data, new_Y_data = new_data
new_num_data = tf.shape(new_X_data)[0]
f_mu, f_cov = vgp.predict_f(new_X_data, full_cov=True) # [N, L], [L, N, N]
# This model is hard-coded to use the whitened representation, i.e. q_mu and q_sqrt
# parametrize q(v), and u = f(X) = L v, where L = cholesky(K(X, X)). Hence we need to
# back-transform from f_mu and f_cov to obtain the updated new_q_mu and new_q_sqrt:
Knn = vgp.kernel(new_X_data, full_cov=True) # [N, N]
jitter_mat = default_jitter() * tf.eye(new_num_data, dtype=Knn.dtype)
Lnn = tf.linalg.cholesky(Knn + jitter_mat) # [N, N]
new_q_mu = tf.linalg.triangular_solve(Lnn, f_mu) # [N, L]
tmp = tf.linalg.triangular_solve(Lnn[None], f_cov) # [L, N, N], L⁻¹ f_cov
S_v = tf.linalg.triangular_solve(Lnn[None], tf.linalg.matrix_transpose(tmp)) # [L, N, N]
new_q_sqrt = tf.linalg.cholesky(S_v + jitter_mat) # [L, N, N]
old_X_data.assign(new_X_data)
old_Y_data.assign(new_Y_data)
vgp.num_data.assign(new_num_data)
vgp.q_mu.assign(new_q_mu)
vgp.q_sqrt.assign(new_q_sqrt)
class VGPOpperArchambeau(GPModel, InternalDataTrainingLossMixin):
r"""
This method approximates the Gaussian process posterior using a multivariate Gaussian.
The key reference is:
::
@article{Opper:2009,
title = {The Variational Gaussian Approximation Revisited},
author = {<NAME> and <NAME>},
journal = {Neural Comput.},
year = {2009},
pages = {786--792},
}
The idea is that the posterior over the function-value vector F is
approximated by a Gaussian, and the KL divergence is minimised between
the approximation and the posterior. It turns out that the optimal
posterior precision shares off-diagonal elements with the prior, so
only the diagonal elements of the precision need be adjusted.
The posterior approximation is
.. math::
q(\mathbf f) = N(\mathbf f \,|\, \mathbf K \boldsymbol \alpha,
[\mathbf K^{-1} + \textrm{diag}(\boldsymbol \lambda^2)]^{-1})
This approach has only 2ND parameters, rather than the N + N^2 of vgp,
but the optimization is non-convex and in practice may cause difficulty.
"""
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
):
"""
data = (X, Y) contains the input points [N, D] and the observations [N, P]
kernel, likelihood, mean_function are appropriate GPflow objects
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.data = data_input_to_tensor(data)
X_data, _Y_data = self.data
self.num_data = X_data.shape[0]
self.q_alpha = Parameter(np.zeros((self.num_data, self.num_latent_gps)))
self.q_lambda = Parameter(
np.ones((self.num_data, self.num_latent_gps)), transform=gpflow.utilities.positive()
)
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.elbo()
def elbo(self) -> tf.Tensor:
r"""
q_alpha, q_lambda are variational parameters, size [N, R]
This method computes the variational lower bound on the likelihood,
which is:
E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
with
q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
"""
X_data, Y_data = self.data
K = self.kernel(X_data)
K_alpha = tf.linalg.matmul(K, self.q_alpha)
f_mean = K_alpha + self.mean_function(X_data)
# compute the variance for each of the outputs
I = tf.tile(
tf.eye(self.num_data, dtype=default_float())[None, ...], [self.num_latent_gps, 1, 1]
)
A = (
I
+ tf.transpose(self.q_lambda)[:, None, ...]
* tf.transpose(self.q_lambda)[:, :, None, ...]
* K
)
L = tf.linalg.cholesky(A)
Li = tf.linalg.triangular_solve(L, I)
tmp = Li / tf.transpose(self.q_lambda)[:, None, ...]
f_var = 1.0 / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))
# some statistics about A are used in the KL
A_logdet = 2.0 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L)))
trAi = tf.reduce_sum(tf.square(Li))
KL = 0.5 * (
A_logdet
+ trAi
- self.num_data * self.num_latent_gps
+ tf.reduce_sum(K_alpha * self.q_alpha)
)
v_exp = self.likelihood.variational_expectations(f_mean, f_var, Y_data)
return tf.reduce_sum(v_exp) - KL
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
r"""
The posterior variance of F is given by
q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
Here we project this to F*, the values of the GP at Xnew which is given
by
q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
diag(lambda**-2)]^-1 K_{f*} )
Note: This model currently does not allow full output covariances
"""
if full_output_cov:
raise NotImplementedError
X_data, _ = self.data
# compute kernel things
Kx = self.kernel(X_data, Xnew)
K = self.kernel(X_data)
# predictive mean
f_mean = tf.linalg.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)
# predictive var
A = K + tf.linalg.diag(tf.transpose(1.0 / tf.square(self.q_lambda)))
L = tf.linalg.cholesky(A)
Kx_tiled = tf.tile(Kx[None, ...], [self.num_latent_gps, 1, 1])
LiKx = tf.linalg.triangular_solve(L, Kx_tiled)
if full_cov:
f_var = self.kernel(Xnew) - tf.linalg.matmul(LiKx, LiKx, transpose_a=True)
else:
f_var = self.kernel(Xnew, full_cov=False) - tf.reduce_sum(tf.square(LiKx), axis=1)
return f_mean, tf.transpose(f_var)
```
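A sketch of the intended `update_vgp_data` workflow: the model is built on `tf.Variable` data with an unknown leading dimension so that it can later be grown in place. The shapes and data here are illustrative.
```python
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.models.vgp import update_vgp_data

X = np.random.randn(10, 1)
Y = np.sin(X)

# Variable-shaped data so that the number of rows can change later.
X_var = tf.Variable(X, shape=[None, 1], trainable=False)
Y_var = tf.Variable(Y, shape=[None, 1], trainable=False)

model = gpflow.models.VGP(
    (X_var, Y_var),
    kernel=gpflow.kernels.SquaredExponential(),
    likelihood=gpflow.likelihoods.Gaussian(),
)

# Append new observations and refresh q_mu / q_sqrt to match.
X_new = np.concatenate([X, np.random.randn(3, 1)], axis=0)
Y_new = np.sin(X_new)
update_vgp_data(model, (X_new, Y_new))
```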
#### File: gpflow/monitor/tensorboard.py
```python
from io import BytesIO
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
import numpy as np
import tensorflow as tf
from ..base import Parameter
from ..models import BayesianModel
from ..utilities import parameter_dict
from .base import MonitorTask
if TYPE_CHECKING:
import matplotlib
__all__ = ["ToTensorBoard", "ModelToTensorBoard", "ScalarToTensorBoard", "ImageToTensorBoard"]
class ToTensorBoard(MonitorTask):
writers: Dict[str, tf.summary.SummaryWriter] = {}
def __init__(self, log_dir: str) -> None:
"""
:param log_dir: directory in which to store the tensorboard files.
Can be nested, e.g. ./logs/my_run/
"""
super().__init__()
if log_dir not in self.writers:
self.writers[log_dir] = tf.summary.create_file_writer(log_dir)
self.file_writer = self.writers[log_dir]
def __call__(self, step: int, **kwargs: Any) -> None:
with self.file_writer.as_default():
super().__call__(step, **kwargs)
self.file_writer.flush()
class ModelToTensorBoard(ToTensorBoard):
"""
Monitoring task that creates a sensible TensorBoard for a model.
Monitors all the model's parameters whose names match any of the `keywords_to_monitor`.
By default, "kernel" and "likelihood" are elements of `keywords_to_monitor`.
Example:
keyword = "kernel", parameter = "kernel.lengthscale" => match
keyword = "variational", parameter = "kernel.lengthscale" => no match
"""
def __init__(
self,
log_dir: str,
model: BayesianModel,
*,
max_size: int = 3,
keywords_to_monitor: List[str] = ["kernel", "likelihood"],
left_strip_character: str = ".",
) -> None:
"""
:param log_dir: directory in which to store the tensorboard files.
Can be nested: for example, './logs/my_run/'.
:param model: model to be monitored.
:param max_size: maximum array size (inclusive) for which each element of
the array is stored independently as a scalar in the TensorBoard.
Setting max_size to -1 will write all values. Use with care.
:param keywords_to_monitor: specifies keywords to be monitored.
If the parameter's name includes any of the keywords specified it
will be monitored. By default, parameters that match the `kernel` or
`likelihood` keyword are monitored.
Adding a "*" to the list will match with all parameters,
i.e. no parameters or variables will be filtered out.
:param left_strip_character: certain frameworks prepend their variables with
a character. GPflow adds a '.' and Keras adds a '_', for example.
When a `left_strip_character` is specified it will be stripped from the
parameter's name. By default the '.' is left stripped, for example:
".likelihood.variance" becomes "likelihood.variance".
"""
super().__init__(log_dir)
self.model = model
self.max_size = max_size
self.keywords_to_monitor = keywords_to_monitor
self.summarize_all = "*" in self.keywords_to_monitor
self.left_strip_character = left_strip_character
def run(self, **unused_kwargs: Any) -> None:
for name, parameter in parameter_dict(self.model).items():
# check if the parameter name matches any of the specified keywords
if self.summarize_all or any(keyword in name for keyword in self.keywords_to_monitor):
# keys are sometimes prepended with a character, which we strip
name = name.lstrip(self.left_strip_character)
self._summarize_parameter(name, parameter)
def _summarize_parameter(self, name: str, param: Union[Parameter, tf.Variable]) -> None:
"""
:param name: identifier used in tensorboard
:param param: parameter to be stored in tensorboard
"""
param = tf.reshape(param, (-1,))
size = param.shape[0]
if not isinstance(size, int):
raise ValueError(
f"The monitoring can not be autographed as the size of a parameter {param} "
"is unknown at compile time. If compiling the monitor task is important, "
"make sure the shape of all parameters is known beforehand. Otherwise, "
"run the monitor outside the `tf.function`."
)
if size == 1:
# if there's only one element do not add a numbered suffix
tf.summary.scalar(name, param[0], step=self.current_step)
else:
it = range(size) if self.max_size == -1 else range(min(size, self.max_size))
for i in it:
tf.summary.scalar(f"{name}[{i}]", param[i], step=self.current_step)
class ScalarToTensorBoard(ToTensorBoard):
"""Stores the return value of a callback in a TensorBoard."""
def __init__(self, log_dir: str, callback: Callable[[], float], name: str) -> None:
"""
:param log_dir: directory in which to store the tensorboard files.
For example, './logs/my_run/'.
:param callback: callback to be executed and result written to TensorBoard.
A callback can have arguments (e.g. data) passed to the function using
keyword arguments.
For example:
```
cb = lambda x=None: 2 * x
task = ScalarToTensorBoard(logdir, cb, "callback")
# specify the argument of the function using kwargs, the names need to match.
task(step, x=1)
```
:param name: name used in TensorBoard.
"""
super().__init__(log_dir)
self.name = name
self.callback = callback
def run(self, **kwargs: Any) -> None:
tf.summary.scalar(self.name, self.callback(**kwargs), step=self.current_step)
class ImageToTensorBoard(ToTensorBoard):
def __init__(
self,
log_dir: str,
plotting_function: Callable[
["matplotlib.figure.Figure", "matplotlib.figure.Axes"], "matplotlib.figure.Figure"
],
name: Optional[str] = None,
*,
fig_kw: Optional[Dict[str, Any]] = None,
subplots_kw: Optional[Dict[str, Any]] = None,
) -> None:
"""
:param log_dir: directory in which to store the tensorboard files.
Can be nested: for example, './logs/my_run/'.
:param plotting_function: function performing the plotting.
:param name: name used in TensorBoard.
:param fig_kw: keyword arguments to be passed to Figure constructor, e.g. `figsize`.
:param subplots_kw: keyword arguments to be passed to figure.subplots constructor, e.g.
`nrows`, `ncols`, `sharex`, `sharey`. By default the default values
from matplotlib.pyplot are used.
"""
super().__init__(log_dir)
self.plotting_function = plotting_function
self.name = name
self.fig_kw = fig_kw or {}
self.subplots_kw = subplots_kw or {}
try:
from matplotlib.figure import Figure
except ImportError:
raise RuntimeError("ImageToTensorBoard requires the matplotlib package to be installed")
self.fig = Figure(**self.fig_kw)
if self.subplots_kw != {}:
self.axes = self.fig.subplots(**self.subplots_kw)
else:
self.axes = self.fig.add_subplot(111)
def _clear_axes(self) -> None:
if isinstance(self.axes, np.ndarray):
for ax in self.axes.flatten():
ax.clear()
else:
self.axes.clear()
def run(self, **unused_kwargs: Any) -> None:
from matplotlib.backends.backend_agg import FigureCanvasAgg
self._clear_axes()
self.plotting_function(self.fig, self.axes)
canvas = FigureCanvasAgg(self.fig)
canvas.draw()
# get PNG data from the figure
png_buffer = BytesIO()
canvas.print_png(png_buffer)
png_encoded = png_buffer.getvalue()
png_buffer.close()
image_tensor = tf.io.decode_png(png_encoded)[None]
# Write to TensorBoard
tf.summary.image(self.name, image_tensor, step=self.current_step)
```
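A sketch of how the tasks above can be driven directly by calling them with a step index; the model, callback, and log directory are placeholders.
```python
import numpy as np
import gpflow
from gpflow.monitor import ModelToTensorBoard, ScalarToTensorBoard

X = np.random.randn(20, 1)
Y = np.sin(X) + 0.1 * np.random.randn(20, 1)
model = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.SquaredExponential())

log_dir = "./logs/example_run"  # hypothetical output directory
tasks = [
    ModelToTensorBoard(log_dir, model),  # logs kernel and likelihood parameters
    ScalarToTensorBoard(log_dir, lambda: model.training_loss().numpy(), "training_loss"),
]

for step in range(5):
    # an optimizer update would normally go here
    for task in tasks:
        task(step)
```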
#### File: gpflow/utilities/misc.py
```python
from typing import Callable, Iterable, List, Optional, Union
import tensorflow as tf
import tensorflow_probability as tfp
from ..base import TensorData
from ..config import default_float, default_int
from .ops import cast
__all__ = [
"to_default_float",
"to_default_int",
"set_trainable",
"is_variable",
"training_loop",
]
def to_default_int(x: TensorData) -> tf.Tensor:
return cast(x, dtype=default_int())
def to_default_float(x: TensorData) -> tf.Tensor:
return cast(x, dtype=default_float())
def set_trainable(model: Union[tf.Module, Iterable[tf.Module]], flag: bool) -> None:
"""
Set trainable flag for all `tf.Variable`s and `gpflow.Parameter`s in a `tf.Module` or collection
of `tf.Module`s.
"""
modules = [model] if isinstance(model, tf.Module) else model
for mod in modules:
for variable in mod.variables:
variable._trainable = flag
def is_variable(t: TensorData) -> bool:
"""
Returns whether the `t` is a TensorFlow variable.
"""
return isinstance(t, (tf.Variable, tfp.util.TransformedVariable))
def training_loop(
closure: Callable[[], tf.Tensor],
optimizer: Optional[tf.optimizers.Optimizer] = None,
var_list: Optional[List[tf.Variable]] = None,
maxiter: int = 1_000,
compile: bool = False,
) -> None:
"""
Simple generic training loop. At each iteration uses a GradientTape to compute
the gradients of a loss function with respect to a set of variables.
:param closure: Callable that constructs a loss function based on data and model being trained
:param optimizer: tf.optimizers or tf.keras.optimizers that updates variables by applying the
corresponding loss gradients. Adam is a default optimizer with default settings.
:param var_list: List of model variables to be learnt during training
:param maxiter: Maximum number of optimization iterations.
:param compile: If `True`, wraps the optimization step in `tf.function` for faster execution.
"""
safe_optimizer = tf.optimizers.Adam() if optimizer is None else optimizer
safe_var_list = [] if var_list is None else var_list
def optimization_step() -> None:
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(safe_var_list)
loss = closure()
grads = tape.gradient(loss, safe_var_list)
safe_optimizer.apply_gradients(zip(grads, safe_var_list))
if compile:
optimization_step = tf.function(optimization_step)
for _ in range(maxiter):
optimization_step()
```
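A usage sketch for `training_loop`, assuming it is re-exported from `gpflow.utilities` as suggested by the `__all__` above; the model and hyperparameters are placeholders.
```python
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.utilities import training_loop

X = np.random.randn(50, 1)
Y = np.sin(X) + 0.1 * np.random.randn(50, 1)
model = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.SquaredExponential())

training_loop(
    model.training_loss,                       # closure over the model's internal data
    optimizer=tf.optimizers.Adam(learning_rate=0.05),
    var_list=model.trainable_variables,
    maxiter=100,
    compile=True,                              # wrap the optimization step in tf.function
)
```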
#### File: gpflow/utilities/model_utils.py
```python
import tensorflow as tf
from ..base import TensorType
def add_noise_cov(K: tf.Tensor, likelihood_variance: TensorType) -> tf.Tensor:
"""
Returns K + σ² I, where σ² is the likelihood noise variance (scalar),
and I is the corresponding identity matrix.
"""
k_diag = tf.linalg.diag_part(K)
s_diag = tf.fill(tf.shape(k_diag), likelihood_variance)
return tf.linalg.set_diag(K, k_diag + s_diag)
```
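A tiny example of `add_noise_cov`, importing directly from the module shown above.
```python
import tensorflow as tf
from gpflow.utilities.model_utils import add_noise_cov

K = tf.constant([[1.0, 0.5], [0.5, 1.0]], dtype=tf.float64)
K_noisy = add_noise_cov(K, tf.constant(0.1, dtype=tf.float64))
# K_noisy == [[1.1, 0.5], [0.5, 1.1]]
```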
#### File: gpflow/conditionals/test_broadcasted_conditionals.py
```python
from typing import cast
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
import gpflow
import gpflow.inducing_variables.multioutput as mf
import gpflow.kernels.multioutput as mk
from gpflow.base import SamplesMeanAndVariance
from gpflow.conditionals import sample_conditional
from gpflow.conditionals.util import mix_latent_gp
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
S1, S2, N, M = (
7, # num samples 1
6, # num samples 2
4, # num datapoints
3, # num inducing
)
Dx, Dy, L = (
2, # input dim
5, # output dim
4, # num latent GPs
)
W = np.random.randn(Dy, L) # mixing matrix
SX = np.random.randn(S1 * S2, N, Dx)
S1_S2_X = np.reshape(SX, [S1, S2, N, Dx])
Z = np.random.randn(M, Dx)
@pytest.mark.parametrize("full_cov", [False, True])
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("conditional_type", ["mixing", "Z", "inducing_points"])
def test_conditional_broadcasting(full_cov: bool, white: bool, conditional_type: str) -> None:
"""
Test that the `conditional` and `sample_conditional` broadcasts correctly
over leading dimensions of Xnew. Xnew can be shape [..., N, D],
and conditional should broadcast over the [...].
"""
q_mu = np.random.randn(Data.M, Data.Dy)
q_sqrt = np.tril(np.random.randn(Data.Dy, Data.M, Data.M), -1)
if conditional_type == "Z":
inducing_variable = Data.Z
kernel = gpflow.kernels.Matern52(lengthscales=0.5)
elif conditional_type == "inducing_points":
inducing_variable = gpflow.inducing_variables.InducingPoints(Data.Z)
kernel = gpflow.kernels.Matern52(lengthscales=0.5)
elif conditional_type == "mixing":
# variational params have different output dim in this case
q_mu = np.random.randn(Data.M, Data.L)
q_sqrt = np.tril(np.random.randn(Data.L, Data.M, Data.M), -1)
inducing_variable = mf.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(Data.Z)
)
kernel = mk.LinearCoregionalization(
kernels=[gpflow.kernels.Matern52(lengthscales=0.5) for _ in range(Data.L)],
W=Data.W,
)
else:
raise NotImplementedError
if conditional_type == "mixing" and full_cov:
pytest.skip("combination is not implemented")
num_samples = 5
def sample_conditional_fn(X: tf.Tensor) -> SamplesMeanAndVariance:
return cast(
SamplesMeanAndVariance,
sample_conditional(
X,
inducing_variable,
kernel,
tf.convert_to_tensor(q_mu),
q_sqrt=tf.convert_to_tensor(q_sqrt),
white=white,
full_cov=full_cov,
num_samples=num_samples,
),
)
samples = np.array([sample_conditional_fn(X)[0] for X in Data.SX])
means = np.array([sample_conditional_fn(X)[1] for X in Data.SX])
variables = np.array([sample_conditional_fn(X)[2] for X in Data.SX])
samples_S12, means_S12, vars_S12 = sample_conditional(
Data.SX,
inducing_variable,
kernel,
tf.convert_to_tensor(q_mu),
q_sqrt=tf.convert_to_tensor(q_sqrt),
white=white,
full_cov=full_cov,
num_samples=num_samples,
)
samples_S1_S2, means_S1_S2, vars_S1_S2 = sample_conditional(
Data.S1_S2_X,
inducing_variable,
kernel,
tf.convert_to_tensor(q_mu),
q_sqrt=tf.convert_to_tensor(q_sqrt),
white=white,
full_cov=full_cov,
num_samples=num_samples,
)
assert_allclose(samples_S12.shape, samples.shape)
assert_allclose(samples_S1_S2.shape, [Data.S1, Data.S2, num_samples, Data.N, Data.Dy])
assert_allclose(means_S12, means)
assert_allclose(vars_S12, variables)
assert_allclose(means_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.N, Data.Dy), means)
if full_cov:
vars_s1_s2 = vars_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.Dy, Data.N, Data.N)
assert_allclose(vars_s1_s2, variables)
else:
vars_s1_s2 = vars_S1_S2.numpy().reshape(Data.S1 * Data.S2, Data.N, Data.Dy)
assert_allclose(vars_s1_s2, variables)
# -------------------------------------------
# Test utility functions used in conditionals
# -------------------------------------------
# _mix_latent_gps
@pytest.mark.parametrize("full_cov", [True, False])
@pytest.mark.parametrize("full_output_cov", [True, False])
def test_broadcasting_mix_latent_gps(full_cov: bool, full_output_cov: bool) -> None:
S, N = 7, 20 # batch size, num data points
P, L = 10, 5 # observation dimensionality, num latent GPs
W = np.random.randn(P, L) # mixing matrix
g_mu = np.random.randn(S, N, L) # mean of the L latent GPs
g_sqrt_diag = np.tril(np.random.randn(S * L, N, N), -1) # [L*S, N, N]
g_sqrt_diag = np.reshape(g_sqrt_diag, [L, S, N, N])
g_var_diag = g_sqrt_diag @ np.transpose(g_sqrt_diag, [0, 1, 3, 2]) # [L, S, N, N]
g_var = np.zeros([S, N, L, N, L])
for l in range(L):
g_var[:, :, l, :, l] = g_var_diag[l, :, :, :] # replace diagonal elements by g_var_diag
# reference numpy implementation for mean
f_mu_ref = g_mu @ W.T # [S, N, P]
# reference numpy implementation for variance
g_var_tmp = np.transpose(g_var, [0, 1, 3, 2, 4]) # [S, N, N, L, L]
f_var_ref = W @ g_var_tmp @ W.T # [S, N, N, P, P]
f_var_ref = np.transpose(f_var_ref, [0, 1, 3, 2, 4]) # [S, N, P, N, P]
if not full_cov:
g_var_diag = np.array([g_var_diag[:, :, n, n] for n in range(N)]) # [N, L, S]
g_var_diag = np.transpose(g_var_diag, [2, 0, 1]) # [S, N, L]
# run gpflow's implementation
f_mu, f_var = mix_latent_gp(
tf.convert_to_tensor(W),
tf.convert_to_tensor(g_mu),
tf.convert_to_tensor(g_var_diag),
full_cov,
full_output_cov,
)
# we strip down f_var_ref to the elements we need
if not full_output_cov and not full_cov:
f_var_ref = np.array([f_var_ref[:, :, p, :, p] for p in range(P)]) # [P, S, N, N]
f_var_ref = np.array([f_var_ref[:, :, n, n] for n in range(N)]) # [N, P, S]
f_var_ref = np.transpose(f_var_ref, [2, 0, 1]) # [S, N, P]
elif not full_output_cov and full_cov:
f_var_ref = np.array([f_var_ref[:, :, p, :, p] for p in range(P)]) # [P, S, N, N]
f_var_ref = np.transpose(f_var_ref, [1, 0, 2, 3]) # [S, P, N, N]
elif full_output_cov and not full_cov:
f_var_ref = np.array([f_var_ref[:, n, :, n, :] for n in range(N)]) # [N, S, P, P]
f_var_ref = np.transpose(f_var_ref, [1, 0, 2, 3]) # [S, N, P, P]
else:
pass # f_var_ref has shape [..., N, P, N, P] as expected
# check equality for mean and variance of f
assert_allclose(f_mu_ref, f_mu)
assert_allclose(f_var_ref, f_var)
```
#### File: gpflow/kernels/test_changepoints.py
```python
import numpy as np
import gpflow
def test_changepoint_with_X1_X2() -> None:
N = 100
X = np.linspace(0, 100, N).reshape(N, 1)
base_k1 = gpflow.kernels.Matern32(lengthscales=0.2)
base_k2 = gpflow.kernels.Matern32(lengthscales=2.0)
k = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)
K = k(X)
assert K.shape == [N, N]
N2 = 25
X2 = np.linspace(0, 50, N2).reshape(N2, 1)
K = k(X, X2)
assert K.shape == [N, N2]
```
#### File: gpflow/likelihoods/test_heteroskedastic.py
```python
import numpy as np
import tensorflow as tf
from gpflow.likelihoods import HeteroskedasticTFPConditional
tf.random.set_seed(99012)
class Data:
rng = np.random.RandomState(123)
N = 5
Y = rng.randn(N, 1)
f_mean = rng.randn(N, 2)
f_var = rng.randn(N, 2) ** 2
def test_analytic_mean_and_var() -> None:
"""
Test that quadrature computation used in HeteroskedasticTFPConditional
of the predictive mean and variance is close to the analytical version,
which can be computed for the special case of N(y | mean=f1, scale=exp(f2)),
where f1, f2 ~ GP.
"""
analytic_mean = Data.f_mean[:, [0]]
analytic_variance = np.exp(Data.f_mean[:, [1]] + Data.f_var[:, [1]]) ** 2 + Data.f_var[:, [0]]
likelihood = HeteroskedasticTFPConditional()
y_mean, y_var = likelihood.predict_mean_and_var(Data.f_mean, Data.f_var)
np.testing.assert_allclose(y_mean, analytic_mean)
np.testing.assert_allclose(y_var, analytic_variance, rtol=1.5e-6)
```
#### File: gpflow/models/test_cglb.py
```python
from typing import Tuple
import numpy as np
import tensorflow as tf
from gpflow.base import RegressionData
from gpflow.config import default_float
from gpflow.kernels import SquaredExponential
from gpflow.models import CGLB, GPR, SGPR
from gpflow.models.cglb import NystromPreconditioner, cglb_conjugate_gradient
from gpflow.utilities import to_default_float as tdf
def data(rng: np.random.RandomState) -> Tuple[RegressionData, tf.Tensor, tf.Tensor]:
n: int = 100
t: int = 20
d: int = 2
x = rng.randn(n, d)
xs = rng.randn(t, d) # test points
c = np.array([[-1.4], [0.5]])
y = np.sin(x @ c + 0.5 * rng.randn(n, 1))
z = rng.randn(10, 2)
return (tdf(x), tdf(y)), tdf(z), tdf(xs)
def test_cglb_check_basics() -> None:
"""
* Quadratic term of CGLB with v=0 is equivalent to the quadratic term of SGPR.
* Log determinant term of CGLB is less or equal to SGPR log determinant.
In this test the `logdet_term` method returns the negative half of the log-determinant bound,
so the comparisons below are carried out with the opposite sign.
"""
rng: np.random.RandomState = np.random.RandomState(999)
train, z, _ = data(rng)
noise = 0.2
sgpr = SGPR(train, kernel=SquaredExponential(), inducing_variable=z, noise_variance=noise)
# `v_grad_optimization=True` turns off the CG in the quadratic term
cglb = CGLB(
train,
kernel=SquaredExponential(),
inducing_variable=z,
noise_variance=noise,
v_grad_optimization=True,
)
sgpr_common = sgpr._common_calculation()
cglb_common = cglb._common_calculation()
sgpr_quad_term = sgpr.quad_term(sgpr_common)
cglb_quad_term = cglb.quad_term(cglb_common)
np.testing.assert_almost_equal(sgpr_quad_term, cglb_quad_term)
sgpr_logdet = sgpr.logdet_term(sgpr_common)
cglb_logdet = cglb.logdet_term(cglb_common)
assert cglb_logdet >= sgpr_logdet
x = train[0]
K = SquaredExponential()(x) + noise * tf.eye(x.shape[0], dtype=default_float())
gpr_logdet = -0.5 * tf.linalg.logdet(K)
assert cglb_logdet <= gpr_logdet
def test_conjugate_gradient_convergence() -> None:
"""
Check that the method of conjugate gradients implemented can solve a linear system of equations
"""
rng: np.random.RandomState = np.random.RandomState(999)
noise = 1e-3
train, z, _ = data(rng)
x, y = train
n = x.shape[0]
b = tf.transpose(y)
k = SquaredExponential()
K = k(x) + noise * tf.eye(n, dtype=default_float())
Kinv_y = tf.linalg.solve(K, y) # We could solve by cholesky instead
model = CGLB((x, y), kernel=k, inducing_variable=z, noise_variance=noise)
common = model._common_calculation()
initial = tf.zeros_like(b)
A = common.A
LB = common.LB
max_error = 0.01
max_steps = 200
restart_cg_step = 200
preconditioner = NystromPreconditioner(A, LB, noise)
v = cglb_conjugate_gradient(
K, b, initial, preconditioner, max_error, max_steps, restart_cg_step
)
# NOTE: with smaller `max_error` we can reduce the `rtol`
np.testing.assert_allclose(Kinv_y, tf.transpose(v), rtol=0.1)
def test_cglb_quad_term_guarantees() -> None:
"""
Check that when conjugate gradient is used to evaluate the quadratic term,
the obtained solution is:
1. Smaller than the solution computed by Cholesky decomposition
2. Within the error tolerance of the solution computed by Cholesky
"""
rng: np.random.RandomState = np.random.RandomState(999)
max_error: float = 1e-2
noise: float = 1e-2
train, z, _ = data(rng)
x, y = train
k = SquaredExponential()
K = k(x) + noise * tf.eye(x.shape[0], dtype=default_float())
def inv_quad_term(K: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
"""
For PSD K, compute -0.5 * y.T K^{-1} y via Cholesky decomposition
"""
L = tf.linalg.cholesky(K)
Linvy = tf.linalg.triangular_solve(L, y)
return -0.5 * tf.reduce_sum(tf.square(Linvy))
cholesky_quad_term = inv_quad_term(K, y)
cglb = CGLB(
train,
kernel=k,
inducing_variable=z,
noise_variance=noise,
cg_tolerance=max_error,
max_cg_iters=100,
restart_cg_iters=10,
)
common = cglb._common_calculation()
cglb_quad_term = cglb.quad_term(common)
assert cglb_quad_term <= cholesky_quad_term
assert np.abs(cglb_quad_term - cholesky_quad_term) <= max_error
def test_cglb_predict() -> None:
"""
Test that 1.) The predict method returns the same variance estimate as SGPR.
2.) The predict method returns the same mean as SGPR for v=0.
3.) The predict method returns a mean very similar to GPR when CG is run to low tolerance.
"""
rng: np.random.RandomState = np.random.RandomState(999)
train, z, xs = data(rng)
noise = 0.2
gpr = GPR(train, kernel=SquaredExponential(), noise_variance=noise)
sgpr = SGPR(train, kernel=SquaredExponential(), inducing_variable=z, noise_variance=noise)
cglb = CGLB(
train,
kernel=SquaredExponential(),
inducing_variable=z,
noise_variance=noise,
)
gpr_mean, _ = gpr.predict_y(xs, full_cov=False)
sgpr_mean, sgpr_cov = sgpr.predict_y(xs, full_cov=False)
cglb_mean, cglb_cov = cglb.predict_y(
xs, full_cov=False, cg_tolerance=1e6
) # set tolerance high so v stays at 0.
assert np.allclose(sgpr_cov, cglb_cov)
assert np.allclose(sgpr_mean, cglb_mean)
cglb_mean, _ = cglb.predict_y(xs, full_cov=False, cg_tolerance=1e-12)
assert np.allclose(gpr_mean, cglb_mean)
```
#### File: gpflow/models/test_vgp_posterior.py
```python
from typing import Tuple
import numpy as np
import pytest
import gpflow
from gpflow.base import AnyNDArray, RegressionData
from gpflow.models.vgp import VGP_deprecated, VGP_with_posterior
from gpflow.posteriors import PrecomputeCacheType
def make_models(
regression_data: RegressionData, likelihood: gpflow.likelihoods.Likelihood
) -> Tuple[VGP_deprecated, VGP_with_posterior]:
"""Helper function to create models"""
k = gpflow.kernels.Matern52()
mold = VGP_deprecated(data=regression_data, kernel=k, likelihood=likelihood)
mnew = VGP_with_posterior(data=regression_data, kernel=k, likelihood=likelihood)
return mold, mnew
def _get_data_for_tests() -> Tuple[AnyNDArray, AnyNDArray, AnyNDArray]:
"""Helper function to create testing data"""
X = np.random.randn(5, 6)
Y = np.random.randn(5, 2)
X_new = np.random.randn(3, 10, 5, 6)
return X, X_new, Y
@pytest.mark.parametrize(
"likelihood", [gpflow.likelihoods.Gaussian(), gpflow.likelihoods.Exponential()]
)
@pytest.mark.parametrize("full_cov", [True, False])
@pytest.mark.parametrize("full_output_cov", [True, False])
def test_old_vs_new_gp_fused(
likelihood: gpflow.likelihoods.Likelihood,
full_cov: bool,
full_output_cov: bool,
) -> None:
X, X_new, Y = _get_data_for_tests()
mold, mnew = make_models((X, Y), likelihood)
mu_old, var2_old = mold.predict_f(X_new, full_cov=full_cov, full_output_cov=full_output_cov)
mu_new_fuse, var2_new_fuse = mnew.predict_f(
X_new, full_cov=full_cov, full_output_cov=full_output_cov
)
# check new fuse is same as old version
np.testing.assert_allclose(mu_new_fuse, mu_old)
np.testing.assert_allclose(var2_new_fuse, var2_old)
@pytest.mark.parametrize("cache_type", [PrecomputeCacheType.TENSOR, PrecomputeCacheType.VARIABLE])
@pytest.mark.parametrize(
"likelihood", [gpflow.likelihoods.Gaussian(), gpflow.likelihoods.Exponential()]
)
@pytest.mark.parametrize("full_cov", [True, False])
@pytest.mark.parametrize("full_output_cov", [True, False])
def test_old_vs_new_with_posterior(
cache_type: PrecomputeCacheType,
likelihood: gpflow.likelihoods.Likelihood,
full_cov: bool,
full_output_cov: bool,
) -> None:
X, X_new, Y = _get_data_for_tests()
mold, mnew = make_models((X, Y), likelihood)
mu_old, var2_old = mold.predict_f(X_new, full_cov=full_cov, full_output_cov=full_output_cov)
mu_new_cache, var2_new_cache = mnew.posterior(cache_type).predict_f(
X_new, full_cov=full_cov, full_output_cov=full_output_cov
)
# check new cache is same as old version
np.testing.assert_allclose(mu_old, mu_new_cache)
np.testing.assert_allclose(var2_old, var2_new_cache)
```
#### File: gpflow/posteriors/conftest.py
```python
from inspect import isabstract
from typing import DefaultDict, Iterable, Set, Type
import pytest
import gpflow.ci_utils
from gpflow.posteriors import AbstractPosterior
@pytest.fixture(name="tested_posteriors", scope="package")
def _tested_posteriors() -> DefaultDict[str, Set[Type[AbstractPosterior]]]:
return DefaultDict(set)
@pytest.fixture(scope="package", autouse=True)
def _ensure_all_posteriors_are_tested_fixture(
tested_posteriors: DefaultDict[str, Set[Type[AbstractPosterior]]]
) -> Iterable[None]:
"""
This fixture ensures that all concrete posteriors have unit tests which compare the predictions
from the fused and precomputed code paths. When adding a new concrete posterior class to
GPFlow, ensure that it is also tested in this manner.
This autouse, package scoped fixture will always be executed when tests in this package are run.
"""
# Code here will be executed before any of the tests in this package.
yield # Run tests in this package.
# Code here will be executed after all of the tests in this package.
available_posteriors = list(gpflow.ci_utils.subclasses(AbstractPosterior))
concrete_posteriors = set([k for k in available_posteriors if not isabstract(k)])
messages = []
for key, key_tested_posteriors in tested_posteriors.items():
untested_posteriors = concrete_posteriors - key_tested_posteriors
if untested_posteriors:
messages.append(
f"For key '{key}' no tests have been registered for the following posteriors: {untested_posteriors}."
)
if messages:
raise AssertionError("\n".join(messages))
```
#### File: gpflow/posteriors/test_bo_integration.py
```python
from typing import (
Any,
Callable,
DefaultDict,
Generic,
Iterator,
List,
Set,
Tuple,
Type,
TypeVar,
cast,
)
import numpy as np
import pytest
import tensorflow as tf
from _pytest.fixtures import SubRequest
import gpflow
from gpflow.base import RegressionData
from gpflow.config import default_float
from gpflow.inducing_variables import InducingPoints, InducingVariables
from gpflow.kernels import Kernel, Matern52
from gpflow.likelihoods import Exponential, Likelihood
from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel, training_loss_closure
from gpflow.models.vgp import update_vgp_data
from gpflow.posteriors import AbstractPosterior, PrecomputeCacheType
_M = TypeVar("_M", bound=GPModel, covariant=True)
_CreateModel = Callable[[RegressionData], _M]
# I'd like to make this a `dataclass`, but mypy gets confused about `create_model` being a
# function member that doesn't take `self`.
class _ModelFactory(Generic[_M]):
def __init__(
self,
create_model: _CreateModel[_M],
multi_output: bool,
atol: float,
rtol: float,
) -> None:
self.create_model = create_model
self.multi_output = multi_output
self.atol = atol
self.rtol = rtol
_MODEL_FACTORIES: List[_ModelFactory[Any]] = []
# This exists to make it easy to disable tf.function, for debugging.
_COMPILE = True
_MAXITER = 10
_DEFAULT_ATOL = 1e-10
_DEFAULT_RTOL = 1e-7
@pytest.fixture(name="register_posterior_bo_integration_test")
def _register_posterior_bo_integration_test(
request: SubRequest,
tested_posteriors: DefaultDict[str, Set[Type[AbstractPosterior]]],
) -> Callable[[AbstractPosterior], None]:
def _register_posterior(posterior: AbstractPosterior) -> None:
tested_posteriors[request.function.__name__].add(posterior.__class__)
return _register_posterior
def model_factory(
multi_output: bool = False, atol: float = _DEFAULT_ATOL, rtol: float = _DEFAULT_RTOL
) -> Callable[[_CreateModel[_M]], _ModelFactory[_M]]:
""" Decorator for adding a function to the `_MODEL_FACTORIES` list. """
def register(create_model: _CreateModel[_M]) -> _ModelFactory[_M]:
model_factory = _ModelFactory(
create_model,
multi_output,
atol,
rtol,
)
_MODEL_FACTORIES.append(model_factory)
return model_factory
return register
def create_kernel() -> Kernel:
return Matern52()
def create_likelihood() -> Likelihood:
return Exponential()
def create_inducing_points(data: RegressionData) -> InducingPoints:
n_features = data[0].shape[1]
n_inducing_points = 5
rng = np.random.default_rng(20220208)
Z = tf.constant(rng.random((n_inducing_points, n_features)))
return InducingPoints(Z)
def create_q(
inducing_variable: InducingVariables, *, row_scale: int = 1, column_scale: int = 1
) -> Tuple[bool, tf.Tensor, tf.Tensor]:
n_inducing_points = inducing_variable.num_inducing
rng = np.random.default_rng(20220133)
q_diag = True
q_mu = tf.constant(rng.random((row_scale * n_inducing_points, column_scale)))
q_sqrt = tf.constant(rng.random((row_scale * n_inducing_points, column_scale))) ** 2
return q_diag, q_mu, q_sqrt
@model_factory(rtol=1e-3)
def create_gpr(data: RegressionData) -> GPR:
return GPR(data=data, kernel=create_kernel())
@model_factory(rtol=1e-4)
def create_sgpr(data: RegressionData) -> SGPR:
return SGPR(data=data, kernel=create_kernel(), inducing_variable=create_inducing_points(data))
@model_factory(rtol=5e-3)
def create_vgp(data: RegressionData) -> VGP:
return VGP(data=data, kernel=create_kernel(), likelihood=create_likelihood())
@model_factory()
def create_svgp__independent_single_output(data: RegressionData) -> SVGP:
inducing_variable = create_inducing_points(data)
q_diag, q_mu, q_sqrt = create_q(inducing_variable)
return SVGP(
kernel=create_kernel(),
likelihood=create_likelihood(),
inducing_variable=inducing_variable,
q_diag=q_diag,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
@model_factory(multi_output=True)
def create_svgp__fully_correlated_multi_output(data: RegressionData) -> SVGP:
n_outputs = data[1].shape[1]
kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)
inducing_variable = create_inducing_points(data)
q_diag, q_mu, q_sqrt = create_q(inducing_variable, row_scale=n_outputs)
return SVGP(
kernel=kernel,
likelihood=create_likelihood(),
inducing_variable=inducing_variable,
q_diag=q_diag,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
@model_factory(multi_output=True)
def create_svgp__independent_multi_output(data: RegressionData) -> SVGP:
n_outputs = data[1].shape[1]
kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)
inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(
create_inducing_points(data)
)
q_diag, q_mu, q_sqrt = create_q(inducing_variable, column_scale=n_outputs)
return SVGP(
kernel=kernel,
likelihood=create_likelihood(),
inducing_variable=inducing_variable,
q_diag=q_diag,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
@model_factory(multi_output=True)
def create_svgp__fallback_independent_latent_posterior(data: RegressionData) -> SVGP:
n_outputs = data[1].shape[1]
rng = np.random.default_rng(20220131)
kernel = gpflow.kernels.LinearCoregionalization(
[create_kernel()],
W=tf.constant(rng.standard_normal((n_outputs, 1))),
)
inducing_variable = gpflow.inducing_variables.FallbackSeparateIndependentInducingVariables(
[create_inducing_points(data)]
)
q_diag, q_mu, q_sqrt = create_q(inducing_variable)
return SVGP(
kernel=kernel,
likelihood=create_likelihood(),
inducing_variable=inducing_variable,
q_diag=q_diag,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
@model_factory(multi_output=True)
def create_svgp__linear_coregionalization(data: RegressionData) -> SVGP:
n_outputs = data[1].shape[1]
rng = np.random.default_rng(20220131)
kernel = gpflow.kernels.LinearCoregionalization(
[create_kernel()], W=tf.constant(rng.standard_normal((n_outputs, 1)))
)
inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(
create_inducing_points(data)
)
q_diag, q_mu, q_sqrt = create_q(inducing_variable)
return SVGP(
kernel=kernel,
likelihood=create_likelihood(),
inducing_variable=inducing_variable,
q_diag=q_diag,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
@pytest.fixture(params=_MODEL_FACTORIES)
def _model_factory(request: SubRequest) -> _ModelFactory[Any]:
return cast(_ModelFactory[Any], request.param)
@pytest.fixture
def _f_minimum(_model_factory: _ModelFactory[Any]) -> tf.Tensor:
return (
tf.constant(
[
[0.2, 0.4],
[0.4, 0.6],
[0.6, 0.8],
],
dtype=default_float(),
)
if _model_factory.multi_output
else tf.constant([[0.3, 0.5]], dtype=default_float())
)
@pytest.fixture
def _f(_f_minimum: tf.Tensor) -> Callable[[tf.Tensor], tf.Tensor]:
def f(X: tf.Tensor) -> tf.Tensor:
err = X[:, None, :] - _f_minimum[None, :, :]
err_sq = err ** 2
return tf.reduce_sum(err_sq, axis=-1)
return f
@pytest.fixture
def _data(
_f: Callable[[tf.Tensor], tf.Tensor], _f_minimum: tf.Tensor
) -> Tuple[tf.Variable, tf.Variable]:
n_initial_data = 3
n_outputs, n_features = _f_minimum.shape
rng = np.random.default_rng(20220126)
X = tf.Variable(
rng.random((n_initial_data, n_features)),
shape=[None, n_features],
dtype=default_float(),
trainable=False,
)
Y = tf.Variable(
_f(X),
shape=[None, n_outputs],
dtype=default_float(),
trainable=False,
)
return X, Y
@pytest.fixture
def _extend_data(
_data: Tuple[tf.Variable, tf.Variable], _f: Callable[[tf.Tensor], tf.Tensor]
) -> Callable[[GPModel], Iterator[int]]:
n_iterations = 3
rng = np.random.default_rng(20220127)
X, Y = _data
n_features = X.shape[1]
def iterate(model: GPModel) -> Iterator[int]:
for i in range(n_iterations):
X_new = tf.constant(rng.random((1, n_features)))
Y_new = _f(X_new)
X_i = tf.concat([X, X_new], axis=0)
Y_i = tf.concat([Y, Y_new], axis=0)
if isinstance(model, VGP):
update_vgp_data(model, (X_i, Y_i))
else:
X.assign(X_i)
Y.assign(Y_i)
yield i
return iterate
@pytest.fixture
def _X_new(_data: Tuple[tf.Variable, tf.Variable]) -> tf.Tensor:
rng = np.random.default_rng(20220128)
X, _Y = _data
n_features = X.shape[1]
return tf.constant(rng.random((3, n_features)))
@pytest.fixture
def _optimize(_data: Tuple[tf.Variable, tf.Variable]) -> Callable[[GPModel], None]:
def optimize(model: GPModel) -> None:
gpflow.optimizers.Scipy().minimize(
training_loss_closure(model, _data, compile=_COMPILE),
variables=model.trainable_variables,
options=dict(maxiter=_MAXITER),
method="BFGS",
compile=_COMPILE,
)
return optimize
def test_posterior_bo_integration__predict_f(
register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],
_model_factory: _ModelFactory[Any],
_data: Tuple[tf.Variable, tf.Variable],
_extend_data: Callable[[GPModel], Iterator[int]],
_X_new: tf.Tensor,
) -> None:
"""
Check that data added incrementally is correctly reflected in `predict_f`.
"""
_X, Y = _data
n_rows_new = _X_new.shape[0]
n_outputs = Y.shape[1]
model = _model_factory.create_model(_data)
posterior = model.posterior(PrecomputeCacheType.VARIABLE)
register_posterior_bo_integration_test(posterior)
predict_f = posterior.predict_f
if _COMPILE:
predict_f = tf.function(predict_f)
for _ in _extend_data(model):
posterior.update_cache()
compiled_mean, compiled_var = predict_f(_X_new)
np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)
np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)
eager_model = _model_factory.create_model(_data)
eager_mean, eager_var = eager_model.predict_f(_X_new)
np.testing.assert_allclose(
eager_mean, compiled_mean, rtol=_model_factory.rtol, atol=_model_factory.atol
)
np.testing.assert_allclose(
eager_var, compiled_var, rtol=_model_factory.rtol, atol=_model_factory.atol
)
def test_posterior_bo_integration__optimization(
register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],
_model_factory: _ModelFactory[Any],
_data: Tuple[tf.Variable, tf.Variable],
_extend_data: Callable[[GPModel], Iterator[int]],
_X_new: tf.Tensor,
_optimize: Callable[[GPModel], None],
) -> None:
"""
Check that data added incrementally is considered when optimizing a model.
"""
_X, Y = _data
n_rows_new = _X_new.shape[0]
n_outputs = Y.shape[1]
model = _model_factory.create_model(_data)
posterior = model.posterior(PrecomputeCacheType.VARIABLE)
register_posterior_bo_integration_test(posterior)
predict_f = posterior.predict_f
if _COMPILE:
predict_f = tf.function(predict_f)
# Add all the data first, and then `optimize`, so that both models are optimized the same number
# of times and with the same data, so they converge to the same result.
for _ in _extend_data(model):
pass
_optimize(model)
posterior.update_cache()
compiled_mean, compiled_var = predict_f(_X_new)
np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)
np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)
eager_model = _model_factory.create_model(_data)
_optimize(eager_model)
eager_mean, eager_var = eager_model.predict_f(_X_new)
np.testing.assert_allclose(
eager_mean, compiled_mean, rtol=_model_factory.rtol, atol=_model_factory.atol
)
np.testing.assert_allclose(
eager_var, compiled_var, rtol=_model_factory.rtol, atol=_model_factory.atol
)
```
#### File: gpflow/utilities/test_model_utils.py
```python
import pytest
import tensorflow as tf
import gpflow
from gpflow.base import TensorType
from gpflow.utilities import add_noise_cov
@pytest.mark.parametrize("input_tensor", [tf.constant([[1.0, 0.5], [0.5, 1.0]])])
@pytest.mark.parametrize("variance", [gpflow.Parameter(1.0, dtype=tf.float32)])
@pytest.mark.parametrize("expected_tensor", [tf.constant([[2.0, 0.5], [0.5, 2.0]])])
def test_add_noise_cov(
input_tensor: TensorType, variance: TensorType, expected_tensor: TensorType
) -> None:
actual_tensor = add_noise_cov(input_tensor, variance)
tf.debugging.assert_equal(actual_tensor, expected_tensor)
``` |
{
"source": "joelberkeley/trieste",
"score": 2
} |
#### File: trieste/acquisition/rule.py
```python
from __future__ import annotations
import copy
from abc import ABC, abstractmethod
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Generic, Optional, TypeVar, Union
import tensorflow as tf
from .. import types
from ..data import Dataset
from ..models import ProbabilisticModel
from ..observer import OBJECTIVE
from ..space import Box, SearchSpace
from ..types import TensorType
from .function import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
ExpectedImprovement,
GreedyAcquisitionFunctionBuilder,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
)
from .optimizer import AcquisitionOptimizer, automatic_optimizer_selector, batchify
from .sampler import ExactThompsonSampler, RandomFourierFeatureThompsonSampler, ThompsonSampler
T_co = TypeVar("T_co", covariant=True)
""" Unbound covariant type variable. """
SP_contra = TypeVar("SP_contra", bound=SearchSpace, contravariant=True)
""" Contravariant type variable bound to :class:`~trieste.space.SearchSpace`. """
class AcquisitionRule(ABC, Generic[T_co, SP_contra]):
"""
The central component of the acquisition API.
An :class:`AcquisitionRule` can produce any value from the search space for this step, and the
historic data and models. This value is typically a set of query points, either on its own as
a `TensorType` (see e.g. :class:`EfficientGlobalOptimization`), or within some context
(see e.g. :class:`TrustRegion`). Indeed, to use an :class:`AcquisitionRule` in the main
:class:`~trieste.bayesian_optimizer.BayesianOptimizer` Bayesian optimization loop, the rule
must return either a `TensorType` or `State`-ful `TensorType`.
"""
@abstractmethod
def acquire(
self,
search_space: SP_contra,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
) -> T_co:
"""
Return a value of type `T_co`. Typically this will be a set of query points, either on its
own as a `TensorType` (see e.g. :class:`EfficientGlobalOptimization`), or within some
context (see e.g. :class:`TrustRegion`).
**Type hints:**
- The search space must be a :class:`~trieste.space.SearchSpace`. The exact type of
:class:`~trieste.space.SearchSpace` depends on the specific :class:`AcquisitionRule`.
:param search_space: The local acquisition search space for *this step*.
:param datasets: The known observer query points and observations for each tag.
:param models: The model to use for each :class:`~trieste.data.Dataset` in ``datasets``
(matched by tag).
:return: A value of type `T_co`.
"""
def acquire_single(
self,
search_space: SP_contra,
dataset: Dataset,
model: ProbabilisticModel,
) -> T_co:
"""
A convenience wrapper for :meth:`acquire` that uses only one model, dataset pair.
:param search_space: The global search space over which the optimization problem
is defined.
:param dataset: The known observer query points and observations.
:param model: The model to use for the dataset.
:return: A value of type `T_co`.
"""
if isinstance(dataset, dict) or isinstance(model, dict):
raise ValueError(
"AcquisitionRule.acquire_single method does not support multiple datasets "
"or models: use acquire instead"
)
return self.acquire(search_space, {OBJECTIVE: dataset}, {OBJECTIVE: model})
class EfficientGlobalOptimization(AcquisitionRule[TensorType, SP_contra]):
"""Implements the Efficient Global Optimization, or EGO, algorithm."""
def __init__(
self,
builder: Optional[
AcquisitionFunctionBuilder
| GreedyAcquisitionFunctionBuilder
| SingleModelAcquisitionBuilder
| SingleModelGreedyAcquisitionBuilder
] = None,
optimizer: AcquisitionOptimizer[SP_contra] | None = None,
num_query_points: int = 1,
):
"""
:param builder: The acquisition function builder to use. Defaults to
:class:`~trieste.acquisition.ExpectedImprovement`.
:param optimizer: The optimizer with which to optimize the acquisition function built by
``builder``. This should *maximize* the acquisition function, and must be compatible
with the global search space. Defaults to
:func:`~trieste.acquisition.optimizer.automatic_optimizer_selector`.
:param num_query_points: The number of points to acquire.
"""
if num_query_points <= 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if builder is None:
if num_query_points == 1:
builder = ExpectedImprovement()
else:
raise ValueError(
"""Need to specify a batch acquisition function when number of query points
is greater than 1"""
)
if optimizer is None:
optimizer = automatic_optimizer_selector
if isinstance(
builder, (SingleModelAcquisitionBuilder, SingleModelGreedyAcquisitionBuilder)
):
builder = builder.using(OBJECTIVE)
if isinstance(builder, AcquisitionFunctionBuilder):
# Joint batch acquisitions require batch optimizers
optimizer = batchify(optimizer, num_query_points)
self._builder: Union[AcquisitionFunctionBuilder, GreedyAcquisitionFunctionBuilder] = builder
self._optimizer = optimizer
self._num_query_points = num_query_points
self._acquisition_function: Optional[AcquisitionFunction] = None
def __repr__(self) -> str:
""""""
return f"""EfficientGlobalOptimization(
{self._builder!r},
{self._optimizer!r},
{self._num_query_points!r})"""
def acquire(
self,
search_space: SP_contra,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
) -> TensorType:
"""
Return the query point(s) that optimizes the acquisition function produced by ``builder``
(see :meth:`__init__`).
:param search_space: The local acquisition search space for *this step*.
:param datasets: The known observer query points and observations.
:param models: The models of the specified ``datasets``.
:return: The single (or batch of) points to query.
"""
if self._acquisition_function is None:
self._acquisition_function = self._builder.prepare_acquisition_function(
datasets, models
)
else:
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function, datasets, models
)
points = self._optimizer(search_space, self._acquisition_function)
if isinstance(self._builder, GreedyAcquisitionFunctionBuilder):
for _ in range(
self._num_query_points - 1
): # greedily allocate remaining batch elements
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function, datasets, models, pending_points=points
)
chosen_point = self._optimizer(search_space, self._acquisition_function)
points = tf.concat([points, chosen_point], axis=0)
return points
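# --- Illustrative usage sketch (not part of the original module) ---------------
# A single-model builder such as ExpectedImprovement (imported above) is bound to
# the OBJECTIVE tag inside __init__, so acquire() can look it up in the datasets
# and models mappings supplied by the optimization loop. This mirrors the default
# path of __init__ and is only a sketch.
def _example_single_point_ego() -> EfficientGlobalOptimization:
    """Build the default rule: one query point per step via ExpectedImprovement."""
    return EfficientGlobalOptimization(builder=ExpectedImprovement(), num_query_points=1)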
class DiscreteThompsonSampling(AcquisitionRule[TensorType, SearchSpace]):
r"""
Implements Thompson sampling for choosing optimal points.
This rule returns the minimizers of functions sampled from our model and evaluated across
a discretization of the search space (containing `N` candidate points).
The model is sampled either exactly (with an :math:`O(N^3)` complexity), or sampled
    approximately through a random Fourier feature decomposition with `M` features
(with an :math:`O(\min(n^3,M^3))` complexity for a model trained on `n` points).
"""
def __init__(
self,
num_search_space_samples: int,
num_query_points: int,
num_fourier_features: Optional[int] = None,
):
"""
:param num_search_space_samples: The number of points at which to sample the posterior.
:param num_query_points: The number of points to acquire.
        :param num_fourier_features: The number of features used to approximate the kernel. We
            recommend first trying 1000 features, as this typically performs well for a wide
            range of kernels. If None, then we perform exact Thompson sampling.
"""
if not num_search_space_samples > 0:
raise ValueError(f"Search space must be greater than 0, got {num_search_space_samples}")
if not num_query_points > 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if num_fourier_features is not None and num_fourier_features <= 0:
raise ValueError(
f"Number of fourier features must be greater than 0, got {num_query_points}"
)
self._num_search_space_samples = num_search_space_samples
self._num_query_points = num_query_points
self._num_fourier_features = num_fourier_features
def __repr__(self) -> str:
""""""
return f"""DiscreteThompsonSampling(
{self._num_search_space_samples!r},
{self._num_query_points!r},
{self._num_fourier_features!r})"""
def acquire(
self,
search_space: SearchSpace,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
) -> TensorType:
"""
Sample `num_search_space_samples` (see :meth:`__init__`) points from the
``search_space``. Of those points, return the `num_query_points` points at which
random samples yield the **minima** of the model posterior.
:param search_space: The local acquisition search space for *this step*.
:param datasets: Unused.
:param models: The model of the known data. Uses the single key `OBJECTIVE`.
:return: The ``num_query_points`` points to query.
:raise ValueError: If ``models`` do not contain the key `OBJECTIVE`, or it contains any
other key.
"""
if models.keys() != {OBJECTIVE}:
raise ValueError(
f"dict of models must contain the single key {OBJECTIVE}, got keys {models.keys()}"
)
if datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""
dict of datasets must contain the single key {OBJECTIVE},
got keys {datasets.keys()}
"""
)
if self._num_fourier_features is None: # Perform exact Thompson sampling
thompson_sampler: ThompsonSampler = ExactThompsonSampler(
self._num_query_points, models[OBJECTIVE]
)
else: # Perform approximate Thompson sampling
thompson_sampler = RandomFourierFeatureThompsonSampler(
self._num_query_points,
models[OBJECTIVE],
datasets[OBJECTIVE],
num_features=self._num_fourier_features,
)
query_points = search_space.sample(self._num_search_space_samples)
thompson_samples = thompson_sampler.sample(query_points)
return thompson_samples
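# --- Illustrative usage sketch (not part of the original module) ---------------
# Exact Thompson sampling scores 1000 random candidate points and keeps 5 of
# them; passing num_fourier_features switches acquire() to the approximate
# RandomFourierFeatureThompsonSampler branch. The numbers here are arbitrary.
def _example_discrete_thompson_sampling(approximate: bool = False) -> DiscreteThompsonSampling:
    num_features = 1000 if approximate else None
    return DiscreteThompsonSampling(
        num_search_space_samples=1000, num_query_points=5, num_fourier_features=num_features
    )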
class TrustRegion(AcquisitionRule[types.State[Optional["TrustRegion.State"], TensorType], Box]):
"""Implements the *trust region* acquisition algorithm."""
@dataclass(frozen=True)
class State:
"""The acquisition state for the :class:`TrustRegion` acquisition rule."""
acquisition_space: Box
""" The search space. """
eps: TensorType
"""
The (maximum) vector from the current best point to each bound of the acquisition space.
"""
y_min: TensorType
""" The minimum observed value. """
is_global: bool | TensorType
"""
`True` if the search space was global, else `False` if it was local. May be a scalar boolean
`TensorType` instead of a `bool`.
"""
def __deepcopy__(self, memo: dict[int, object]) -> TrustRegion.State:
box_copy = copy.deepcopy(self.acquisition_space, memo)
return TrustRegion.State(box_copy, self.eps, self.y_min, self.is_global)
def __init__(
self,
rule: AcquisitionRule[TensorType, Box] | None = None,
beta: float = 0.7,
kappa: float = 1e-4,
):
"""
:param rule: The acquisition rule that defines how to search for a new query point in a
given search space. Defaults to :class:`EfficientGlobalOptimization` with default
arguments.
:param beta: The inverse of the trust region contraction factor.
:param kappa: Scales the threshold for the minimal improvement required for a step to be
considered a success.
"""
if rule is None:
rule = EfficientGlobalOptimization()
self._rule = rule
self._beta = beta
self._kappa = kappa
def __repr__(self) -> str:
""""""
return f"TrustRegion({self._rule!r}, {self._beta!r}, {self._kappa!r})"
def acquire(
self,
search_space: Box,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
) -> types.State[State | None, TensorType]:
"""
Construct a local search space from ``search_space`` according the trust region algorithm,
and use that with the ``rule`` specified at :meth:`~TrustRegion.__init__` to find new
query points. Return a function that constructs these points given a previous trust region
state.
If no ``state`` is specified (it is `None`), ``search_space`` is used as the search space
for this step.
If a ``state`` is specified, and the new optimum improves over the previous optimum
by some threshold (that scales linearly with ``kappa``), the previous acquisition is
considered successful.
If the previous acquisition was successful, ``search_space`` is used as the new
search space. If the previous step was unsuccessful, the search space is changed to the
trust region if it was global, and vice versa.
If the previous acquisition was over the trust region, the size of the trust region is
modified. If the previous acquisition was successful, the size is increased by a factor
``1 / beta``. Conversely, if it was unsuccessful, the size is reduced by the factor
``beta``.
**Note:** The acquisition search space will never extend beyond the boundary of the
``search_space``. For a local search, the actual search space will be the
intersection of the trust region and ``search_space``.
:param search_space: The local acquisition search space for *this step*.
:param datasets: The known observer query points and observations. Uses the data for key
`OBJECTIVE` to calculate the new trust region.
:param models: The models of the specified ``datasets``.
:return: A function that constructs the next acquisition state and the recommended query
points from the previous acquisition state.
:raise KeyError: If ``datasets`` does not contain the key `OBJECTIVE`.
"""
dataset = datasets[OBJECTIVE]
global_lower = search_space.lower
global_upper = search_space.upper
y_min = tf.reduce_min(dataset.observations, axis=0)
def go(state: TrustRegion.State | None) -> tuple[TrustRegion.State | None, TensorType]:
if state is None:
eps = 0.5 * (global_upper - global_lower) / (5.0 ** (1.0 / global_lower.shape[-1]))
is_global = True
else:
tr_volume = tf.reduce_prod(
state.acquisition_space.upper - state.acquisition_space.lower
)
step_is_success = y_min < state.y_min - self._kappa * tr_volume
eps = (
state.eps
if state.is_global
else state.eps / self._beta
if step_is_success
else state.eps * self._beta
)
is_global = step_is_success or not state.is_global
if is_global:
acquisition_space = search_space
else:
xmin = dataset.query_points[tf.argmin(dataset.observations)[0], :]
acquisition_space = Box(
tf.reduce_max([global_lower, xmin - eps], axis=0),
tf.reduce_min([global_upper, xmin + eps], axis=0),
)
points = self._rule.acquire(acquisition_space, datasets, models)
state_ = TrustRegion.State(acquisition_space, eps, y_min, is_global)
return state_, points
return go
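# --- Illustrative numeric sketch (not part of the original module) -------------
# The eps update implemented inside go() above: on a global step the region size
# is kept; on a local step it grows by 1 / beta after a success and shrinks by
# beta after a failure, exactly as described in the acquire() docstring.
def _example_eps_update(
    eps: float, was_global: bool, step_was_success: bool, beta: float = 0.7
) -> float:
    if was_global:
        return eps
    return eps / beta if step_was_success else eps * beta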
``` |
{
"source": "joelbm24/Yatzy",
"score": 3
} |
#### File: lib/yatzy/dice.py
```python
from lib.yatzy.die import Die
class Dice():
def __init__(self):
self.dice = [Die() for i in range(5)]
self.has_rolled = False
self.roll_amount = 0
def roll(self):
dice = [die for die in self.dice if die.kept == False]
self.has_rolled = True
self.roll_amount += 1
for die in dice:
die.roll()
def reset(self):
self.dice = [Die() for i in range(5)]
self.has_rolled = False
self.roll_amount = 0
def keep(self, index):
self.dice[index].kept = True
def unkeep(self, index):
self.dice[index].kept = False
def _checkForKind(self, kind, strict=False):
dice_values = [die.value for die in self.dice]
dice_values.sort()
for value in dice_values:
if strict:
if dice_values.count(value) == kind:
return True
else:
if dice_values.count(value) >= kind:
return True
return False
def checkThreeOfAKind(self):
return self._checkForKind(3)
def checkFourOfAKind(self):
return self._checkForKind(4)
def checkFullHouse(self):
return self._checkForKind(2, strict=True) and self._checkForKind(3, strict=True)
def checkSmallStraight(self):
small_straight1 = [1,2,3,4]
small_straight2 = [2,3,4,5]
small_straight3 = [3,4,5,6]
values = []
for die in self.dice:
if die.value not in values:
values.append(die.value)
values.sort()
values1 = values[:4]
values2 = values[1:5]
result1 = values1 == small_straight1 or values2 == small_straight1
result2 = values1 == small_straight2 or values2 == small_straight2
result3 = values1 == small_straight3 or values2 == small_straight3
return result1 or result2 or result3
def checkLargeStraight(self):
large_straight1 = [1,2,3,4,5]
large_straight2 = [2,3,4,5,6]
values = [die.value for die in self.dice]
values.sort()
return values == large_straight1 or values == large_straight2
def checkYahtzee(self):
values = self.getValues()
return values.count(values[0]) == 5
def getValues(self):
return list( map(lambda die: die.value, self.dice) )
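# --- Illustrative usage sketch (not part of the original module) ---------------
# Kept dice are skipped by roll(), which is how holds between re-rolls work.
if __name__ == "__main__":
    dice = Dice()
    dice.roll()                      # roll all five dice
    dice.keep(0)                     # hold the first die
    dice.roll()                      # re-roll only the other four
    print(dice.getValues(), dice.roll_amount)
    print("small straight:", dice.checkSmallStraight())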
``` |
{
"source": "JoelBondurant/RandomCodeSamples",
"score": 3
} |
#### File: RandomCodeSamples/python/defectXML.py
```python
import datetime, re
class WaferMeasurementEvent:
def __init__(self, waferId, area, pid):
self.waferId = waferId
self.area = area
self.pid = pid
def mapKey(self):
return self.waferId
def xml(self):
xml = '\t\t<WaferMeasurementEvent>\n\t\t\t<WaferId>'
xml += self.waferId + '</WaferId>\n\t\t\t<AreaInspected>' + str(float(self.area)) + '</AreaInspected>\n\t\t\t'
xml += '<Metric>\n\t\t\t\t<MetricId>PID</MetricId>\n\t\t\t\t<ControlChartId>A</ControlChartId>\n\t\t\t\t'
xml += '<Type>COUNT</Type>\n\t\t\t\t<Value>' + str(float(self.pid)) + '</Value>\n\t\t\t\t'
xml += '<ReviewFraction>0.0</ReviewFraction>\n\t\t\t</Metric>\n\t\t</WaferMeasurementEvent>\n'
return xml
class LotMeasurementEvent:
def __init__(self, lotId, finishTime, toolId):
self.lotId = lotId
self.finishTime = finishTime
self.toolId = toolId
self.waferMeasurementEvents = {}
def matches(self, other):
return (self.lotId == other.lotId and self.finishTime == other.finishTime and self.toolId == other.toolId)
def mapKey(self):
return (self.lotId + self.finishTime + self.toolId)
def addWaferMeasurementEvent(self, waferMeasurementEvent):
self.waferMeasurementEvents[waferMeasurementEvent.mapKey()] = waferMeasurementEvent
def xml(self):
xml = '<LotMeasurementEvent>\n\t<Type>DEFECT</Type>\n\t<LotId>' + self.lotId + '</LotId>\n\t<ToolId>'
xml += self.toolId + '</ToolId>\n\t<DateTime>' + self.finishTime + '</DateTime>\n\t<WaferMeasurementEvents>\n'
pids = []
for waferMeasurementEvent in self.waferMeasurementEvents.values():
xml += waferMeasurementEvent.xml()
pid = int(waferMeasurementEvent.pid)
pids.append(pid)
CLValue = float(sum(pids))/float(len(pids))
xml += '\t</WaferMeasurementEvents>\n\t<ControlCharts>\n\t\t<ControlChart>\n\t\t\t<ControlChartId>A'
xml += '</ControlChartId>\n\t\t\t<Type>X_BAR</Type>\n\t\t\t<CL>' + str(CLValue) + '</CL>\n\t\t\t'
xml += '<UCL>'+str(2.0*CLValue)+'</UCL>\n\t\t\t<UCLSigma>2.7</UCLSigma>\n\t\t\t<LCL>0.0</LCL>\n\t\t'
xml += '</ControlChart>\n\t</ControlCharts>\n</LotMeasurementEvent>\n'
return xml
def makeEvents(fileName, startDate, endDate):
print("Making event file: " + fileName)
# Map defect data into memory model from text file.
lotMeasurementEvents = {}
defectDataFile = open('C:/SensorAnalytics/trunk/FactoryData/NatSemi/095-DefectData/020-Merge/merged.txt', 'rt')
for textLine in defectDataFile:
textLineSplit = textLine.split('\t')
lotId = textLineSplit[0]
waferId = 'WAF-' + textLineSplit[1]
finishTime = textLineSplit[2]
eventTime = datetime.datetime.strptime(finishTime, '%Y-%m-%dT%H:%M:%S')
if not (startDate <= eventTime < endDate):
continue
toolId = textLineSplit[3]
toolId = re.sub('-.+', '', toolId) # needed to pull off -B and -PM1 additions in defect tool names.
area = textLineSplit[4]
pid = textLineSplit[5][:-1]
lotMeasurementEvent = LotMeasurementEvent(lotId, finishTime, toolId)
if (lotMeasurementEvent.mapKey() in lotMeasurementEvents):
lotMeasurementEvent = lotMeasurementEvents[lotMeasurementEvent.mapKey()]
else:
lotMeasurementEvents[lotMeasurementEvent.mapKey()] = lotMeasurementEvent
waferMeasurementEvent = WaferMeasurementEvent(waferId, area, pid)
lotMeasurementEvent.addWaferMeasurementEvent(waferMeasurementEvent)
defectDataFile.close()
eventsHeader = '<?xml version="1.0"?>\n<Events xmlns="http://www.sensoranalytics.com" '
eventsHeader += 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sensoranalytics.com events.xsd">\n'
xmlFile = open(fileName, 'wt')
xmlFile.write(eventsHeader)
for lotMeasurementEvent in lotMeasurementEvents.values():
xmlFile.write(lotMeasurementEvent.xml())
xmlFile.write('</Events>')
xmlFile.close()
print(fileName + ' written.')
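# --- Illustrative usage sketch (not part of the original module) ---------------
# Hypothetical output file name and date window; the merged.txt input path is
# hard-coded inside makeEvents above.
if __name__ == "__main__":
    makeEvents(
        "defectEvents_201101.xml",
        datetime.datetime(2011, 1, 1),
        datetime.datetime(2011, 2, 1),
    )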
```
#### File: RandomCodeSamples/python/fuelprices.py
```python
import os
import json
import requests
import datetime
import xml.etree.ElementTree as ET
from analyticobjects.util import temporal
from analyticobjects.util import logger
def fetch_fuel_data():
"""Get basic fuel price data."""
return requests.get('http://www.fueleconomy.gov/ws/rest/fuelprices').text
def parse_fuel_data(xml_response):
"""Parse XML response into json."""
root = ET.fromstring(xml_response)
vals = {'datebin': str(datetime.datetime.now().date())}
for child in root:
vals[child.tag] = float(child.text)
return json.dumps(vals)
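# --- Illustrative offline sketch (not part of the original module) -------------
# parse_fuel_data() keeps the tag name of each child element and its float value;
# the tag names below are hypothetical, the real ones come from the
# fueleconomy.gov response.
def _parse_example() -> str:
    sample = "<fuelPrices><regular>3.50</regular><diesel>3.90</diesel></fuelPrices>"
    return parse_fuel_data(sample)  # e.g. '{"datebin": "...", "regular": 3.5, "diesel": 3.9}'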
def write_fuel_data():
"""Persist fuel data."""
fuel_data = fetch_fuel_data()
fuel_json = parse_fuel_data(fuel_data)
ts = temporal.datetimestamp()
base_dir = '/dataVol/collector/json'
if not os.path.exists(base_dir):
os.makedirs(base_dir)
fp = os.path.join(base_dir, 'fuel_data_' + ts + '.json')
with open(fp, 'w') as fout:
fout.write(fuel_json)
logger.info('Fuel prices downloaded: %s' % fp)
def run():
"""Extra layers of abstraction."""
write_fuel_data()
```
#### File: RandomCodeSamples/python/logger.py
```python
import os, sys, logging, datetime, inspect, traceback
LOGGING_ROOT = '/var/log/analyticobjects'
class TermColor:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Logger:
@staticmethod
def introspect_caller():
"""Climb up the call stack looking for what module is calling this."""
caller = 'console'
stack = inspect.stack()
try:
for frame in stack:
module = inspect.getmodule(frame[0])
if hasattr(module, '__name__'):
module_name = module.__name__
if len(module_name) > 1:
caller = module_name
except:
pass
if caller == '__main__':
caller = os.path.basename(sys.argv[0]).replace('.py','')
return caller
def __init__(self, level = None):
#print(TermColor.BOLD+'NOTICE:'+TermColor.WARNING+' analyticobjects.util.logger.init called.'+TermColor.ENDC)
self.datestamp = datetime.datetime.today().date()
caller = Logger.introspect_caller()
logr = logging.getLogger(caller)
logr.handlers = []
level = level or logging.INFO
logr.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filepath = os.path.join(LOGGING_ROOT, caller)
datestr = datetime.datetime.strftime(self.datestamp, '%Y%m%d')
filename = caller + '_' + datestr + '.log'
try:
os.makedirs(filepath)
except:
pass
logpath = os.path.join(filepath, filename)
filehandler = logging.FileHandler(logpath)
filehandler.setFormatter(formatter)
logr.addHandler(filehandler)
consolehandler = logging.StreamHandler()
consolehandler.setLevel(level)
consolehandler.setFormatter(formatter)
logr.addHandler(consolehandler)
self.logr = logr
def info(self, msg):
self.logr.info(str(msg))
def debug(self, msg):
self.logr.debug(TermColor.OKBLUE+str(msg)+TermColor.ENDC)
def warn(self, msg):
self.logr.warn(TermColor.WARNING+str(msg)+TermColor.ENDC)
def error(self, msg):
self.logr.error(TermColor.FAIL+str(msg)+TermColor.ENDC)
def critical(self, msg):
self.logr.critical(TermColor.FAIL+str(msg)+TermColor.ENDC)
def exception(self, ex, msg = 'EXCEPTION:'):
self.logr.critical(TermColor.BOLD+TermColor.FAIL+'\n!************EXCEPTION-BEGIN***************!\n'+TermColor.ENDC)
self.logr.critical(TermColor.WARNING+'ExceptionMessage: '+str(msg)+TermColor.ENDC)
self.logr.critical(TermColor.WARNING+'ExceptionType: '+str(type(ex))+TermColor.ENDC)
self.logr.critical(TermColor.WARNING+'Exception: '+str(ex)+TermColor.ENDC)
self.logr.critical(TermColor.BOLD+TermColor.OKGREEN+'Exception Details:'+TermColor.ENDC)
self.logr.critical(TermColor.BOLD+TermColor.OKGREEN+traceback.format_exc()+TermColor.ENDC)
self.logr.critical(TermColor.OKBLUE+'Stack Traceback:'+TermColor.ENDC)
for stackframe in traceback.format_stack():
self.logr.critical(TermColor.OKBLUE+stackframe.replace('\n','')+TermColor.ENDC)
local_vars = inspect.trace()[-1][0].f_locals
self.logr.critical(TermColor.WARNING+'Local variables:'+TermColor.ENDC)
for lvar in local_vars:
self.logr.critical(TermColor.WARNING+'%s:\n\t%s' % (str(lvar), str(local_vars[lvar]))+TermColor.ENDC)
self.logr.critical(TermColor.BOLD+TermColor.FAIL+'\n!************EXCEPTION-END*****************!\n'+TermColor.ENDC)
_logr = Logger()
def rollover():
global _logr
today = datetime.datetime.today().date()
if today != _logr.datestamp:
_logr = Logger()
def pre():
rollover()
env_level = os.getenv('LOGGING_LEVEL')
if env_level:
if env_level == 'CRITICAL':
_logr.logr.setLevel(logging.CRITICAL)
elif env_level == 'ERROR':
_logr.logr.setLevel(logging.ERROR)
elif env_level == 'WARNING':
_logr.logr.setLevel(logging.WARNING)
elif env_level == 'INFO':
_logr.logr.setLevel(logging.INFO)
elif env_level == 'DEBUG':
_logr.logr.setLevel(logging.DEBUG)
elif env_level == 'NOTSET':
_logr.logr.setLevel(logging.NOTSET)
def info(msg):
pre()
_logr.info(msg)
def debug(msg):
pre()
_logr.debug(msg)
def warn(msg):
pre()
_logr.warn(msg)
def error(msg):
pre()
_logr.error(msg)
def critical(msg):
pre()
_logr.critical(msg)
def exception(ex, msg = 'EXCEPTION:'):
pre()
_logr.exception(ex, msg)
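# --- Illustrative usage sketch (not part of the original module) ---------------
# The module-level helpers proxy to a shared Logger, which logs to the console
# and to /var/log/analyticobjects/<caller>/<caller>_<YYYYMMDD>.log (the log
# directory must be writable for the file handler to attach).
if __name__ == "__main__":
    info("logger demo: info message")
    debug("logger demo: hidden unless LOGGING_LEVEL=DEBUG")
    try:
        1 / 0
    except Exception as ex:
        exception(ex, "logger demo: divide by zero")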
```
#### File: RandomCodeSamples/python/pitones3.py
```python
import math
import wave
import struct
def synthComplex(toneNum, wavFile):
coef = [1]
freq = [100 * math.exp(toneNum / (2 * math.pi))]
datasize = 5000
frate = 44100.00
amp = 8000.0
sine_list=[]
for x in range(datasize):
samp = 0
for k in range(len(freq)):
samp = samp + coef[k] * math.sin(2*math.pi*freq[k]*(x/frate))
sine_list.append(samp)
nchannels = 1
sampwidth = 2
framerate = int(frate)
nframes=datasize
comptype= "NONE"
compname= "not compressed"
for s in sine_list:
wavFile.writeframes(struct.pack('h', int(s*amp/2)))
def arccot(x, unity):
sum = xpower = unity // x
n = 3
sign = -1
while 1:
xpower = xpower // (x*x)
term = xpower // n
if not term:
break
sum += sign * term
sign = -sign
n += 2
return sum
def pi(digits):
unity = 10 ** (digits + 10)
pi = 4 * (4 * arccot(5, unity) - arccot(239, unity))
return pi // 10 ** 10
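# The integer pi() above evaluates Machin's formula,
#   pi / 4 = 4 * arccot(5) - arccot(239),
# in fixed-point arithmetic with 10 guard digits before truncating.
# Quick sanity check on the first ten digits:
assert str(pi(9)) == "3141592653"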
print("Starting pi tones.")
piTonesFile = wave.open("pitones.wav","w")
piTonesFile.setparams((1, 2, 44100, 10000, "NONE", "not compressed"))
numTones = 1000
piStr = str(pi(numTones))
for i in range(1, numTones):
toneNum = int(piStr[i]) + 1
synthComplex(toneNum, piTonesFile)
piTonesFile.close()
print("Finished pi tones.")
```
#### File: RandomCodeSamples/python/proc.py
```python
import datetime
def uptime(asstr = False):
"""Get system uptime>"""
raw = ''
with open('/proc/uptime','r') as ut:
raw = ut.read()[:-1]
uts = list(map(lambda x: int(float(x)), raw.split(' ')))
if asstr:
uts = str(datetime.timedelta(seconds = uts[0]))
return uts
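# --- Illustrative usage sketch (not part of the original module) ---------------
# /proc/uptime holds two floats: seconds since boot and aggregate idle seconds;
# uptime() truncates both to ints, or formats the first as a timedelta string
# when asstr=True.
if __name__ == "__main__":
    print(uptime())            # e.g. [123456, 987654]
    print(uptime(asstr=True))  # e.g. '1 day, 10:17:36'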
```
#### File: RandomCodeSamples/python/singleton.py
```python
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
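# --- Illustrative usage sketch (not part of the original module) ---------------
# The decorator replaces the class with a Singleton wrapper: construction goes
# through Instance(), direct calls raise TypeError, and isinstance() still works
# thanks to __instancecheck__.
@Singleton
class _DemoConfig:
    def __init__(self):
        self.value = 42
if __name__ == "__main__":
    a = _DemoConfig.Instance()
    b = _DemoConfig.Instance()
    assert a is b and a.value == 42
    assert isinstance(a, _DemoConfig)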
```
#### File: RandomCodeSamples/python/sql.py
```python
import MySQLdb, os, sys, re, time
import collections.abc
from warnings import filterwarnings
try:
from aiobj.util import logger
except ImportError:
pass
try:
import tabulate
except ImportError:
pass
try:
import pandas as pd, pandas
except ImportError:
pass
try:
import sqlalchemy
except ImportError:
pass
filterwarnings('ignore', category = MySQLdb.Warning)
class SQL:
""" Class to manage SQL connections. """
DEFAULT_DB = 'ops'
OPS = 'ops'
TGOLAP = 'tgolap'
TGCONF = 'tgconf'
def __init__(
self, db = DEFAULT_DB, host = None, port = None, user = None,
passwd = <PASSWORD>, autocommit = False, printsql = False,
use_unicode = True, charset = 'utf8', managed = True,
alchemy = False, connect = True, autoretry = True, retryperiod = 240):
self.db = db
self.host = host or os.getenv('SQL_HOST', '127.0.0.1')
self.user = user or os.getenv('SQL_APP_USER', 'notset')
self.passwd = passwd or os.getenv('SQL_APP_PASS', 'notset')
self.port = port or int(os.getenv('SQL_PORT', 3306))
self.printsql = printsql
self.autocommit = autocommit
self.autoretry = autoretry
self.retryperiod = retryperiod
self.use_unicode = use_unicode
self.charset = charset
self.managed = managed
self.alchemy = alchemy
self.alchemy_engine = None
if not connect:
return
if not alchemy:
self.conn = MySQLdb.connect(
host=self.host, port=self.port, user=self.user,
passwd=self.passwd, db=self.db, use_unicode = self.use_unicode,
charset = self.charset)
self.conn.autocommit(self.autocommit)
self.conn.set_character_set(self.charset)
sqlsetup = "SET NAMES utf8; "
sqlsetup += "SET CHARACTER SET %s; " % self.charset
sqlsetup += "SET character_set_connection = %s; " % self.charset
sqlsetup += "SET collation_connection = 'utf8_bin'; "
self.execute(sqlsetup)
else:
constr_base = 'mysql+mysqldb://%s:%s@%s:%s/%s?charset=%s'
constr = constr_base % (self.user, self.passwd, self.host, self.port, self.db, self.charset)
ae = sqlalchemy.create_engine(constr, encoding = self.charset)
self.alchemy_engine = ae
def __del__(self):
try:
if self.managed:
self.conn.close()
except:
pass
def getconn(self):
return self.conn
def close(self):
self.conn.close()
def commit(self):
try:
self.conn.commit()
except:
self.conn.rollback()
raise
def commitclose(self):
try:
self.conn.commit()
except:
self.conn.rollback()
raise
finally:
self.close()
def rollback(self):
self.conn.rollback()
def reconnect(self):
try:
self.conn.close()
except:
pass
self.conn = MySQLdb.connect(
host=self.host, port=self.port, user=self.user, passwd=self.passwd,
db=self.db, use_unicode=self.use_unicode, charset=self.charset)
self.conn.autocommit(self.autocommit)
self.execute("SET collation_connection = 'utf8_bin';")
def ping(self):
self.conn.ping()
def __printsql__(self, sqltxt, args = None):
if self.printsql:
msg = 'Executing SQL:\n%s' % sqltxt
if not args is None:
msg += '\n args = ' + str(args)
logger.info(msg)
def insert_id(self, sqltxt, args = None):
"""
A method to execute a sql insert statement returning the inserted id.
"""
self.__printsql__(sqltxt, args)
curs = self.conn.cursor()
curs.execute(sqltxt, args)
insertid = self.conn.insert_id()
curs.close()
return insertid
def execute(self, sqltxt, args = None):
"""
A method to execute a sql statement with no return result set.
"""
self.__printsql__(sqltxt, args)
started = time.time()
complete = False
        ex0 = None
while (not complete) and time.time() - started < self.retryperiod:
try:
curs = self.conn.cursor()
curs.execute(sqltxt, args)
lastrowid = curs.lastrowid
curs.close()
complete = True
except Exception as ex:
ex0 = ex
print(ex)
time.sleep(10)
if not complete:
if ex0:
raise ex0
else:
raise TimeoutError('Max query time exceeded. SQL execution was not completed.')
return lastrowid
def executemany(self, sqltxt, args = None):
"""
A method to execute a sql statement with a parameter array.
Use this for fast multi-row inserts.
"""
self.__printsql__(sqltxt, args)
started = time.time()
complete = False
        ex0 = None
while (not complete) and time.time() - started < self.retryperiod:
try:
curs = self.conn.cursor()
curs.executemany(sqltxt, args)
lastrowid = curs.lastrowid
curs.close()
complete = True
except Exception as ex:
ex0 = ex
print(ex)
time.sleep(10)
if not complete:
if ex0:
raise ex0
else:
raise TimeoutError('Max query time exceeded. SQL execution was not completed.')
return lastrowid
def callproc(self, sqltxt, args = ()):
"""
A method to execute a sql procedure
"""
self.__printsql__(sqltxt, args)
curs = self.conn.cursor()
curs.callproc(sqltxt, args)
curs.close()
def executeall(self, sqltxtlist):
"""
A method to execute a list of sql statements with no return result sets.
"""
curs = self.conn.cursor()
for sqltxt in sqltxtlist:
self.__printsql__(sqltxt)
curs.execute(sqltxt)
curs.close()
def execute_ddl(self, sqltxt, args = None):
"""
A method to silently execute ddl.
"""
try:
self.execute(sqltxt, args)
except (MySQLdb.Error, MySQLdb.Warning) as err:
logger.debug('Expected DDL Warning:\n' + str(sys.exc_info()))
logger.debug(err.args[0])
logger.debug(type(err))
logger.debug(err)
except Exception as ex:
logger.exception(ex, ("Unexpected DDL Warning:\n\n %s\n\n" % sqltxt))
def executeall_ddl(self, sqltxtlist):
"""
A method to silently execute a list of ddl statements.
"""
try:
self.executeall(sqltxtlist)
except (MySQLdb.Error, MySQLdb.Warning) as err:
logger.debug('Expected DDL Warning:\n' + str(sys.exc_info()))
logger.debug(err.args[0])
logger.debug(type(err))
logger.debug(err)
except Exception as ex:
logger.exception(ex, ("Unexpected DDL Warning:\n" + str(sqltxtlist)))
def execute_batches(self, sqltxt, iter_params):
"""
A method to execute sql over an iteratable parameter source.
Features:
Includes tuple wrapping of list elements.
"""
if type(iter_params) == list:
elem0 = iter_params[0]
if not hasattr(elem0, '__len__'):
iter_params = list(map(lambda x: (x,), iter_params))
for params in iter_params:
self.execute(sqltxt, params)
def report(self, sqltxt, args = None):
"""
A function to return a MySQLdb result set as a formatted string table.
"""
self.__printsql__(sqltxt)
rs = self.fetchall(sqltxt, args, header = True)
return tabulate.tabulate(rs, headers = 'firstrow')
def pprint(self, sqltxt, args = None):
"""
A function to print sql and return a MySQLdb result set.
"""
self.__printsql__(sqltxt)
rs = self.fetchall(sqltxt, args, header = True)
print(tabulate.tabulate(rs, headers = 'firstrow'))
@staticmethod
def tuples2lists(tx):
"""Convert a tuple of tuples to a list of lists."""
return list(map(SQL.tuples2lists, tx)) if isinstance(tx, (list, tuple)) else tx
@staticmethod
def nullify(obj):
"""Convert empty strings to NULL/None."""
        if obj != None and type(obj) != str and isinstance(obj, collections.abc.Iterable):
return list(map(SQL.nullify, obj))
if obj != None and type(obj) == str and re.match(r'^\s*$', obj):
return None
return obj
def pandas_dataframe(self, sqltxt, args = None):
"""
A function to return a MySQLdb result set in a Pandas DataFrame.
"""
self.__printsql__(sqltxt)
rs = self.fetchall(sqltxt, args, header = True)
rs = SQL.tuples2lists(rs)
        if len(rs) == 1:  # only the header row came back, i.e. no data rows
            return pandas.DataFrame(columns=rs[0])
return pandas.DataFrame(rs[1:], columns = rs[0])
def _jsonify(self, table_rs):
"""A function to take tabular result set data with header and jsonify it."""
jout = []
header = table_rs[0]
for row_num in range(1, len(table_rs)):
jout.append({})
for col_num in range(len(header)):
val = table_rs[row_num][col_num]
try:
float(val)
except:
val = str(val)
jout[row_num-1][header[col_num]] = val
return jout
def fetchall(self, sqltxt, args = None, header = False, jsonify = False):
"""
A function to execute sql and return a MySQLdb result set.
"""
self.__printsql__(sqltxt, args)
curs = self.conn.cursor()
curs.execute(sqltxt, args)
rs = curs.fetchall()
curs.close()
if header or jsonify:
headr = [col_desc[0] for col_desc in curs.description]
rs = (tuple(headr),) + rs
if jsonify:
rs = self._jsonify(rs)
return rs
def fetchone(self, sqltxt, args = None, header = False, jsonify = False):
"""
Gets one row from a sql statement.
"""
self.__printsql__(sqltxt, args)
curs = self.conn.cursor()
curs.execute(sqltxt, args)
rs = curs.fetchone()
curs.close()
if header or jsonify:
headr = [col_desc[0] for col_desc in curs.description]
rs = (tuple(headr),) + rs
if jsonify:
rs = self._jsonify(rs)
return rs
_singletons = {}
@staticmethod
def singleton(key = None, db = DEFAULT_DB):
""" Generate/get a singleton sql connection instance. """
if key == None:
conn_key = db
else:
conn_key = key
conn = None
if not conn_key in SQL._singletons:
SQL._singletons[conn_key] = SQL(db)
conn = SQL._singletons[conn_key]
try:
conn.ping()
except:
conn.reconnect()
return conn
@staticmethod
def generate_insert(table_name, columns, upsert_columns = None):
""" Generate sql insert/upsert statement. """
sqltxt = 'INSERT INTO ' + table_name + '\n (' + ','.join(columns) + ')'
sqltxt += '\nVALUES (' + ('%s,'*len(columns))[:-1] + ')'
if upsert_columns:
sqltxt += '\nON DUPLICATE KEY UPDATE\n'
sqltxt += (''.join([colname + '=VALUES(' + colname + '),' for colname in upsert_columns]))[:-1]
return sqltxt+';\n'
@staticmethod
def generate_select(table_name, columns):
""" Generate sql select statement. """
sqltxt = 'SELECT ' + ','.join(columns)
sqltxt += '\nFROM %s;\n' % table_name
return sqltxt
def copy_table(self, src_table, src_columns, dst_table, dst_columns = None, dst_upsertable = None):
""" Copy a small sql table. """
if not dst_columns:
dst_columns = src_columns
src_data = self.fetchall(SQL.generate_select(src_table, src_columns))
lastrowid = self.executemany(SQL.generate_insert(dst_table, dst_columns, dst_upsertable), src_data)
self.commit()
return lastrowid
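# --- Illustrative offline sketch (not part of the original module) -------------
# The static SQL-text builders need no connection; table and column names here
# are hypothetical.
if __name__ == "__main__":
    print(SQL.generate_insert("metrics", ["id", "val"], upsert_columns=["val"]))
    # INSERT INTO metrics
    #  (id,val)
    # VALUES (%s,%s)
    # ON DUPLICATE KEY UPDATE
    # val=VALUES(val);
    print(SQL.generate_select("metrics", ["id", "val"]))
    # SELECT id,val
    # FROM metrics;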
``` |
{
"source": "JoelBrito13/FootballBets",
"score": 2
} |
#### File: FootballBets/bets/models.py
```python
from django.core.exceptions import ValidationError
import datetime
import requests
import json
from django.db import models
from users.models import Person
URL_UPDATE = "https://apiv2.apifootball.com/" \
"?action=get_predictions" \
"&APIkey=<KEY>"
class Game(models.Model):
# Constants in Model class
match_id = models.IntegerField(primary_key=True)
country_name = models.CharField(max_length=50)
league_name = models.CharField(max_length=70)
match_date = models.DateField()
match_status = models.CharField(max_length=50, null=True)
match_time = models.TimeField()
match_hometeam_name = models.CharField(max_length=70)
match_hometeam_score = models.IntegerField(null=True)
match_awayteam_name = models.CharField(max_length=70)
match_awayteam_score = models.IntegerField(null=True)
prob_HW = models.FloatField()
prob_D = models.FloatField()
prob_AW = models.FloatField()
def update(self):
if self.match_finished() and not self.match_status == 'Finished':
url = "{}&match_id={}".format(URL_UPDATE, self.match_id)
jsonResponse = self.make_request(url)[0]
self.match_status = jsonResponse['match_status']
self.match_hometeam_score = jsonResponse['match_hometeam_score']
self.match_awayteam_score = jsonResponse['match_awayteam_score']
self.save()
def match_finished(self):
return self.match_date < datetime.date.today()
def add_game(self, search_id):
current = Game.objects.filter(match_id=search_id)
if not current:
url = "{}&match_id={}".format(URL_UPDATE, search_id)
json_response = self.make_request(url)[0]
self.match_id = int(json_response['match_id'])
self.country_name = json_response['country_name']
self.league_name = json_response['league_name']
self.match_date = json_response['match_date']
self.match_status = json_response['match_status']
self.match_time = json_response['match_time']
self.match_hometeam_name = json_response['match_hometeam_name']
self.match_awayteam_name = json_response['match_awayteam_name']
self.prob_HW = float(json_response['prob_HW'])
self.prob_D = float(json_response['prob_D'])
self.prob_AW = float(json_response['prob_AW'])
self.save()
return self
        return current.first()
def save(self, **kwargs):
if Game.objects.exists() and not self.pk:
raise ValidationError('Error Saving Game')
return super().save(**kwargs)
@staticmethod
def make_request(url):
myResponse = requests.get(url, verify=True)
if (myResponse.ok):
return json.loads(myResponse.content)
return myResponse.raise_for_status() # Error in request
def __str__(self):
return "{} match: {}, {} x {} - {}".format(self.league_name,
self.match_id,
self.match_hometeam_name,
self.match_awayteam_name,
self.match_date)
class Bet(models.Model):
# Constants in Model class
HOME_WIN = 'HW'
DRAW = 'D'
AWAY_WIN = 'AW'
GAME_RESULTS = (
(HOME_WIN, 'Home Win'),
(DRAW, 'Draw'),
(AWAY_WIN, 'Away Win')
)
game = models.ForeignKey(Game,
on_delete=models.CASCADE)
user = models.ForeignKey(Person,
on_delete=models.CASCADE)
game_bet = models.CharField(
max_length=2,
choices=GAME_RESULTS
)
amount = models.FloatField(null=False,
default=0)
game_finished = models.BooleanField(
default=False
)
balance = models.FloatField(
default=0
)
def update_status(self):
if self.game_finished:
return
self.game.update()
if self.game.match_status == 'Finished':
self.define_profit() # self.balance = profit of the game
self.user.insert_credits(
self.balance
)
self.game_finished = True
self.save()
def define_profit(self):
if self.game_bet == self.define_result():
self.balance = self.amount * 100 / self.get_prob()
return self.balance
def define_result(self):
home = self.game.match_hometeam_score
away = self.game.match_awayteam_score
if home > away:
return self.HOME_WIN
elif home < away:
return self.AWAY_WIN
else:
return self.DRAW
def get_prob(self):
if self.game_bet == self.HOME_WIN:
return self.game.prob_HW
if self.game_bet == self.AWAY_WIN:
return self.game.prob_AW
if self.game_bet == self.DRAW:
return self.game.prob_D
def __str__(self):
return "{} Bet ({} bet: {},{}€ profit:{})".format(self.user, self.game, self.game_bet, self.amount,
self.balance)
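# --- Illustrative payout sketch (not part of the original module) --------------
# define_profit() divides the stake by the quoted probability and multiplies by
# 100, so the API probabilities appear to be percentages: a 10-unit bet at
# prob_HW = 40 pays 25 units if the home side wins.
def _example_payout(amount: float = 10.0, prob_pct: float = 40.0) -> float:
    return amount * 100 / prob_pct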
```
#### File: FootballBets/bets/serializers.py
```python
from bets.models import Bet, Game
from rest_framework import serializers
class BetSerializer(serializers.ModelSerializer):
class Meta:
model = Bet
fields = (
'id',
'game',
'user',
'game_bet',
'amount',
'game_finished',
'balance'
)
    def __init__(self, instance=None, data=None, **kwargs):
        if data is not None and "game" in data:
            # Ensure the referenced Game exists locally (fetching it from the
            # football API on first use) before the serializer validates the
            # foreign key.
            Game().add_game(data["game"])
        super(BetSerializer, self).__init__(instance=instance, data=data, **kwargs)
class GameSerializer(serializers.ModelSerializer):
class Meta:
model = Game
fields = (
'match_id',
'country_name',
'league_name',
'match_date',
'match_status',
'match_time',
'match_hometeam_name',
'match_hometeam_score',
'match_awayteam_name',
'match_awayteam_score',
'prob_HW',
'prob_D',
'prob_AW'
)
def __init__(self, instance=None, data=None, **kwargs):
super(GameSerializer, self).__init__(instance=instance, data=data, **kwargs)
``` |
{
"source": "JoelBuenrostro/Renta-de-bicicletas",
"score": 2
} |
#### File: JoelBuenrostro/Renta-de-bicicletas/main.py
```python
from flask import Flask, render_template, flash, request, jsonify, Markup
INTERCEPT = -121.029547
COEF_HOLIDAY = -23.426176
COEF_HOUR = 8.631624
COEF_SEASON_1 = 3.861149
COEF_SEASON_2 = -1.624812
COEF_SEASON_3 = -41.245562
COEF_SEASON_4 = 39.009224
COEF_TEMP = 426.900259
MEAN_HOLIDAY = 0
MEAN_HOUR = 11.6
MEAN_SEASON_1 = 0
MEAN_SEASON_2 = 0
MEAN_SEASON_3 = 1
MEAN_SEASON_4 = 0
MEAN_TEMP = 0.4967
app = Flask(__name__)
@app.route("/", methods=['POST', 'GET'])
def index():
return render_template('index.html',
mean_holiday=MEAN_HOLIDAY,
mean_hour=MEAN_HOUR,
mean_season1=MEAN_SEASON_1,
mean_season2=MEAN_SEASON_2,
mean_season3=MEAN_SEASON_3,
mean_season4=MEAN_SEASON_4,
mean_temp=MEAN_TEMP,
model_intercept=INTERCEPT,
model_holiday=COEF_HOLIDAY,
model_hour=COEF_HOUR,
model_season1=COEF_SEASON_1,
model_season2=COEF_SEASON_2,
                           model_season3=COEF_SEASON_3,
                           model_season4=COEF_SEASON_4,
model_temp=COEF_TEMP)
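# --- Illustrative sketch (not part of the original app) ------------------------
# The COEF_* constants are linear-regression coefficients and the MEAN_* values
# are the defaults shown in the form, so a point prediction is just the weighted
# sum below (this helper is not referenced by the template).
def predict_rentals(holiday=MEAN_HOLIDAY, hour=MEAN_HOUR, season1=MEAN_SEASON_1,
                    season2=MEAN_SEASON_2, season3=MEAN_SEASON_3,
                    season4=MEAN_SEASON_4, temp=MEAN_TEMP):
    return (INTERCEPT
            + COEF_HOLIDAY * holiday
            + COEF_HOUR * hour
            + COEF_SEASON_1 * season1
            + COEF_SEASON_2 * season2
            + COEF_SEASON_3 * season3
            + COEF_SEASON_4 * season4
            + COEF_TEMP * temp)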
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joelburke02/muler",
"score": 2
} |
#### File: src/muler/igrins.py
```python
r"""
IGRINS Spectrum
---------------
A container for an IGRINS spectrum of :math:`M=28` total total orders :math:`m`, each with vectors for wavelength flux and uncertainty, e.g. :math:`F_m(\lambda)`.
IGRINSSpectrum
##############
"""
import warnings
from muler.echelle import EchelleSpectrum, EchelleSpectrumList
from astropy.time import Time
import numpy as np
import astropy
from astropy.io import fits
from astropy import units as u
from astropy.wcs import WCS, FITSFixedWarning
from astropy.nddata import StdDevUncertainty
import copy
# See Issue: https://github.com/astropy/specutils/issues/779
warnings.filterwarnings(
"ignore", category=astropy.utils.exceptions.AstropyDeprecationWarning
)
warnings.filterwarnings("ignore", category=FITSFixedWarning)
# See Issue: https://github.com/astropy/specutils/issues/800
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Convert PLP index number to echelle order m
## Note that these technically depend on grating temperature
## For typical operating temperature, offsets should be exact.
grating_order_offsets = {"H": 98, "K": 71}
class IGRINSSpectrum(EchelleSpectrum):
r"""
A container for IGRINS spectra
Args:
file (str): A path to a reduced IGRINS spectrum from plp
order (int): which spectral order to read
cached_hdus (list) :
List of two fits HDUs, one for the spec_a0v.fits, and one for the
sn.fits file, to reduce file I/O for multiorder access.
If provided, must give both HDUs. Optional, default is None.
"""
def __init__(self, *args, file=None, order=10, cached_hdus=None, **kwargs):
self.ancillary_spectra = None
self.noisy_edges = (450, 1950)
self.instrumental_resolution = 45_000.0
if file is not None:
# Determine the band
if "SDCH" in file:
band = "H"
elif "SDCK" in file:
band = "K"
else:
raise NameError("Cannot identify file as an IGRINS spectrum")
grating_order = grating_order_offsets[band] + order
sn_file = file[:-13] + "sn.fits"
if cached_hdus is not None:
hdus = cached_hdus[0]
sn_hdus = cached_hdus[1]
else:
hdus = fits.open(str(file))
try:
sn_hdus = fits.open(sn_file)
except:
sn_hdus = None
hdr = hdus[0].header
lamb = hdus["WAVELENGTH"].data[order].astype(np.float64) * u.micron
flux = hdus["SPEC_DIVIDE_A0V"].data[order].astype(np.float64) * u.ct
meta_dict = {
"x_values": np.arange(0, 2048, 1, dtype=np.int),
"m": grating_order,
"header": hdr,
}
if sn_hdus is not None:
                sn = sn_hdus[0].data[order]
unc = np.abs(flux / sn)
uncertainty = StdDevUncertainty(unc)
mask = np.isnan(flux) | np.isnan(uncertainty.array)
else:
uncertainty = None
mask = np.isnan(flux)
super().__init__(
spectral_axis=lamb.to(u.Angstrom),
flux=flux,
mask=mask,
wcs=None,
uncertainty=uncertainty,
meta=meta_dict,
**kwargs,
)
else:
super().__init__(*args, **kwargs)
@property
def site_name(self):
"""Which pipeline does this spectrum originate from?"""
# TODO: add a check lookup dictionary for other telescopes
# to ensure astropy compatibility
return self.meta["header"]["TELESCOP"]
@property
def RA(self):
"""The right ascension from header files"""
return self.meta["header"]["OBJRA"] * u.deg
@property
def DEC(self):
"""The declination from header files"""
return self.meta["header"]["OBJDEC"] * u.deg
@property
def astropy_time(self):
"""The astropy time based on the header"""
mjd = self.meta["header"]["MJD-OBS"]
return Time(mjd, format="mjd", scale="utc")
class IGRINSSpectrumList(EchelleSpectrumList):
r"""
An enhanced container for a list of IGRINS spectral orders
"""
def __init__(self, *args, **kwargs):
self.normalization_order_index = 14
super().__init__(*args, **kwargs)
@staticmethod
def read(file, precache_hdus=True):
"""Read in a SpectrumList from a file
Parameters
----------
file : (str)
A path to a reduced IGRINS spectrum from plp
"""
assert ".spec_a0v.fits" in file
hdus = fits.open(file, memmap=False)
sn_file = file[:-13] + "sn.fits"
sn_hdus = fits.open(sn_file, memmap=False)
cached_hdus = [hdus, sn_hdus]
n_orders, n_pix = hdus["WAVELENGTH"].data.shape
list_out = []
for i in range(n_orders):
spec = IGRINSSpectrum(file=file, order=i, cached_hdus=cached_hdus)
list_out.append(spec)
return IGRINSSpectrumList(list_out)
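# --- Illustrative usage sketch (not part of the original module) ---------------
# Hypothetical file name; any plp *.spec_a0v.fits product with a matching
# *.sn.fits next to it should work. remove_nans()/normalize() come from the
# EchelleSpectrum base class.
if __name__ == "__main__":
    spectra = IGRINSSpectrumList.read("SDCH_20201202_0059.spec_a0v.fits")
    order_10 = spectra[10].remove_nans().normalize()
    print(order_10.wavelength.min(), order_10.wavelength.max())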
```
#### File: muler/tests/test_hpf.py
```python
import pytest
import time
from muler.hpf import HPFSpectrum, HPFSpectrumList
from specutils import Spectrum1D
# from astropy.nddata.nduncertainty import StdDevUncertainty
import numpy as np
import glob
import astropy
local_files = glob.glob("data/Goldilocks_*.spectra.fits")
file = local_files[0]
def test_basic():
"""Do the basic methods work?"""
spec = HPFSpectrum(file=file, order=10)
assert spec is not None
assert isinstance(spec, Spectrum1D)
assert isinstance(spec.flux, np.ndarray)
assert len(spec.flux) == len(spec.wavelength)
assert spec.mask.sum() > 0
new_spec = spec.remove_nans()
assert new_spec.shape[0] < spec.shape[0]
assert new_spec.shape[0] > 0
assert new_spec.mask is not None
new_spec = spec.normalize()
assert new_spec.shape[0] == spec.shape[0]
assert np.nanmedian(new_spec.flux) == 1
new_spec = spec.trim_edges()
assert new_spec.shape[0] < spec.shape[0]
assert new_spec.shape[0] > 0
assert new_spec.mask is not None
ax = new_spec.plot(label="demo", color="r")
assert ax is not None
def test_equivalent_width():
"""Can we measure equivalent widths?"""
spec = HPFSpectrum(file=file, order=4)
mu = 8600 # make sure it is in given order
equivalent_width = spec.measure_ew(mu)
assert equivalent_width is not None
assert type(equivalent_width) is not int
assert type(equivalent_width) is astropy.units.quantity.Quantity
assert equivalent_width.unit is spec.wavelength.unit
def test_smoothing():
"""Does smoothing and outlier removal work?"""
spec = HPFSpectrum(file=file, order=10)
new_spec = spec.remove_outliers(threshold=3)
assert len(new_spec.flux) > 0
assert new_spec.shape[0] <= spec.shape[0]
assert new_spec.shape[0] > 0
assert new_spec.mask is not None
def test_uncertainty():
"""Does uncertainty propagation work?"""
spec = HPFSpectrum(file=file, order=10)
assert spec.uncertainty is not None
assert hasattr(spec.uncertainty, "array")
assert len(spec.flux) == len(spec.uncertainty.array)
assert spec.flux.unit == spec.uncertainty.unit
new_spec = spec.remove_nans()
assert len(new_spec.flux) == len(new_spec.uncertainty.array)
assert np.all(new_spec.uncertainty.array > 0)
snr_old_vec = spec.flux / spec.uncertainty.array
snr_old_med = np.nanmedian(snr_old_vec.value)
new_spec = spec.normalize()
snr_vec = new_spec.flux / new_spec.uncertainty.array
snr_med = np.nanmedian(snr_vec.value)
assert np.isclose(snr_med, snr_old_med, atol=0.005)
def test_sky_and_lfc():
"""Do we track sky and lfc?"""
spec = HPFSpectrum(file=file, order=10)
assert spec.sky is not None
assert isinstance(spec.sky, Spectrum1D)
assert spec.sky == spec.meta["sky"]
assert spec.lfc is not None
assert isinstance(spec.lfc, Spectrum1D)
assert hasattr(spec.sky, "flux")
assert isinstance(spec.sky.flux, np.ndarray)
assert len(spec.sky.flux) == len(spec.flux)
assert spec.flux.unit == spec.sky.unit
new_spec = spec.remove_nans()
assert new_spec.sky is not None
assert hasattr(new_spec.sky, "flux")
new_spec2 = new_spec.normalize()
assert new_spec2.sky is not None
assert isinstance(new_spec2.sky, Spectrum1D)
assert hasattr(new_spec2.sky, "flux")
## Normalize should scale both target and sky flux by the same scalar
# assert np.nanmedian(new_spec2.sky.flux.value) != np.nanmedian(
# new_spec.sky.flux.value
# )
assert np.median(new_spec2.flux.value) == 1.0
# The sky/lfc fibers should not have their own sky/lfc fibers: that's redundant
assert "sky" not in spec.sky.meta.keys()
assert "lfc" not in spec.sky.meta.keys()
assert "sky" not in spec.lfc.meta.keys()
assert "lfc" not in spec.lfc.meta.keys()
assert spec.lfc.meta["provenance"] == "Laser Frequency Comb"
assert spec.sky.meta["provenance"] == "Sky fiber"
assert spec.meta["provenance"] == "Target fiber"
def test_RV():
"""Does RV shifting work"""
spec = HPFSpectrum(file=file)
assert spec.uncertainty is not None
assert hasattr(spec, "barycentric_correct")
correction_velocity = spec.estimate_barycorr()
assert isinstance(spec.RA, astropy.units.quantity.Quantity)
assert isinstance(spec.DEC, astropy.units.quantity.Quantity)
assert correction_velocity is not None
assert isinstance(correction_velocity, astropy.units.quantity.Quantity)
new_spec = spec.barycentric_correct()
assert new_spec is not None
assert isinstance(new_spec, Spectrum1D)
@pytest.mark.parametrize(
"precache_hdus", [True, False],
)
def test_spectrumlist_performance(precache_hdus):
"""Does the Spectrum List work?"""
t0 = time.time()
spec_list = HPFSpectrumList.read(file, precache_hdus=precache_hdus)
t1 = time.time()
net_time = t1 - t0
print(f"\n\t Precached HDUs {precache_hdus}: {net_time:0.5f} seconds", end="\t")
assert spec_list is not None
``` |
{
"source": "joelburton/django-ninja",
"score": 2
} |
#### File: django-ninja/ninja/operation.py
```python
import pydantic
import django
from django.http import HttpResponse, HttpResponseNotAllowed
from typing import Callable, List, Any, Union, Optional, Sequence
from ninja.responses import Response
from ninja.errors import InvalidInput
from ninja.constants import NOT_SET
from ninja.schema import Schema
from ninja.signature import ViewSignature, is_async
class Operation:
def __init__(
self,
path: str,
methods: List[str],
view_func: Callable,
*,
auth: Optional[Union[Sequence[Callable], Callable, object]] = NOT_SET,
response: Any = None,
):
self.is_async = False
self.path: str = path
self.methods: List[str] = methods
self.view_func: Callable = view_func
self.api = None
self.auth_param: Optional[Union[Sequence[Callable], Callable, object]] = auth
self.auth_callbacks: Sequence[Callable] = []
self._set_auth(auth)
self.signature = ViewSignature(self.path, self.view_func)
self.models = self.signature.models
self.response_model = self._create_response_model(response)
def run(self, request, **kw):
unauthorized = self._run_authentication(request)
if unauthorized:
return unauthorized
values, errors = self._get_values(request, kw)
if errors:
return Response({"detail": errors}, status=422)
result = self.view_func(request, **values)
return self._create_response(result)
def set_api_instance(self, api):
self.api = api
if self.auth_param == NOT_SET and api.auth != NOT_SET:
            # if the API instance has auth and the operation does not, inherit auth from the API instance
self._set_auth(self.api.auth)
def _set_auth(self, auth: Optional[Union[Sequence[Callable], Callable, object]]):
if auth is not None and auth is not NOT_SET:
self.auth_callbacks = isinstance(auth, Sequence) and auth or [auth]
def _run_authentication(self, request):
if not self.auth_callbacks:
return
for callback in self.auth_callbacks:
result = callback(request)
if result is not None:
request.auth = result
return
return Response({"detail": "Unauthorized"}, status=401)
def _create_response(self, result: Any):
if isinstance(result, HttpResponse):
return result
if self.response_model is None:
return Response(result)
resp_object = ResponseObject(result)
        # ^ we need an object because getter_dict seems to work only with from_orm
result = self.response_model.from_orm(resp_object).dict()["response"]
return Response(result)
def _get_values(self, request, path_params):
values, errors = {}, []
for model in self.models:
try:
data = model.resolve(request, path_params)
values.update(data)
except (pydantic.ValidationError, InvalidInput) as e:
items = []
for i in e.errors():
i["loc"] = (model._in,) + i["loc"]
items.append(i)
errors.extend(items)
return values, errors
def _create_response_model(self, response_param):
if response_param is None:
return
attrs = {"__annotations__": {"response": response_param}}
return type("Response", (Schema,), attrs)
class AsyncOperation(Operation):
def __init__(self, *args, **kwargs):
if django.VERSION < (3, 1): # pragma: no cover
raise Exception("Async operations are supported only with Django 3.1+")
super().__init__(*args, **kwargs)
self.is_async = True
async def run(self, request, **kw):
unauthorized = self._run_authentication(request)
if unauthorized:
return unauthorized
values, errors = self._get_values(request, kw)
if errors:
return Response({"detail": errors}, status=422)
result = await self.view_func(request, **values)
return self._create_response(result)
class PathView:
def __init__(self):
self.operations = []
self.is_async = False # if at least one operation is async - will become True
def add(
self,
path: str,
methods: List[str],
view_func: Callable,
*,
auth: Optional[Union[Sequence[Callable], Callable, object]] = NOT_SET,
response=None,
):
if is_async(view_func):
self.is_async = True
operation = AsyncOperation(
path, methods, view_func, auth=auth, response=response
)
else:
operation = Operation(
path, methods, view_func, auth=auth, response=response
)
self.operations.append(operation)
return operation
def get_view(self):
if self.is_async:
view = self._async_view
else:
view = self._sync_view
view.__func__.csrf_exempt = True
# TODO: ^ this should probably be configurable in settings or Ninja app
return view
def _sync_view(self, request, *a, **kw):
operation, error = self._find_operation(request)
if error:
return error
return operation.run(request, *a, **kw)
async def _async_view(self, request, *a, **kw):
from asgiref.sync import sync_to_async
operation, error = self._find_operation(request)
if error:
return error
if operation.is_async:
return await operation.run(request, *a, **kw)
else:
return await sync_to_async(operation.run)(request, *a, **kw)
def _find_operation(self, request):
allowed_methods = set()
for op in self.operations:
allowed_methods.update(op.methods)
if request.method in op.methods:
return op, None
return (
None,
HttpResponseNotAllowed(allowed_methods, content=b"Method not allowed"),
)
class ResponseObject(object):
"Basically this is just a helper to be able to pass response to pydantic's from_orm"
def __init__(self, response):
self.response = response
```
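A small illustration (using plain pydantic instead of ninja's `Schema`, to keep it self-contained) of the `type("Response", (Schema,), attrs)` trick that `_create_response_model` relies on: a one-field model class is built at runtime from an annotation dict. Ninja's real `Schema` adds a custom getter dict for `from_orm`, which this sketch leaves out.
```python
from typing import List
import pydantic

# Build a model class dynamically, the same way _create_response_model does,
# but on top of pydantic.BaseModel.
attrs = {"__annotations__": {"response": List[int]}}
ResponseModel = type("Response", (pydantic.BaseModel,), attrs)

print(ResponseModel(response=[1, 2, 3]).dict())  # {'response': [1, 2, 3]}
```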
#### File: django-ninja/ninja/utils.py
```python
def normalize_path(path: str) -> str:
while "//" in path:
path = path.replace("//", "/")
return path
```
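For reference, a tiny usage example of `normalize_path` (the path literal is made up):
```python
from ninja.utils import normalize_path

# Repeated slashes are collapsed until none remain.
assert normalize_path("/api//v1///items/") == "/api/v1/items/"
```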
#### File: django-ninja/tests/test_schema.py
```python
from typing import List
from ninja import Schema
from ninja.schema import Field
from django.db.models import QuerySet, Manager
class FakeManager(Manager):
def __init__(self, items):
self._items = items
def all(self):
return self._items
def __str__(self):
return "FakeManager"
class FakeQS(QuerySet):
def __init__(self, items):
self._result_cache = items
self._prefetch_related_lookups = False
def __str__(self):
return "FakeQS"
class Tag:
def __init__(self, id, title):
self.id = id
self.title = title
class User:
name = "John"
group_set = FakeManager([1, 2, 3])
@property
def tags(self):
return FakeQS([Tag(1, "foo"), Tag(2, "bar")])
class TagSchema(Schema):
id: str
title: str
class UserSchema(Schema):
name: str
groups: List[int] = Field(..., alias="group_set")
tags: List[TagSchema]
def test_schema():
user = User()
schema = UserSchema.from_orm(user)
assert schema.dict() == {
"name": "John",
"groups": [1, 2, 3],
"tags": [{"id": "1", "title": "foo"}, {"id": "2", "title": "bar"}],
}
``` |
{
"source": "joelbygger/adventofcode20",
"score": 4
} |
#### File: python/Day11/main.py
```python
from seating import Seating
import sys
def main(file, task):
ignore_floor = False
tolerant = False
if task == '1':
ignore_floor = False
tolerant = False
elif task == '2':
ignore_floor = True
tolerant = True
else:
print("This task is not supported...")
return
seating = Seating(file)
seating.iterate_until_stable(ignore_floor, tolerant)
print("Occupied seats task {}: {}".format(task, seating.count_occupied()))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Wrong number of argument, supply file and 1 or 2 for task 1 or 2.")
else:
main(sys.argv[1], sys.argv[2])
```
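A quick sanity-check sketch (not part of the repo) that exercises the `Seating` class defined in seating.py just below on the puzzle's published example grid. Writing the grid to a temporary file is only a convenience, and 37 occupied seats is the sample answer for task 1.
```python
import tempfile
from seating import Seating

EXAMPLE = """L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
"""

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(EXAMPLE)
    path = f.name

seating = Seating(path)
seating.iterate_until_stable(ignore_floor=False, tolerant=False)
print(seating.count_occupied())  # expected: 37 for the sample grid
```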
#### File: python/Day11/seating.py
```python
import copy
def _direction():
# If array index start at 0, 0 and we say that is top left, (x, y)
yield -1, -1 # UL
yield -1, 0 # L
    yield -1, 1  # DL
yield 0, -1 # U
yield 0, 1 # D
    yield 1, -1  # UR
yield 1, 0 # R
yield 1, 1 # DR
# def _in_matrix(pos, seats):
# return 0 <= pos[0] < len(seats[0]) and 0 <= pos[1] < len(seats)
class Seating:
def __init__(self, file):
with open(file) as f:
# A list of char arrays.
self._seats = [list(x) for x in f.read().splitlines()]
def _valid_position(self, pos):
return 0 <= pos[0] < len(self._seats[0]) and 0 <= pos[1] < len(self._seats)
def _calc_pos(self, pos, d, ignore_floor):
n_pos = (pos[0] + d[0], pos[1] + d[1])
if ignore_floor:
while True:
if not self._valid_position(n_pos) or not self._floor(self._seats[n_pos[1]][n_pos[0]]):
break
n_pos = (n_pos[0] + d[0], n_pos[1] + d[1])
return n_pos
def _get_neighbor_seats(self, pos, ignore_floor):
ns_pos = [self._calc_pos(pos, d, ignore_floor) for d in _direction()]
ns_pos_valid = filter(self._valid_position, ns_pos)
return [self._seats[x[1]][x[0]] for x in ns_pos_valid]
@staticmethod
def _free(seat):
return seat == 'L'
@staticmethod
def _floor(seat):
return seat == '.'
@staticmethod
def _occupied(seat):
return seat == '#'
def _seat_change(self, pos, neighbors, tolerant):
curr = self._seats[pos[1]][pos[0]]
occupied_cnt = len([n for n in neighbors if self._occupied(n)])
if self._free(curr) and occupied_cnt == 0:
curr = '#'
elif self._occupied(curr):
if not tolerant:
if occupied_cnt >= 4:
curr = 'L'
else:
if occupied_cnt >= 5:
curr = 'L'
return curr
def _iterate(self, ignore_floor, tolerant):
new_seats = copy.deepcopy(self._seats)
for y, row in enumerate(self._seats):
for x, seat in enumerate(row):
neighbors = self._get_neighbor_seats((x, y), ignore_floor)
seat = self._seat_change((x, y), neighbors, tolerant)
if seat != self._seats[y][x]:
new_seats[y][x] = seat
if self._seats == new_seats:
return True
else:
self._seats = copy.deepcopy(new_seats)
return False
def iterate_until_stable(self, ignore_floor, tolerant):
while True:
if self._iterate(ignore_floor, tolerant):
break
return
def iterate_times(self, iterations, ignore_floor, tolerant):
while True:
if iterations == 0 or self._iterate(ignore_floor, tolerant):
break
iterations -= 1
return
def count_occupied(self):
cnt = 0
for r in self._seats:
for s in r:
cnt += self._occupied(s)
return cnt
def get_seats(self):
return copy.deepcopy(self._seats)
``` |
{
"source": "joelcampusanorojas/bot-serverless",
"score": 3
} |
#### File: bot-serverless/HttpTrigger/__init__.py
```python
import logging
import azure.functions as func
import os
import requests
from .luis import get_luis
from .search import get_search
def main(req: func.HttpRequest) -> func.HttpResponse:
req_body = req.get_json()
text = req_body.get('text').lower()
luis_response = get_luis(text)
    search_response = None
    if luis_response == 'knowledge_base':
        search_response = get_search(text)
    #logging.info(luis_response)
    return func.HttpResponse(f"Hello, you said: {text}. The intent in LUIS is: {luis_response}. The search result is: {search_response}")
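# A hypothetical local-test sketch, following the common pattern for unit testing
# Azure Functions in Python; it still needs working LUIS/Search credentials in the
# sibling modules, and the payload text is made up:
#
#   import json
#   import azure.functions as func
#   from HttpTrigger import main
#
#   req = func.HttpRequest(
#       method="POST",
#       url="/api/HttpTrigger",
#       body=json.dumps({"text": "What is the return policy?"}).encode(),
#   )
#   print(main(req).get_body())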
``` |
{
"source": "joelcarlson/OpenKE",
"score": 2
} |
#### File: OpenKE/config/Config.py
```python
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
import json
import logging
# create file handler which logs even debug messages
l1 = logging.getLogger('root')
# l1.setLevel(logging.DEBUG)
l1.setLevel(logging.WARNING)
gv_log = logging.FileHandler('debug.log')
gv_log.setLevel(logging.DEBUG)
l1.addHandler(gv_log)
class Config(object):
'''
use ctypes to call C functions from python and set essential parameters.
'''
def __init__(self):
base_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../release/Base.so'))
self.lib = ctypes.cdll.LoadLibrary(base_file)
self.lib.sampling.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64, ctypes.c_int64, ctypes.c_int64]
self.lib.getHeadBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.lib.getTailBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.lib.testHead.argtypes = [ctypes.c_void_p]
self.lib.testTail.argtypes = [ctypes.c_void_p]
self.lib.getTestBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.lib.getValidBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.lib.getBestThreshold.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.lib.test_triple_classification.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.test_flag = False
self.in_path = None
self.out_path = None
self.bern = 0
self.hidden_size = 100
self.train_times = 0
self.margin = 1.0
self.nbatches = 100
self.negative_ent = 1
self.negative_rel = 0
self.workThreads = 1
self.alpha = 0.001
self.lmbda = 0.000
self.log_on = 1
self.exportName = None
self.importName = None
self.export_steps = 0
self.opt_method = "SGD"
self.optimizer = None
self.test_link_prediction = False
self.test_triple_classification = False
self.early_stopping = None # It expects a tuple of the following: (patience, min_delta)
self.freeze_train_embeddings = False
self.embedding_initializer_path = None
def init_link_prediction(self):
r'''
import essential files and set essential interfaces for link prediction
'''
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_h = np.zeros(self.lib.getEntityTotal(), dtype = np.int64)
self.test_t = np.zeros(self.lib.getEntityTotal(), dtype = np.int64)
self.test_r = np.zeros(self.lib.getEntityTotal(), dtype = np.int64)
self.test_h_addr = self.test_h.__array_interface__['data'][0]
self.test_t_addr = self.test_t.__array_interface__['data'][0]
self.test_r_addr = self.test_r.__array_interface__['data'][0]
def init_triple_classification(self):
r'''
import essential files and set essential interfaces for triple classification
'''
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_pos_h = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_pos_t = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_pos_r = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_neg_h = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_neg_t = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_neg_r = np.zeros(self.lib.getTestTotal(), dtype = np.int64)
self.test_pos_h_addr = self.test_pos_h.__array_interface__['data'][0]
self.test_pos_t_addr = self.test_pos_t.__array_interface__['data'][0]
self.test_pos_r_addr = self.test_pos_r.__array_interface__['data'][0]
self.test_neg_h_addr = self.test_neg_h.__array_interface__['data'][0]
self.test_neg_t_addr = self.test_neg_t.__array_interface__['data'][0]
self.test_neg_r_addr = self.test_neg_r.__array_interface__['data'][0]
self.valid_pos_h = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_pos_t = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_pos_r = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_neg_h = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_neg_t = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_neg_r = np.zeros(self.lib.getValidTotal(), dtype = np.int64)
self.valid_pos_h_addr = self.valid_pos_h.__array_interface__['data'][0]
self.valid_pos_t_addr = self.valid_pos_t.__array_interface__['data'][0]
self.valid_pos_r_addr = self.valid_pos_r.__array_interface__['data'][0]
self.valid_neg_h_addr = self.valid_neg_h.__array_interface__['data'][0]
self.valid_neg_t_addr = self.valid_neg_t.__array_interface__['data'][0]
self.valid_neg_r_addr = self.valid_neg_r.__array_interface__['data'][0]
self.relThresh = np.zeros(self.lib.getRelationTotal(), dtype = np.float32)
self.relThresh_addr = self.relThresh.__array_interface__['data'][0]
# prepare for train and test
def init(self):
self.trainModel = None
if self.in_path != None:
self.lib.setInPath(ctypes.create_string_buffer(self.in_path.encode(), len(self.in_path) * 2))
self.lib.setBern(self.bern)
self.lib.setWorkThreads(self.workThreads)
self.lib.randReset()
self.lib.importTrainFiles()
logging.warning('Imported train')
self.relTotal = self.lib.getRelationTotal()
self.entTotal = self.lib.getEntityTotal()
self.trainTotal = self.lib.getTrainTotal()
logging.warning('Got train total: {}'.format(self.trainTotal))
self.testTotal = self.lib.getTestTotal()
logging.warning('Got test total: {}'.format(self.testTotal))
self.validTotal = self.lib.getValidTotal()
logging.warning('Got val total: {}'.format(self.validTotal))
self.batch_size = int(self.lib.getTrainTotal() / self.nbatches)
logging.warning('Set batch size: {}'.format(self.batch_size))
self.batch_seq_size = self.batch_size * (1 + self.negative_ent + self.negative_rel)
self.batch_h = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype = np.int64) # 1d list of 0s of shape (1 + self.negative_ent + self.negative_rel,)
self.batch_t = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype = np.int64)
self.batch_r = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype = np.int64)
self.batch_y = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype = np.float32)
self.batch_h_addr = self.batch_h.__array_interface__['data'][0]
self.batch_t_addr = self.batch_t.__array_interface__['data'][0]
self.batch_r_addr = self.batch_r.__array_interface__['data'][0]
self.batch_y_addr = self.batch_y.__array_interface__['data'][0]
if self.freeze_train_embeddings:
self.ent_embedding_initializer = self.set_ent_embedding_initializer(self.embedding_initializer_path)
self.rel_embedding_initializer = self.set_rel_embedding_initializer(self.embedding_initializer_path)
logging.warning('Initialized embeddings from: {}'.format(self.embedding_initializer_path))
if self.test_link_prediction:
self.init_link_prediction()
if self.test_triple_classification:
self.init_triple_classification()
def set_freeze_train_embeddings(self, freeze_train_embeddings):
self.freeze_train_embeddings = freeze_train_embeddings
def set_embedding_initializer_path(self, embedding_initializer_path):
self.embedding_initializer_path = embedding_initializer_path
# def set_test_in_path(self, path):
# self.in_path = path
def get_ent_total(self):
return self.entTotal
def get_rel_total(self):
return self.relTotal
def set_lmbda(self, lmbda):
self.lmbda = lmbda
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def set_opt_method(self, method):
self.opt_method = method
def set_test_link_prediction(self, flag):
self.test_link_prediction = flag
def set_test_triple_classification(self, flag):
self.test_triple_classification = flag
def set_log_on(self, flag):
self.log_on = flag
def set_alpha(self, alpha):
self.alpha = alpha
def set_in_path(self, path):
self.in_path = path
def set_out_files(self, path):
self.out_path = path
def set_bern(self, bern):
self.bern = bern
def set_dimension(self, dim):
self.hidden_size = dim
def set_train_times(self, times):
self.train_times = times
def set_nbatches(self, nbatches):
self.nbatches = nbatches
def set_margin(self, margin):
self.margin = margin
def set_work_threads(self, threads):
self.workThreads = threads
def set_ent_neg_rate(self, rate):
self.negative_ent = rate
def set_rel_neg_rate(self, rate):
self.negative_rel = rate
def set_import_files(self, path):
self.importName = path
def set_export_files(self, path, steps = 0):
self.exportName = path
self.export_steps = steps
def set_export_steps(self, steps):
self.export_steps = steps
def set_early_stopping(self, early_stopping):
self.early_stopping = early_stopping
# call C function for sampling
def sampling(self):
self.lib.sampling(self.batch_h_addr, self.batch_t_addr, self.batch_r_addr, self.batch_y_addr, self.batch_size, self.negative_ent, self.negative_rel)
# save model
def save_tensorflow(self):
with self.graph.as_default():
with self.sess.as_default():
self.saver.save(self.sess, self.exportName)
# restore model
def restore_tensorflow(self):
with self.graph.as_default():
with self.sess.as_default():
self.saver.restore(self.sess, self.importName)
def export_variables(self, path = None):
with self.graph.as_default():
with self.sess.as_default():
if path == None:
self.saver.save(self.sess, self.exportName)
else:
self.saver.save(self.sess, path)
def import_variables(self, path = None):
with self.graph.as_default():
with self.sess.as_default():
if path == None:
self.saver.restore(self.sess, self.importName)
else:
self.saver.restore(self.sess, path)
def get_parameter_lists(self):
return self.trainModel.parameter_lists
def get_parameters_by_name(self, var_name):
with self.graph.as_default():
with self.sess.as_default():
if var_name in self.trainModel.parameter_lists:
return self.sess.run(self.trainModel.parameter_lists[var_name])
else:
return None
def get_parameters(self, mode = "numpy"):
res = {}
lists = self.get_parameter_lists()
for var_name in lists:
if mode == "numpy":
res[var_name] = self.get_parameters_by_name(var_name)
else:
res[var_name] = self.get_parameters_by_name(var_name).tolist()
return res
def save_parameters(self, path = None):
if path == None:
path = self.out_path
embedding_dict = self.get_parameters("list")
# OpenKE saves embeddings for ComplEx in a 4 key dict, we
# want to conform to our own format, so we will reshape this dictionary before saving:
if "ent_re_embeddings" in embedding_dict.keys():
embedding_dict["ent_embeddings"] = [re+im for (re,im) in\
zip(embedding_dict["ent_re_embeddings"], embedding_dict["ent_im_embeddings"])]
embedding_dict["rel_embeddings"] = [re+im for (re,im) in\
zip(embedding_dict["rel_re_embeddings"], embedding_dict["rel_im_embeddings"])]
del embedding_dict['ent_re_embeddings']
del embedding_dict['ent_im_embeddings']
del embedding_dict['rel_re_embeddings']
del embedding_dict['rel_im_embeddings']
dir_name = os.path.dirname(path)
os.makedirs(dir_name, exist_ok=True)
f = open(path, "w")
f.write(json.dumps(embedding_dict))
f.close()
def set_parameters_by_name(self, var_name, tensor):
with self.graph.as_default():
with self.sess.as_default():
if var_name in self.trainModel.parameter_lists:
self.trainModel.parameter_lists[var_name].assign(tensor).eval()
def set_parameters(self, lists):
for i in lists:
self.set_parameters_by_name(i, lists[i])
def set_model(self, model):
self.model = model
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf.Session()
with self.sess.as_default():
initializer = tf.contrib.layers.xavier_initializer(uniform = True)
with tf.variable_scope("model", reuse=None, initializer = initializer):
self.trainModel = self.model(config = self)
if self.optimizer != None:
pass
elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate = self.alpha, initial_accumulator_value=0.1)
elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
self.optimizer = tf.train.AdadeltaOptimizer(self.alpha)
elif self.opt_method == "Adam" or self.opt_method == "adam":
self.optimizer = tf.train.AdamOptimizer(self.alpha)
else:
self.optimizer = tf.train.GradientDescentOptimizer(self.alpha)
grads_and_vars = self.optimizer.compute_gradients(self.trainModel.loss)
if self.freeze_train_embeddings:
# The below based vaguely on this SO question:
# https://stackoverflow.com/questions/35803425/update-only-part-of-the-word-embedding-matrix-in-tensorflow
# Goal: Take indices from indexedSlices object, which encodes which values are involved in the forward pass
                        # and are trainable, mask any gradients which apply to values we don't want to change (i.e. those
# created during initial training) and apply masked gradient update (impacting only those embeddings created
# during test/val)
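                        # Toy illustration of the masking below (the numbers are made up):
                        # if ent_embedding_length == 3 and a batch touches embedding rows
                        # grads.indices == [0, 2, 4, 5], the mask is [0, 0, 1, 1], so only
                        # rows 4 and 5 (entities first seen in the new data) receive updates;
                        # rows 0 and 2 keep their pre-trained values.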
# Get the grads and vars for each embedding
if len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)) == 2: # TransE, DistMult
ent_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[0]) # Ent embeddings
rel_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[1]) # Rel embeddings
# Extract the gradients for entities and relationships
ent_grads = ent_grads_and_var[0][0]
rel_grads = rel_grads_and_var[0][0]
# Create a mask of 1s and 0s for whether or not the gradient corresponds to a value in the new data or not
# That is, if the index of the gradient (and by extension the entity or relation) is greater than or equal to the
# length of the training embedding (self.xxx_embedding_length) then we set it to 1, else 0. If the value is 0, then
                            # the gradient will not be propagated
ent_mask = tf.cast(ent_grads.indices >= tf.constant(self.ent_embedding_length, dtype=tf.int64), tf.float32)
rel_mask = tf.cast(rel_grads.indices >= tf.constant(self.rel_embedding_length, dtype=tf.int64), tf.float32)
# Mask the gradients using the above derived mask
# The mask has to be reshaped to conform to the shape of the gradients.values
ent_grads_masked = tf.reshape(ent_mask, [tf.shape(ent_mask)[0],1]) * ent_grads.values
rel_grads_masked = tf.reshape(rel_mask, [tf.shape(rel_mask)[0],1]) * rel_grads.values
# Reconstruct the grad and var tuple for ent and rel
# This reconstruction is required because tuples are immutable
                            # We should probably find a more principled way of doing this than relying on bare indices; it makes it all a bit opaque
ent_indexedSlices = tf.IndexedSlices(values=ent_grads_masked, indices=grads_and_vars[0][0].indices, dense_shape=grads_and_vars[0][0].dense_shape)
ent_variable = grads_and_vars[0][1]
ent_grads_and_var_tuple = (ent_indexedSlices,ent_variable)
rel_indexedSlices = tf.IndexedSlices(values=rel_grads_masked, indices=grads_and_vars[1][0].indices, dense_shape=grads_and_vars[1][0].dense_shape)
rel_variable = grads_and_vars[1][1]
rel_grads_and_var_tuple = (rel_indexedSlices,rel_variable)
# swap in the newly reconstructed embedding grad+var tuples
grads_and_vars[0] = ent_grads_and_var_tuple
grads_and_vars[1] = rel_grads_and_var_tuple
self.train_op = self.optimizer.apply_gradients(grads_and_vars)
# Hack together ComplEx for evaluation purposes, fix this section up later
# If the performance is good
if len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)) == 4: # ComplEx , find a better way to know what model we have chosen
ent1_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss,\
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[0]) # Ent real embeddings
ent2_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss,\
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[1]) # Ent im embeddings
rel1_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss,\
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[2]) # Rel real embeddings
rel2_grads_and_var = self.optimizer.compute_gradients(self.trainModel.loss,\
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[3]) # Rel im embeddings
# Extract the gradients for entities and relationships
ent1_grads = ent1_grads_and_var[0][0]
ent2_grads = ent2_grads_and_var[0][0]
rel1_grads = rel1_grads_and_var[0][0]
rel2_grads = rel2_grads_and_var[0][0]
# Create a mask of 1s and 0s for whether or not the gradient corresponds to a value in the new data or not
# That is, if the index of the gradient (and by extension the entity or relation) is greater than or equal to the
# length of the training embedding (self.xxx_embedding_length) then we set it to 1, else 0. If the value is 0, then
                            # the gradient will not be propagated
ent1_mask = tf.cast(ent1_grads.indices >= tf.constant(self.ent_embedding_length, dtype=tf.int64), tf.float32)
ent2_mask = tf.cast(ent2_grads.indices >= tf.constant(self.ent_embedding_length, dtype=tf.int64), tf.float32)
rel1_mask = tf.cast(rel1_grads.indices >= tf.constant(self.rel_embedding_length, dtype=tf.int64), tf.float32)
rel2_mask = tf.cast(rel2_grads.indices >= tf.constant(self.rel_embedding_length, dtype=tf.int64), tf.float32)
# Mask the gradients using the above derived mask
# The mask has to be reshaped to conform to the shape of the gradients.values
ent1_grads_masked = tf.reshape(ent1_mask, [tf.shape(ent1_mask)[0],1]) * ent1_grads.values
ent2_grads_masked = tf.reshape(ent2_mask, [tf.shape(ent2_mask)[0],1]) * ent2_grads.values
rel1_grads_masked = tf.reshape(rel1_mask, [tf.shape(rel1_mask)[0],1]) * rel1_grads.values
rel2_grads_masked = tf.reshape(rel2_mask, [tf.shape(rel2_mask)[0],1]) * rel2_grads.values
# Reconstruct the grad and var tuple for ent and rel
# This reconstruction is required because tuples are immutable
                            # We should probably find a more principled way of doing this than relying on bare indices; it makes it all a bit opaque
ent1_indexedSlices = tf.IndexedSlices(values=ent1_grads_masked, indices=grads_and_vars[0][0].indices, dense_shape=grads_and_vars[0][0].dense_shape)
ent1_variable = grads_and_vars[0][1]
ent1_grads_and_var_tuple = (ent1_indexedSlices,ent1_variable)
ent2_indexedSlices = tf.IndexedSlices(values=ent2_grads_masked, indices=grads_and_vars[1][0].indices, dense_shape=grads_and_vars[1][0].dense_shape)
ent2_variable = grads_and_vars[1][1]
ent2_grads_and_var_tuple = (ent2_indexedSlices,ent2_variable)
rel1_indexedSlices = tf.IndexedSlices(values=rel1_grads_masked, indices=grads_and_vars[2][0].indices, dense_shape=grads_and_vars[2][0].dense_shape)
rel1_variable = grads_and_vars[2][1]
rel1_grads_and_var_tuple = (rel1_indexedSlices,rel1_variable)
rel2_indexedSlices = tf.IndexedSlices(values=rel2_grads_masked, indices=grads_and_vars[3][0].indices, dense_shape=grads_and_vars[3][0].dense_shape)
rel2_variable = grads_and_vars[3][1]
rel2_grads_and_var_tuple = (rel2_indexedSlices,rel2_variable)
# swap in the newly reconstructed embedding grad+var tuples
grads_and_vars[0] = ent1_grads_and_var_tuple
grads_and_vars[1] = ent2_grads_and_var_tuple
grads_and_vars[2] = rel1_grads_and_var_tuple
grads_and_vars[3] = rel2_grads_and_var_tuple
# Pass a few things for debugging
self.ent1_variable_before = ent1_variable
self.ent1_grads = ent1_grads
self.ent1_mask = ent1_mask
self.ent1_grads_masked = ent1_grads_masked
logging.debug("ent1_var: {}".format(ent1_variable))
self.train_op = self.optimizer.apply_gradients(grads_and_vars)
self.ent1_variable_after = ent1_variable
else:
logging.warning('Models currently supported: TransE_freeze, DistMult_freeze, ComplEx_freeze')
else:
self.train_op = self.optimizer.apply_gradients(grads_and_vars)
self.saver = tf.train.Saver()
self.sess.run(tf.initialize_all_variables())
def train_step(self, batch_h, batch_t, batch_r, batch_y):
feed_dict = {
self.trainModel.batch_h: batch_h,
self.trainModel.batch_t: batch_t,
self.trainModel.batch_r: batch_r,
self.trainModel.batch_y: batch_y
}
# _, loss,ent1_grads,ent1_mask,ent1_grads_masked,ent1_variable_before,ent1_variable_after, ld_res,ld_loss_func, ld_y = self.sess.run([self.train_op, self.trainModel.loss, self.ent1_grads, self.ent1_mask, self.ent1_grads_masked, self.ent1_variable_before, self.ent1_variable_after,self.trainModel.ld_res, self.trainModel.ld_loss_func, self.trainModel.ld_y], feed_dict)
_, loss, pos_ent_mean_magnitude, pos_ent_min, pos_ent_max, pos_ent_sd = self.sess.run([self.train_op, self.trainModel.loss, self.trainModel.pos_ent_mean_magnitude, self.trainModel.pos_ent_min, self.trainModel.pos_ent_max, self.trainModel.pos_ent_sd], feed_dict)
# if len(np.where(ent1_grads.indices == 4627)[0] > 0):
# check_this_one = np.where(ent1_grads.indices == 4627)[0][0]
# l1.debug("ent1_grads.values.shape : {}".format(ent1_grads.values.shape))
# l1.debug("ent1_grads.values ({}) : {}".format(check_this_one, ent1_grads.values[check_this_one][0:10]))
# l1.debug(sum([sum(abs(vect)) for vect in ent1_grads.values]))
# l1.debug("ent1_grads.indices : {}".format(ent1_grads.indices[check_this_one]))
# l1.debug("max(ent1_grads.indices) : {}".format(max(ent1_grads.indices)))
# l1.debug("min(ent1_grads.indices) : {}".format(min(ent1_grads.indices)))
# l1.debug("ent1_mask.shape : {}".format(ent1_mask.shape))
# l1.debug("ent1_mask : {}".format(ent1_mask))
# l1.debug("sum(ent1_mask) : {}".format(sum(ent1_mask)))
# l1.debug("ent1_grads_masked.shape : {}".format(ent1_grads_masked.shape))
# l1.debug(sum([sum(abs(vect)) for vect in ent1_grads_masked]))
# l1.debug("ent1_grads_masked : {}".format(ent1_grads_masked[check_this_one][0:10]))
# l1.debug("ent1_variable_before : {}".format(ent1_variable_before[check_this_one][0:10]))
# l1.debug("ent1_variable_after : {}".format(ent1_variable_after[check_this_one][0:10]))
# l1.debug("ent1_variable_before == ent1_variable_after: {}".format(ent1_variable_before == ent1_variable_after))
# l1.debug("res = {}".format(", ".join([str(x) for x in ld_res])))
# l1.debug("y = {}".format(", ".join([str(x) for x in ld_y])))
# l1.debug("loss = {}".format(ld_loss_func))
# l1.debug("------")
self.pos_ent_mean_magnitude = pos_ent_mean_magnitude
self.pos_ent_min = pos_ent_min
self.pos_ent_max = pos_ent_max
self.pos_ent_sd = pos_ent_sd
return loss
def test_step(self, test_h, test_t, test_r):
feed_dict = {
self.trainModel.predict_h: test_h,
self.trainModel.predict_t: test_t,
self.trainModel.predict_r: test_r,
}
predict = self.sess.run(self.trainModel.predict, feed_dict)
return predict
def run(self):
with self.graph.as_default():
with self.sess.as_default():
if self.importName != None:
self.restore_tensorflow()
if self.early_stopping is not None:
patience, min_delta = self.early_stopping
best_loss = np.finfo('float32').max
wait_steps = 0
for times in range(self.train_times):
t_init = time.time()
loss = 0.0
pos_ent_mean_magnitude = 0.0
pos_ent_min = 0.0
pos_ent_max = 0.0
pos_ent_sd = 0.0
for batch in range(self.nbatches):
self.sampling()
loss += self.train_step(self.batch_h, self.batch_t, self.batch_r, self.batch_y)
pos_ent_mean_magnitude += self.pos_ent_mean_magnitude
pos_ent_min += self.pos_ent_min
pos_ent_max += self.pos_ent_max
pos_ent_sd += self.pos_ent_sd
if self.log_on:
t_end = time.time()
pos_ent_mean_magnitude /= (self.nbatches)
pos_ent_min /= (self.nbatches)
pos_ent_max /= (self.nbatches)
pos_ent_sd /= (self.nbatches)
print('Epoch: {}, loss: {}, time: {}, mag: {}, sd: {}, [{}, {}]'.format(times,\
round(loss, 2),\
round(t_end - t_init, 0),\
round(pos_ent_mean_magnitude, 3),\
round(pos_ent_sd, 3),\
round(pos_ent_min, 3),\
round(pos_ent_max, 3)))
# if self.exportName != None and (self.export_steps!=0 and times % self.export_steps == 0):
# self.save_tensorflow()
# print("times: {} , export_steps: {}, div: , out_path:{}".format(times, self.export_steps, self.out_path))
if times > 0:
if self.out_path != None and (self.export_steps!=0 and times % self.export_steps == 0):
self.save_parameters(self.out_path + "_{}".format(times))
if self.early_stopping is not None:
if loss + min_delta < best_loss:
best_loss = loss
wait_steps = 0
elif wait_steps < patience:
wait_steps += 1
else:
print('Early stopping. Losses have not been improved enough in {} times'.format(patience))
break
if self.exportName != None:
self.save_tensorflow()
if self.out_path != None:
self.save_parameters(self.out_path)
def test(self):
with self.graph.as_default():
with self.sess.as_default():
if self.importName != None:
self.restore_tensorflow()
if self.test_link_prediction:
total = self.lib.getTestTotal()
for times in range(total):
self.lib.getHeadBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)
res = self.test_step(self.test_h, self.test_t, self.test_r)
self.lib.testHead(res.__array_interface__['data'][0])
self.lib.getTailBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)
res = self.test_step(self.test_h, self.test_t, self.test_r)
self.lib.testTail(res.__array_interface__['data'][0])
if self.log_on:
print(times)
self.lib.test_link_prediction()
if self.test_triple_classification:
self.lib.getValidBatch(self.valid_pos_h_addr, self.valid_pos_t_addr, self.valid_pos_r_addr, self.valid_neg_h_addr, self.valid_neg_t_addr, self.valid_neg_r_addr)
res_pos = self.test_step(self.valid_pos_h, self.valid_pos_t, self.valid_pos_r)
res_neg = self.test_step(self.valid_neg_h, self.valid_neg_t, self.valid_neg_r)
self.lib.getBestThreshold(self.relThresh_addr, res_pos.__array_interface__['data'][0], res_neg.__array_interface__['data'][0])
self.lib.getTestBatch(self.test_pos_h_addr, self.test_pos_t_addr, self.test_pos_r_addr, self.test_neg_h_addr, self.test_neg_t_addr, self.test_neg_r_addr)
res_pos = self.test_step(self.test_pos_h, self.test_pos_t, self.test_pos_r)
res_neg = self.test_step(self.test_neg_h, self.test_neg_t, self.test_neg_r)
self.lib.test_triple_classification(self.relThresh_addr, res_pos.__array_interface__['data'][0], res_neg.__array_interface__['data'][0])
def predict_head_entity(self, t, r, k):
r'''This mothod predicts the top k head entities given tail entity and relation.
Args:
t (int): tail entity id
r (int): relation id
k (int): top k head entities
Returns:
list: k possible head entity ids
'''
self.init_link_prediction()
if self.importName != None:
self.restore_tensorflow()
test_h = np.array(range(self.entTotal))
test_r = np.array([r] * self.entTotal)
test_t = np.array([t] * self.entTotal)
res = self.test_step(test_h, test_t, test_r).reshape(-1).argsort()[:k]
print(res)
return res
def predict_tail_entity(self, h, r, k):
r'''This mothod predicts the top k tail entities given head entity and relation.
Args:
h (int): head entity id
r (int): relation id
k (int): top k tail entities
Returns:
list: k possible tail entity ids
'''
self.init_link_prediction()
if self.importName != None:
self.restore_tensorflow()
test_h = np.array([h] * self.entTotal)
test_r = np.array([r] * self.entTotal)
test_t = np.array(range(self.entTotal))
res = self.test_step(test_h, test_t, test_r).reshape(-1).argsort()[:k]
print(res)
return res
def predict_relation(self, h, t, k):
r'''This methods predict the relation id given head entity and tail entity.
Args:
h (int): head entity id
t (int): tail entity id
k (int): top k relations
Returns:
list: k possible relation ids
'''
self.init_link_prediction()
if self.importName != None:
self.restore_tensorflow()
test_h = np.array([h] * self.relTotal)
test_r = np.array(range(self.relTotal))
test_t = np.array([t] * self.relTotal)
res = self.test_step(test_h, test_t, test_r).reshape(-1).argsort()[:k]
print(res)
return res
def predict_triple(self, h, t, r, thresh = None):
r'''This method tells you whether the given triple (h, t, r) is correct of wrong
Args:
h (int): head entity id
t (int): tail entity id
r (int): relation id
            thresh (float): threshold for the triple
'''
self.init_triple_classification()
if self.importName != None:
self.restore_tensorflow()
res = self.test_step(np.array([h]), np.array([t]), np.array([r]))
if thresh != None:
if res < thresh:
print("triple (%d,%d,%d) is correct" % (h, t, r))
else:
print("triple (%d,%d,%d) is wrong" % (h, t, r))
return
self.lib.getValidBatch(self.valid_pos_h_addr, self.valid_pos_t_addr, self.valid_pos_r_addr, self.valid_neg_h_addr, self.valid_neg_t_addr, self.valid_neg_r_addr)
res_pos = self.test_step(self.valid_pos_h, self.valid_pos_t, self.valid_pos_r)
res_neg = self.test_step(self.valid_neg_h, self.valid_neg_t, self.valid_neg_r)
self.lib.getBestThreshold(self.relThresh_addr, res_pos.__array_interface__['data'][0], res_neg.__array_interface__['data'][0])
if res < self.relThresh[r]:
print("triple (%d,%d,%d) is correct" % (h, t, r))
else:
print("triple (%d,%d,%d) is wrong" % (h, t, r))
def set_ent_embedding_initializer(self, embedding_path):
# This function needs to take a path for the embedding file produced by the initial training
# And a list of the entities in the new (val or test, or oov or whatever) data (that are not in the old data?)
# and create a new matrix that is composed of the training embeddings with random values initialized for the
        # new embeddings appended to it
# We also need to get the indices for updates
# Need to fix this - right now it leaves the file open. Just use a with statement
try:
embs = open(embedding_path, 'r')
# Store configuration file values
except FileNotFoundError:
raise Exception('Entity embedding file not found: {}'.format(embedding_path))
embedding_dict = json.loads(embs.read())
# If the embeddings were produced by OpenKE, we will have to combine them
# if "ent_re_embeddings" in embedding_dict.keys():
# embedding_dict["ent_embeddings"] = [re+im for (re,im) in\
# zip(embedding_dict["ent_re_embeddings"], embedding_dict["ent_im_embeddings"])]
# del embedding_dict['ent_re_embeddings']
# del embedding_dict['ent_im_embeddings']
ent_embedding = embedding_dict["ent_embeddings"]
self.ent_embedding_length = len(ent_embedding)
# Compare to length of the training embedding to the total number of entities to see how many
        # new rows we need to append to the embedding initializer
if self.entTotal > self.ent_embedding_length:
print("New entities found:")
print("-- Total Entities in embedding file: {}".format(self.ent_embedding_length))
print("-- Total Entities in data: {} ".format(self.entTotal))
required_new_vectors = self.entTotal - self.ent_embedding_length
# Perform Xavier initialization for the new embeddings: sqrt(6 / (fan_in + fan_out))
# Not clear whether we should initialize to the same fan size as the original embeddings
# (i.e. self.ent_embedding_length)
# Or the fan size for the original + new embeddings (i.e. self.entTotal)
ent_bound = np.sqrt(6 / (self.entTotal + self.hidden_size))
# new_ent_embedding = [np.random.uniform(-ent_bound,ent_bound,self.hidden_size).tolist()\
# for x in range(required_new_vectors)]
            # PyTorch-BigGraph initializes with draws from a standard normal, so we will too
# Updated: We initialize the sd to be 0.4 to be in accordance with empirical embedding sd
new_ent_embedding = [np.random.normal(loc=0.0, scale=0.4, size=self.hidden_size).tolist()\
for x in range(required_new_vectors)]
ent_embedding = ent_embedding + new_ent_embedding
# self.ent_update_slices = [self.ent_embedding_length - idx for idx in range(required_new_vectors)]
return ent_embedding
def set_rel_embedding_initializer(self, embedding_path):
# TODO: Combine this and the set_ent_embedding_initializer, lots of duplicated code
try:
embs = open(embedding_path, 'r')
# Store configuration file values
except FileNotFoundError:
raise Exception('Relation embedding file not found: {}'.format(embedding_path))
embedding_dict = json.loads(embs.read())
# If the embeddings were produced by OpenKE, we will have to combine them
# if "rel_re_embeddings" in embedding_dict.keys():
# embedding_dict["rel_embeddings"] = [re+im for (re,im) in\
# zip(embedding_dict["rel_re_embeddings"], embedding_dict["rel_im_embeddings"])]
# del embedding_dict['rel_re_embeddings']
# del embedding_dict['rel_im_embeddings']
rel_embedding = embedding_dict["rel_embeddings"]
self.rel_embedding_length = len(rel_embedding)
if self.relTotal > self.rel_embedding_length :
print("New relationships found:")
print("-- Total Relationships in embedding file: {}".format(len(rel_embedding)))
print("-- Total Relationships in data: {} ".format(self.relTotal))
required_new_vectors = self.relTotal - self.rel_embedding_length
# TODO: Find a good way to initialize the vectors
# new_rel_embedding = tf.Variable(name="new_rel_embedding",\
# shape = [self.relTotal - len(rel_embedding), self.hidden_size],\
# initializer = tf.contrib.layers.xavier_initializer(uniform = False))
# print(new_rel_embedding.initialized_value())
rel_bound = np.sqrt(6 / (self.relTotal + self.hidden_size)) # Xavier init: sqrt(6 / (fan_in + fan_out))
# new_rel_embedding = [np.random.uniform(-rel_bound, rel_bound, self.hidden_size).tolist()\
# for x in range(required_new_vectors)]
new_rel_embedding = [np.random.normal(loc=0.0, scale=0.4, size=self.hidden_size).tolist()\
for x in range(required_new_vectors)]
rel_embedding = rel_embedding + new_rel_embedding
# self.rel_update_slices = [self.rel_embedding_length - idx for idx in range(required_new_vectors)]
return rel_embedding
```
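A minimal driver sketch for the freeze-training path added above, modelled on the usual OpenKE example scripts. The benchmark directory, the embedding JSON path, and the assumption that `models` exposes `ComplEx_freeze` from its `__init__` are placeholders specific to this sketch; `set_dimension` must match the dimensionality of the saved embeddings.
```python
import config
import models

con = config.Config()
con.set_in_path("./benchmarks/FB15K237/")                        # assumed dataset path
con.set_work_threads(4)
con.set_train_times(100)
con.set_nbatches(100)
con.set_alpha(0.1)
con.set_dimension(200)                                           # must match the saved embeddings
con.set_opt_method("adagrad")
con.set_ent_neg_rate(25)
con.set_freeze_train_embeddings(True)
con.set_embedding_initializer_path("./res/embedding.vec.json")   # assumed export from a prior run
con.set_out_files("./res/new_embedding.vec.json")
con.init()
con.set_model(models.ComplEx_freeze)
con.run()
```
During `run()`, only embedding rows whose index is at or beyond the row counts recorded from the initializer JSON receive gradient updates; the pre-trained rows stay frozen.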
#### File: OpenKE/models/ComplEx_freeze.py
```python
import numpy as np
import tensorflow as tf
from .Model import Model
import logging
l1 = logging.getLogger('root')
l1.setLevel(logging.WARNING)
# l1.setLevel(logging.DEBUG)
gv_log = logging.FileHandler('y_and_res.log')
gv_log.setLevel(logging.DEBUG)
l1.addHandler(gv_log)
class ComplEx_freeze(Model):
def embedding_def(self):
config = self.get_config()
# Real is first half of embedding, Im is second
real_idx = config.hidden_size // 2
im_idx = config.hidden_size
logging.warning("real_idx {}".format(real_idx))
ent1_initilializer = tf.constant_initializer(np.array(config.ent_embedding_initializer)[:,0:real_idx] , verify_shape=True)
ent2_initilializer = tf.constant_initializer(np.array(config.ent_embedding_initializer)[:,real_idx:im_idx] , verify_shape=True)
rel1_initilializer = tf.constant_initializer(np.array(config.rel_embedding_initializer)[:,0:real_idx] , verify_shape=True)
rel2_initilializer = tf.constant_initializer(np.array(config.rel_embedding_initializer)[:,real_idx:im_idx] , verify_shape=True)
self.ent1_embeddings = tf.get_variable(name = "ent1_embeddings",\
shape = [config.entTotal, config.hidden_size//2],\
initializer = ent1_initilializer,\
trainable = True) #initialize with old embeddings
self.ent2_embeddings = tf.get_variable(name = "ent2_embeddings",\
shape = [config.entTotal, config.hidden_size//2],\
initializer = ent2_initilializer,\
trainable = True) #initialize with old embeddings
self.rel1_embeddings = tf.get_variable(name = "rel1_embeddings",\
shape = [config.relTotal, config.hidden_size//2],\
initializer = rel1_initilializer,\
trainable = True) #initialize with old embeddings
self.rel2_embeddings = tf.get_variable(name = "rel2_embeddings",\
shape = [config.relTotal, config.hidden_size//2],\
initializer = rel2_initilializer,\
trainable = True) #initialize with old embeddings
self.parameter_lists = {"ent_re_embeddings":self.ent1_embeddings, \
"ent_im_embeddings":self.ent2_embeddings, \
"rel_re_embeddings":self.rel1_embeddings, \
"rel_im_embeddings":self.rel2_embeddings}
r'''
ComplEx extends DistMult by introducing complex-valued embeddings so as to better model asymmetric relations.
It is proved that HolE is subsumed by ComplEx as a special case.
'''
def _calc(self, e1_h, e2_h, e1_t, e2_t, r1, r2):
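        # Score is the real part of the trilinear Hermitian product Re(<h, r, conj(t)>):
        #   Re(h)Re(t)Re(r) + Im(h)Im(t)Re(r) + Re(h)Im(t)Im(r) - Im(h)Re(t)Im(r),
        # with e1_* holding real parts, e2_* imaginary parts, and r1/r2 the relation's
        # real/imaginary parts.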
return e1_h * e1_t * r1 + e2_h * e2_t * r1 + e1_h * e2_t * r2 - e2_h * e1_t * r2
def loss_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
batch_size = config.batch_size
negative_ent = config.negative_ent
negative_rel = config.negative_rel
#To get positive triples and negative triples for training
#To get labels for the triples, positive triples as 1 and negative triples as -1
#The shapes of h, t, r, y are (batch_size, 1 + negative_ent + negative_rel)
h, t, r = self.get_all_instance()
# y = self.get_all_labels()
logging.warning("h dim: {}".format(h.shape)) # (neg_ent + neg_rel + 1)*batch_size (+1 is from 1 pos_ent per set of negs)
# logging.warning("y dim: {}".format(y.shape))
#Embedding entities and relations of triples
e1_h = tf.nn.embedding_lookup(self.ent1_embeddings, h)
e2_h = tf.nn.embedding_lookup(self.ent2_embeddings, h)
e1_t = tf.nn.embedding_lookup(self.ent1_embeddings, t)
e2_t = tf.nn.embedding_lookup(self.ent2_embeddings, t)
r1 = tf.nn.embedding_lookup(self.rel1_embeddings, r)
r2 = tf.nn.embedding_lookup(self.rel2_embeddings, r)
#Calculating score functions for all positive triples and negative triples
res = tf.reduce_sum(self._calc(e1_h, e2_h, e1_t, e2_t, r1, r2), 1, keep_dims = False)
# Labels are simply a list of 1s as long as the batch size, with an accompanying zero
labels = tf.stack(tf.split(tf.tile([1,0],[batch_size]), batch_size))
# Get positive and negative scores. Positive scores are the first N_batch size, and
# the remaining are the negative scores. for each positive score there are negative_ent + negative_rel
# negative scores
pos_scores = tf.split(res[0:batch_size], batch_size)
neg_scores = tf.split(res[batch_size:], batch_size)
# shortcut to save computation time
logsumexp_neg_scores = tf.math.reduce_logsumexp(neg_scores, 1, keep_dims=True)
logits = tf.concat([pos_scores, logsumexp_neg_scores], axis=1)
loss_func = tf.losses.softmax_cross_entropy(onehot_labels=labels,
logits=logits,
reduction=tf.losses.Reduction.SUM)
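        # With one-hot labels [1, 0] and logits [pos, logsumexp(negs)], each row's loss equals
        # -log( exp(pos) / (exp(pos) + sum_i exp(neg_i)) ), i.e. the usual softmax cross entropy
        # over the full candidate set with the positive triple as the target.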
logging.warning("Res dim: {}".format(res.shape))
# logging.warning("- y * res dim: {}".format((- y * res).shape))
l1.debug("res : {}".format(res))
# l1.debug("y : {}".format(y))
# l1.debug("y2 : {}".format(y_cross_ent)) # Convert y to cross entropy range
l1.debug("------")
        # When freezing embeddings, a typical regularizer such as this is not particularly meaningful,
        # as it penalizes many vectors that we have no wish to change
regul_func = tf.reduce_mean(e1_h ** 2) + tf.reduce_mean(e1_t ** 2) + tf.reduce_mean(e2_h ** 2) + tf.reduce_mean(e2_t ** 2) + tf.reduce_mean(r1 ** 2) + tf.reduce_mean(r2 ** 2)
# I am imagining some future scenario where a part of the loss function is something that
# Penalizes distributional differences between positive and negative samples, since we can almost guarantee
# that negative samples will be drawn from the (much larger) training set. For now, I just
# wish to be able to track the mean magnitude of the newly produced vectors
self.pos_ent_mean_magnitude = tf.reduce_mean(tf.reduce_mean(tf.math.abs(e1_h[0:batch_size,]), 1)) # Mean of means of embeddings
self.pos_ent_min = tf.reduce_min(e1_h[0:batch_size,])
self.pos_ent_max = tf.reduce_max(e1_h[0:batch_size,])
self.pos_ent_sd = tf.reduce_mean(tf.math.reduce_std(e1_h[0:batch_size,], 1)) # mean of sds of embeddings
        # Another option is to clamp the max norm of the weight vectors using something like the keras.constraints.MaxNorm function after the weight update
# See:
# https://stats.stackexchange.com/questions/257996/what-is-maxnorm-constraint-how-is-it-useful-in-convolutional-neural-networks
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/keras/constraints.py
# http://cs231n.github.io/neural-networks-2/#reg
#Calculating loss to get what the framework will optimize
self.loss = loss_func + config.lmbda * regul_func
def predict_def(self):
config = self.get_config()
predict_h, predict_t, predict_r = self.get_predict_instance()
predict_h_e1 = tf.nn.embedding_lookup(self.ent1_embeddings, predict_h)
predict_t_e1 = tf.nn.embedding_lookup(self.ent1_embeddings, predict_t)
predict_r_e1 = tf.nn.embedding_lookup(self.rel1_embeddings, predict_r)
predict_h_e2 = tf.nn.embedding_lookup(self.ent2_embeddings, predict_h)
predict_t_e2 = tf.nn.embedding_lookup(self.ent2_embeddings, predict_t)
predict_r_e2 = tf.nn.embedding_lookup(self.rel2_embeddings, predict_r)
self.predict = -tf.reduce_sum(self._calc(predict_h_e1, predict_h_e2, predict_t_e1, predict_t_e2, predict_r_e1, predict_r_e2), 1, keep_dims = True)
``` |
{
"source": "joelcarlss/IoT-autocurtains",
"score": 3
} |
#### File: IoT-autocurtains/thing/curtain.py
```python
from machine import Pin, ADC
import time
class Curtain:
def __init__(self, uPin, dPin):
self.bottomLevel = 1543
self.currentLevel = 45
self.lowestAllowedPercent = 90
self.upPin = Pin(uPin, mode=Pin.OUT)
self.downPin = Pin(dPin, mode=Pin.IN)
def goToPercent(self, amout):
if amout > self.lowestAllowedPercent and amout < 0:
return
def getCurrentPercent(self):
return int(self.currentLevel / self.bottomLevel * 100)
def getValueFromPercent(self, percent):
return int(self.bottomLevel * (percent / 100))
def setLevel(self, level):
if level >= 0 and level <= (self.bottomLevel * (self.lowestAllowedPercent / 100)):
if level > self.currentLevel:
self.goDownToLevel(level)
else:
self.goUpToLevel(level)
def setPercent(self, percent):
if percent > self.lowestAllowedPercent or percent < 0:
return
level = self.getValueFromPercent(percent)
self.setLevel(level)
def goDownToLevel(self, level):
while (self.currentLevel < level):
self.stepDown()
self.currentLevel += 1
def goUpToLevel(self, level):
while (self.currentLevel > level):
self.stepUp()
self.currentLevel -= 1
def stepDown(self):
print("going DOWN form {}". format(self.currentLevel))
time.sleep(0.1)
def stepUp(self):
print("going UP form {}".format(self.currentLevel))
time.sleep(0.1)
```
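A hypothetical on-device usage sketch: the pin names match those used in main.py below, but MicroPython's `machine` module is only available on the board, so this is illustrative rather than something to run on a desktop. It lowers the curtain halfway, reports the position, then raises it again.
```python
from curtain import Curtain

curtain = Curtain('P10', 'P11')
curtain.setPercent(50)                 # step down to 50 % of bottomLevel
print(curtain.getCurrentPercent())
curtain.setPercent(0)                  # step back up to the top
```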
#### File: IoT-autocurtains/thing/joystick.py
```python
from machine import Pin, ADC
import time
# Class made by <NAME>
class Joystick:
# neutral position (0-1023)
xNeutral = 490
yNeutral = 467
def __init__(self, xPin, yPin, zPin):
adc = ADC(bits=10)
self.xPin = adc.channel(pin=xPin, attn=ADC.ATTN_11DB)
self.yPin = adc.channel(pin=yPin, attn=ADC.ATTN_11DB)
self.zPin = Pin(zPin, mode=Pin.IN, pull=Pin.PULL_UP)
def getXValue(self):
return self.xPin.value() - self.xNeutral
def getYValue(self):
return self.yPin.value() - self.yNeutral
def getZValue(self):
return not self.zPin.value()
joystick = Joystick('P17', 'P16', 'P10')
while True:
print("X: %s" % (joystick.getXValue()))
print("Y: %s" % (joystick.getYValue()))
print(joystick.getZValue())
print('-' * 10)
time.sleep(0.1)
```
#### File: IoT-autocurtains/thing/main.py
```python
from mqtt import MQTTClient
import machine
import time
import ujson
from curtain import Curtain
c = Curtain('P10', 'P11')
def sub_cb(topic, msg):
try:
percent = int(msg)
c.setPercent(percent)
except Exception as e:
print(e)
with open('env.json') as fp:
data = ujson.load(fp)
print(data["mqtt"]["username"])
client = MQTTClient("solarfruit14", "io.adafruit.com",
user=data["mqtt"]["username"], password=data["mqtt"]["password"], port=1883)
client.set_callback(sub_cb)
client.connect()
client.subscribe(topic="joelcarlss/feeds/autocurtain")
print('Sending current value')
client.publish(topic="joelcarlss/feeds/autocurtain", msg="0")
print("Listening")
while True:
client.check_msg()
time.sleep(2)
``` |
{
"source": "joelcarranza/gpxpy",
"score": 3
} |
#### File: gpxpy/tests/gpxpy_test.py
```python
from gpxpy import *
import unittest
import StringIO
import math
import gpxpy.tests
import datetime
class ParseTest(unittest.TestCase):
def test_parse_track1(self):
gpx = gpxpy.tests.load('track-1.gpx')
# parsed correctly - 1 track
self.assertEquals(len(gpx.tracks),1)
self.assertEquals(len(gpx.waypoints),0)
self.assertEquals(len(gpx.routes),0)
trk = gpx.tracks[0]
self.assertEquals(trk.name,"Example GPX Document")
# 1 segment, 3 points
self.assertEquals(len(trk),1)
self.assertEquals(len(list(trk.points())),3)
seg = trk[0]
self.assertEquals(len(seg),3)
# first waypoint
wpt = seg[0]
self.assertEquals(wpt.lat,47.644548)
self.assertEquals(wpt.lon,-122.326897)
self.assertEquals(wpt.ele,4.46)
# test waypoints in track
for p in trk.points():
assert p.lat is not None
assert p.lon is not None
assert p.time is not None
assert p.ele is not None
assert p.name is None
def test_parse_route1(self):
gpx = gpxpy.tests.load('route-1.gpx')
# parsed correctly - 1 route,19 pts
self.assertEquals(len(gpx.tracks),0)
self.assertEquals(len(gpx.waypoints),0)
self.assertEquals(len(gpx.routes),1)
rte = gpx.routes[0]
self.assertEquals(rte.name,"Oregon to Utah")
self.assertEquals(len(rte),19)
# names, no times, no elevation
for p in rte:
assert p.name is not None
assert p.lat is not None
assert p.lon is not None
assert p.time is None
assert p.ele is None
def test_parse_wpt1(self):
gpx = gpxpy.tests.load('waypoints-1.gpx')
        # parsed correctly - 7 waypoints
self.assertEquals(len(gpx.tracks),0)
self.assertEquals(len(gpx.waypoints),7)
self.assertEquals(len(gpx.routes),0)
# names, no times, no elevation
for p in gpx.waypoints:
assert p.name is not None
assert p.cmt is not None
assert p.desc is not None
assert p.sym is not None
assert p.lat is not None
assert p.lon is not None
assert p.time is not None
assert p.ele is not None
class TrackTest(unittest.TestCase):
def setUp(self):
self.gpx = gpxpy.tests.load('track-2.gpx')
self.track = self.gpx.tracks[0]
self.seg = self.track[0]
def test_timestamp(self):
assert self.track.timespan() is not None
assert self.seg.timespan() is not None
self.assertEquals(self.track.timespan(),self.seg.timespan())
ts = self.seg.timespan()
assert ts[0] < ts[1]
def test_bounds(self):
assert self.track.bounds() is not None
assert self.seg.bounds() is not None
self.assertEquals(self.track.bounds(),self.seg.bounds())
b = self.seg.bounds()
assert b[0] < b[2]
assert b[1] < b[3]
def test_length(self):
assert self.track.length() is not None
assert self.seg.length() is not None
self.assertEquals(self.track.length(),self.seg.length())
assert self.track.length() > 0
def test_filter(self):
self.gpx.filter(lambda x:True)
self.assertEquals(len(self.gpx.tracks),1)
self.assertEquals(len(self.track),1)
self.gpx.filter(lambda x:False)
self.assertEquals(len(self.gpx.waypoints),0)
self.assertEquals(len(self.gpx.tracks),0)
self.assertEquals(len(self.gpx.routes),0)
def test_split(self):
self.assertEquals(len(self.track),1)
        # TODO: is the sense of split right here?
self.track.split(lambda p0,p1:p0.time.hour == p1.time.hour)
self.assertEquals(len(self.track),2)
def test_join(self):
# split apart into segments
i = 0
gpx1 = GPX()
while i + 10 < len(self.seg):
pts = self.seg[i:i+10]
gpx1.new_track(points=pts)
i += 10
assert len(gpx1.tracks) > 1
# one big join
gpx1.join()
assert len(gpx1.tracks) == 1
# TODO need to test multicondition join
class RouteTest(unittest.TestCase):
def setUp(self):
self.gpx = gpxpy.tests.load('route-1.gpx')
self.route = self.gpx.routes[0]
def test_timestamp(self):
assert self.route.timespan() is None
def test_bounds(self):
b = self.route.bounds()
assert b is not None
assert b[0] < b[2]
assert b[1] < b[3]
def test_length(self):
l = self.route.length()
assert l is not None
assert l > 0
def test_filter(self):
self.gpx.filter(lambda x:True)
self.assertEquals(len(self.gpx.routes),1)
self.assertEquals(len(self.route),19)
self.gpx.filter(lambda x:False)
self.assertEquals(len(self.gpx.routes),0)
class WaypointTest(unittest.TestCase):
def test_dist(self):
# result taken from http://en.wikipedia.org/wiki/Great-circle_distance
d = Waypoint(36.12,-86.67).dist(Waypoint(33.94,-118.40))
self.assertAlmostEquals(d,2887260,0)
class WriteTest(unittest.TestCase):
def test_write(self):
g = GPX()
g.waypoints.append(Waypoint(47.644548,-122.326897))
p = Path()
p.append(Waypoint(47.644548,-122.326897))
t = Track()
t.append(p)
g.tracks.append(t)
s = StringIO.StringIO()
g.write(s)
print s.getvalue()
g2 = GPX()
g2.load(StringIO.StringIO(s.getvalue()))
self.assertEquals(len(g2.waypoints),1)
if __name__ == "__main__":
unittest.main()
```
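The great-circle check in `WaypointTest` above doubles as the shortest usage example of the library (Python 2, like the tests); the expected distance is the value asserted in the test itself.

```python
# Illustrative snippet mirroring WaypointTest: great-circle distance between its two points.
from gpxpy import Waypoint

d = Waypoint(36.12, -86.67).dist(Waypoint(33.94, -118.40))
print(round(d))   # ~2887260 meters
```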
#### File: gpxpy/tools/tokml.py
```python
import sys
import argparse
import isodate
from gpxpy import GPX
import functools
import xml.etree.ElementTree as ET
ET._namespace_map['http://www.google.com/kml/ext/2.2'] = 'gx'
# TODO: this should go into tokml!
# Taken from http://effbot.org/zone/element-builder.htm
class _K(object):
def __call__(self, tag, *children, **attrib):
elem = ET.Element(tag)
for key,value in attrib.items():
c = ET.SubElement(elem,key)
c.text = str(value)
for item in children:
if isinstance(item, dict):
elem.attrib.update(item)
elif isinstance(item, basestring):
if len(elem):
elem[-1].tail = (elem[-1].tail or "") + item
else:
elem.text = (elem.text or "") + item
elif ET.iselement(item):
elem.append(item)
else:
raise TypeError("bad argument: %r" % item)
return elem
def __getattr__(self, tag):
return functools.partial(self, tag)
# create factory object
K = _K()
def _wptstring(wpt):
return "%f,%f,0" % (wpt.lon,wpt.lat)
class KMLWriter():
    """Writes GPX waypoints, tracks, and routes into a KML element tree."""
def __init__(self,root):
self._root = [root]
self.gxTracks = True
def document(self,**attr):
attr = self._fattr(**attr)
f = K.Document(**attr)
self.append(f)
self._root.append(f)
def folder(self,name,**attr):
attr = self._fattr(name=name,**attr)
f = K.Folder(**attr)
self.append(f)
self._root.append(f)
def parent(self):
self._root.pop()
def append(self,el):
self._root[-1].append(el)
def _fattr(self,name=None,description=None,style=None):
result = {}
if name is not None:
result['name'] = name
if description is not None:
result['description'] = description
if style is not None:
result['styleUrl'] = style
return result
def waypoint(self,wpt,**attr):
if 'name' not in attr:
attr['name'] = wpt.name
if 'style' not in attr:
attr['style'] = "#gpx-waypoint"
if 'description' not in attr:
attr['description'] = wpt.desc or ''
attr = self._fattr(**attr)
el = K.Placemark(
K.Point(coordinates=_wptstring(wpt)),
**attr)
if wpt.time:
el.append(K.TimeStamp(when=isodate.datetime_isoformat(wpt.time)))
self.append(el)
def lineStyle(self,id,color,width=1,labelColor=None, labelScale=None):
style = K.Style(dict(id=id))
style.append(K.LineStyle(color=color,width=width))
if labelColor is not None or labelScale is not None:
style.append(K.LabelStyle(color=color if labelColor else "ffffffff",scale=labelScale if labelScale else 1.0))
self.append(style)
def iconStyle(self,id,href,color=None,scale=1,labelColor=None,labelScale=None):
attr = dict(scale=scale)
if color is not None:
attr['color'] = color
style = K.Style(dict(id=id))
style.append(K.IconStyle(K.Icon(href=href),**attr))
if labelColor is not None or labelScale is not None:
style.append(K.LabelStyle(color=color if labelColor else "ffffffff",scale=labelScale if labelScale else 1.0))
self.append(style)
def track_path(self,pl,points):
if self.gxTracks:
trk = ET.SubElement(pl,'{http://www.google.com/kml/ext/2.2}Track')
for w in points:
ET.SubElement(trk,'when').text = isodate.datetime_isoformat(w.time)
for w in points:
ET.SubElement(trk,'{http://www.google.com/kml/ext/2.2}coord').text = " ".join(map(str,w.tuple3d()))
else:
pl.append(K.LineString(coordinates="\n".join(map(_wptstring,points))))
def track(self,track,**attr):
if 'name' not in attr:
attr['name'] = track.name
if 'style' not in attr:
attr['style'] = "#gpx-track"
if len(track) == 1:
# TODO: supply
fattr = self._fattr(**attr)
pl = K.Placemark(
**fattr
)
self.track_path(pl,list(track.points()))
self.append(pl)
else:
self.folder(track.name)
lastp = None
for s in track:
# TODO: number and name!
fattr = self._fattr(**attr)
pl = K.Placemark(
**fattr
)
self.track_path(pl,list(s))
self.append(pl)
if lastp:
pl = K.Placemark(styleUrl='#gpx-track-missing')
self.track_path(pl,[lastp,s[0]])
self.append(pl)
lastp = s[-1]
self.parent()
def route(self,rte,**attr):
if 'name' not in attr:
attr['name'] = rte.name
if 'style' not in attr:
attr['style'] = "#gpx-route"
attr = self._fattr(**attr)
self.append(K.Placemark(
K.LineString(coordinates="\n".join(map(_wptstring,rte.points()))),
**attr
))
def gpx(self,gpx,createFolders=True):
if gpx.waypoints:
if createFolders:
self.folder("Waypoints")
for w in gpx.waypoints:
self.waypoint(w)
if createFolders:
self.parent()
if gpx.tracks:
if createFolders:
self.folder("Tracks")
for t in gpx.tracks:
self.track(t)
if createFolders:
self.parent()
if gpx.routes:
if createFolders:
self.folder("Routes")
for r in gpx.routes:
self.route(r)
if createFolders:
self.parent()
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
_indent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def parse_color(colorstring):
"parse a string of format RRGGBB(AA)?"
if colorstring[0] == '#': colorstring = colorstring[1:]
if len(colorstring) != 6 and len(colorstring) != 8:
raise ValueError, "input #%s is not in #RRGGBB(AA)? format" % colorstring
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:6]
if len(colorstring) > 6:
a = colorstring[6:]
else:
a = 'ff'
return a+b+g+r
def run():
parser = argparse.ArgumentParser(description='Generate KML from a GPX file')
parser.add_argument('-i', metavar='file',
        type=argparse.FileType('r'),default=sys.stdin,help="GPX file to process. If none is specified STDIN will be used")
parser.add_argument('-o', metavar='file',
type=argparse.FileType('w'),default=sys.stdout,help="file name of resulting KML file. If none is specified STDOUT will be used")
parser.add_argument('--kml-name',dest='kmlname')
parser.add_argument('--kml-desc',dest='kmldesc')
# TODO: move these defaults to constants at top of file
parser.add_argument('-wpt-icon',dest='wpticon',
default='http://maps.google.com/mapfiles/ms/micons/ylw-pushpin.png')
parser.add_argument('-wpt-scale',dest='wptscale',type=float,default=1.0)
# TODO: this should support a list of colors
# which we rotate through
parser.add_argument('-track-color',dest='trkcolor',type=parse_color,default='#ff7e00')
parser.add_argument('-track-width',dest='trkwidth',type=int,default=3)
# TODO: this should support a list of colors
# which we rotate through
parser.add_argument('-route-color',dest='routecolor',type=parse_color,default='#ff7e00')
parser.add_argument('-route-width',dest='routewidth',type=int,default=3)
args = parser.parse_args()
gpx = GPX()
gpx.load(args.i)
kml = K.kml(dict(xmlns='http://www.opengis.net/kml/2.2'))
    w = KMLWriter(kml)
    w.document(name=args.kmlname,description=args.kmldesc)
    w.iconStyle("gpx-waypoint",args.wpticon,scale=args.wptscale)
w.lineStyle("gpx-track",args.trkcolor,args.trkwidth)
w.lineStyle("gpx-track-missing","cccccccc",args.trkwidth)
w.lineStyle("gpx-route",args.routecolor,args.routewidth)
w.gpx(gpx)
_indent(kml)
ET.ElementTree(kml).write(args.o)
if __name__ == "__main__":
run()
```
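`parse_color` above reorders web-style `#RRGGBB(AA)` strings into KML's `aabbggrr` byte order. A quick sketch of that conversion for the script's default track color (illustrative; run under Python 2, since the module uses Python 2 syntax, and the import path assumes the repo layout shown in the file header plus its dependencies):

```python
# Illustrative only: demonstrates the channel reordering done by parse_color.
from gpxpy.tools.tokml import parse_color

print(parse_color('#ff7e00'))    # -> 'ff007eff' (alpha defaults to ff, then bb gg rr)
print(parse_color('ff7e00cc'))   # -> 'cc007eff' (an explicit alpha stays in front)
```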
#### File: gpxpy/gpxpy/xmlutil.py
```python
from xml.etree.cElementTree import Element
import pytz
# this is going to fail on 2.5???
import isodate
# Taken from: http://infix.se/2007/02/06/gentlemen-indent-your-xml
def indent(elem, level=0):
"Indents an ElementTree"
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
indent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class XAttr(object):
"""Really simple model for dealing with xml"""
def __init__(self, name,elname=None,type='s',attr=False):
self.name = name
self.elname = elname if elname else name
self.type = type
self.attr = attr
def tostr(self,value):
if self.type == 'd':
text = isodate.datetime_isoformat(value.astimezone(pytz.utc))
else:
text = str(value)
return text
def fromstr(self,text):
type = self.type
if type == 's':
value = text
elif type == 'd':
value = isodate.parse_datetime(text).astimezone(pytz.utc)
elif type == 'i':
value = int(text)
elif type == 'n':
value = float(text)
else:
            raise ValueError("Unknown format")
return value
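    # Example (illustrative): XAttr('ele', type='n').fromstr('4.46') returns the
    # float 4.46, and tostr(4.46) gives back '4.46'; type 'd' round-trips
    # timezone-aware datetimes through isodate and pytz.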
def init(self,values,attrs):
for attr in attrs:
if attr.name in values:
setattr(self,attr.name,values[attr.name])
else:
setattr(self,attr.name,None)
def parse(ns,el,attrs):
"parse from XML element to construct model"
model = dict()
for attr in attrs:
value = None
text = None
if attr.attr:
text = el.attrib[attr.elname]
else:
child = el.find("{%s}%s" % (ns,attr.elname))
if child is not None:
text = child.text
if text:
model[attr.name] = attr.fromstr(text)
return model
def write(el,model,attrs):
"construct element representing model from attributes"
for attr in attrs:
value = getattr(model,attr.name)
if value is not None:
text = attr.tostr(value)
if attr.attr:
el.attrib[attr.elname] = text
else:
c = Element(attr.elname)
c.text = text
el.append(c)
``` |
{
"source": "joelchooks/CivicTechExchange",
"score": 2
} |
#### File: civictechprojects/helpers/context_preload.py
```python
from django.conf import settings
from urllib.parse import urljoin, urlparse
from civictechprojects.models import Event
from civictechprojects.caching.cache import ProjectCache, GroupCache
from common.helpers.constants import FrontEndSection
from common.helpers.front_end import section_url
from common.helpers.redirectors import RedirectTo
from common.helpers.request_helpers import url_params
def about_project_preload(context, request):
context = default_preload(context, request)
query_args = url_params(request)
project_id = query_args['id']
project_json = ProjectCache.get(project_id)
if project_json is not None:
context['title'] = project_json['project_name'] + ' | DemocracyLab'
context['description'] = project_json['project_short_description'] or project_json['project_description'][:300]
if 'project_thumbnail' in project_json:
context['og_image'] = project_json['project_thumbnail']['publicUrl']
else:
print('Failed to preload project info, no cache entry found: ' + project_id)
return context
def about_event_preload(context, request):
context = default_preload(context, request)
query_args = url_params(request)
event_id = query_args['id']
event = Event.get_by_id_or_slug(event_id)
event_json = event.hydrate_to_json()
if event_json is not None:
context['title'] = event_json['event_name'] + ' | DemocracyLab'
context['description'] = event_json['event_short_description']
if 'event_thumbnail' in event_json:
context['og_image'] = event_json['event_thumbnail']['publicUrl']
slug_or_id = event.event_slug or event.id
context['canonical_url'] = section_url(FrontEndSection.AboutEvent, {'id': slug_or_id})
else:
print('Failed to preload event info, no cache entry found: ' + event_id)
return context
def about_group_preload(context, request):
context = default_preload(context, request)
query_args = url_params(request)
group_id = query_args['id']
group_json = GroupCache.get(group_id)
if group_json is not None:
context['title'] = group_json['group_name'] + ' | DemocracyLab'
context['description'] = group_json['group_short_description']
if 'group_thumbnail' in group_json:
context['og_image'] = group_json['group_thumbnail']['publicUrl']
else:
print('Failed to preload group info, no cache entry found: ' + group_id)
return context
def companies_preload(context, request):
context = default_preload(context, request)
context['title'] = 'DemocracyLab | Corporate Engagement'
context['description'] = 'Do well by doing good! Engage employees at custom events to build culture and spark innovation. Differentiate your brand by sponsoring our public hackathons.'
return context
def about_us_preload(context, request):
context = default_preload(context, request)
context['title'] = 'DemocracyLab | About'
context['description'] = 'Learn About democracyLab, the nonprofit connecting skilled individuals to tech-for-good projects.'
return context
def donate_preload(context, request):
context = default_preload(context, request)
context['title'] = 'Donate | DemocracyLab'
context['description'] = 'Your donation empowers people who use technology for public good by connecting tech-for-good projects to skilled volunteers and socially responsible companies.'
return context
def edit_profile_preload(context, request):
context = default_preload(context, request)
context['title'] = 'Update User Profile | DemocracyLab'
context['description'] = 'Update User Profile page'
return context
def create_event_preload(context, request):
context = default_preload(context, request)
context['title'] = 'Create an Event | DemocracyLab'
context['description'] = 'Create event page'
return context
def my_projects_preload(context, request):
context = default_preload(context, request)
context['title'] = 'My Projects | DemocracyLab'
context['description'] = 'My Projects page'
return context
def my_groups_preload(context, request):
context = default_preload(context, request)
context['title'] = 'My Groups | DemocracyLab'
context['description'] = 'My Groups page'
return context
def my_events_preload(context, request):
context = default_preload(context, request)
context['title'] = 'My Events | DemocracyLab'
context['description'] = 'My Events page'
return context
def videos_preload(context, request):
context = default_preload(context, request)
if settings.VIDEO_PAGES:
query_args = url_params(request)
video_id = query_args['id']
if video_id in settings.VIDEO_PAGES:
video_json = settings.VIDEO_PAGES[video_id]
context['YOUTUBE_VIDEO_URL'] = video_json['video_url']
if 'video_description' in video_json:
context['description'] = video_json['video_description']
if 'video_thumbnail' in video_json:
context['og_image'] = video_json['video_thumbnail']
else:
print('Redirecting invalid video id: ' + video_id)
raise RedirectTo(section_url(FrontEndSection.VideoOverview, {'id': 'overview'}))
return context
def default_preload(context, request):
context['title'] = 'DemocracyLab'
context['description'] = 'Everyone has something to contribute to the technical solutions society needs. ' \
'Volunteer today to connect with other professionals volunteering their time.'
context['og_type'] = 'website'
context['og_image'] = settings.STATIC_CDN_URL + '/img/Democracylab_is_a_global_volunteer_tech_for_good_nonprofit.png'
url = settings.PROTOCOL_DOMAIN + request.get_full_path()
# Remove parameters for canonical urls by default
context['canonical_url'] = urljoin(url, urlparse(url).path)
return context
preload_urls = [
{'section': FrontEndSection.AboutProject.value, 'handler': about_project_preload},
{'section': FrontEndSection.AboutEvent.value, 'handler': about_event_preload},
{'section': FrontEndSection.EditProfile.value, 'handler': edit_profile_preload},
{'section': FrontEndSection.AboutUs.value, 'handler': about_us_preload},
{'section': FrontEndSection.CreateEvent.value, 'handler': create_event_preload},
{'section': FrontEndSection.MyProjects.value, 'handler': my_projects_preload},
{'section': FrontEndSection.MyGroups.value, 'handler': my_groups_preload},
{'section': FrontEndSection.MyEvents.value, 'handler': my_events_preload},
{'section': FrontEndSection.Donate.value, 'handler': donate_preload},
{'section': FrontEndSection.AboutGroup.value, 'handler': about_group_preload},
{'section': FrontEndSection.Companies.value, 'handler': companies_preload},
{'section': FrontEndSection.VideoOverview.value, 'handler': videos_preload}
]
def context_preload(section, request, context):
handler = next((preload_url['handler'] for preload_url in preload_urls if preload_url['section'] == section), default_preload)
return handler(context, request)
```
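`context_preload` above is a small table-driven dispatch: look up the handler registered for the requested front-end section and fall back to `default_preload` otherwise. A stripped-down, self-contained sketch of that pattern (the section names and handlers here are illustrative, not DemocracyLab code):

```python
# Minimal illustration of the dispatch pattern used by context_preload.
def default_handler(context, request):
    context['title'] = 'Default'
    return context

def donate_handler(context, request):
    context['title'] = 'Donate'
    return context

handlers = [
    {'section': 'Donate', 'handler': donate_handler},
]

def dispatch(section, request, context):
    handler = next((h['handler'] for h in handlers if h['section'] == section),
                   default_handler)
    return handler(context, request)

print(dispatch('Donate', None, {}))   # {'title': 'Donate'}
print(dispatch('Other', None, {}))    # {'title': 'Default'}
```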
#### File: common/helpers/date_helpers.py
```python
from datetime import datetime
from enum import Enum
class DateTimeFormats(Enum):
UTC_DATETIME = '%Y-%m-%dT%H:%M:%SZ'
DATE_LOCALIZED = '%x'
DATEPICKER_DATE = '%a %b %d %Y %H:%M:%S GMT%z'
MONTH_DD_YYYY = '%B %d, %Y'
def datetime_field_to_datetime(field):
    # For some reason Django's DateTimeFields are sometimes rendered as plain strings, so we need to parse them back into datetimes
if isinstance(field, str):
return datetime.strptime(field, DateTimeFormats.UTC_DATETIME.value)
else:
return field
def datetime_to_string(date_time, date_time_format):
return date_time.strftime(date_time_format.value)
def parse_front_end_datetime(date_str):
# Example date string: Mon Feb 03 2020 18:30:00 GMT-0800 (Pacific Standard Time)
# First chop off extraneous timezone name at the end
pruned_date_str = date_str[0: date_str.index('(') - 1]
# Parse according to format
return datetime.strptime(pruned_date_str, DateTimeFormats.DATEPICKER_DATE.value)
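# Example (illustrative): parse_front_end_datetime('Mon Feb 03 2020 18:30:00 '
# 'GMT-0800 (Pacific Standard Time)') yields a datetime that
# datetime_to_string(dt, DateTimeFormats.MONTH_DD_YYYY) renders as
# 'February 03, 2020' (the %z directive needs Python 3 to parse the offset).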
``` |
{
"source": "joelcolinschwartz/ReflectDirect",
"score": 3
} |
#### File: joelcolinschwartz/ReflectDirect/exoplanetsubspots.py
```python
import numpy as np
pi = np.pi
def sub_observerstellar(times,worb,wrot,inc,obl,sol,longzero=0):
"""Calculates an exoplanet's sub-observer and -stellar locations over time.
Calculates time-dependent, trigonometric values of an exoplanet's sub-
observer and sub-stellar locations when on a circular orbit. Planet
coordinates are colatitude (theta) and longitude (phi). Orbital phase
is zero when planet is opposite star from observer (superior conjunction)
and increases CCW when system is viewed above star's North pole. See
Appendix A of `Schwartz et al. (2016) <https://arxiv.org/abs/1511.05152>`_.
Args:
times (1d array, int, or float):
Discrete time values in any unit, with total number *n_time*.
At t=0 planet is at superior conjunction.
worb (int or float):
Orbital angular frequency in radians per unit time. Positive
values are prograde orbits (CCW), negative are retrograde (CW).
wrot (int or float):
Rotational angular frequency in radians per unit time.
For prograde orbits, positive values are prograde rotation,
negative are retrograde (vice versa for retrograde orbits).
inc (int or float):
Inclination of orbital plane to the observer, in radians.
Zero is face-on, pi/2 is edge-on.
obl (int or float):
Obliquity relative to the ``worb`` vector, in radians.
This is the tilt of the planet's spin axis. Zero is North
pole up, pi/2 is maximal tilt, pi is North pole down.
sol (int or float):
The orbital phase of Northern Summer solstice, in radians.
If the ``wrot`` vector is projected into the orbital plane,
then this phase is where that projection points at the star.
longzero (int or float):
Longitude of the sub-observer point when t=0, in radians.
Default is zero.
Returns:
trigvals (ndarray):
Array of trigonometric values with shape (8, *n_time*). First
dimension is organized as:
- sin theta_obs
- cos theta_obs
- sin phi_obs
- cos phi_obs
- sin theta_st
- cos theta_st
- sin phi_st
- cos phi_st
"""
if isinstance(times,np.ndarray) and (times.size == times.shape[0]):
timeA = times
N_time = timeA.size # Number of time steps from input array
elif isinstance(times,(int,float)):
timeA = np.array([times])
N_time = 1
else:
print('sub_observerstellar aborted: input times should be ndarray (1D), int, or float.')
return
phaseA = worb*timeA # Orbital phases
phiGen = wrot*timeA - longzero # General expression for PhiObs (without overall negative sign)
cThObs = (np.cos(inc)*np.cos(obl)) + (np.sin(inc)*np.sin(obl)*np.cos(sol))
cThObsfull = np.repeat(cThObs,N_time)
sThObs = (1.0 - (cThObs**2.0))**0.5
sThObsfull = np.repeat(sThObs,N_time)
cThSt = np.sin(obl)*np.cos(phaseA - sol)
sThSt = (1.0 - (cThSt**2.0))**0.5
sol_md = (sol % (2.0*pi))
inc_rd = round(inc,4) # Rounded inclination, for better comparison
p_obl_rd = round((pi - obl),4) # Rounded 180 degrees - obliquity, for better comparison
cond_face = (((inc == 0) or (inc == pi)) and ((obl == 0) or (obl == pi))) # Pole-observer 1: face-on inclination
cond_north = ((sol_md == 0) and ((inc == obl) or (inc_rd == -p_obl_rd))) # Ditto 2: North pole view
cond_south = ((sol == pi) and ((inc_rd == p_obl_rd) or (inc == -obl))) # Ditto 3: South pole view
if cond_face or cond_north or cond_south:
if (obl == (pi/2.0)):
aII = np.sin(phaseA)*np.cos(sol) # Special "double-over-pole" time-dependent factor
cPhiSt = np.ones(N_time)
sPhiSt = np.zeros(N_time)
            g_i = (sThSt != 0) # Excluding "star-over-pole" situations (g_i are "good indices")
cPhiSt[g_i] = (-np.sin(phiGen[g_i])*aII[g_i])/sThSt[g_i]
sPhiSt[g_i] = (-np.cos(phiGen[g_i])*aII[g_i])/sThSt[g_i]
else:
aI = np.cos(phaseA)*np.cos(obl) # Alternate "observer-over-pole" time-dependent factor
bI = np.sin(phaseA) # Ditto
cPhiSt = ((np.cos(phiGen)*aI) + (np.sin(phiGen)*bI))/sThSt
sPhiSt = ((-np.sin(phiGen)*aI) + (np.cos(phiGen)*bI))/sThSt
else:
a = (np.sin(inc)*np.cos(phaseA)) - (cThObs*cThSt) # Normal time-dependent factor
b = ((np.sin(inc)*np.sin(phaseA)*np.cos(obl) - np.cos(inc)*np.sin(obl)*np.sin(phaseA - sol))) # Ditto
if (obl == (pi/2.0)):
cPhiSt = np.ones(N_time)
sPhiSt = np.zeros(N_time)
            g_i = (sThSt != 0) # Excluding "star-over-pole" situations (g_i are "good indices")
cPhiSt[g_i] = ((np.cos(phiGen[g_i])*a[g_i]) + (np.sin(phiGen[g_i])*b[g_i]))/(sThObs*sThSt[g_i])
sPhiSt[g_i] = ((-np.sin(phiGen[g_i])*a[g_i]) + (np.cos(phiGen[g_i])*b[g_i]))/(sThObs*sThSt[g_i])
else:
cPhiSt = ((np.cos(phiGen)*a) + (np.sin(phiGen)*b))/(sThObs*sThSt)
sPhiSt = ((-np.sin(phiGen)*a) + (np.cos(phiGen)*b))/(sThObs*sThSt)
trigvals = np.stack((sThObsfull,cThObsfull,np.sin(-phiGen),np.cos(-phiGen),sThSt,cThSt,sPhiSt,cPhiSt))
return trigvals
```
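A minimal call sketch for `sub_observerstellar` above (illustrative; the period, spin ratio, and inclination echo the defaults used by `reflectdirect.py` below):

```python
# Illustrative only: one orbit of a nearly edge-on, zero-obliquity planet.
import numpy as np
import exoplanetsubspots as exoss

orbT = 24.0*360.0                        # orbital period in hours
times = np.linspace(0.0, orbT, 361)      # one full orbit, 361 time steps
worb = 2.0*np.pi/orbT                    # orbital angular frequency
wrot = 10.0*worb                         # 10 prograde spins per orbit
trig = exoss.sub_observerstellar(times, worb, wrot,
                                 np.radians(85), 0.0, 0.0)
print(trig.shape)                        # (8, 361): the eight trig series over time
```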
#### File: joelcolinschwartz/ReflectDirect/reflectdirect.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as pat
import ipywidgets as widgets
from pathlib import Path
from numpy.lib import stride_tricks
from scipy.special import sph_harm
from scipy.interpolate import RectBivariateSpline
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cbook import get_sample_data
from ipywidgets import Layout
from IPython.display import display as IPy_display
import exoplanetsubspots as exoss
pi = np.pi
def _rolling(vec,window):
"""Rolls a window over a vector and makes a new array."""
new_dim = ((vec.size - window + 1),window)
new_bytes = (vec.itemsize,vec.itemsize)
return stride_tricks.as_strided(vec,shape=new_dim,strides=new_bytes)
## Use RD folder's absolute path to load reliably, especially when making Sphinx docs.
folder_path = str(Path(__file__).parent.absolute())
kernel_widths_ = np.load(folder_path + '/kernel_width_values_all5deg.npy')[:-1,:,:,:19] # Pre-handling duplicates and unneeded
kernel_domcolats_ = np.load(folder_path + '/kernel_domcolat_values_all5deg.npy')[:-1,:,:,:19]
def _serial_shift(ary):
    """Rolls a window over an array and makes a new array."""
twice_ary = np.tile(ary,(2,1,1,1))
new_dim = (ary.shape[0],)+ary.shape
new_bytes = (ary.strides[0],)+ary.strides
return stride_tricks.as_strided(twice_ary,shape=new_dim,strides=new_bytes)
shifted_domcolats_ = _serial_shift(np.copy(kernel_domcolats_))
kernel_delta_domcolats_ = np.absolute(kernel_domcolats_[np.newaxis,:,:,:,:] - shifted_domcolats_)
phase_4mesh_,inc_4mesh_,ss_,oo_ = np.meshgrid(np.linspace(0,2*pi,73),np.linspace(0,pi/2,19),
np.linspace(0,2*pi,73),np.linspace(0,pi/2,19),indexing='ij')
del ss_,oo_
phase_4mesh_,inc_4mesh_ = phase_4mesh_[:-1,:,:,:],inc_4mesh_[:-1,:,:,:]
shifted_phase_4mesh_ = _serial_shift(np.copy(phase_4mesh_))
sol_2mesh_,obl_2mesh_ = np.meshgrid(np.linspace(0,2*pi,73),np.linspace(0,pi/2,19),indexing='ij')
colat_ticks_ = np.array([r'$0^{\circ}$',r'$45^{\circ}$',r'$90^{\circ}$',r'$135^{\circ}$',r'$180^{\circ}$'])
long_ticks_ = np.array([r'$-180^{\circ}$',r'$-90^{\circ}$',r'$0^{\circ}$',r'$90^{\circ}$',r'$180^{\circ}$'])
wlong_ticks_ = np.array([r'$0^{\circ}$',r'$25^{\circ}$',r'$50^{\circ}$',r'$75^{\circ}$',r'$100^{\circ}$'])
obl_ticks_ = np.array([r'$0^{\circ}$',r'$30^{\circ}$',r'$60^{\circ}$',''])
sol_ticks_ = np.array([r'$0^{\circ}$','',r'$90^{\circ}$','',r'$180^{\circ}$','',r'$270^{\circ}$',''])
relph_ticks_ = np.array([r'$-2^{\circ}$',r'$-1^{\circ}$',r'$0^{\circ}$',r'$1^{\circ}$',r'$2^{\circ}$'])
def _combine_2_colormaps(cm1,va1,vb1,n1,cm2,va2,vb2,n2,power,name):
"""Creates a new colormap by joining two others."""
c1 = cm1(np.linspace(va1,vb1,n1))
c2 = cm2(np.linspace(va2,vb2,n2))
C = np.vstack((c1,c2))
L = np.sum([0.2126,0.7152,0.0722]*C[:,0:3],axis=1)
lwi,Li = L.argmin(),np.indices(L.shape)[0]
d_Li = np.absolute(Li-lwi)
C[:,0:3] *= (d_Li[:,np.newaxis]/d_Li.max())**power
return LinearSegmentedColormap.from_list(name,C)
darkmid_BrBG_ = _combine_2_colormaps(cm.BrBG,0.4,0,128,cm.BrBG,0.99,0.59,128,0.5,'darkmid_BrBG_')
def _rotate_ccw_angle(X,Y,ang):
"""Rotates arrays CCW by an angle."""
S_ang,C_ang = np.sin(ang),np.cos(ang)
X_new = C_ang*X - S_ang*Y
Y_new = S_ang*X + C_ang*Y
return X_new,Y_new
def Geometry_Reference(ref_save=False,**kwargs):
"""Makes a reference diagram about exoplanetary systems.
.. image:: _static/geomref_example.png
:width: 60%
:align: center
For example, this shows how important angles are defined. See
Appendix A of
`Schwartz et al. (2016) <https://arxiv.org/abs/1511.05152>`_.
Args:
ref_save (bool):
Save the diagram as "geometry_reference.pdf" in the current
working directory. Default is False.
.. note::
Keywords are only used by the class :class:`DirectImaging_Planet`
for the interactive function :func:`Sandbox_Reflection()
<reflectdirect.DirectImaging_Planet.Sandbox_Reflection>`.
"""
## Default keywords
_active = kwargs.get('_active',False)
incD = kwargs.get('incD',85)
oblD = kwargs.get('oblD',0)
solD = kwargs.get('solD',0)
ratRO = kwargs.get('ratRO',10.0)
phaseD = kwargs.get('phaseD',[0])
ph_colors = kwargs.get('ph_colors',['k'])
name = kwargs.get('name','NONE')
reference = kwargs.get('reference',True) # Gets set to False by Geometry_Diagram
if _active:
comp_tweak,comp_siz = 0.04,'medium'
else:
plt.figure(figsize=(8,8))
comp_tweak,comp_siz = 0.03,'x-large'
plt.xlim([-1.5,1.5])
plt.ylim([-1.35,1.65])
plt.xticks([])
plt.yticks([])
plt.gca().set_aspect(1.0)
if reference:
here_incD,here_oblD,here_solD,here_ratRO,here_phaseD = 45,45,70,'N/A',225
else:
here_incD,here_oblD,here_solD,here_ratRO,here_phaseD = incD,oblD,solD,ratRO,phaseD
star_color = (1,0.75,0)
orbit = plt.Circle((0,0),radius=1,color='k',fill=False,zorder=0)
plt.gca().add_patch(orbit)
star = plt.Circle((0,0),radius=0.33,color=star_color,fill=True,zorder=1)
plt.gca().add_patch(star)
plt.plot([0,0],[-1.35,0],'k',lw=2,ls=':',zorder=-2)
compass = 1.15
plt.plot([0,0],[1.0,compass],'k',lw=1,ls='-',zorder=0)
plt.text(comp_tweak,compass,r'$0^{\circ}$',size=comp_siz,ha='left',va='bottom')
plt.plot([-1.0,-compass],[0,0],'k',lw=1,ls='-',zorder=0)
plt.text(-compass,comp_tweak,r'$90^{\circ}$',size=comp_siz,ha='right',va='bottom')
plt.text(-comp_tweak-0.01,-compass,r'$180^{\circ}$',size=comp_siz,ha='right',va='top')
plt.plot([1.0,compass],[0,0],'k',lw=1,ls='-',zorder=0)
plt.text(compass,-comp_tweak-0.01,r'$270^{\circ}$',size=comp_siz,ha='left',va='top')
axis_color = (0,0.9,0)
tri_x,tri_y = 0.2*np.array([0,0.5,-0.5]),0.2*np.array([1,-0.5,-0.5])
tsol_x,tsol_y = _rotate_ccw_angle(tri_x,tri_y,np.radians(here_solD+180))
sol_x,sol_y = _rotate_ccw_angle(0.67,0,np.radians(here_solD+90))
triang = pat.Polygon(np.array([sol_x+tsol_x,sol_y+tsol_y]).T,color=axis_color,fill=True,zorder=1)
plt.gca().add_patch(triang)
plt.plot([sol_x/0.67,sol_x],[sol_y/0.67,sol_y],color=axis_color,lw=2,ls='-',zorder=-1)
plt.plot([0,0],[1.0,0.8],'k',lw=1,ls='--',zorder=-2)
sang = pat.Arc((0,0),1.7,1.7,angle=90,theta1=0,theta2=here_solD,lw=1,color=(0.67,0.67,0),zorder=-3)
plt.gca().add_patch(sang)
cupx,cupy,to_ell = 1.12,1.16,0.49
plt.plot([-cupx,-cupx+to_ell],[cupy,cupy],'k',lw=2,ls=':',zorder=-1)
starup = plt.Circle((-cupx,cupy),radius=0.1,color=star_color,fill=True,zorder=0)
plt.gca().add_patch(starup)
ix,iy = np.array([0,0]),np.array([0,0.3])
plt.plot(-cupx+ix,cupy+iy,'k',lw=1,ls='--',zorder=-2)
if reference and not _active:
plt.plot(-cupx-iy,cupy+ix,'k',lw=1,ls='--',zorder=-2)
plt.text(-cupx+comp_tweak,cupy+iy[1],r'$0^{\circ}$',size=comp_siz,ha='left',va='top')
plt.text(-cupx-iy[1],cupy-comp_tweak,r'$90^{\circ}$',size=comp_siz,ha='left',va='top')
plt.text(-cupx+0.7*to_ell,cupy+0.02,'To\nobserver',size='medium',ha='center',va='bottom')
iy += [-0.3,0]
nix,niy = _rotate_ccw_angle(ix,iy,np.radians(here_incD))
plt.plot(-cupx+nix,cupy+niy,'k',zorder=0)
iang = pat.Arc((-cupx,cupy),0.4,0.4,angle=90,theta1=0,theta2=here_incD,lw=1,color=(0.67,0,0.67),zorder=-3)
plt.gca().add_patch(iang)
planet_color = '0.5'
plt.plot([cupx-to_ell,1.5],[cupy,cupy],'k',zorder=-1)
planet = plt.Circle((cupx,cupy),radius=0.15,color=planet_color,fill=True,zorder=1)
plt.gca().add_patch(planet)
ox,oy = np.array([0,0]),np.array([0,0.3])
plt.plot(cupx+ox,cupy+oy,'k',lw=1,ls='--',zorder=-2)
if reference and not _active:
plt.plot(cupx+ox,cupy-oy,'k',lw=1,ls='--',zorder=-2)
plt.text(cupx+comp_tweak,cupy+oy[1],r'$0^{\circ}$',size=comp_siz,ha='left',va='top')
plt.text(cupx-oy[1]-0.02,cupy-comp_tweak,r'$90^{\circ}$',size=comp_siz,ha='left',va='top')
plt.text(cupx+comp_tweak,cupy-oy[1],r'$180^{\circ}$',size=comp_siz,ha='left',va='bottom')
plt.text(cupx-0.5*to_ell,cupy+0.7*oy[1],'North',size='medium',ha='right',va='center')
oy += [-0.2,0]
nox,noy = _rotate_ccw_angle(ox,oy,np.radians(here_oblD))
plt.plot(cupx+nox,cupy+noy,c=axis_color,lw=2,zorder=0)
oang = pat.Arc((cupx,cupy),0.45,0.45,angle=90,theta1=0,theta2=here_oblD,lw=1,color=(0,0.67,0.67),zorder=-3)
plt.gca().add_patch(oang)
cex,cey = 1.5,1.65
iarc = pat.Ellipse((-cex,cey),2.0,2.0,lw=2,ec='0.75',fc=(1,0,1,0.05),zorder=-4)
plt.gca().add_patch(iarc)
oarc = pat.Ellipse((cex,cey),2.0,2.0,lw=2,ec='0.75',fc=(0,1,1,0.05),zorder=-4)
plt.gca().add_patch(oarc)
if _active:
n = 0
for p in here_phaseD:
if isinstance(p,(int,float)):
phx,phy = _rotate_ccw_angle(0,1,np.radians(p))
planet_loc = plt.Circle((phx,phy),radius=0.1,color=ph_colors[n],fill=True,zorder=1)
plt.gca().add_patch(planet_loc)
n += 1
else:
phx,phy = _rotate_ccw_angle(0,1,np.radians(here_phaseD))
planet_loc = plt.Circle((phx,phy),radius=0.1,color=planet_color,fill=True,zorder=1)
plt.gca().add_patch(planet_loc)
tex_y = 1.6
if _active:
tex_x,lab_siz = 0.65,'medium'
plt.text(-tex_x,tex_y,r'$%.1f^{\circ}$' % here_incD,color='k',size=lab_siz,ha='right',va='top')
plt.text(tex_x,tex_y,r'$%.1f^{\circ}$' % here_oblD,color='k',size=lab_siz,ha='left',va='top')
plt.text(0,0,'$%.1f^{\circ}$' % here_solD,color='k',size=lab_siz,ha='center',va='center')
plt.text(-0.95,-1.25,'Spins per orbit:' '\n' '$%.2f$' % here_ratRO,
color='k',size=lab_siz,ha='center',va='bottom')
plt.text(0,tex_y,'Geometry',color='k',size=lab_siz,ha='center',va='top')
elif reference:
tex_x,lab_siz = 1.02,'x-large'
plt.text(-0.5,0.32,'North',size='medium',ha='center',va='center')
plt.text(-tex_x,tex_y,'Inclination angle',color='k',size=lab_siz,ha='center',va='top')
plt.text(tex_x,tex_y,'Obliquity angle',color='k',size=lab_siz,ha='center',va='top')
plt.text(-0.67,0.08,'Solstice\nangle',color='k',size=lab_siz,ha='center',va='top')
plt.text(0,0,'Star',color='k',size=lab_siz,ha='center',va='center')
plt.text(-0.45,-0.55,'Orbital\nphase',color='k',size=lab_siz,ha='center',va='bottom')
plt.plot([-0.707,-0.45],[-0.707,-0.575],'k',lw=1,ls='--',zorder=-2)
rat_tex = r'$\omega_{\mathrm{rot}} \ / \ \omega_{\mathrm{orb}}$'
plt.text(-0.95,-1.25,'Spins per orbit:\n'+rat_tex+' is\n+ for prograde spins\n— for retrograde spins',
color='k',size=lab_siz,ha='center',va='bottom')
tim_tex = r'At $0^{\circ}$ phase:' '\n' '$t = nT_{\mathrm{orb}}$,' '\n' r'$n=0,\pm1,\pm2,...$'
plt.text(0.21,0.775,tim_tex,color='k',size=lab_siz,ha='center',va='top')
plt.text(0.6,-0.6,'Planet',color='k',size=lab_siz,ha='right',va='bottom')
connect = pat.Arc((0.5,0),0.5,1.3,angle=0,theta1=-70,theta2=85,lw=1,ls='--',color='0.75',zorder=-3)
plt.gca().add_patch(connect)
plt.text(1.3,-1.3,'Not to\nscale',color='k',size=lab_siz,ha='center',va='bottom',fontstyle='italic')
plt.text(0,tex_y,'Geometry\nReference',color='k',size=lab_siz,ha='center',va='top',weight='bold')
else:
tex_x,lab_siz = 1.02,'x-large'
plt.text(-tex_x,tex_y,r'Inclination: $%.1f^{\circ}$' % here_incD,color='k',size=lab_siz,ha='center',va='top')
plt.text(tex_x,tex_y,r'Obliquity: $%.1f^{\circ}$' % here_oblD,color='k',size=lab_siz,ha='center',va='top')
plt.text(0,0,'Solstice:' '\n' '$%.1f^{\circ}$' % here_solD,color='k',size=lab_siz,ha='center',va='center')
rat_tex = r'$\omega_{\mathrm{rot}} \ / \ \omega_{\mathrm{orb}} \ = \ %.2f$' % here_ratRO
plt.text(-0.95,-1.25,'Spins per orbit:\n'+rat_tex,color='k',size=lab_siz,ha='center',va='bottom')
plt.text(1.3,-1.3,'Not to\nscale',color='k',size=lab_siz,ha='center',va='bottom',fontstyle='italic')
plt.text(0,tex_y,'Geometry of\n{}'.format(name),color='k',size=lab_siz,ha='center',va='top')
plt.text(0.25,-1.25,'To observer',color='k',size=lab_siz,ha='left',va='bottom')
plt.plot([0,0.25],[-1.3,-1.25],'k',lw=1,ls='--',zorder=-2)
if reference and not _active:
plt.tight_layout()
if ref_save:
plt.savefig('geometry_reference.pdf')
plt.show()
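# Usage sketch (illustrative, not from the repo docs): render and optionally save
# the static reference diagram, e.g.
#     import reflectdirect as rd
#     rd.Geometry_Reference(ref_save=True)   # also writes geometry_reference.pdf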
class DirectImaging_Planet:
"""An exoplanet that is directly imaged using reflected starlight.
This class is based on the model, equations, and discussion of
`Schwartz et al. (2016) <https://arxiv.org/abs/1511.05152>`_,
S16 in the methods below. It has two sets of planetary
parameters, a primary and an alternate, that users control. These
sets make calling many of the class methods simple and consistent.
Several methods store figures that can be saved later.
Planet coordinates are colatitude and longitude. Orbital phase
is zero when planet is opposite star from observer and
increases CCW when system is viewed above star's North pole.
Methods:
Update Param Sets:
- :func:`Adjust_Geometry`
- :func:`Adjust_MotionTimes`
- :func:`Build_Amap`
- :func:`InvertFlipBlend_Amap`
- :func:`Setup_ProRet_Degeneracy`
Use Param Sets:
- :func:`Kernel_WidthDomColat`
- :func:`Light_Curves`
- :func:`SubOS_TimeDeg`
Visualize Param Sets:
- :func:`EquiRect_Amap`
- :func:`Geometry_Diagram`
- :func:`KChar_Evolve_Plot`
- :func:`Kernels_Plot`
- :func:`LightCurve_Plot`
- :func:`Orthographic_Viewer`
- :func:`Sandbox_Reflection` --- interactive
- :func:`SpinAxis_Constraints`
Other:
- :func:`Info_Printout`
- :func:`Kernel2D`
- :func:`KernelClat`
- :func:`KernelLong`
Attributes:
name (str):
Your exoplanet's name.
times (1d array):
Time array based on the primary orbital period.
n_clat (int):
Number of colatitudes for the planetary grid.
n_long (int):
Number of longitudes for the planetary grid.
clat_vec (1d array):
Colatitude vector, zero to 180 degrees.
long_vec (1d array):
Longitude vector, zero to 360 degrees and zero in center.
mono_long_vec (1d array):
Monotonic longitude vector, -180 to 180 degrees.
clats (2d array):
Colatitude array, based on ``clat_vec``.
longs (2d array):
Longitude array, based on ``long_vec``.
mono_longs (2d array):
Monotonic longitude array, based on ``mono_long_vec``.
delta_clat (float):
Gap between colatitudes.
delta_long (float):
Gap between longitudes.
cos_clats (2d array):
Cosine of colatitudes.
cos_longs (2d array):
Cosine of longitudes.
sin_clats (2d array):
Sine of colatitudes.
sin_longs (2d array):
Sine of longitudes.
Primary Params (append ``_b`` for Alternates):
albedos (2d array):
The planet's albedo values with shape (``n_clat``,
``n_long``).
incD (int or float):
Inclination of orbital plane to the observer, in degrees.
Zero is face-on, 90 is edge-on.
longzeroD (int or float):
Longitude of the sub-observer point when t=0, in degrees.
oblD (int or float):
Obliquity relative to the orbital angular frequency vector,
in degrees. This is the tilt of the planet's spin axis.
Zero is North pole up, 90 is maximal tilt, 180 is
North pole down.
orbT (int or float):
Orbital period of the planet in any time unit.
ratRO (int or float):
Ratio of the planet's rotational and orbital angular
frequencies. This is how many spins the planet makes
per orbit. Can be fractional, and negative numbers are
retrograde rotation.
solD (int or float):
The orbital phase of Northern Summer solstice, in degrees.
If the rotational angular frequency vector is projected
into the orbital plane, then this phase is where that
projection points at the star.
Stored Figures:
- fig_equi --- :func:`EquiRect_Amap`
- fig_geom --- :func:`Geometry_Diagram`
- fig_kchar --- :func:`KChar_Evolve_Plot`
- fig_kern --- :func:`Kernels_Plot`
- fig_light --- :func:`LightCurve_Plot`
- fig_orth --- :func:`Orthographic_Viewer`
- fig_sand --- :func:`Sandbox_Reflection`
- fig_spin --- :func:`SpinAxis_Constraints`
"""
def _odd_check(self,n,number,quality):
"""Makes sure your input number is odd."""
if (n % 2) == 1:
return n
else:
print('Input {} is even, added 1 to {}.'.format(number,quality))
return n + 1
def _colat_long(self,n_clat,n_long):
"""Sets up colatitude and longitude attributes."""
self.n_clat = self._odd_check(n_clat,'number of colatitudes','include the equator')
self.clat_vec = np.linspace(0,pi,self.n_clat)
self.n_long = self._odd_check(n_long,'number of longitudes','include the prime meridian')
self.mono_long_vec = np.linspace(-pi,pi,self.n_long)
self.long_vec = self.mono_long_vec % (2.0*pi)
self.clats,self.longs = np.meshgrid(self.clat_vec,self.long_vec,indexing='ij')
ignore,self.mono_longs = np.meshgrid(self.clat_vec,self.mono_long_vec,indexing='ij')
del ignore
self.delta_clat = pi/(self.n_clat - 1)
self.delta_long = 2.0*pi/(self.n_long - 1)
self.sin_clats = np.sin(self.clats)
self.cos_clats = np.cos(self.clats)
self.sin_longs = np.sin(self.longs)
self.cos_longs = np.cos(self.longs)
def _import_image(self,filename):
"""Imports a png image to make a brightness map."""
rawvalues = plt.imread(filename)
if rawvalues.ndim == 2:
return rawvalues
else:
return np.sum(rawvalues[:,:,:3]*[0.2126,0.7152,0.0722],axis=2)
def _pixel_bounds(self,i,skip):
"""Returns low and high limits for pixels."""
low = max(int(round((i-0.5)*skip)),0)
high = int(round((i+0.5)*skip))
if high == low:
high += 1
return low,high
def _convert_image_pixels(self,kind,img_values):
"""Converts an input image into a brightness map."""
rows,cols = img_values.shape
if kind in ['pngA','aryA']:
row_skip,col_skip = (rows-1)/(self.n_clat-1),(cols-1)/(self.n_long-1)
pre_albedos = np.zeros(self.clats.shape)
for r in np.arange(self.n_clat):
for c in np.arange(self.n_long):
r_low,r_high = self._pixel_bounds(r,row_skip)
c_low,c_high = self._pixel_bounds(c,col_skip)
pixel_sum = np.sum(img_values[r_low:r_high,c_low:c_high])
pre_albedos[r,c] = pixel_sum/((r_high-r_low)*(c_high-c_low))
elif kind in ['pngI','aryI']:
r_v,c_v = np.linspace(0,pi,rows),np.linspace(-pi,pi,cols)
img_interp = RectBivariateSpline(r_v,c_v,img_values)
pre_albedos = img_interp(self.clat_vec,self.mono_long_vec)
return pre_albedos
def _rolling_amap(self,image,n):
"""Rolls the longitudes of a brightness map."""
roll_image = np.copy(image)
roll_image[:,:-1] = np.roll(roll_image[:,:-1],n,axis=1)
roll_image[:,-1] = roll_image[:,0]
return roll_image
def _linear_convert(self,image,lims):
"""Converts values of an array into a given range."""
lower_img,upper_img = image.min(),image.max()
if lower_img != upper_img:
new_low,new_high = lims
linear_slope = (new_high - new_low)/(upper_img - lower_img)
new_image = linear_slope*(image - lower_img) + new_low
else:
new_image = image
return new_image
def _amap_average(self,image):
"""Calculates the mean value of a brightness map."""
return np.sum(image[:,:-1]*self.sin_clats[:,:-1])*self.delta_clat*self.delta_long/(4.0*pi)
def InvertFlipBlend_Amap(self,image='pri',into='alt',invert=False,flip='none',blend='none'):
"""Inverts, flips, and blends a given albedo map.
Args:
image (str or ndarray):
The source map. If string, can be
- 'pri' to use primary map (default),
- 'alt' to use alternate map.
Otherwise, an ndarry or values.
into (str):
Where the new map goes. Can be
- 'pri' for the primary map,
- 'alt' for the alternate map (default),
- 'none' to just return the map.
.. note::
If you try to put an ``image`` ndarray ``into`` the primary
or alternate map, it should have shape (``n_clat``,
``n_long``).
invert (bool):
Linearly change lower albedo values to higher
values and vice versa. Default is False.
flip (str):
Can be
- 'EW' to flip map about the prime meridian,
- 'NS' to flip map about the equator,
- 'both' to flip map both ways,
- 'none' to do nothing (default).
blend (str):
Can be
- 'EW' to blend map into Jupiter-like bands,
- 'NS' to blend map into beach ball-like stripes,
- 'both' to blend map into a uniform ball,
- 'none' to do nothing (default).
Effect:
If ``into`` is 'pri' or 'alt', stores new albedo map as ``albedos``
or ``albedos_b``, respectively.
Returns:
New albedo map with same shape as source map, if ``into``
is 'none'.
"""
if isinstance(image,str):
if image == 'pri':
old_image = self.albedos
new_image = np.copy(self.albedos)
elif image == 'alt':
old_image = self.albedos_b
new_image = np.copy(self.albedos_b)
else:
if into in ['pri','alt']:
if image.shape != (self.n_clat,self.n_long):
print('InvertFlipBlend_Amap warning: you tried storing an image with shape {},'.format(image.shape))
print(' but right now {} is expecting albedo maps with shape ({}, {}).'.format(self.name,
self.n_clat,
self.n_long))
print(' I stopped this function so you do not get errors later on.')
return
old_image = image
new_image = np.copy(image)
if invert:
inv_lims = [old_image.max(),old_image.min()]
new_image = self._linear_convert(new_image,inv_lims)
if flip == 'both':
new_image = np.fliplr(np.flipud(new_image))
elif flip == 'EW':
new_image = np.fliplr(new_image)
elif flip == 'NS':
new_image = np.flipud(new_image)
if blend == 'both':
new_image[:,:] = self._amap_average(new_image)
elif blend == 'EW':
ns_values = np.sum(new_image[:,:-1],axis=1)*self.delta_long/(2.0*pi)
new_image = np.tile(ns_values,(self.n_long,1)).transpose()
elif blend == 'NS':
ew_values = np.sum(new_image*self.sin_clats,axis=0)*self.delta_clat/2.0
new_image = np.tile(ew_values,(self.n_clat,1))
new_image[:,-1] = new_image[:,0]
if into == 'pri':
self.albedos = new_image
elif into == 'alt':
self.albedos_b = new_image
elif into == 'none':
return new_image
def Build_Amap(self,kind='ylm',mp_data=[[1,-1,1.0],[2,0,-1.0]],primeD=0,limit=True,alb_lims=[0.0,1.0],
into='pri',invert=False,flip='none',blend='none'):
"""Creates an albedo map from input data.
Args:
kind (str):
Can be
- 'pngA' to average values from a png image,
- 'pngI' to interpolate values from a png image,
- 'ylm' to use spherical harmonics (default),
- 'aryA' to average values from a 2D array,
- 'aryI' to interpolate values from a 2D array.
mp_data:
Depends on ``kind``.
- For either 'png' this is the file path to your
image.
- For 'ylm' this is an n-by-3 list of spherical
harmonics with entries [degree ell, order m,
coefficient]. Default list is
[ [1, -1, 1.0], [2, 0, -1.0] ].
- For either 'ary' this is your 2D array itself.
.. note::
All png images are assumed to be equirectangular maps,
which means:
- poles on top and bottom edges,
- equator horizontal across middle,
- prime meridian vertical in center,
- anti-prime meridian on left and right edges.
primeD (int or float):
Longitude of the prime meridian in degrees,
relative to the input data. Rounded to the nearest grid
longitude. Default is zero.
limit (bool):
Set the lowest and highest albedo values. Default is True.
alb_lims (list):
The albedo limits as [lower, upper]. Default is [0, 1.0].
into (str):
Where the new map goes. Can be
- 'pri' for the primary map,
- 'alt' for the alternate map (default),
- 'none' to just return the map.
invert (bool):
Linearly change lower albedo values to higher
values and vice versa. Default is False.
flip (str):
Can be
- 'EW' to flip map about the prime meridian,
- 'NS' to flip map about the equator,
- 'both' to flip map both ways,
- 'none' to do nothing (default).
blend (str):
Can be
- 'EW' to blend map into Jupiter-like bands,
- 'NS' to blend map into beach ball-like stripes,
- 'both' to blend map into a uniform ball,
- 'none' to do nothing (default).
Effect:
If ``into`` is 'pri' or 'alt', stores new albedo map as ``albedos``
or ``albedos_b``, respectively.
Returns:
New albedo map with shape (``n_clat``, ``n_long``), if ``into``
is 'none'.
"""
if (kind == 'pngA') or (kind == 'pngI'):
img_values = self._import_image(mp_data)
pre_albedos = self._convert_image_pixels(kind,img_values)
elif kind == 'ylm':
pre_albedos = np.zeros(self.clats.shape)
for y in np.arange(len(mp_data)):
ell,m,c = mp_data[y]
if abs(m) <= ell:
pre_albedos += c*np.real(sph_harm(m,ell,self.longs,self.clats))
else:
print('Ylm warning for component {} in your list: degree {:.0f} is not >= |order {:.0f}|.'
.format(y,ell,m))
elif (kind == 'aryA') or (kind == 'aryI'):
pre_albedos = self._convert_image_pixels(kind,mp_data)
else:
print('Build_Amap aborted because kind must be one of below.')
print('\"pngA\" or \"pngI\": values averaged or interpolated from a png image.')
print('\"ylm\": n-by-3 list of spherical harmonics with entries [degree ell, order m, coefficient].')
print('\"aryA\" or \"aryI\": values averaged or interpolated from an ndarray (2D).')
return
if (primeD % 360.0) != 0:
simple_prime = ((primeD+180.0) % 360.0) - 180.0
n_prime = round((simple_prime/360.0)*(self.n_long-1))
primed_albedos = self._rolling_amap(pre_albedos,-n_prime)
ang_amap_rotated = (-n_prime/(self.n_long-1))*360.0
if ang_amap_rotated < 0:
directions = ['East','West']
else:
directions = ['West','East']
print('You asked to put the prime meridian {:.2f} degrees {} of normal on your input albedo map.'
.format(abs(simple_prime),directions[0]))
print('There are only {} unique longitudes, so I rotated your map {:.2f} degrees to the {}.'
.format(self.n_long-1,abs(ang_amap_rotated),directions[1]))
else:
primed_albedos = pre_albedos
if limit:
primed_albedos = self._linear_convert(primed_albedos,alb_lims)
if into in ['pri','alt']:
self.InvertFlipBlend_Amap(primed_albedos,into,invert,flip,blend)
elif into == 'none':
return self.InvertFlipBlend_Amap(primed_albedos,into,invert,flip,blend)
def _setup_for_actmodule(self):
"""Initializes attributes for the interactive module."""
self._xph_lig = 'no'
self._xph_med = 'no'
self._xph_drk = 'no'
head_space = {'description_width': 'initial'}
layfull = Layout(width='100%',align_self='center')
laynearfull = Layout(width='95%',align_self='center')
self._orb_act = widgets.FloatSlider(value=0,min=0,max=360,step=0.1,
description='Orbital Phase:',
layout=Layout(width='50%',align_self='center'),
style=head_space,continuous_update=False)
self._inc_act = widgets.FloatSlider(value=85,min=0,max=90,step=0.1,
description='Inclination:',
layout=layfull,continuous_update=False)
self._obl_act = widgets.FloatSlider(value=0,min=0,max=180,step=0.1,
description='Obliquity:',
layout=layfull,continuous_update=False)
self._sol_act = widgets.FloatSlider(value=0,min=0,max=360,step=0.1,
description='Solstice:',
layout=layfull,continuous_update=False)
self._ratRO_act = widgets.FloatSlider(value=72,min=-400,max=400,step=0.1,
description='Spins per Orbit:',
layout=layfull,style=head_space,continuous_update=False)
self._res_act = widgets.IntSlider(value=101,min=11,max=301,step=1,
description='Time Steps per Spin:',
layout=layfull,style=head_space,continuous_update=False)
self._zlong_act = widgets.IntSlider(value=0,min=-180,max=180,step=1,
description=r'Initial Longitude:',
layout=layfull,style=head_space,continuous_update=False)
self._ligcur_act = widgets.Dropdown(description='Light Curve:',
options=[('Flux (map \u00D7 kernel)','flux'),
('Apparent Brightness','appar')],
value='flux',
layout=layfull)
self._spax_act = widgets.Dropdown(description='Axis Constraint:',
options=[('Rotational (red)','wid'),
('Orbital (blue)','dom'),
('Combined','both')],
value='wid',
layout=layfull,style=head_space)
self._pslot_act = widgets.Dropdown(description='Extra Phase Slot:',
options=[('Light','light'),
('Medium','medium'),
('Dark','dark'),
('All','all')],
value='light',
layout=laynearfull,style=head_space)
first_pword = '<center><font color="blue">Ready to save/clear orbital phases</font></center>'
self._pword_act = widgets.HTML(value=first_pword,layout=laynearfull)
self._psav_act = widgets.Button(description='Save',button_style='success',
layout=laynearfull)
self._psav_act.on_click(lambda x: self._savebutton_click())
self._pclr_act = widgets.Button(description='Clear',button_style='warning',
layout=laynearfull)
self._pclr_act.on_click(lambda x: self._clearbutton_click())
self._title_act = widgets.HTML(value='<center><b>Interact with '+self.name+'</b><center>',
layout=layfull)
def _setup_figurevars(self):
"""Initializes figure attributes."""
null_draw = 'Figure not made yet'
self.fig_equi = null_draw
self.fig_geom = null_draw
self.fig_kchar = null_draw
self.fig_kern = null_draw
self.fig_light = null_draw
self.fig_orth = null_draw
self.fig_sand = null_draw
self.fig_spin = null_draw
def __init__(self,name='This Exoplanet',n_clat=37,n_long=73,
kind='ylm',mp_data=[[1,-1,1.0],[2,0,-1.0]],primeD=0,limit=True,alb_lims=[0.0,1.0],
invert=False,flip='none',blend='none',
orbT=(24.0*360.0),ratRO=10.0,
incD=85,oblD=0,solD=0,longzeroD=0):
"""*Constructor for the class DirectImaging_Planet.*
All arguments are for your **primary** map and params.
For your alternate map, inverts the primary map. Other
alternate params are set equal to the primary values.
Args:
name (str):
Your exoplanet's name. Default is 'This Exoplanet'.
n_clat (int):
Number of colatitudes for the planetary grid.
Method ensures this is odd so the equator is included.
Default is 37.
n_long (int):
Number of longitudes for the planetary grid.
Method ensures this is odd so the prime meridian is
included. Default is 73.
kind (str):
Style of planetary map. Can be
- 'pngA' to average values from a png image,
- 'pngI' to interpolate values from a png image,
- 'ylm' to use spherical harmonics (default),
- 'aryA' to average values from a 2D array,
- 'aryI' to interpolate values from a 2D array.
mp_data:
Depends on ``kind``.
- For either 'png' this is the file path to your
image.
- For 'ylm' this is an n-by-3 list of spherical
harmonics with entries [degree ell, order m,
coefficient]. Default list is
[ [1, -1, 1.0], [2, 0, -1.0] ].
- For either 'ary' this is your 2D array itself.
.. note::
All png images are assumed to be equirectangular maps,
which means:
- poles on top and bottom edges,
- equator horizontal across middle,
- prime meridian vertical in center,
- anti-prime meridian on left and right edges.
primeD (int or float):
Longitude of the prime meridian in degrees, relative to
the input data. Rounded to the nearest grid longitude.
Default is zero.
limit (bool):
Set the lowest and highest albedo values. Default is True.
alb_lims (list):
The albedo limits as [lower, upper]. Default is [0, 1.0].
invert (bool):
Linearly change lower albedo values to higher values and
vice versa. Default is False.
flip (str):
Can be
- 'EW' to flip map about the prime meridian,
- 'NS' to flip map about the equator,
- 'both' to flip map both ways,
- 'none' to do nothing (default).
blend (str):
Can be
- 'EW' to blend map into Jupiter-like bands,
- 'NS' to blend map into beach ball-like stripes,
- 'both' to blend map into a uniform ball,
- 'none' to do nothing (default).
orbT (int or float):
Orbital period of the planet in any unit.
                Default is 8640.0 (i.e. 24.0*360.0, roughly the hours in one year).
ratRO (int or float):
Ratio of the planet's rotational and orbital angular
frequencies. Default is 10.0.
incD (int or float):
Inclination in degrees. Default is 85.
oblD (int or float):
Obliquity in degrees. Default is zero.
solD (int or float):
Solstice in degrees. Default is zero.
longzeroD (int or float):
Longitude of the sub-observer point when t=0, in degrees.
Default is zero.
"""
self.name = name
self._colat_long(n_clat,n_long)
self.Build_Amap(kind,mp_data,primeD,limit,alb_lims,'pri',invert,flip,blend)
self.orbT = orbT
self.ratRO = ratRO
self._rot_res = self.n_long - 1
self._orb_min = -0.5
self._orb_max = 0.5
steps_per_orbit = self._rot_res*abs(ratRO)
n_default = round(max(steps_per_orbit,360)*(self._orb_max-self._orb_min)) + 1
self.times = np.linspace(self._orb_min,self._orb_max,n_default)*abs(orbT)
self.incD = incD
self.oblD = oblD
self.solD = solD
self.longzeroD = longzeroD
self.InvertFlipBlend_Amap('pri','alt',invert=True,flip='none',blend='none')
self.orbT_b = orbT
self.ratRO_b = ratRO
self.incD_b = incD
self.oblD_b = oblD
self.solD_b = solD
self.longzeroD_b = longzeroD
self._setup_for_actmodule()
self._setup_figurevars()
def _compare_param(self,new,old):
"""Checks if a new value matches the old value."""
if isinstance(new,str):
return old
else:
return new
def Adjust_Geometry(self,which='both',incD='no',oblD='no',solD='no',longzeroD='no'):
"""Changes the geometry your planet is in.
Args:
which (str):
Can be
- 'pri' to adjust primary params,
- 'alt' to adjust alternate params,
- 'both'.
incD (int, float, or str):
New inclination in degrees (0 to 90), or any string to
keep the current value. Default is 'no'. Other args have
same format.
oblD:
New obliquity (0 to 180).
solD:
New solstice (0 to 360).
longzeroD:
New sub-observer longitude at t=0.
"""
if which in ['pri','both']:
self.incD = self._compare_param(incD,self.incD)
self.oblD = self._compare_param(oblD,self.oblD)
self.solD = self._compare_param(solD,self.solD)
self.longzeroD = self._compare_param(longzeroD,self.longzeroD)
if which in ['alt','both']:
self.incD_b = self._compare_param(incD,self.incD_b)
self.oblD_b = self._compare_param(oblD,self.oblD_b)
self.solD_b = self._compare_param(solD,self.solD_b)
self.longzeroD_b = self._compare_param(longzeroD,self.longzeroD_b)
def Adjust_MotionTimes(self,which='both',orbT='no',ratRO='no',
orb_min='no',orb_max='no',rot_res='no'):
"""Changes the orbital and rotational params of your planet.
Args:
which (str):
Can be
- 'pri' to adjust primary ``orbT`` and ``ratRO``,
- 'alt' to adjust alternate values,
- 'both'.
orbT (int, float, or str):
New orbital period in any unit, or any string to keep
the current value. Default is 'no'. Other args
have the same format.
ratRO:
New rotational-to-orbital frequency ratio.
**The args below are set relative to the primary params.**
orb_min:
New minimum time in orbits, can be negative.
orb_max:
New maximum time in orbits, can be negative.
rot_res:
New number of time steps per rotation.
.. note::
Whatever you choose for ``rot_res``, there will be at least
360 time steps per full orbit.
Effect:
Also updates ``times``, the time array based on the primary
orbital period.
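Example:
A minimal sketch, with ``planet`` as an existing
:class:`DirectImaging_Planet` instance::
planet.Adjust_MotionTimes('pri',orbT=90.0*24.0)  # e.g. a 90-day orbit expressed in hours
planet.Adjust_MotionTimes('both',orb_min=-1.0,orb_max=1.0)  # ``times`` now spans two full orbits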
"""
if which in ['pri','both']:
self.orbT = self._compare_param(orbT,self.orbT)
self.ratRO = self._compare_param(ratRO,self.ratRO)
if which in ['alt','both']:
self.orbT_b = self._compare_param(orbT,self.orbT_b)
self.ratRO_b = self._compare_param(ratRO,self.ratRO_b)
self._orb_min = self._compare_param(orb_min,self._orb_min)
self._orb_max = self._compare_param(orb_max,self._orb_max)
self._rot_res = self._compare_param(rot_res,self._rot_res)
steps_per_orbit = self._rot_res*abs(self.ratRO)
n_steps = round(max(steps_per_orbit,360)*(self._orb_max-self._orb_min)) + 1
self.times = np.linspace(self._orb_min,self._orb_max,n_steps)*abs(self.orbT)
def Setup_ProRet_Degeneracy(self):
"""Sets your alternate params for a specific light curve degeneracy.
The degeneracy involves the albedo map and is **usually** prograde
vs. retrograde rotation (but see note below). Discussed in Section 4.5
and Appendix B3 of S16.
When a planet has zero obliquity and its orbit is edge-on to you
(inclination 90 degrees), you cannot tell from a light curve whether:
- the planet does *N* spins per orbit (``ratRO``) with an albedo
map *A*, or
- it does (1.0 - *N*) spins with an East-West flipped *A*.
Most often *N* and (1.0 - *N*) have opposite signs, so one version spins
prograde and the other retrograde. This light curve degeneracy breaks
down if the planet is tilted or its orbit is not edge-on.
After running this method, test with :func:`Light_Curves`,
:func:`LightCurve_Plot`, or :func:`Orthographic_Viewer`.
.. note::
If *N* is between 0 and 1.0, both versions of the planet spin
prograde. And when *N* = 0.5, their spins are identical!
Effect:
Calls :func:`InvertFlipBlend_Amap`, :func:`Adjust_Geometry`,
and :func:`Adjust_MotionTimes`, using your primary params to
set up the alternate params as described.
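Example:
A quick test, assuming ``planet`` is an edge-on, zero-obliquity
:class:`DirectImaging_Planet` instance::
planet.Setup_ProRet_Degeneracy()
planet.LightCurve_Plot(alt=True,diff=True,show='flux')  # primary and alternate fluxes should coincide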
"""
self.InvertFlipBlend_Amap(flip='EW')
self.Adjust_Geometry('alt',self.incD,self.oblD,self.solD,-self.longzeroD)
self.Adjust_MotionTimes('alt',ratRO=1.0-self.ratRO)
def _describe_amap(self,low,high):
"""Checks if a brightness map is realistic."""
if low < 0:
return 'No, some < 0'
elif high > 1.0:
return 'Semi, some > 1'
else:
return 'Yes'
def Info_Printout(self):
"""Prints many of the current model parameters for your planet.
Grouped by grid, albedo map, motion, and geometry. The latter three
are broken down further into the primary and alternate cases.
"""
print('Below are some parameters you are using to model {}.'.format(self.name))
print('')
form_cols = '{:^12} {:^14} {:^18}'
print(form_cols.format('**Grid**','Number','Separation (deg)'))
form_cols = '{:<12} {:^14} {:^18.2f}'
print(form_cols.format('Colatitudes',self.n_clat,np.degrees(self.delta_clat)))
act_long = '{}(+1)'.format(self.n_long-1)
print(form_cols.format('Longitudes',act_long,np.degrees(self.delta_long)))
print('')
form_cols = '{:^16} {:^14} {:^14} {:^14} {:^16}'
print(form_cols.format('**Albedo Map**','Low','Average','High','Realistic?'))
form_cols = '{:<16} {:^14.3f} {:^14.3f} {:^14.3f} {:^16}'
a_low,a_avg,a_high = self.albedos.min(),self._amap_average(self.albedos),self.albedos.max()
print(form_cols.format('Primary',a_low,a_avg,a_high,self._describe_amap(a_low,a_high)))
a_low_b,a_avg_b,a_high_b = self.albedos_b.min(),self._amap_average(self.albedos_b),self.albedos_b.max()
print(form_cols.format('Alternate',a_low_b,a_avg_b,a_high_b,self._describe_amap(a_low_b,a_high_b)))
print('')
form_cols = '{:^14} {:^24} {:^22} {:^17} {:^17}'
print(form_cols.format('**Motion**','Orbital Period (units)','Rot./Orb. Frequency',
'Low t (orbits)','High t (orbits)'))
form_cols = '{:<14} {:^24.3f} {:^22.4f} {:^17.4f} {:^17.4f}'
if isinstance(self.times,(int,float)):
low_t,high_t = self.times,self.times
else:
low_t,high_t = self.times[0],self.times[-1]
print(form_cols.format('Primary',self.orbT,self.ratRO,
low_t/abs(self.orbT),high_t/abs(self.orbT)))
form_cols = '{:<14} {:^24.3f} {:^22.4f} {:^17} {:^17}'
low_orb_b = '(({:.4f}))'.format(low_t/abs(self.orbT_b))
high_orb_b = '(({:.4f}))'.format(high_t/abs(self.orbT_b))
print(form_cols.format('Alternate',self.orbT_b,self.ratRO_b,low_orb_b,high_orb_b))
print('')
form_cols = '{:^14} {:^20} {:^18} {:^18} {:^22}'
print(form_cols.format('**Geometry**','Inclination (deg)','Obliquity (deg)',
'Solstice (deg)','t=0 Longitude (deg)'))
form_cols = '{:<14} {:^20.2f} {:^18.2f} {:^18.2f} {:^22.2f}'
print(form_cols.format('Primary',self.incD,self.oblD,self.solD,self.longzeroD))
print(form_cols.format('Alternate',self.incD_b,self.oblD_b,self.solD_b,self.longzeroD_b))
def Geometry_Diagram(self,which='pri',**kwargs):
"""Makes a diagram of the geometry your planet is in.
.. image:: _static/geomdiag_example.png
:width: 60%
:align: center
This shows its inclination, obliquity, solstice, and spins per orbit.
Args:
which (str):
Can be
- 'pri' to use primary params (default),
- 'alt' to use alternate params.
.. note::
Keywords are only used by the interactive function :func:`Sandbox_Reflection`.
Effect:
Stores this matplotlib figure as ``fig_geom``, **overwriting**
the previous version. You can save the image later by
calling ``fig_geom.savefig(...)``.
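Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.Geometry_Diagram()  # primary geometry
planet.Geometry_Diagram(which='alt')  # alternate geometry
planet.fig_geom.savefig('geometry.png')  # hypothetical filename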
"""
## Takes almost all keywords from Geometry_Reference: _active,incD,oblD,solD,ratRO,phaseD,ph_colors...
reference = False # ...except this one; you never make the reference diagram here.
if kwargs.get('_active',False):
Geometry_Reference(reference=reference,**kwargs)
else:
if which == 'pri':
Geometry_Reference(incD=self.incD,oblD=self.oblD,solD=self.solD,ratRO=self.ratRO,
phaseD=self.solD,name=self.name,reference=reference)
elif which == 'alt':
Geometry_Reference(incD=self.incD_b,oblD=self.oblD_b,solD=self.solD_b,ratRO=self.ratRO_b,
phaseD=self.solD_b,name='Alt. '+self.name,reference=reference)
plt.tight_layout()
self.fig_geom = plt.gcf()
plt.show()
def _flat_style(self,albs,v_l,v_h,grat):
"""Styles plots for equirectangular maps."""
if albs.min() < 0:
m_c,lab,d_c = darkmid_BrBG_,'value',(1,0,0)
elif albs.max() > 1.0:
m_c,lab,d_c = cm.magma,'value',(0,1,0)
else:
m_c,lab,d_c = cm.bone,'albedo',(0,1,0)
cart = plt.imshow(albs,cmap=m_c,extent=[-180,180,180,0],vmin=v_l,vmax=v_h)
cbar = plt.colorbar(cart)
cbar.set_label(label=lab,size='large')
if grat:
plt.axvline(-90,c=d_c,ls=':',lw=1)
plt.axvline(0,c=d_c,ls='--',lw=1)
plt.axvline(90,c=d_c,ls=':',lw=1)
plt.axhline(45,c=d_c,ls=':',lw=1)
plt.axhline(90,c=d_c,ls='--',lw=1)
plt.axhline(135,c=d_c,ls=':',lw=1)
plt.ylabel('Colatitude',size='x-large')
plt.yticks(np.linspace(0,180,5),colat_ticks_,size='large')
plt.xlabel('Longitude',size='x-large')
plt.xticks(np.linspace(-180,180,5),long_ticks_,size='large')
def _single_amap_colorbounds(self,low,high):
"""Gives limits for a brightness map's colorbar."""
if low < 0:
bound = max(abs(low),high)
v_l,v_h = -bound,bound
elif high > 1.0:
v_l,v_h = 0,high
else:
v_l,v_h = 0,1.0
return v_l,v_h
def _double_amap_colorbounds(self,alt,same_scale):
"""Gives limits for two colorbars that may be related."""
mast_l,mast_h = self.albedos.min(),self.albedos.max()
alt_l,alt_h = self.albedos_b.min(),self.albedos_b.max()
if alt and same_scale:
if (mast_l < 0) and (alt_l < 0):
bound = max(abs(mast_l),mast_h,abs(alt_l),alt_h)
vm_l,vm_h,va_l,va_h = -bound,bound,-bound,bound
elif (mast_l >= 0) and (alt_l >= 0) and (mast_h > 1.0) and (alt_h > 1.0):
vm_l,va_l = 0,0
bound = max(mast_h,alt_h)
vm_h,va_h = bound,bound
else:
vm_l,vm_h = self._single_amap_colorbounds(mast_l,mast_h)
va_l,va_h = self._single_amap_colorbounds(alt_l,alt_h)
else:
vm_l,vm_h = self._single_amap_colorbounds(mast_l,mast_h)
va_l,va_h = self._single_amap_colorbounds(alt_l,alt_h)
return vm_l,vm_h,va_l,va_h
def EquiRect_Amap(self,alt=True,same_scale=True,grat=True):
"""Shows your albedo maps in equirectangular projection.
.. image:: _static/equirect_example.png
:align: center
This projection is a simple rectangle: colatitudes are horizontal lines
and longitudes are vertical lines. The primary map is always shown, and
the color schemes adapt to the albedo values you are using (real,
semi-real, or unrealistic).
Args:
alt (bool):
Include the alternate map. Default is True.
same_scale (bool):
If the primary and alternate maps have the same color scheme,
then show both on the same color scale. Default is True.
grat (bool):
Overlay a basic graticule. Default is True.
Effect:
Stores this matplotlib figure as ``fig_equi``, **overwriting**
the previous version. You can save the image later by
calling ``fig_equi.savefig(...)``.
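Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.EquiRect_Amap()  # primary and alternate maps, shared color scale when possible
planet.EquiRect_Amap(alt=False,grat=False)  # just the primary map, no graticule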
"""
vm_l,vm_h,va_l,va_h = self._double_amap_colorbounds(alt,same_scale)
if alt:
plt.figure(figsize=(16,4))
plt.subplot(121)
else:
plt.figure(figsize=(9,4))
self._flat_style(self.albedos,vm_l,vm_h,grat)
plt.title('Map of {}'.format(self.name),size='x-large')
if alt:
plt.subplot(122)
self._flat_style(self.albedos_b,va_l,va_h,grat)
plt.title('Alternate Map of {}'.format(self.name),size='x-large')
plt.tight_layout()
self.fig_equi = plt.gcf()
plt.show()
def _convert_omega_rad(self,orbT,ratRO,incD,oblD,solD,longzeroD):
"""Converts params to angular frequencies and radians."""
worb = 2.0*pi/orbT
wrot = ratRO*abs(worb)
inc,obl,sol,longzero = np.radians(incD),np.radians(oblD),np.radians(solD),np.radians(longzeroD)
return worb,wrot,inc,obl,sol,longzero
def SubOS_TimeDeg(self,which='pri',times=0,orbT=(24.0*360.0),ratRO=10.0,incD=85,oblD=0,solD=0,longzeroD=0,
bypass_time='no'):
"""Calculates an planet's sub-observer and -stellar locations over time.
Wrapper for :func:`exoplanetsubspots.sub_observerstellar` that
works with the class :class:`DirectImaging_Planet`. See Appendix A
of S16.
Args:
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Optional below.
bypass_time (int, float, 1d array, or str):
Time value(s) in place of the instance ``times``. All
other primary or alternate params are still used. Canceled
if any string. Default is 'no'.
Optional:
times, orbT, ratRO, incD, oblD, solD, longzeroD:
Custom set of params to use if ``which`` is '_c'.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Returns:
Array of trigonometric values with shape (8, # of time steps).
First dimension is ordered:
- sin theta_obs
- cos theta_obs
- sin phi_obs
- cos phi_obs
- sin theta_st
- cos theta_st
- sin phi_st
- cos phi_st
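Example:
A minimal sketch, assuming ``planet`` is an existing instance::
os_trigs = planet.SubOS_TimeDeg()  # primary params over the instance ``times``
k2d = planet.Kernel2D(os_trigs)  # the output feeds straight into the kernel methods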
"""
if which == 'pri':
here_times = self.times
worb,wrot,inc,obl,sol,longzero = self._convert_omega_rad(self.orbT,self.ratRO,
self.incD,self.oblD,self.solD,self.longzeroD)
elif which == 'alt':
here_times = self.times
worb,wrot,inc,obl,sol,longzero = self._convert_omega_rad(self.orbT_b,self.ratRO_b,
self.incD_b,self.oblD_b,self.solD_b,
self.longzeroD_b)
elif which == '_c':
here_times = times
worb,wrot,inc,obl,sol,longzero = self._convert_omega_rad(orbT,ratRO,incD,oblD,solD,longzeroD)
if (which != '_c') and not isinstance(bypass_time,str):
here_times = bypass_time
return exoss.sub_observerstellar(here_times,worb,wrot,inc,obl,sol,longzero)
def Kernel2D(self,os_trigs):
"""Calculates a planet's 2D kernel of reflection.
This kernel is the product of visibility and illumination at each
location on an exoplanet. See Section 2 of S16.
Args:
os_trigs (ndarray):
Trig values describing the sub-observer and sub-stellar
points, with shape (8, # of time steps). Should
be formatted like the output of :func:`SubOS_TimeDeg`.
Returns:
2D kernel with shape (# of time steps, ``n_clat``, ``n_long``).
"""
St_o,Ct_o,Sp_o,Cp_o,St_s,Ct_s,Sp_s,Cp_s = os_trigs[:,:,np.newaxis,np.newaxis]
Vis = (self.sin_clats*St_o*(self.cos_longs*Cp_o + self.sin_longs*Sp_o)) + (self.cos_clats*Ct_o)
v_ind = (Vis < 0)
Vis[v_ind] = 0
Ilu = (self.sin_clats*St_s*(self.cos_longs*Cp_s + self.sin_longs*Sp_s)) + (self.cos_clats*Ct_s)
i_ind = (Ilu < 0)
Ilu[i_ind] = 0
return (Vis*Ilu)/pi
def KernelLong(self,k2d):
"""Calculates a planet's longitudinal kernel.
Marginalizes the 2D kernel over colatitude.
Args:
k2d (ndarray):
2D kernel with shape (# of time steps, ``n_clat``,
``n_long``), like output from :func:`Kernel2D`.
Returns:
Longitudinal kernel with shape (# of time steps, ``n_long``).
"""
return np.sum(k2d*self.sin_clats,axis=1)*self.delta_clat
def KernelClat(self,k2d):
"""Calculates a planet's colatitudinal kernel.
Marginalizes the 2D kernel over longitude.
Args:
k2d (ndarray):
2D kernel with shape (# of time steps, ``n_clat``,
``n_long``), like output from :func:`Kernel2D`.
Returns:
Colatitudinal kernel with shape (# of time steps, ``n_clat``).
"""
return np.sum(k2d,axis=2)*self.delta_long
def Kernel_WidthDomColat(self,which='pri',keep_kernels=False,times=0,orbT=(24.0*360.0),ratRO=10.0,
incD=85,oblD=0,solD=0,longzeroD=0,bypass_time='no'):
"""Calculates characteristics of the kernel over time.
The kernel of reflection has a longitudinal width (standard
deviation) and a dominant colatitude (weighted average) that change
throughout a planet's orbit. See Section 2 of S16.
Args:
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Optional below.
keep_kernels (bool):
Output all kernel info, not just the characteristics,
see Returns below. Default is False.
bypass_time (int, float, 1d array, or str):
Time value(s) in place of the instance ``times``. All other
primary or alternate params are still used. Canceled if
any string. Default is 'no'.
Optional:
times, orbT, ratRO, incD, oblD, solD, longzeroD:
Custom set of params to use if ``which`` is '_c'.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Returns:
sig_long (array):
Longitudinal widths, shape (# of time steps).
dom_clat (array):
Dominant colatitudes, shape (# of time steps).
If ``keep_kernels`` is True, also:
actual_mu (array):
Mean longitudes, shape (# of time steps).
klong (array):
Longitudinal kernel, shape (# of time steps, ``n_long``).
kclat (array):
Colatitudinal kernel, shape (# of time steps, ``n_clat``).
k2d (array):
2D kernel, shape (# of time steps, ``n_clat``, ``n_long``).
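Example:
A minimal sketch, assuming ``planet`` is an existing instance::
sig_long,dom_clat = planet.Kernel_WidthDomColat()  # primary params, values in radians
sig,dom,mu,klong,kclat,k2d = planet.Kernel_WidthDomColat('alt',keep_kernels=True)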
"""
if which == 'pri':
os_trigs = self.SubOS_TimeDeg(bypass_time=bypass_time)
elif which == 'alt':
os_trigs = self.SubOS_TimeDeg(which,bypass_time=bypass_time)
elif which == '_c':
os_trigs = self.SubOS_TimeDeg(which,times,orbT,ratRO,incD,oblD,solD,longzeroD)
k2d = self.Kernel2D(os_trigs)
klong = self.KernelLong(k2d)
kclat = self.KernelClat(k2d)
twice_long = np.tile(self.long_vec[:-1],2)
shift_long = _rolling(twice_long[:-1],self.n_long-1)
klong_norm = np.sum(klong[:,:-1],axis=1)*self.delta_long
klong_hat = klong/klong_norm[:,np.newaxis]
virtual_mu = np.sum(shift_long*klong_hat[:,np.newaxis,:-1],axis=2)*self.delta_long
arg_square = np.absolute(shift_long - virtual_mu[:,:,np.newaxis])
toshrink = (arg_square > pi)
arg_square[toshrink] = 2.0*pi - arg_square[toshrink]
var_long = np.sum((arg_square**2.0)*klong_hat[:,np.newaxis,:-1],axis=2)*self.delta_long
sig_long = (var_long.min(axis=1))**0.5
coord_move_i = var_long.argmin(axis=1)
actual_mu = virtual_mu[np.arange(len(coord_move_i)),coord_move_i] - coord_move_i*self.delta_long
actual_mu = actual_mu % (2.0*pi)
kclat_norm = np.sum(kclat*self.sin_clats[:,0],axis=1)*self.delta_clat
kclat_hat = kclat/kclat_norm[:,np.newaxis]
dom_clat = np.sum(self.clat_vec*kclat_hat*self.sin_clats[:,0],axis=1)*self.delta_clat
if keep_kernels:
return sig_long,dom_clat,actual_mu,klong,kclat,k2d
else:
return sig_long,dom_clat
def Kernels_Plot(self,phaseD,which='pri',grat=True,fixed_lims=True,force_bright=True,
over_amap=False,albs=np.array([[1.0]]),
orbT=(24.0*360.0),ratRO=10.0,incD=85,oblD=0,solD=0,longzeroD=0,bypass_time='no'):
"""Diagrams your planet's kernel at a given orbital phase.
.. image:: _static/kernplot_example.png
:align: center
This includes the 2D, longitudinal, and colatitudinal versions
of the kernel. The diagram also shows you the kernel's mean
longitude (pink circle), longitudinal width (red bars), and
dominant colatitude (blue circle). If you want to get the actual
data instead, use :func:`Kernel_WidthDomColat`.
Args:
phaseD (int or float):
Orbital phase of the planet in degrees. Standard range
is [0, 360).
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Optional below.
grat (bool):
Overlay basic graticules. Default is True.
fixed_lims (bool):
Keep the plotted limits for the relative long. and
colat. kernels fixed at [0, 1.0]. Default is True.
force_bright (bool):
Use the full color scale to draw the 2D kernel.
The false brightness can make dark drawings (like
crescent phases) easier to see. Default is True.
over_amap (bool):
Draw a dim version of the albedo map with the 2D kernel.
This map is not affected by ``force_bright``. Default is
False.
bypass_time (int, float, 1d array, or str):
Time value(s) in place of the instance ``times``. All
other primary or alternate params are still used. Canceled
if any string. Default is 'no'.
Optional:
albs (2D array):
Custom albedo map to use if ``which`` is '_c'.
Its shape should be, or work with, (``n_clat``, ``n_long``).
Default is ``np.array( [ [ 1.0 ] ] )``.
times, orbT, ratRO, incD, oblD, solD, longzeroD:
Custom set of params to use if ``which`` is '_c'.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Effect:
Stores this matplotlib figure as ``fig_kern``, **overwriting**
the previous version. You can save the image later by
calling ``fig_kern.savefig(...)``.
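Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.Kernels_Plot(90)  # primary params at a 90-degree orbital phase
planet.fig_kern.savefig('kernel_phase90.png')  # hypothetical filename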
"""
if which == 'pri':
here_albs = self.albedos
time = self.orbT*(phaseD/360.0)
here_incD,here_oblD,here_solD = self.incD,self.oblD,self.solD
sig_long,dom_clat,actual_mu,klong,kclat,k2d = self.Kernel_WidthDomColat(keep_kernels=True,
bypass_time=time)
elif which == 'alt':
here_albs = self.albedos_b
time = self.orbT_b*(phaseD/360.0)
here_incD,here_oblD,here_solD = self.incD_b,self.oblD_b,self.solD_b
sig_long,dom_clat,actual_mu,klong,kclat,k2d = self.Kernel_WidthDomColat(which,keep_kernels=True,
bypass_time=time)
elif which == '_c':
here_albs = albs
time = orbT*(phaseD/360.0)
here_incD,here_oblD,here_solD = incD,oblD,solD
sig_long,dom_clat,actual_mu,klong,kclat,k2d = self.Kernel_WidthDomColat(which,True,time,orbT,ratRO,
incD,oblD,solD,longzeroD)
sig_long,dom_clat,actual_mu,klong,kclat,k2d = sig_long[0],dom_clat[0],actual_mu[0],klong[0],kclat[0],k2d[0]
tot_k2d = np.sum(k2d[:,:-1]*self.sin_clats[:,:-1])*self.delta_clat*self.delta_long
kern_frac = tot_k2d/(2.0/3.0)
r,c = 2,3
plt.figure(figsize=(12,8))
plt.subplot2grid((r,c),(1,0),colspan=2)
if over_amap:
if force_bright:
to_view = (0.15*np.absolute(here_albs)/np.absolute(here_albs.max())) + (0.85*k2d/k2d.max())
else:
to_view = (0.15*np.absolute(here_albs)/np.absolute(here_albs.max())) + kern_frac*(0.85*k2d/k2d.max())
else:
to_view = k2d/k2d.max()
if not force_bright:
to_view *= kern_frac
plt.contourf(np.degrees(self.mono_longs),np.degrees(self.clats),to_view,65,
cmap=cm.gray,vmin=0,vmax=1.0)
if grat:
d_c = (0,1,0)
plt.axvline(-90,c=d_c,ls=':',lw=1)
plt.axvline(0,c=d_c,ls='--',lw=1)
plt.axvline(90,c=d_c,ls=':',lw=1)
plt.axhline(45,c=d_c,ls=':',lw=1)
plt.axhline(90,c=d_c,ls='--',lw=1)
plt.axhline(135,c=d_c,ls=':',lw=1)
plt.ylabel('Colatitude',size='large')
plt.yticks(np.linspace(0,180,5),colat_ticks_,size='medium')
plt.ylim([180,0])
plt.xlabel('Longitude',size='large')
plt.xticks(np.linspace(-180,180,5),long_ticks_,size='medium')
plt.xlim([-180,180])
plt.subplot2grid((r,c),(0,0),colspan=2)
klong_rel = kern_frac*klong/klong.max()
plt.plot(np.degrees(self.mono_long_vec),klong_rel,c=cm.Reds(0.5),lw=4)
if fixed_lims:
plt.yticks(np.linspace(0,1.0,5),size='medium')
plt.ylim([-0.05,1.15])
y_mu = 1.075
else:
plt.yticks(size='medium')
plt.ylim([-0.05*klong_rel.max(),1.15*klong_rel.max()])
y_mu = 1.075*klong_rel.max()
if actual_mu > pi:
actual_mu -= 2.0*pi
plt.scatter(np.degrees(actual_mu),y_mu,s=100,color=cm.Reds(0.33),edgecolor='k',marker='o',zorder=3)
if (actual_mu + sig_long) > pi:
plt.plot([-180,-180+np.degrees(actual_mu+sig_long-pi)],[y_mu,y_mu],c=cm.Reds(0.75),lw=3)
if (actual_mu - sig_long) < -pi:
plt.plot([180,180-np.degrees(-pi-actual_mu+sig_long)],[y_mu,y_mu],c=cm.Reds(0.75),lw=3)
plt.plot([np.degrees(actual_mu-sig_long),np.degrees(actual_mu+sig_long)],[y_mu,y_mu],c=cm.Reds(0.75),lw=3)
if grat:
d_c = '0.33'
plt.axvline(-90,c=d_c,ls=':',lw=1)
plt.axvline(0,c=d_c,ls='--',lw=1)
plt.axvline(90,c=d_c,ls=':',lw=1)
plt.ylabel('Relative Longitudinal Kernel',size='large')
plt.xticks(np.linspace(-180,180,5),long_ticks_,size='medium')
plt.xlim([-180,180])
plt.subplot2grid((r,c),(1,2))
kclat_rel = kern_frac*kclat/kclat.max()
plt.plot(kclat_rel,np.degrees(self.clat_vec),c=cm.Blues(0.5),lw=4)
plt.yticks(np.linspace(0,180,5),colat_ticks_,size='medium')
plt.ylim([180,0])
if fixed_lims:
plt.xticks(np.linspace(0,1.0,5),size='medium')
plt.xlim([-0.05,1.15])
y_dom = 1.075
else:
plt.xticks(size='large')
plt.xlim([-0.05*kclat_rel.max(),1.15*kclat_rel.max()])
y_dom = 1.075*kclat_rel.max()
plt.scatter(y_dom,np.degrees(dom_clat),s=100,color=cm.Blues(0.75),edgecolor='k',marker='o',zorder=3)
if grat:
d_c = '0.33'
plt.axhline(45,c=d_c,ls=':',lw=1)
plt.axhline(90,c=d_c,ls='--',lw=1)
plt.axhline(135,c=d_c,ls=':',lw=1)
plt.xlabel('Relative Colatitudinal Kernel',size='large')
plt.subplot2grid((r,c),(0,2))
plt.text(0,0.9,r'%s' '\n' 'at $%.2f^{\circ}$ phase' % (self.name,phaseD),color='k',size='x-large',
ha='center',va='center',weight='bold')
plt.text(0,0.7,r'Inclination: $%.2f^{\circ}$' % here_incD,color='k',size='x-large',ha='center',va='center')
plt.text(0,0.6,'Obliquity: $%.2f^{\circ}$' % here_oblD,color='k',size='x-large',ha='center',va='center')
plt.text(0,0.5,'Solstice: $%.2f^{\circ}$' % here_solD,color='k',size='x-large',ha='center',va='center')
plt.text(0,0.325,'Mean Longitude: $%.2f^{\circ}$' % (np.degrees(actual_mu)),
color=cm.Reds(0.33),size='x-large',ha='center',va='center')
plt.text(0,0.225,'Longitudinal Width: $%.2f^{\circ}$' % (np.degrees(sig_long)),
color=cm.Reds(0.75),size='x-large',ha='center',va='center')
plt.text(0,0.05,'Dominant Colatitude: $%.2f^{\circ}$' % (np.degrees(dom_clat)),
color=cm.Blues(0.75),size='x-large',ha='center',va='center')
plt.xlim([-0.5,0.5])
plt.ylim([0,1.0])
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axis('off')
plt.tight_layout()
self.fig_kern = plt.gcf()
plt.show()
def _kcevo_style(self,char,times,sig_long,dom_clat,i,imax,ax1,ax2,_active,phasesD_I,ph_colors):
"""Styles part of plots for kernel characteristics."""
if char == 'wid':
ax1.plot(times,np.degrees(sig_long),c=cm.Reds(0.85-0.7*i/(imax-1)),zorder=1)
elif char == 'dom':
ax1.plot(times,np.degrees(dom_clat),c=cm.Blues(0.85-0.7*i/(imax-1)),zorder=1)
elif char == 'both':
liwi, = ax1.plot(times,np.degrees(sig_long),c=cm.Reds(0.85-0.7*i/(imax-1)),
label='Long. Width',zorder=1)
doco, = ax2.plot(times,np.degrees(dom_clat),c=cm.Blues(0.85-0.7*i/(imax-1)),
label='Dom. Colat.',zorder=1)
if _active:
n = 0
for p in phasesD_I:
if isinstance(p,(int,float)):
pt_ind = round(p % 360.0)
ax1.scatter(times[pt_ind],np.degrees(sig_long[pt_ind]),
color=ph_colors[n],edgecolor='k',s=100,marker='o',zorder=2)
ax2.scatter(times[pt_ind],np.degrees(dom_clat[pt_ind]),
color=ph_colors[n],edgecolor='k',s=100,marker='o',zorder=2)
n += 1
ax1.legend(handles=[liwi,doco],loc='best',fontsize='medium')
def _kcevo_loop(self,char,explode,gap,times,incD,oblD,solD,ax1,ax2,_active,phasesD_I,ph_colors):
"""Checks which style of kernel characteristics plot to make."""
if explode == 'inc':
imax = int(90//gap) + 1
ex_lab = r' - Inclination: $0^{\circ}$ dark to $%.1f^{\circ}$ light in $%.1f^{\circ}$ gaps' % ((imax-1)*gap,
gap)
for i in np.arange(imax):
now_incD = i*gap
sig_long,dom_clat = self.Kernel_WidthDomColat('_c',False,times,1,1,now_incD,oblD,solD)
self._kcevo_style(char,times,sig_long,dom_clat,i,imax,ax1,ax2,_active,phasesD_I,ph_colors)
elif explode == 'obl':
imax = int(90//gap) + 1
ex_lab = r' - Obliquity: $0^{\circ}$ dark to $%.1f^{\circ}$ light in $%.1f^{\circ}$ gaps' % ((imax-1)*gap,
gap)
for i in np.arange(imax):
now_oblD = i*gap
sig_long,dom_clat = self.Kernel_WidthDomColat('_c',False,times,1,1,incD,now_oblD,solD)
self._kcevo_style(char,times,sig_long,dom_clat,i,imax,ax1,ax2,_active,phasesD_I,ph_colors)
elif explode == 'sol':
imax = int(360//gap)
ex_lab = r' - Solstice: $0^{\circ}$ dark to $%.1f^{\circ}$ light in $%.1f^{\circ}$ gaps' % ((imax-1)*gap,
gap)
for i in np.arange(imax):
now_solD = i*gap
sig_long,dom_clat = self.Kernel_WidthDomColat('_c',False,times,1,1,incD,oblD,now_solD)
self._kcevo_style(char,times,sig_long,dom_clat,i,imax,ax1,ax2,_active,phasesD_I,ph_colors)
elif explode == 'none':
ex_lab = ''
sig_long,dom_clat = self.Kernel_WidthDomColat('_c',False,times,1,1,incD,oblD,solD)
self._kcevo_style(char,times,sig_long,dom_clat,1,3,ax1,ax2,_active,phasesD_I,ph_colors)
return 'Kernel Characteristics of {}'.format(self.name)+ex_lab
def _kcevo_stylewid(self,ax,s_tick,s_lab,_active):
"""Styles part of plots for kernel widths."""
ax.set_ylim(0,110)
ax.set_yticks(np.linspace(0,100,5))
ax.set_yticklabels(wlong_ticks_,size=s_tick)
if not _active:
ax.set_ylabel('Longitudinal Width',color=cm.Reds(0.75),size=s_lab)
ax.tick_params('y',colors=cm.Reds(0.75))
ax.set_xlim(0,1)
ax.set_xticks(np.linspace(0,1,5))
ax.set_xlabel('Time (orbits)',size=s_lab)
def _kcevo_styledom(self,ax,s_tick,s_lab,_active):
"""Styles part of plots for dominant colatitudes."""
ax.set_ylim(180,0)
ax.set_yticks(np.linspace(0,180,5))
ax.set_yticklabels(colat_ticks_,size=s_tick)
if not _active:
ax.set_ylabel('Dominant Colatitude',color=cm.Blues(0.75),size=s_lab)
ax.tick_params('y',colors=cm.Blues(0.75))
ax.set_xlim(0,1)
ax.set_xticks(np.linspace(0,1,5))
ax.set_xlabel('Time (orbits)',size=s_lab)
def KChar_Evolve_Plot(self,char,which='pri',explode='none',gap=10,incD=85,oblD=0,solD=0,**kwargs):
"""Plots the kernel's characteristics over a full orbit.
.. image:: _static/kcharevo_example.png
:align: center
If you want to get the actual data instead, use
:func:`Kernel_WidthDomColat`.
Args:
char (str):
The characteristic to show. Can be
- 'wid' for longitudinal width,
- 'dom' for dominant colatitude,
- 'both'.
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Optional below.
explode (str):
The geometry param to vary, starting at zero. This shows you
many evolutions instead of one curve. Can be
- 'inc' for inclination,
- 'obl' for obliquity,
- 'sol' for solstice,
- 'none' to cancel (default).
gap (int or float):
When you choose to ``explode``, the exploded param's
spacing in degrees. Default is 10.
Optional:
incD, oblD, solD:
Custom set of params to use if ``which`` is '_c'.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
.. note::
Keywords are only used by the interactive function :func:`Sandbox_Reflection`.
Effect:
Stores this matplotlib figure as ``fig_kchar``, **overwriting**
the previous version. You can save the image later by
calling ``fig_kchar.savefig(...)``.
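Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.KChar_Evolve_Plot('both')  # width and dominant colatitude, primary params
planet.KChar_Evolve_Plot('dom',which='_c',explode='obl',gap=15,incD=60)  # a family of curves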
"""
## Default keywords
_active = kwargs.get('_active',False)
phasesD_I = kwargs.get('phasesD_I',[0])
ph_colors = kwargs.get('ph_colors',['k'])
times = np.linspace(0,1,361)
if which == 'pri':
here_incD,here_oblD,here_solD = self.incD,self.oblD,self.solD
elif which == 'alt':
here_incD,here_oblD,here_solD = self.incD_b,self.oblD_b,self.solD_b
elif which == '_c':
here_incD,here_oblD,here_solD = incD,oblD,solD
if _active:
ax1 = plt.subplot(236)
ax2 = ax1.twinx()
self._kcevo_stylewid(ax1,'medium','medium',_active)
self._kcevo_styledom(ax2,'medium','medium',_active)
tit = self._kcevo_loop(char,explode,gap,times,here_incD,here_oblD,here_solD,ax1,ax2,
_active,phasesD_I,ph_colors)
# 'datalim' continues to be the best option, others mess up the interactive module.
ax1.set(adjustable='datalim',aspect=1.0/ax1.get_data_ratio())
ax2.set(adjustable='datalim',aspect=1.0/ax2.get_data_ratio())
else:
plt.figure(figsize=(10,5))
ax1 = plt.subplot(111)
if char in ['wid','dom']:
if char == 'wid':
self._kcevo_stylewid(ax1,'large','x-large',_active)
else:
self._kcevo_styledom(ax1,'large','x-large',_active)
tit = self._kcevo_loop(char,explode,gap,times,here_incD,here_oblD,here_solD,ax1,0,
_active,phasesD_I,ph_colors)
elif char == 'both':
ax2 = ax1.twinx()
self._kcevo_stylewid(ax1,'large','x-large',_active)
self._kcevo_styledom(ax2,'large','x-large',_active)
tit = self._kcevo_loop(char,explode,gap,times,here_incD,here_oblD,here_solD,ax1,ax2,
_active,phasesD_I,ph_colors)
plt.title(tit,size='large')
plt.tight_layout()
self.fig_kchar = plt.gcf()
plt.show()
def Light_Curves(self,which='pri',albs=np.array([[1.0]]),
times=0,orbT=(24.0*360.0),ratRO=10.0,incD=85,oblD=0,solD=0,longzeroD=0):
"""Calculates light curves of your planet.
Gives you both the exoplanet's flux (the sum of [*AK*], where *A* is
the albedo map and *K* is the kernel) and its apparent brightness
(the flux divided by the sum of *K*) over time.
Args:
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Optional below.
Optional:
albs (2D array):
Custom albedo map to use if ``which`` is '_c'.
Its shape should be, or work with, (``n_clat``, ``n_long``).
Default is ``np.array( [ [ 1.0 ] ] )``.
times, orbT, ratRO, incD, oblD, solD, longzeroD:
Custom set of params to use if ``which`` is '_c'.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Returns:
flux_ak (array):
flux with shape (# of time steps).
appar_a (array):
apparent brightness with shape (# of time steps).
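Example:
A minimal sketch, assuming ``planet`` is an existing instance::
flux,appar = planet.Light_Curves()  # primary params over the instance ``times``
flux_b,appar_b = planet.Light_Curves('alt')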
"""
if which == 'pri':
here_albs = self.albedos
os_trigs = self.SubOS_TimeDeg()
elif which == 'alt':
here_albs = self.albedos_b
os_trigs = self.SubOS_TimeDeg(which)
elif which == '_c':
here_albs = albs
os_trigs = self.SubOS_TimeDeg(which,times,orbT,ratRO,incD,oblD,solD,longzeroD)
k2d = self.Kernel2D(os_trigs)
flux_ak = np.sum(here_albs[:,:-1]*k2d[:,:,:-1]*self.sin_clats[:,:-1],axis=(1,2))*self.delta_clat*self.delta_long
marg_k = np.sum(k2d[:,:,:-1]*self.sin_clats[:,:-1],axis=(1,2))*self.delta_clat*self.delta_long
appar_a = flux_ak/marg_k
return flux_ak,appar_a
def _lc_style(self,which,F_ak,A_app,show,diff,diff_only):
"""Styles plots of light curves."""
alph = lambda d: 0.25 if d else 1.0
if which == 'pri':
orbT,l_c,labf,laba,zo = self.orbT,(1,0,1,alph(diff)),'Flux',r'$A_{\mathrm{apparent}}$',2
elif which == 'alt':
orbT,l_c,labf,laba,zo = self.orbT_b,(0,1,1,alph(diff)),'Alt. Flux',r'Alt. $A_{\mathrm{apparent}}$',1
elif which == 'diff':
orbT,l_c,labf,laba,zo = self.orbT,'y',r'$\Delta$ Flux',r'$\Delta \ A_{\mathrm{apparent}}$',3
T = self.times/abs(orbT)
check = (not diff_only) or (diff_only and (which == 'diff'))
if (show in ['both','flux']) and check:
plt.plot(T,F_ak,c=l_c,label=labf,zorder=zo)
if (show in ['both','appar']) and check:
plt.plot(T,A_app,c=l_c,ls='--',label=laba,zorder=zo)
def LightCurve_Plot(self,alt=True,diff=False,diff_only=False,show='flux',**kwargs):
"""Plots light curves of your planet.
.. image:: _static/lcplot_example.png
:align: center
Uses the primary and alternate params to calculate the light curves.
If you want to get the actual data instead, use :func:`Light_Curves`.
Args:
alt (bool):
Include the alternate case. Default is True.
diff (bool):
Include the difference between the primary and alternate
light curves, if ``alt`` is True. Default is False.
diff_only (bool):
Plot **only** the difference light curve, if ``alt`` is
True. Default is False.
show (str):
Which light curves to calculate. Can be
- 'flux', the sum of [*AK*] where *A* is the albedo map
and *K* is the kernel (default),
- 'appar' for apparent brightness, or flux divided by
sum of the kernel,
- 'both'.
.. note::
Keywords are only used by the interactive function :func:`Sandbox_Reflection`.
Effect:
Stores this matplotlib figure as ``fig_light``, **overwriting**
the previous version. You can save the image later by
calling ``fig_light.savefig(...)``.
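Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.LightCurve_Plot(diff=True,show='both')  # difference curve needs matching orbital periods
planet.fig_light.savefig('light_curves.png')  # hypothetical filename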
"""
if kwargs.get('_active',False):
## Default keywords
times_I = kwargs.get('times_I',0)
orbT_I = kwargs.get('orbT_I',(24.0*360.0))
ratRO_I = kwargs.get('ratRO_I',10.0)
incD_I = kwargs.get('incD_I',90)
oblD_I = kwargs.get('oblD_I',0)
solD_I = kwargs.get('solD_I',0)
longzeroD_I = kwargs.get('longzeroD_I',0)
ph_color = kwargs.get('ph_color','k')
now_I = kwargs.get('now_I',0)
flux_ak,appar_a = self.Light_Curves('_c',self.albedos,times_I,orbT_I,ratRO_I,
incD_I,oblD_I,solD_I,longzeroD_I)
Ph = np.linspace(-2.5,2.5,times_I.size)
zo = 0
thick = lambda n: 2 if n == 0 else 1
if show == 'flux':
plt.plot(Ph,flux_ak,c=ph_color,lw=thick(now_I),zorder=zo)
elif show == 'appar':
plt.plot(Ph,appar_a,c=ph_color,ls='--',lw=thick(now_I),zorder=zo)
else:
plt.figure(figsize=(10,5))
flux_ak,appar_a = self.Light_Curves('pri')
self._lc_style('pri',flux_ak,appar_a,show,diff,diff_only)
if alt:
flux_ak_b,appar_a_b = self.Light_Curves('alt')
self._lc_style('alt',flux_ak_b,appar_a_b,show,diff,diff_only)
if diff or diff_only:
if self.orbT == self.orbT_b:
self._lc_style('diff',flux_ak-flux_ak_b,appar_a-appar_a_b,show,diff,diff_only)
else:
print('LightCurve_Plot warning: diffs plot only if primary and alternate orbital periods match.')
plt.axhline(0,c='0.67',ls=':',zorder=0)
plt.legend(loc='best',fontsize='large')
plt.ylabel('Value',size='x-large')
plt.xlabel('Time (orbits)',size='x-large')
plt.title('Light Curves of {}'.format(self.name),size='x-large')
plt.tight_layout()
self.fig_light = plt.gcf()
plt.show()
def _orth_project(self,phaseD,orbT,which,incD,oblD,solD,_active,ratRO,longzeroD):
"""Sets up an orthographic projection."""
time = orbT*(phaseD/360.0)
if _active:
os_trigs = self.SubOS_TimeDeg(which,time,orbT,ratRO,incD,oblD,solD,longzeroD)
else:
os_trigs = self.SubOS_TimeDeg(which,bypass_time=time)
k2d = self.Kernel2D(os_trigs)[0]
St_o,Ct_o,Sp_o,Cp_o = os_trigs[:4]
orth_Viz = Ct_o*self.cos_clats + St_o*self.sin_clats*(self.cos_longs*Cp_o + self.sin_longs*Sp_o)
orth_preX = self.sin_clats*(self.sin_longs*Cp_o - self.cos_longs*Sp_o)
orth_preY = St_o*self.cos_clats - Ct_o*self.sin_clats*(self.cos_longs*Cp_o + self.sin_longs*Sp_o)
inc,obl,sol = np.radians(incD),np.radians(oblD),np.radians(solD)
poleN_viz = np.cos(inc)*np.cos(obl) + np.sin(inc)*np.sin(obl)*np.cos(sol)
poleN_x = np.sin(obl)*np.sin(sol)
poleN_y = np.sin(inc)*np.cos(obl) - np.cos(inc)*np.sin(obl)*np.cos(sol)
ang_from_y = np.arctan2(poleN_x,poleN_y)
orth_X,orth_Y = _rotate_ccw_angle(orth_preX,orth_preY,-ang_from_y)
return k2d,orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y
def _orth_style(self,row,sub,s,which,image,v_l,v_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,name):
"""Styles plots for orthographic projections."""
plt.subplot(row,sub,s)
if which == 'kern':
m_c = cm.gray
elif image.min() < 0:
m_c = darkmid_BrBG_
elif image.max() > 1.0:
m_c = cm.magma
else:
m_c = cm.bone
ma_image = np.ma.masked_array(image,mask=orth_Viz<0)
cnt_plot = plt.contourf(orth_X,orth_Y,ma_image,65,cmap=m_c,vmin=v_l,vmax=v_h)
for c in cnt_plot.collections:
c.set_edgecolor('face')
if round(poleN_viz,3) >= 0:
plt.scatter(poleN_x,poleN_y,s=100,color=(0,1,0),edgecolor='k',marker='o')
if round(poleN_viz,3) <= 0:
plt.scatter(-poleN_x,-poleN_y,s=70,color=(0,1,0),edgecolor='k',marker='D')
plt.xlim([-1.05,1.05])
plt.ylim([-1.05,1.05])
if name != 'NONE':
plt.title(name,size='large',x=0.1,y=1.0,va='top',ha='left')
plt.gca().set_aspect(1.0)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axis('off')
return s+1
def Orthographic_Viewer(self,phaseD,show='real',alt=False,same_scale=True,force_bright=True,**kwargs):
"""Draws your planet's map and kernel in orthographic projection.
.. image:: _static/orthview_example.png
:align: center
Shows everything from the observer's point of view (with one
exception), based on the primary and alternate params you are using.
The North and South poles are drawn as a green circle and diamond,
respectively.
Args:
phaseD (int or float):
Orbital phase of the planet in degrees. Standard range
is [0, 360).
show (str):
Which data to draw. Can be
- 'amap' for the albedo map,
- 'kern' for the kernel,
- 'both' for the map and kernel separate,
- 'real' to multiply the map and kernel (default),
- 'sphere' for the whole globe: the visible and opposite
hemispheres with no kernel.
alt (bool):
Include the alternate albedo map. Default is False.
same_scale (bool):
If the primary and alternate maps have the same color scheme
(and ``alt`` is True), then show both with the same color
scale. Default is True.
force_bright (bool):
Use the full color scale to draw the kernel. Also rescales
the kernel values into [0, 1.0] when ``show`` is 'real'.
The false brightness can make dark drawings (like crescent
phases) easier to see. Default is True.
.. note::
Keywords are only used by the interactive function :func:`Sandbox_Reflection`.
Effect:
Stores this matplotlib figure as ``fig_orth``, **overwriting**
the previous version. You can save the image later by
calling ``fig_orth.savefig(...)``.
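Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.Orthographic_Viewer(180)  # default 'real' view: kernel times map
planet.Orthographic_Viewer(90,show='sphere',alt=True)  # near and far hemispheres of both maps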
"""
if kwargs.get('_active',False):
## Default keywords
orbT_I = kwargs.get('orbT_I',(24.0*360.0))
ratRO_I = kwargs.get('ratRO_I',10.0)
incD_I = kwargs.get('incD_I',90)
oblD_I = kwargs.get('oblD_I',0)
solD_I = kwargs.get('solD_I',0)
longzeroD_I = kwargs.get('longzeroD_I',0)
row,col,s = 2,3,1 # Start on subplot(231)
vm_l,vm_h,va_l,va_h = self._double_amap_colorbounds(alt,same_scale)
(k2d,orth_Viz,orth_X,orth_Y,
poleN_viz,poleN_x,poleN_y) = self._orth_project(phaseD,orbT_I,'_c',incD_I,oblD_I,solD_I,
True,ratRO_I,longzeroD_I)
s = self._orth_style(row,col,s,'amap',self.albedos,vm_l,vm_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,'NONE')
plt.text(-0.7,1.04,'Visible Map',color='k',size='medium',ha='center',va='center')
s += 1 # Now on subplot(233)
up = lambda fb: k2d.max() if fb else 1.0/pi
s = self._orth_style(row,col,s,'kern',k2d,0,up(force_bright),
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,'NONE')
plt.text(-0.7,1.04,'Kernel',color='k',size='medium',ha='center',va='center')
else:
row = 1
wid = 5
if show in ['both','sphere']:
wid += 5
if alt:
if show in ['amap','both','real']:
wid += 5
if show == 'sphere':
wid += 10
sub,s = wid//5,1
plt.figure(figsize=(wid,5))
vm_l,vm_h,va_l,va_h = self._double_amap_colorbounds(alt,same_scale)
orbT,incD,oblD,solD = self.orbT,self.incD,self.oblD,self.solD
(k2d,orth_Viz,orth_X,orth_Y,
poleN_viz,poleN_x,poleN_y) = self._orth_project(phaseD,orbT,'pri',incD,oblD,solD,False,0,0)
if show in ['kern','both']:
up = lambda fb: k2d.max() if fb else 1.0/pi
s = self._orth_style(row,sub,s,'kern',k2d,0,up(force_bright),
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,'Kernel')
if show in ['amap','both','sphere']:
s = self._orth_style(row,sub,s,'amap',self.albedos,vm_l,vm_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,'Visible Map')
if show == 'real':
normk = lambda fb: 1.0/k2d.max() if fb else pi
s = self._orth_style(row,sub,s,'real',normk(force_bright)*k2d*self.albedos,vm_l,vm_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,r'Kernel $\times$ Map')
if show == 'sphere':
s = self._orth_style(row,sub,s,'amap',self.albedos,vm_l,vm_h,
-orth_Viz,-orth_X,orth_Y,-poleN_viz,-poleN_x,poleN_y,'Far Side of Map')
if alt:
orbT,incD,oblD,solD = self.orbT_b,self.incD_b,self.oblD_b,self.solD_b
(k2d,orth_Viz,orth_X,orth_Y,
poleN_viz,poleN_x,poleN_y) = self._orth_project(phaseD,orbT,'alt',incD,oblD,solD,False,0,0)
if show in ['amap','both','sphere']:
s = self._orth_style(row,sub,s,'amap',self.albedos_b,vm_l,vm_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,'Visible Alt. Map')
if show == 'real':
s = self._orth_style(row,sub,s,'real',normk(force_bright)*k2d*self.albedos_b,vm_l,vm_h,
orth_Viz,orth_X,orth_Y,poleN_viz,poleN_x,poleN_y,r'Kernel $\times$ Alt. Map')
if show == 'sphere':
s = self._orth_style(row,sub,s,'amap',self.albedos_b,vm_l,vm_h,
-orth_Viz,-orth_X,orth_Y,-poleN_viz,-poleN_x,poleN_y,'Far Side of Alt. Map')
plt.gcf().suptitle(r'%s at $%.2f^{\circ}$ phase' % (self.name,phaseD),y=0,fontsize='x-large',
verticalalignment='bottom')
plt.tight_layout()
self.fig_orth = plt.gcf()
plt.show()
def _spinax_prob_orignal(self,kchar,k_mu,k_sig,incs,i_mu,i_sig,phases,p_mu,p_sig,obls,
yes_p2,phases2,p2_mu,p2_sig):
"""Calculates a 2D PDF of spin axis constraints."""
full_chi = ((kchar-k_mu)/k_sig)**2.0 + ((incs-i_mu)/i_sig)**2.0 + ((phases-p_mu)/p_sig)**2.0
if yes_p2:
full_chi += ((phases2-p2_mu)/p2_sig)**2.0
prob_like = np.exp(-0.5*full_chi)
n_p,n_i,n_s,n_o = incs.shape
dp,di,ds,do = 2.0*pi/n_p,0.5*pi/(n_i-1),2.0*pi/(n_s-1),0.5*pi/(n_o-1)
if not yes_p2:
norm = (np.sum(prob_like[:,:,:-1,:]*np.sin(incs[:,:,:-1,:])*
np.sin(obls[np.newaxis,np.newaxis,:-1,:]))*dp*di*ds*do)
prob2d = (1.0/norm)*np.sum(prob_like*np.sin(incs),axis=(0,1))*dp*di
else:
norm = (np.sum(prob_like[:,:,:,:-1,:]*np.sin(incs[np.newaxis,:,:,:-1,:])*
np.sin(obls[np.newaxis,np.newaxis,np.newaxis,:-1,:]))*dp*dp*di*ds*do)
prob2d = (1.0/norm)*np.sum(prob_like*np.sin(incs),axis=(0,1,2))*dp*dp*di
return prob2d
def _spinax_prob_redo(self,prob2d,orig_sols,orig_obls,new_sols,new_obls):
"""Re-calculates a 2D PDF of spin axis constraints."""
rbs_probfun = RectBivariateSpline(orig_sols[:,0],orig_obls[0,:],prob2d)
new_prob_like = rbs_probfun(new_sols,new_obls,grid=False)
new_ns,new_no = new_sols.shape
new_ds,new_do = 2.0*pi/(new_ns-1),0.5*pi/(new_no-1)
norm = np.sum(new_prob_like[:-1,:]*np.sin(new_obls[:-1,:]))*new_ds*new_do
new_prob2d = new_prob_like/norm
return new_prob2d
def _spinax_leveling(self,prob2d,sigma_probs,res,obls):
"""Calculates n-sigma contour levels for a 2D PDF."""
hi_prob2d = prob2d.max()
cut_levels = np.linspace(0,hi_prob2d,res)
good_to_sum = (prob2d[np.newaxis,:,:] >= cut_levels[:,np.newaxis,np.newaxis])
good_to_sum[:,-1,:] = False
n_s,n_o = obls.shape
ds,do = 2.0*pi/(n_s-1),0.5*pi/(n_o-1)
total_probs = np.sum(prob2d[np.newaxis,:,:]*good_to_sum*np.sin(obls[np.newaxis,:,:]),axis=(1,2))*ds*do
args_sigma = np.argmin(np.absolute(total_probs[np.newaxis,:] - sigma_probs[:,np.newaxis]),axis=1)
levels_sigma = args_sigma*hi_prob2d/(res-1)
bad_lev_up = ((levels_sigma[1:]-levels_sigma[:-1]) == 0)
levels_sigma[1:] += bad_lev_up*np.array([1.0e-12,1.1e-12,1.2e-12,1.3e-12])
return levels_sigma
def _kchar_grider(self,kind,phase_rat,incD):
"""Calculates an array of kernel characteristics."""
solobl_wgrid,solobl_dgrid = np.zeros((73,19)),np.zeros((73,19))
for s in np.arange(73):
for o in np.arange(19):
solobl_wgrid[s,o],solobl_dgrid[s,o] = self.Kernel_WidthDomColat('_c',False,phase_rat,1,1,incD,o*5,s*5,0)
return solobl_wgrid,solobl_dgrid
def _spinax_style(self,w,h,s,m_c,kind,ax_combo,sols,obls,prob2d,levs,constraint,orig_sols,orig_obls,
kchar,k_mu,now_phaseD,solR,oblR,mark,_active,j,entries):
"""Styles plots for spin axis constraints."""
if kind == 'combo':
axs = ax_combo
else:
axs = plt.subplot(h,w,s,projection='polar')
axs.set_theta_zero_location('S')
axs.set_rlabel_position(45)
c_regs = ('1.0',m_c(0.25),m_c(0.5),m_c(0.75))
if kind == 'info':
axs.text(np.radians(0),np.radians(0),'Predicted\nSpin Axis\nConstraints',color='k',
size='x-large',ha='center',va='center',weight='bold')
axs.text(np.radians(225),np.radians(110),'Orbital\nPhase(s)',color='0.5',
size='x-large',ha='center',va='center')
axs.text(np.radians(210),np.radians(60),'Radial\ndirection:\nobliquity',color='k',
size='x-large',ha='center',va='center')
axs.text(np.radians(150),np.radians(60),'Compass\ndirection:\nsolstice',color='k',
size='x-large',ha='center',va='center')
if constraint in ['real','both']:
axs.text(np.radians(270),np.radians(60),'Red\nregions:\nrotational\ninfo',color=cm.Reds(0.75),
size='x-large',ha='center',va='center')
axs.text(np.radians(90),np.radians(60),'Blue\nregions:\norbital\ninfo',color=cm.Blues(0.75),
size='x-large',ha='center',va='center')
axs.text(np.radians(330),np.radians(60),'Dashed\ncontour:\nno uncertainty',color=(0,0.3,0),
size='x-large',ha='center',va='center')
else:
axs.text(np.radians(270),np.radians(60),'Red\ncontours:\nrotational\ninfo',color=cm.Reds(0.75),
size='x-large',ha='center',va='center')
axs.text(np.radians(90),np.radians(60),'Blue\ncontours:\norbital\ninfo',color=cm.Blues(0.75),
size='x-large',ha='center',va='center')
axs.text(np.radians(330),np.radians(60),'Each\ncontour:\nno uncertainty',color=(0,0.3,0),
size='x-large',ha='center',va='center')
axs.text(np.radians(30),np.radians(60),'Green\nmarker:\ntrue axis',color=(0,0.75,0),
size='x-large',ha='center',va='center')
axs.text(np.radians(180),np.radians(100),'{}'.format(self.name),color='k',
size='x-large',ha='center',va='center')
axs.axes.spines['polar'].set_alpha(0.1)
axs.grid(alpha=0.1)
plt.xticks(alpha=0.1) ## Easy and seems to work
plt.yticks(alpha=0.1) ##
else:
if constraint in ['real','both']:
axs.contourf(sols,obls,prob2d,levels=levs,colors=c_regs)
axs.contour(sols,obls,prob2d,levels=levs,colors='0.5')
if kind == 'single':
if constraint == 'perf':
this_color = m_c(0.33+0.67*(j/entries))
axs.contour(orig_sols,orig_obls,kchar,levels=[k_mu],colors=[this_color],
linewidths=3,linestyles='solid')
elif constraint == 'both':
axs.contour(orig_sols,orig_obls,kchar,levels=[k_mu],colors=[(0,0.3,0)],
linewidths=3,linestyles='dashed')
if m_c == cm.Reds:
axs.text(np.radians(225),np.radians(110),r'$%.0f^{\circ}$' % now_phaseD,color='0.5',
size='x-large',ha='center',va='center')
else:
axs.text(np.radians(225),np.radians(110),
r'$%.0f^{\circ}$' '\n' r'$%.0f^{\circ}$' % (now_phaseD[0],now_phaseD[1]),color='0.5',
size='x-large',ha='center',va='center')
else:
if not _active:
axs.text(np.radians(225),np.radians(110),'Combined',color='0.5',
size='x-large',ha='center',va='center')
axs.scatter(solR,oblR,s=100,color=(0,1,0),edgecolor='k',marker=mark,zorder=2)
axs.set_thetalim([0,2.0*pi])
axs.set_rlim([0,pi/2.0])
ts = lambda a: 'medium' if a else 'large'
axs.set_thetagrids(np.linspace(0,315,8),sol_ticks_,size=ts(_active)) # Match numbers to sol_ticks to avoid error.
axs.set_rgrids(np.linspace(0,pi/2.0,4),obl_ticks_,size=ts(_active))
return s+1
def SpinAxis_Constraints(self,phaseD_list,which='pri',constraint='both',info=True,combine=True,
combine_only=False,keep_probdata=False,res=500,n_sol=361,n_obl=91,
phaseD_sig=10.0,incD_sig=10.0,kwid_sig=10.0,kddc_sig=20.0,**kwargs):
"""Plots how observations may constrain your planet's spin axis.
.. image:: _static/spinaxis_example.png
:align: center
These predictions use the kernel characteristics and assume your
planet's light curves **at single orbital phases** are invertible
(see note below). Discussed in Section 4 of S16.
Say you are fitting a planet's albedo map. We know the kernel depends
on the planet's spin axis (its obliquity and solstice). Invert a
light curve from one orbital phase and you will also fit some
East-West structure of the kernel, like the longitudinal width.
Or invert from two different phases and you fit some North-South
structure, like the **change in** dominant colatitude. So, kernel
characteristics help us estimate constraints on the spin axis without
doing inversions from real data.
Learn more about the kernel and its characteristics with
:func:`Kernel_WidthDomColat`, :func:`Kernels_Plot`,
and :func:`KChar_Evolve_Plot`.
.. note::
Inverting a light curve will depend on the quality of the
observational data. The planet's albedo map matters too:
East-West markings to sense daily brightness changes,
North-South markings to sense longer changes.
We have pre-calculated characteristics stored in numpy binary
files (the obvious two with names ending "values_all5deg.npy").
So, this method rounds inclination, obliquity, and solstice
to the nearest 5 degrees. It also tracks the North (green
circle) or South pole (green diamond) when obliquity is less
than or greater than 90 degrees, respectively.
Args:
phaseD_list (list):
Orbital phases of the planet in degrees. Standard range
is [0, 360). Phases are integers or floats, and list
elements can be
- *phase* for a longitudinal width,
- *[phase, phase]* for a change in dominant colatitude.
which (str):
The param set to use. Can be
- 'pri' for primary (default),
- 'alt' for alternate,
- '_c' for custom, see Note below.
constraint (str):
The type of prediction. Can be
- 'perf' for perfect constraints with no data
uncertainties,
- 'real' to use uncertainties and show {1,2,3}--sigma
regions,
- 'both' (default).
info (bool):
Include a legend subplot. Default is True.
combine (bool):
Join all constraints in a separate subplot. Default is True.
combine_only (bool):
Show **only** the combo constraint. Default is False.
keep_probdata (bool):
Output all probability data, see Returns below. Default
is False.
Optional:
res (int):
Resolution when ``constraint`` is 'real', the number of
probability contours to test. Default is 500.
n_sol (int):
Number of solstice grid points. Default is 361.
n_obl (int):
Number of obliquity grid points. Default is 91.
Very Optional:
**You should probably check out Section 4.1 of S16 before
you change any of these.**
phaseD_sig (float):
Uncertainty on orbital phase, in degrees. Default is 10.0.
incD_sig (float):
Uncertainty on inclination, in degrees. Default is 10.0.
kwid_sig (float):
Uncertainty on longitudinal width, in degrees. Default
is 10.0.
kddc_sig (float):
Uncertainty on change in dominant colatitude, in degrees.
Default is 20.0.
.. note::
Keywords are used by the interactive function :func:`Sandbox_Reflection`.
But if ``which`` is '_c', then enter your custom
params as ``incD_I``, ``solD_I`` and ``oblD_I``.
Standard definitions and formats apply.
See the :class:`class and constructor <DirectImaging_Planet>`
docstrings.
Effect:
Stores this matplotlib figure as ``fig_spin``, **overwriting**
the previous version. You can save the image later by
calling ``fig_spin.savefig(...)``.
Returns:
A list (user_file) if ``keep_probdata`` is True and ``constraint``
is **not** 'perf'.
- First entry is [incD, oblD, solD].
- Other entries are [*id*, 2D PDF, {1,2,3}--sigma
probability levels], where *id* is either a phaseD_list
element or 'Combined'.
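Example:
A minimal sketch, assuming ``planet`` is an existing instance::
planet.SpinAxis_Constraints([90,[0,180]])  # one width constraint and one change-in-colatitude constraint
data = planet.SpinAxis_Constraints([45,135],constraint='real',keep_probdata=True)  # keeps the PDF data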
"""
## Default keywords
_active = kwargs.get('_active',False)
incD_I = kwargs.get('incD_I',85)
solD_I = kwargs.get('solD_I',0)
oblD_I = kwargs.get('oblD_I',0)
made_combo_flag = False
entries = len(phaseD_list)
if _active:
w,h,sub,s = 3,2,5,5
elif combine_only:
w,h,sub,s = 1,1,1,1
else:
ex = lambda x: 1 if x else 0
sub = entries + ex(info) + ex(combine)
w,h,s = min(sub,3),1+((sub-1)//3),1
p = 0
if which == 'pri':
incD,solD,oblD = self.incD,self.solD,self.oblD
elif which == 'alt':
incD,solD,oblD = self.incD_b,self.solD_b,self.oblD_b
elif which == '_c':
incD,solD,oblD = incD_I,solD_I,oblD_I
mark = 'o'
if oblD > 90.0:
solD,oblD,mark = (solD % 360.0) + 180.0,180.0 - oblD,'D'
i_i,i_s,i_o = round(incD/5),round((solD%360)/5),round(oblD/5)
if keep_probdata:
user_file = [[5*i_i,5*i_o,5*i_s]]
incR,solR,oblR = np.radians(i_i*5),np.radians(i_s*5),np.radians(i_o*5)
incR_sig = np.radians(incD_sig)
combo_prob2d = np.ones(obl_2mesh_.shape)
sigma_probs = np.array([1,0.9973,0.9545,0.6827,0])
new_sols,new_obls = np.meshgrid(np.linspace(0,2.0*pi,n_sol),np.linspace(0,pi/2.0,n_obl),indexing='ij')
if not _active:
plt.figure(figsize=(5*w,5*h))
if info and not combine_only:
s = self._spinax_style(w,h,s,cm.gray,'info','0',new_sols,new_obls,0,0,constraint,
sol_2mesh_,obl_2mesh_,0,0,0,solR,oblR,0,_active,0,entries)
for j in np.arange(entries):
now_phaseD = phaseD_list[j]
if isinstance(now_phaseD,(int,float)):
p += 1
m_c = cm.Reds
i_p = round((now_phaseD%360)/5)
sav_phaseD = 5*i_p
phaseR = np.radians(i_p*5)
phaseR_sig = np.radians(phaseD_sig)
wid_mu = kernel_widths_[i_p,i_i,i_s,i_o]
kchar,k_mu = kernel_widths_[i_p,i_i,:,:],wid_mu
wid_sig = np.radians(kwid_sig)
if constraint in ['real','both']:
prob2d = self._spinax_prob_orignal(kernel_widths_,wid_mu,wid_sig,inc_4mesh_,incR,incR_sig,
phase_4mesh_,phaseR,phaseR_sig,obl_2mesh_,
False,'no','no','no')
else:
prob2d = 1
else:
p += 2
m_c = cm.Blues
i_p,i_p2 = round((now_phaseD[0]%360)/5),round((now_phaseD[1]%360)/5)
sav_phaseD = [5*i_p,5*i_p2]
phaseR,phaseR2 = np.radians(i_p*5),np.radians(i_p2*5)
phaseR_sig = np.radians(phaseD_sig)
dom1,dom2 = kernel_domcolats_[i_p,i_i,i_s,i_o],kernel_domcolats_[i_p2,i_i,i_s,i_o]
ddc_mu = abs(dom1-dom2)
kchar,k_mu = np.absolute(kernel_domcolats_[i_p,i_i,:,:]-kernel_domcolats_[i_p2,i_i,:,:]),ddc_mu
ddc_sig = np.radians(kddc_sig)
if constraint in ['real','both']:
prob2d = self._spinax_prob_orignal(kernel_delta_domcolats_,ddc_mu,ddc_sig,inc_4mesh_,incR,incR_sig,
phase_4mesh_,phaseR,phaseR_sig,obl_2mesh_,
True,shifted_phase_4mesh_,phaseR2,phaseR_sig)
else:
prob2d = 1
if combine or combine_only:
combo_prob2d *= prob2d
if made_combo_flag == False:
axC = plt.subplot(h,w,sub,projection='polar')
made_combo_flag = True
if constraint in ['perf','both']:
if constraint == 'perf':
this_color = m_c(0.33+0.67*(j/entries))
axC.contour(sol_2mesh_,obl_2mesh_,kchar,levels=[k_mu],colors=[this_color],
linewidths=3,linestyles='solid')
else:
axC.contour(sol_2mesh_,obl_2mesh_,kchar,levels=[k_mu],colors=[(0,0.3,0)],alpha=0.2,
linewidths=3,linestyles='dashed')
if constraint in ['real','both']:
new_prob2d = self._spinax_prob_redo(prob2d,sol_2mesh_,obl_2mesh_,new_sols,new_obls)
else:
new_prob2d = 1
if not combine_only:
if constraint in ['real','both']:
levels_sigma = self._spinax_leveling(new_prob2d,sigma_probs,res,new_obls)
if keep_probdata:
user_file.append([sav_phaseD,np.copy(new_prob2d),np.copy(levels_sigma)])
else:
levels_sigma = 1
s = self._spinax_style(w,h,s,m_c,'single','0',new_sols,new_obls,new_prob2d,levels_sigma,constraint,
sol_2mesh_,obl_2mesh_,kchar,k_mu,sav_phaseD,solR,oblR,mark,_active,j,entries)
if combine or combine_only:
if constraint in ['real','both']:
new_combo_prob2d = self._spinax_prob_redo(combo_prob2d,sol_2mesh_,obl_2mesh_,new_sols,new_obls)
levels_sigma = self._spinax_leveling(new_combo_prob2d,sigma_probs.T,res,new_obls)
if keep_probdata:
user_file.append(['Combined',np.copy(new_combo_prob2d),np.copy(levels_sigma)])
else:
new_combo_prob2d,levels_sigma = 1,1
m_c_here = lambda x: cm.Reds if x == entries else (cm.Blues if x == 2*entries else cm.Purples)
s = self._spinax_style(w,h,s,m_c_here(p),'combo',axC,new_sols,new_obls,new_combo_prob2d,levels_sigma,
constraint,sol_2mesh_,obl_2mesh_,kchar,k_mu,0,solR,oblR,mark,_active,0,entries)
if not _active:
plt.tight_layout()
self.fig_spin = plt.gcf()
plt.show()
if keep_probdata:
return user_file
def _savebutton_click(self):
"""Directs a button to save orbital phases."""
if self._pslot_act.value == 'all':
self._pword_act.value = '<center><font color="red">Only save to one slot at a time</font></center>'
else:
word_start = '<center><font color="limegreen">Saved current phase to '
wording = word_start+self._pslot_act.value+' slot</font></center>'
if self._pslot_act.value == 'light':
self._xph_lig = self._orb_act.value
elif self._pslot_act.value == 'medium':
self._xph_med = self._orb_act.value
elif self._pslot_act.value == 'dark':
self._xph_drk = self._orb_act.value
self._pword_act.value = wording
def _clearbutton_click(self):
"""Directs a button to clear orbital phases."""
word_start = '<center><font color="orange">Cleared phase from '+self._pslot_act.value
if self._pslot_act.value == 'all':
self._xph_lig = 'no'
self._xph_med = 'no'
self._xph_drk = 'no'
word_end = ' slots</font></center>'
else:
word_end = ' slot</font></center>'
if self._pslot_act.value == 'light':
self._xph_lig = 'no'
elif self._pslot_act.value == 'medium':
self._xph_med = 'no'
elif self._pslot_act.value == 'dark':
self._xph_drk = 'no'
self._pword_act.value = word_start+word_end
def _check_for_actspin(self,phases,switch):
"""Organizes orbital phases for spin axis constraints."""
new_ph = []
if switch == 'wid':
for p in phases:
if isinstance(p,(int,float)):
new_ph.append(p)
elif switch == 'dom':
c,n = 0,1
for p in phases[1:]:
if isinstance(p,(int,float)):
new_ph.append([phases[c],p])
c = n
n += 1
elif switch == 'both':
c,n = 0,1
lph = len(phases)
for p in phases:
if isinstance(p,(int,float)):
new_ph.append(p)
if (n != lph) and isinstance(phases[n],(int,float)):
new_ph.append([phases[c],phases[n]])
c = n
n += 1
return new_ph
def _actmodule_heart(self,phaseD_I,incD_I,oblD_I,solD_I,ratRO_I,res_I,longzeroD_I,lc_swit,spinax_swit):
"""Sets up and combines several plots about your exoplanet."""
self._pword_act.value = '<center><font color="blue">Ready to save/clear orbital phases</font></center>'
phasesD_single = [phaseD_I,self._xph_lig,self._xph_med,self._xph_drk]
phasesD_forspin = self._check_for_actspin(phasesD_single,spinax_swit)
ph_colors = [(1,0,1),cm.gray(0.6),cm.gray(0.3),cm.gray(0)]
orbT_I = 24.0*360.0
see_spins = abs(ratRO_I)/72.0
num_rel = max(res_I*round(see_spins),self.n_long)
rel_tphase = np.linspace(-2.5,2.5,num_rel)
plt.figure(figsize=(14,9.3))
plt.subplot(232)
self.Geometry_Diagram(which='N/A',_active=True,
incD=incD_I,oblD=oblD_I,solD=solD_I,ratRO=ratRO_I,
phaseD=phasesD_single,ph_colors=ph_colors)
### subplot(231) and subplot(233)
self.Orthographic_Viewer(phaseD_I,show='both',_active=True,
orbT_I=orbT_I,ratRO_I=ratRO_I,
incD_I=incD_I,oblD_I=oblD_I,solD_I=solD_I,
longzeroD_I=longzeroD_I)
plt.subplot(234)
n = 0
for p in phasesD_single:
if isinstance(p,(int,float)):
times_I = orbT_I*((p + rel_tphase)/360.0)
self.LightCurve_Plot(alt=False,show=lc_swit,_active=True,
times_I=times_I,orbT_I=orbT_I,ratRO_I=ratRO_I,
incD_I=incD_I,oblD_I=oblD_I,solD_I=solD_I,
longzeroD_I=longzeroD_I,ph_color=ph_colors[n],now_I=n)
n += 1
n = 0
plt.xlim([-2.5,2.5])
plt.xticks(np.linspace(-2,2,5),relph_ticks_,size='medium')
plt.xlabel('Relative Orbital Phase',size='medium')
plt.yticks(size='medium')
ylab = lambda lc: 'Flux' if lc == 'flux' else ('Apparent Brightness' if lc == 'appar' else '')
plt.ylabel(ylab(lc_swit),size='medium')
plt.gca().set_aspect(1.0/plt.gca().get_data_ratio())
plt.text(0.25,1.01,'Light Curve',color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
plt.text(0.75,1.01,'Rotations: {:.2f}'.format(see_spins),color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
### subplot(236)
self.KChar_Evolve_Plot('both',which='_c',incD=incD_I,oblD=oblD_I,solD=solD_I,
_active=True,phasesD_I=phasesD_single,ph_colors=ph_colors)
plt.text(0.5,1.01,'Kernel Characteristics',color='k',size='medium',ha='center',va='bottom',
transform=plt.gca().transAxes)
### subplot(235,'polar')
if len(phasesD_forspin) == 0:
plt.subplot(235,projection='polar')
plt.gca().set_theta_zero_location('S')
plt.gca().set_rlabel_position(45)
plt.xticks(np.linspace(0,1.75*pi,8),sol_ticks_,size='medium',alpha=0.1) # Match numbers to sol_ticks to avoid error.
plt.yticks(np.linspace(0,pi/2.0,4),obl_ticks_,size='medium',alpha=0.1)
plt.gca().axes.spines['polar'].set_alpha(0.1)
plt.gca().grid(alpha=0.1)
bads = ('SPIN AXIS\nCONSTRAINT WARNING:\n\nYOU NEED\nAT LEAST 2 PHASES TO'
'\nCALCULATE CHANGES IN\nDOMINANT COLATITUDE')
plt.text(np.radians(0),np.radians(0),bads,color=(1.0,0.5,0),size='x-large',
ha='center',va='center',weight='bold')
else:
self.SpinAxis_Constraints(phasesD_forspin,which='_c',constraint='perf',
info=False,combine=False,combine_only=True,_active=True,
incD_I=incD_I,solD_I=solD_I,oblD_I=oblD_I)
plt.text(np.radians(225),np.radians(112),'Spin Axis\nConstraints',color='k',size='medium',
ha='center',va='center')
plt.tight_layout()
self.fig_sand = plt.gcf()
plt.show()
def _reset_actmodule(self):
"""Resets attributes for the interactive module."""
self._xph_lig = 'no'
self._xph_med = 'no'
self._xph_drk = 'no'
self._orb_act.close()
self._inc_act.close()
self._obl_act.close()
self._sol_act.close()
self._ratRO_act.close()
self._res_act.close()
self._zlong_act.close()
self._ligcur_act.close()
self._spax_act.close()
self._pslot_act.close()
self._pword_act.close()
self._psav_act.close()
self._pclr_act.close()
self._title_act.close()
self._orb_act.open()
self._inc_act.open()
self._obl_act.open()
self._sol_act.open()
self._ratRO_act.open()
self._res_act.open()
self._zlong_act.open()
self._ligcur_act.open()
self._spax_act.open()
self._pslot_act.open()
self._pword_act.open()
self._psav_act.open()
self._pclr_act.open()
self._title_act.open()
self._orb_act.value = 0
self._inc_act.value = 85
self._obl_act.value = 0
self._sol_act.value = 0
self._ratRO_act.value = 72
self._res_act.value = 101
self._zlong_act.value = 0
self._ligcur_act.value = 'flux'
self._spax_act.value = 'wid'
self._pslot_act.value = 'light'
first_pword = '<center><font color="blue">Ready to save/clear orbital phases</font></center>'
self._pword_act.value = first_pword
def Sandbox_Reflection(self):
"""Creates an interactive module about your directly imaged planet.
.. image:: _static/sandref_example.png
:align: center
This module lets you explore how a planet's geometry, motion,
kernel, and light curves are related. You can also see predicted
constraints on the planet's spin axis (using the kernel and perfect
data).
.. note::
The larger your ``n_clat`` and ``n_long``, the longer this
module takes to update (e.g. seconds with default values).
The sandbox combines several methods from the class
:class:`DirectImaging_Planet` into one compact display. See each
for details:
- :func:`Geometry_Diagram`
- :func:`Orthographic_Viewer`
- :func:`Light_Curves`
- :func:`KChar_Evolve_Plot`
- :func:`SpinAxis_Constraints`
The planet and light curves are rendered using your primary albedo
map. You have a main orbital phase (magenta) to view and can save
up to 3 extra phases (light, medium, dark) to compare. Each phase
has a color-coded marker on the geometry diagram and kernel
characteristics plot, plus its own light curve.
There are many controls (all angles in degrees):
- Inclination
- Obliquity
- Solstice
- Orbital Phase
- [which] Extra Phase Slot
- Save [extra phase]
- Clear [extra phase(s)]
- Spins per Orbit
- Time Steps per Spin
- Initial Longitude [at zero phase]
- [type of] Light Curve
- [type of] Axis Constraint
.. note::
For the subplot of spin axis constraints, curves are
colored lightest to darkest in the phase order [main, light,
medium, dark]. Red curves are for single phases, blue curves
            are for pairs of phases.
Effect:
Stores this matplotlib figure as ``fig_sand`` **whenever you
interact with the module**. You can save the image later by
calling ``fig_sand.savefig(...)``.
"""
self._reset_actmodule()
ios_col = widgets.Box([self._inc_act,self._obl_act,self._sol_act],
layout=Layout(flex_flow='column',width='45%'))
rrz_col = widgets.Box([self._ratRO_act,self._res_act,self._zlong_act],
layout=Layout(flex_flow='column',width='30%'))
tilisp_col = widgets.Box([self._title_act,self._ligcur_act,self._spax_act],
layout=Layout(flex_flow='column',align_self='center',width='25%'))
top_row = widgets.Box([tilisp_col,ios_col,rrz_col])
savclr_col = widgets.Box([self._psav_act,self._pclr_act],
layout=Layout(flex_flow='column',width='25%'))
info_col = widgets.Box([self._pword_act,self._pslot_act],
layout=Layout(flex_flow='column',width='25%'))
bot_row = widgets.Box([self._orb_act,info_col,savclr_col])
the_connections = {'phaseD_I':self._orb_act,'incD_I':self._inc_act,'oblD_I':self._obl_act,
'solD_I':self._sol_act,'ratRO_I':self._ratRO_act,'res_I':self._res_act,
'longzeroD_I':self._zlong_act,'lc_swit':self._ligcur_act,'spinax_swit':self._spax_act}
inter_out = widgets.interactive_output(self._actmodule_heart,the_connections)
IPy_display(widgets.Box([top_row,bot_row,inter_out],layout=Layout(flex_flow='column')))
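# A minimal usage sketch (illustrative only, not part of the class): assumes an
# already-constructed DirectImaging_Planet instance named `planet` and a Jupyter
# session, since the ipywidgets controls only render in a notebook.
#
#   planet.Sandbox_Reflection()                  # open the interactive module
#   # ...interact with the sliders and buttons...
#   planet.fig_sand.savefig('sandbox_view.png')  # fig_sand caches the latest figure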
``` |
{
"source": "joelcolucci/flask-responsefactory",
"score": 3
} |
#### File: flask-responsefactory/tests/test_app.py
```python
import json
import unittest
from mock_app import app
class AppTestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def tearDown(self):
pass
def test_status(self):
"""Test response contains expected status code"""
response = self.app.get('/status')
status_code = response.status_code
self.assertEqual(status_code, 200)
def test_content(self):
"""Test response contains expected content"""
response = self.app.get('/content')
content = response.data
expected_content = 'hello, world'
self.assertEqual(content, expected_content)
def test_json_mimetype(self):
"""Test response contains expected json/json configuration"""
response = self.app.get('/json')
mimetype = response.mimetype
self.assertEqual(mimetype, 'application/json')
def test_json_serialization(self):
"""Test response contains expected json/json configuration"""
response = self.app.get('/json')
data = response.data
try:
decoded_data = json.loads(data)
except ValueError:
self.fail('Error: No JSON object could be decoded')
def test_headers(self):
"""Test response headers contain expected"""
response = self.app.get('/headers')
header_val = response.headers.get('X-Test-Header')
expected_val = 'header value'
self.assertEqual(header_val, expected_val)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joelcolucci/registerblueprints",
"score": 2
} |
#### File: mock_app/blueprint_one/api.py
```python
import os
from flask import Blueprint
from flask import render_template
SERVICE_NAME = 'blueprint_one'
# Blueprint pattern requires absolute path to template folder
CURRENT_DIRECTORY = os.path.dirname(__file__)
TEMPLATE_FOLDER = os.path.join(CURRENT_DIRECTORY, 'templates')
blueprint_api = Blueprint(SERVICE_NAME,
__name__,
static_url_path='',
static_folder='static',
template_folder=TEMPLATE_FOLDER)
@blueprint_api.route('/blueprint-one', methods=['GET'])
def get_root():
"""Return home template"""
return render_template('{}/index.html'.format(SERVICE_NAME))
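# Hypothetical registration sketch (the app factory is not part of this file;
# the import path mirrors the blueprint's location under mock_app):
#
#   from flask import Flask
#   from blueprint_one.api import blueprint_api
#
#   app = Flask(__name__)
#   app.register_blueprint(blueprint_api)  # serves GET /blueprint-one via get_root()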
``` |
{
"source": "JoelCranston/Sequence",
"score": 2
} |
#### File: Sequence/src/sequ_test.py
```python
import unittest
import argparse
from sequ import *
import random
import sys
import time
class Sequ_TestCase(unittest.TestCase):
def setUp(self):
self.argLists = [['sequ','1','1','5'],
['sequ','1','5'],
['sequ','-1'],
['sequ','5'],
['sequ','1.00','1.01','5.00'],
['sequ','-3','1','3'],
['sequ','10','-1','1'],
['sequ','-f','abc%%def%-+0#2.2fabc','1','1','5'],
['sequ','-s',':','5'],
['sequ','-s','\\','1','5'],
['sequ','-s','\n','1','1','5'],
['sequ','--separator=',':--:','1','1','5'],
['sequ','--separator',':\t:','1','1','5'],
['sequ','-s',':\n:','1','1','5'],
['sequ','--equal-width','-1','1','2'],
['sequ','--format=','%.3f','9.9','.01','10.0'],
['sequ','--format','%.3F','9.9','.01','10.0'],
['sequ','-f','%.3e','9.9','.01','10.'],
['sequ','-f','%.3E','9.9','.01','10.'],
['sequ','-f','%.3g','9.9','.01','10.'],
['sequ','-f','%.3G','9.9','.01','10.'],
['sequ','-W','-1','1','3'],
['sequ','--words','-1','1','3'],
['sequ','-W','-s','\n','-1','1','3'],
['sequ','-p','*','-1','1','3'],
['sequ','-P','-1','1','3'],
['sequ','--pad-spaces','-1','1','3'],
['sequ','--pad','#','-1','1','3'],
['sequ','--words','-p','#','-1','1','3'],
['sequ','-w','5','1','10'],
['sequ','-w','.1','0.01','.13'],
['sequ','-w','-F','floating','1','10000','2'],
['sequ','-w','-F','floating','5.01','1','10'],
['sequ','-F','arabic','1','10'],
['sequ','-F','floating','1','10'],
['sequ','-F','alpha','e'],
['sequ','-F','alpha','x','-1','n'],
['sequ','-F','alpha','b','2','f'],
['sequ','-w','-F','alpha','b','2','f'],
['sequ','-F','alpha','a','e'],
['sequ','-F','ALPHA','C'],
['sequ','-F','ALPHA','A','C'],
['sequ','-F','ALPHA','B','2','F'],
['sequ','-F','ALPHA','Z','-2','N'],
['sequ','-s',': :','-F','ALPHA','A','1','D'],
['sequ','--words','-F','ALPHA','A','1','D'],
['sequ','-F','ROMAN','1','10'],
['sequ','-F','ROMAN','v'],
['sequ','-F','ROMAN','i','v'],
['sequ','-F','ROMAN','V','I','x'],
['sequ','-F','ROMAN','--words','V','I','X'],
['sequ','-s',': :','-F','ROMAN','I','I','V'],
['sequ','-w','-F','ROMAN','I','I','V'],
['sequ','-p','#','-F','ROMAN','I','I','V'],
['sequ','-P','-F','ROMAN','I','I','V'],
['sequ','-s',': :','-F','roman','v'],
['sequ','-w','-F','roman','v'],
['sequ','-p','#','-F','roman','v'],
['sequ','-P','-F','roman','v'],
['sequ','C'],
['sequ','c'],
['sequ','IV'],
['sequ','iv'],
                         # The following arguments result in argparse failures.
['sequ','-w','-10','.1','0'],
['sequ','A','.1','.5'],
['sequ','AA'],
['sequ','-F','alpha','1'],
['sequ','-F','ALPHA','c'],
['sequ','-F','ALPHA','4'],
['sequ','-F','ROMAN','-1','I','V'],
['sequ','-F','roman','5','-1','1'],
['sequ','-F','roman','1','1','iiii'],
['sequ','-n','C:\test.txt',"-F",'alpha'],
['sequ','-n','C:\test.txt','1'],
]
self.name = 'sequ'
self.flags =['-f','-w','-s','-W','-p']
self.formatStrings = ['%f','aa%%a%004.4faa','%++--g','%E',
'%F','%G','%#f','%0010.2f']
self.separators=['','\n',':','a','\\','\t',' ',"'",',',':\n:','---','--\n']
self.pads=[' ','0','#','-','\\']
self.args = argparse.Namespace()
self.args.equalWidth = False
self.args.first = 1
self.args.increment = 1
self.args.last = 1
self.args.format = None
self.args.separator = DEFAULT_SEPARATOR
self.args.pad = None
def test_drange(self):
print('Testing drange')
randomStart=random.randrange(-100,100,1)
randomEnd=random.randrange(randomStart,1000,1)
iter=drange(randomStart,1,randomEnd)
for i in range(randomStart,randomEnd):
assert i == next(iter)
def DISABLEDtest_parseArgs(self):
print('Testing parseArgs')
for i in self.argLists:
sys.argv = i
print(sys.argv)
try:
parseArgs()
# Argparse throws a system exit even though it handles the error correctly
except SystemExit:
time.sleep(.5)
print()
def DISABLEDtest_printSeq(self):
self.args.pad = '0'
print("Testing printSeq")
print("Printing 1 to 4 with separators",self.separators)
for i in range(len(self.separators)):
print('separator = "%s"' % self.separators[i])
self.args.separator = self.separators[i]
printNumSeq(drange(1,1,5),self.args)
self.args.separator=DEFAULT_SEPARATOR
self.args.pad=None
print("Testing with format strings")
print(self.formatStrings)
for i in range(len(self.formatStrings)):
print('Formats string = "%s"' % self.formatStrings[i])
self.args.format = self.formatStrings[i]
printNumSeq(drange(1,1,5),self.args)
self.args.equalWidth = True
self.args.format=None
self.args.first = decimal.Decimal("6")
self.args.increment = decimal.Decimal("1")
self.args.last = decimal.Decimal("10")
print(self.pads)
for i in self.pads:
self.args.pad = i
print("Printing equal width pads with args = %s" % str(self.args))
iter= drange(self.args.first, self.args.increment, self.args.last)
printNumSeq(iter,self.args)
self.args.pad=None
def test_sequ(self):
print('start of full app test')
for i in self.argLists:
print(i)
sys.argv = i
try:
#self.assertRaises(SystemExit, main)
#self.assertEqual(main(), expected)
main()
time.sleep(.1)
except SystemExit:
time.sleep(1)
if __name__ == '__main__':
unittest.main()
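# Sketch of the drange generator exercised by test_drange above (signature is
# start, increment, stop; only the first values are checked by the test):
#
#   it = drange(1, 1, 5)
#   [next(it) for _ in range(4)]   # -> [1, 2, 3, 4]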
``` |
{
"source": "joelcrocker/django-smart-selects",
"score": 2
} |
#### File: django-smart-selects/smart_selects/tests.py
```python
import unittest
import django
from db_fields import ChainedForeignKey, GroupedForeignKey
def has_new_migrations():
return (django.VERSION[:2] >= (1, 7),
"This test requires Django migrations introduced in Django 1.7.")
class AssertReconstructibleMixin(object):
def assert_reconstructible(self, *field_args, **field_kwargs):
field_instance = self.field_class(*field_args, **field_kwargs)
name, path, args, kwargs = field_instance.deconstruct()
new_instance = self.field_class(*args, **kwargs)
for attr_name in self.deconstruct_attrs:
self.assertEqual(
getattr(field_instance, attr_name),
getattr(new_instance, attr_name)
)
@unittest.skipUnless(*has_new_migrations())
class ChainedForeignKeyTests(AssertReconstructibleMixin, unittest.TestCase):
def setUp(self):
self.field_class = ChainedForeignKey
self.deconstruct_attrs = [
'chain_field', 'model_field', 'show_all', 'auto_choose',
'view_name',
]
def test_deconstruct_basic(self):
self.assert_reconstructible(
'myapp.MyModel',
chained_field='a_chained_field',
chained_model_field='the_chained_model_field',
show_all=False, auto_choose=True
)
def test_deconstruct_mostly_default(self):
self.assert_reconstructible(
'myapp.MyModel'
)
def test_deconstruct_non_default(self):
self.assert_reconstructible(
'myapp.MyModel',
chained_field='a_chained_field',
chained_model_field='the_chained_model_field',
show_all=True, auto_choose=True
)
@unittest.skipUnless(*has_new_migrations())
class GroupedForeignKeyTests(AssertReconstructibleMixin, unittest.TestCase):
def setUp(self):
self.field_class = GroupedForeignKey
self.deconstruct_attrs = ['group_field']
def test_deconstruct_basic(self):
self.assert_reconstructible('myapp.MyModel', 'the_group_field')
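# What assert_reconstructible does for a single field, spelled out (the model
# path and option values below are illustrative):
#
#   field = ChainedForeignKey('myapp.MyModel',
#                             chained_field='a_chained_field',
#                             chained_model_field='the_chained_model_field')
#   name, path, args, kwargs = field.deconstruct()
#   rebuilt = ChainedForeignKey(*args, **kwargs)
#   assert field.chain_field == rebuilt.chain_field  # one of deconstruct_attrs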
``` |
{
"source": "joelczinn/isochrones",
"score": 2
} |
#### File: isochrones/dartmouth/tri.py
```python
import sys, os
import pandas as pd
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from scipy.interpolate import LinearNDInterpolator as interpnd
from ..config import ISOCHRONES
from .grid import DartmouthModelGrid
def write_tri(filename=os.path.join(ISOCHRONES,'dartmouth.tri')):
df = DartmouthModelGrid(['g']).df
N = len(df)
pts = np.zeros((N,3))
pts[:,0] = np.array(df['MMo'])
pts[:,1] = np.array(df['age'])
pts[:,2] = np.array(df['feh'])
gmags = np.array(df['g'])
gfn = interpnd(pts,gmags)
with open(filename,'wb') as f:
pickle.dump(gfn.tri,f)
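# Sketch of reading the cached triangulation back (assumes write_tri() has run;
# `values` stands for a magnitude array matching the grid, like gmags above):
#
#   with open(os.path.join(ISOCHRONES, 'dartmouth.tri'), 'rb') as f:
#       tri = pickle.load(f)
#   interp = interpnd(tri, values)   # reuse the Delaunay triangulation directly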
```
#### File: isochrones/extinction/extinction.py
```python
import os, os.path, re
from ..config import on_rtd
if not on_rtd:
DATADIR = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','data'))
# Wavelength dependence of extinction from Schlafly+ (2016)
# http://e.schlaf.ly/apored/extcurve.html
from .schlafly.extcurve_s16 import extcurve
extcurve_0 = extcurve(0.)
#Read data defining effective central wavelengths of filters
FILTERFILE = os.path.join(DATADIR,'filters.txt')
LAMBDA_EFF = {}
for line in open(FILTERFILE,'r'):
if re.match('#', line):
continue
line = line.split()
LAMBDA_EFF[line[0]] = float(line[1])
#Read data defining extinction in different bands (relative to A_V)
EXTINCTIONFILE = '{}/extinction.txt'.format(DATADIR)
EXTINCTION = dict()
EXTINCTION5 = dict()
for line in open(EXTINCTIONFILE,'r'):
line = line.split()
EXTINCTION[line[0]] = float(line[1])
EXTINCTION5[line[0]] = float(line[2])
EXTINCTION['kep'] = 0.85946
EXTINCTION['V'] = 1.0
EXTINCTION['Ks'] = EXTINCTION['K']
EXTINCTION['Kepler'] = EXTINCTION['kep']
from astropy.coordinates import SkyCoord
from six.moves import urllib
import re
def get_AV_infinity(ra,dec,frame='icrs'):
"""
Gets the A_V exctinction at infinity for a given line of sight.
Queries the NED database.
:param ra,dec:
Desired coordinates, in degrees.
:param frame: (optional)
Frame of input coordinates (e.g., ``'icrs', 'galactic'``)
"""
coords = SkyCoord(ra,dec,unit='deg',frame=frame).transform_to('icrs')
rah,ram,ras = coords.ra.hms
decd,decm,decs = coords.dec.dms
if decd > 0:
decsign = '%2B'
else:
decsign = '%2D'
url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon='+'%i' % rah + \
'%3A'+'%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) + '%3A' + '%05.2f' % abs(decs) + \
'&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0'
AV = None
for line in urllib.request.urlopen(url).readlines():
        m = re.search(rb'^Landolt V \(0.54\)\s+(\d+\.\d+)', line)
if m:
AV = (float(m.group(1)))
break
if AV is None:
raise RuntimeError('AV query fails! URL is {}'.format(url))
return AV
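# Usage sketch (requires network access to the NED service; coordinates are
# example values in degrees):
#
#   AV = get_AV_infinity(187.25, 2.05, frame='icrs')
#   print('A_V at infinity: {:.3f}'.format(AV))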
```
#### File: isochrones/isochrones/observation.py
```python
from __future__ import print_function, division
import os, re, sys
import logging
from .config import on_rtd
if not on_rtd:
import numpy as np
import pandas as pd
from configobj import ConfigObj
from asciitree import LeftAligned, Traversal
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
from collections import OrderedDict
from itertools import chain, count
try:
from itertools import imap, izip
except ImportError: # Python 3
imap = map
izip = zip
xrange = range
else:
class Traversal(object):
pass
class LeftAligned(object):
pass
from .isochrone import get_ichrone
from .utils import addmags, distance
class NodeTraversal(Traversal):
"""
Custom subclass to traverse tree for ascii printing
"""
def __init__(self, pars=None, **kwargs):
self.pars = pars
super(NodeTraversal,self).__init__(**kwargs)
def get_children(self, node):
return node.children
    def get_root(self, node):
        return node
def get_text(self, node):
text = node.label
if self.pars is not None:
if hasattr(node, 'model_mag'):
text += '; model={:.2f} ({})'.format(node.model_mag(self.pars),
node.lnlike(self.pars))
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
modval = node.evaluate(self.pars[node.label], k)
lnl = -0.5*(modval - v[0])**2/v[1]**2
text += '; model={} ({})'.format(modval, lnl)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
text += ': {}'.format(self.pars[node.label])
else:
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
#root = node.get_root()
#if hasattr(root,'spectroscopy'):
# if node.label in root.spectroscopy:
# for k,v in root.spectroscopy[node.label].items():
# model = node.evaluate(self.pars[node.label], k)
# text += '\n {}={} (model={})'.format(k,v,model)
return text
class MyLeftAligned(LeftAligned):
"""For custom ascii tree printing
"""
pars = None
def __init__(self, pars=None, **kwargs):
self.pars = pars
self.traverse = NodeTraversal(pars)
super(MyLeftAligned,self).__init__(**kwargs)
class Node(object):
def __init__(self, label):
self.label = label
self.parent = None
self.children = []
self._leaves = None
def __iter__(self):
"""
Iterate through tree, leaves first
following http://stackoverflow.com/questions/6914803/python-iterator-through-tree-with-list-of-children
"""
for node in chain(*imap(iter, self.children)):
yield node
yield self
def __getitem__(self, ind):
for n,i in izip(self, count()):
if i==ind:
return n
@property
def is_root(self):
return self.parent is None
def get_root(self):
if self.is_root:
return self
else:
return self.parent.get_root()
def get_ancestors(self):
if self.parent.is_root:
return []
else:
return [self.parent] + self.parent.get_ancestors()
def print_ascii(self, fout=None, pars=None):
box_tr = MyLeftAligned(pars,draw=BoxStyle(gfx=BOX_DOUBLE, horiz_len=1))
if fout is None:
print(box_tr(self))
else:
fout.write(box_tr(self))
@property
def is_leaf(self):
return len(self.children)==0 and not self.is_root
def _clear_leaves(self):
self._leaves = None
def _clear_all_leaves(self):
if not self.is_root:
self.parent._clear_all_leaves()
self._clear_leaves()
def add_child(self, node):
node.parent = self
self.children.append(node)
self._clear_all_leaves()
def remove_children(self):
self.children = []
self._clear_all_leaves()
def remove_child(self, label):
"""
Removes node by label
"""
ind = None
for i,c in enumerate(self.children):
if c.label==label:
ind = i
if ind is None:
logging.warning('No child labeled {}.'.format(label))
return
self.children.pop(ind)
self._clear_all_leaves()
def attach_to_parent(self, node):
# detach from current parent, if necessary
if self.parent is not None:
self.parent.remove_child(self.label)
node.children += [self]
self.parent = node
self._clear_all_leaves()
@property
def leaves(self):
if self._leaves is None:
self._leaves = self._get_leaves()
return self._leaves
def _get_leaves(self):
if self.is_leaf:
return [self]
else:
leaves = []
for c in self.children:
leaves += c._get_leaves()
return leaves
def select_leaves(self, name):
"""Returns all leaves under all nodes matching name
"""
if self.is_leaf:
return [self] if re.search(name, self.label) else []
else:
leaves = []
if re.search(name, self.label):
for c in self.children:
leaves += c._get_leaves() #all leaves
else:
for c in self.children:
leaves += c.select_leaves(name) #only matching ones
return leaves
@property
def leaf_labels(self):
return [l.label for l in self.leaves]
def get_leaf(self, label):
for l in self.leaves:
if label==l.label:
return l
def get_obs_nodes(self):
return [l for l in self if isinstance(l, ObsNode)]
@property
def obs_leaf_nodes(self):
return self.get_obs_leaves()
def get_obs_leaves(self):
"""Returns the last obs nodes that are leaves
"""
obs_leaves = []
for n in self:
if n.is_leaf:
if isinstance(n, ModelNode):
l = n.parent
else:
l = n
if l not in obs_leaves:
obs_leaves.append(l)
return obs_leaves
def get_model_nodes(self):
return [l for l in self._get_leaves() if isinstance(l, ModelNode)]
@property
def N_model_nodes(self):
return len(self.get_model_nodes())
def print_tree(self):
print(self.label)
def __str__(self):
return self.label
def __repr__(self):
if self.is_leaf:
s = "<{} '{}', parent='{}'>".format(self.__class__,
self.label,
self.parent)
else:
child_labels = [str(c) for c in self.children]
s = "<{} '{}', parent='{}', children={}>".format(self.__class__,
self.label,
self.parent,
child_labels)
return s
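# Quick sketch of the Node tree API defined above (labels are illustrative):
#
#   root = Node('root')
#   root.add_child(Node('obs-1'))
#   root.add_child(Node('obs-2'))
#   root.leaf_labels      # -> ['obs-1', 'obs-2']
#   root.print_ascii()    # renders the tree with asciitree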
class ObsNode(Node):
def __init__(self, observation, source, ref_node=None):
self.observation = observation
self.source = source
self.reference = ref_node
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def instrument(self):
return self.observation.name
@property
def band(self):
return self.observation.band
@property
def value(self):
return (self.source.mag, self.source.e_mag)
@property
def resolution(self):
return self.observation.resolution
@property
def relative(self):
return self.source.relative
@property
def separation(self):
return self.source.separation
@property
def pa(self):
return self.source.pa
@property
def value_str(self):
return '({:.2f}, {:.2f})'.format(*self.value)
def distance(self, other):
"""Coordinate distance from another ObsNode
"""
return distance((self.separation, self.pa), (other.separation, other.pa))
def _in_same_observation(self, other):
return self.instrument==other.instrument and self.band==other.band
@property
def n_params(self):
if self._n_params is None:
self._n_params = 5 * len(self.leaves)
return self._n_params
def _get_inds(self):
inds = [n.index for n in self.leaves]
inds = sorted(list(set(inds)))
return inds
def _clear_leaves(self):
self._leaves = None
self._inds = None
self._n_params = None
self._Nstars = None
@property
def Nstars(self):
"""
dictionary of number of stars per system
"""
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
lst = sorted(self.Nstars.keys())
return lst
@property
def inds(self):
if self._inds is None:
self._inds = self._get_inds()
return self._inds
@property
def label(self):
if self.source.relative:
band_str = 'delta-{}'.format(self.band)
else:
band_str = self.band
return '{} {}={} @({:.2f}, {:.0f} [{:.2f}])'.format(self.instrument,
band_str,
self.value_str, self.separation, self.pa,
self.resolution)
@property
def obsname(self):
return '{}-{}'.format(self.instrument, self.band)
def get_system(self, ind):
system = []
for l in self.get_root().leaves:
try:
if l.index==ind:
system.append(l)
except AttributeError:
pass
return system
def add_model(self, ic, N=1, index=0):
"""
Should only be able to do this to a leaf node.
Either N and index both integers OR index is
list of length=N
"""
if type(index) in [list,tuple]:
if len(index) != N:
raise ValueError('If a list, index must be of length N.')
else:
index = [index]*N
for idx in index:
existing = self.get_system(idx)
tag = len(existing)
self.add_child(ModelNode(ic, index=idx, tag=tag))
def model_mag(self, pardict, use_cache=True):
"""
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
if pardict == self._cache_key and use_cache:
#print('{}: using cached'.format(self))
return self._cache_val
#print('{}: calculating'.format(self))
self._cache_key = pardict
# Generate appropriate parameter vector from dictionary
p = []
for l in self.leaf_labels:
p.extend(pardict[l])
assert len(p) == self.n_params
tot = np.inf
#print('Building {} mag for {}:'.format(self.band, self))
for i,m in enumerate(self.leaves):
mag = m.evaluate(p[i*5:(i+1)*5], self.band)
# logging.debug('{}: mag={}'.format(self,mag))
#print('{}: {}({}) = {}'.format(m,self.band,p[i*5:(i+1)*5],mag))
tot = addmags(tot, mag)
self._cache_val = tot
return tot
def lnlike(self, pardict, use_cache=True):
"""
returns log-likelihood of this observation
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
mag, dmag = self.value
if np.isnan(dmag):
return 0
if self.relative:
# If this *is* the reference, just return
if self.reference is None:
return 0
mod = (self.model_mag(pardict, use_cache=use_cache) -
self.reference.model_mag(pardict, use_cache=use_cache))
mag -= self.reference.value[0]
else:
mod = self.model_mag(pardict, use_cache=use_cache)
lnl = -0.5*(mag - mod)**2 / dmag**2
# logging.debug('{} {}: mag={}, mod={}, lnlike={}'.format(self.instrument,
# self.band,
# mag,mod,lnl))
return lnl
class DummyObsNode(ObsNode):
def __init__(self, *args, **kwargs):
self.observation = None
self.source = None
self.reference = None
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def label(self):
return '[dummy]'
@property
def value(self):
return None, None
def lnlike(self, *args, **kwargs):
return 0
class ModelNode(Node):
"""
These are always leaves; leaves are always these.
Index keeps track of which physical system node is in.
"""
def __init__(self, ic, index=0, tag=0):
self._ic = ic
self.index = index
self.tag = tag
self.children = []
self.parent = None
self._leaves = None
@property
def label(self):
return '{}_{}'.format(self.index, self.tag)
@property
def ic(self):
if type(self._ic)==type:
self._ic = self._ic()
return self._ic
def get_obs_ancestors(self):
nodes = self.get_ancestors()
return [n for n in nodes if isinstance(n, ObsNode)]
@property
def contributing_observations(self):
"""The instrument-band for all the observations feeding into this model node
"""
return [n.obsname for n in self.get_obs_ancestors()]
def evaluate(self, p, prop):
if prop in self.ic.bands:
return self.evaluate_mag(p, prop)
elif prop=='mass':
return p[0]
elif prop=='age':
return p[1]
elif prop=='feh':
return p[2]
elif prop in ['Teff','logg','radius']:
return getattr(self.ic, prop)(*p[:3])
else:
raise ValueError('property {} cannot be evaluated by Isochrone.'.format(prop))
def evaluate_mag(self, p, band):
return self.ic.mag[band](*p)
def lnlike(self, *args, **kwargs):
return 0
class Source(object):
def __init__(self, mag, e_mag, separation=0., pa=0.,
relative=False, is_reference=False):
self.mag = float(mag)
self.e_mag = float(e_mag)
self.separation = float(separation)
self.pa = float(pa)
self.relative = bool(relative)
self.is_reference = bool(is_reference)
def __str__(self):
return '({}, {}) @({}, {})'.format(self.mag, self.e_mag,
self.separation, self.pa)
def __repr__(self):
return self.__str__()
class Star(object):
"""Theoretical counterpart of Source.
"""
def __init__(self, pars, separation, pa):
self.pars = pars
self.separation = separation
self.pa = pa
def distance(self, other):
return distance((self.separation, self.pa),
(other.separation, other.pa))
class Observation(object):
"""
Contains relevant information about imaging observation
name: identifying string (typically the instrument)
band: photometric bandpass
resolution: *approximate* angular resolution of instrument.
used for source matching between observations
sources: list of Source objects
"""
def __init__(self, name, band, resolution, sources=None,
relative=False):
self.name = name
self.band = band
self.resolution = resolution
if sources is not None:
            if not all(type(s)==Source for s in sources):
raise ValueError('Source list must be all Source objects.')
self.sources = []
if sources is None:
sources = []
for s in sources:
self.add_source(s)
self.relative = relative
self._set_reference()
def observe(self, stars, unc, ic=None):
"""Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
"""
if ic is None:
ic = get_ichrone('mist')
if len(stars) > 2:
raise NotImplementedError('No support yet for > 2 synthetic stars')
mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars]
d = stars[0].distance(stars[1])
if d < self.resolution:
mag = addmags(*mags) + unc*np.random.randn()
sources = [Source(mag, unc, stars[0].separation, stars[0].pa,
relative=self.relative)]
else:
mags = np.array([m + unc*np.random.randn() for m in mags])
if self.relative:
mags -= mags.min()
sources = [Source(m, unc, s.separation, s.pa, relative=self.relative)
for m,s in zip(mags, stars)]
for s in sources:
self.add_source(s)
self._set_reference()
def add_source(self, source):
"""
Adds source to observation, keeping sorted order (in separation)
"""
if not type(source)==Source:
raise TypeError('Can only add Source object.')
if len(self.sources)==0:
self.sources.append(source)
else:
ind = 0
for s in self.sources:
# Keep sorted order of separation
if source.separation < s.separation:
break
ind += 1
self.sources.insert(ind, source)
#self._set_reference()
@property
def brightest(self):
mag0 = np.inf
s0 = None
for s in self.sources:
if s.mag < mag0:
mag0 = s.mag
s0 = s
return s0
def _set_reference(self):
"""If relative, make sure reference node is set to brightest.
"""
if len(self.sources) > 0:
self.brightest.is_reference = True
def __str__(self):
return '{}-{}'.format(self.name, self.band)
def __repr__(self):
return str(self)
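# Construction sketch (instrument, magnitudes, and separations are made up):
#
#   srcs = [Source(10.0, 0.02, separation=0.0, pa=0.0),
#           Source(12.5, 0.05, separation=0.3, pa=110.0)]
#   obs = Observation('Keck', 'K', resolution=0.1, sources=srcs)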
class ObservationTree(Node):
"""Builds a tree of Nodes from a list of Observation objects
Organizes Observations from smallest to largest resolution,
and at each stage attaches each source to the most probable
match from the previous Observation. Admittedly somewhat hack-y,
but should *usually* do the right thing. Check out `obs.print_ascii()`
to visualize what this has done.
"""
spec_props = ['Teff', 'logg', 'feh']
def __init__(self, observations=None, name=None):
if observations is None:
observations = []
if name is None:
self.label = 'root'
else:
self.label = name
self.parent = None
self._observations = []
self._build_tree()
[self.add_observation(obs) for obs in observations]
self._N = None
self._index = None
# Spectroscopic properties
self.spectroscopy = {}
# Limits (such as minimum on logg)
self.limits = {}
# Parallax measurements
self.parallax = {}
# This will be calculated and set at first access
self._Nstars = None
#likelihood cache
self._cache_key = None
self._cache_val = None
@property
def name(self):
return self.label
def _clear_cache(self):
self._cache_key = None
self._cache_val = None
@classmethod
def from_df(cls, df, **kwargs):
"""
DataFrame must have the right columns.
these are: name, band, resolution, mag, e_mag, separation, pa
"""
tree = cls(**kwargs)
for (n,b), g in df.groupby(['name','band']):
#g.sort('separation', inplace=True) #ensures that the first is reference
sources = [Source(**s[['mag','e_mag','separation','pa','relative']])
for _,s in g.iterrows()]
obs = Observation(n, b, g.resolution.mean(),
sources=sources, relative=g.relative.any())
tree.add_observation(obs)
# For all relative mags, set reference to be brightest
return tree
@classmethod
def from_ini(cls, filename):
config = ConfigObj(filename)
def to_df(self):
"""
Returns DataFrame with photometry from observations organized.
This DataFrame should be able to be read back in to
reconstruct the observation.
"""
df = pd.DataFrame()
name = []
band = []
resolution = []
mag = []
e_mag = []
separation = []
pa = []
relative = []
for o in self._observations:
for s in o.sources:
name.append(o.name)
band.append(o.band)
resolution.append(o.resolution)
mag.append(s.mag)
e_mag.append(s.e_mag)
separation.append(s.separation)
pa.append(s.pa)
relative.append(s.relative)
return pd.DataFrame({'name':name,'band':band,'resolution':resolution,
'mag':mag,'e_mag':e_mag,'separation':separation,
'pa':pa,'relative':relative})
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""
Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
df = self.to_df()
df.to_hdf(filename, path+'/df')
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer(path+'/df').attrs
attrs.spectroscopy = self.spectroscopy
attrs.parallax = self.parallax
attrs.N = self._N
attrs.index = self._index
store.close()
@classmethod
def load_hdf(cls, filename, path='', ic=None):
"""
Loads stored ObservationTree from file.
You can provide the isochrone to use; or it will default to MIST
TODO: saving and loading must be fixed! save ic type, bands, etc.
"""
store = pd.HDFStore(filename)
try:
samples = store[path+'/df']
attrs = store.get_storer(path+'/df').attrs
except:
store.close()
raise
df = store[path+'/df']
new = cls.from_df(df)
if ic is None:
ic = get_ichrone('mist')
new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax
store.close()
return new
def add_observation(self, obs):
"""Adds an observation to observation list, keeping proper order
"""
if len(self._observations)==0:
self._observations.append(obs)
else:
res = obs.resolution
ind = 0
for o in self._observations:
if res > o.resolution:
break
ind += 1
self._observations.insert(ind, obs)
self._build_tree()
self._clear_cache()
def add_spectroscopy(self, label='0_0', **props):
"""
Adds spectroscopic measurement to particular star(s) (corresponding to individual model node)
Default 0_0 should be primary star
legal inputs are 'Teff', 'logg', 'feh', and in form (val, err)
"""
if label not in self.leaf_labels:
raise ValueError('No model node named {} (must be in {}). Maybe define models first?'.format(label, self.leaf_labels))
for k,v in props.items():
if k not in self.spec_props:
raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
if len(v) != 2:
raise ValueError('Must provide (value, uncertainty) for {}.'.format(k))
if label not in self.spectroscopy:
self.spectroscopy[label] = {}
for k,v in props.items():
self.spectroscopy[label][k] = v
self._clear_cache()
def add_limit(self, label='0_0', **props):
"""Define limits to spectroscopic property of particular stars.
Usually will be used for 'logg', but 'Teff' and 'feh' will also work.
In form (min, max): e.g., t.add_limit(logg=(3.0,None))
None will be converted to (-)np.inf
"""
if label not in self.leaf_labels:
raise ValueError('No model node named {} (must be in {}). Maybe define models first?'.format(label, self.leaf_labels))
for k,v in props.items():
if k not in self.spec_props:
raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
if len(v) != 2:
raise ValueError('Must provide (min, max) for {}. (`None` is allowed value)'.format(k))
if label not in self.limits:
self.limits[label] = {}
for k,v in props.items():
vmin, vmax = v
if vmin is None:
vmin = -np.inf
if vmax is None:
vmax = np.inf
self.limits[label][k] = (vmin, vmax)
self._clear_cache()
def add_parallax(self, plax, system=0):
if len(plax)!=2:
raise ValueError('Must enter (value,uncertainty).')
if system not in self.systems:
raise ValueError('{} not in systems ({}).'.format(system,self.systems))
self.parallax[system] = plax
self._clear_cache()
def define_models(self, ic, leaves=None, N=1, index=0):
"""
N, index are either integers or lists of integers.
N : number of model stars per observed star
index : index of physical association
leaves: either a list of leaves, or a pattern by which
the leaves are selected (via `select_leaves`)
If these are lists, then they are defined individually for
each leaf.
If `index` is a list, then each entry must be either
an integer or a list of length `N` (where `N` is the corresponding
entry in the `N` list.)
        This may misbehave if you call it multiple times. If you want
        to re-do a call to this function, re-define the tree first.
"""
self.clear_models()
if leaves is None:
leaves = self._get_leaves()
elif type(leaves)==type(''):
leaves = self.select_leaves(leaves)
# Sort leaves by distance, to ensure system 0 will be assigned
# to the main reference star.
if np.isscalar(N):
N = (np.ones(len(leaves))*N)
#if np.size(index) > 1:
# index = [index]
N = np.array(N).astype(int)
if np.isscalar(index):
index = (np.ones_like(N)*index)
index = np.array(index).astype(int)
# Add the appropriate number of model nodes to each
# star in the highest-resoluion image
for s,n,i in zip(leaves, N, index):
# Remove any previous model nodes (should do some checks here?)
s.remove_children()
s.add_model(ic, n, i)
# For each system, make sure tag _0 is the brightest.
self._fix_labels()
self._N = N
self._index = index
self._clear_all_leaves()
def _fix_labels(self):
"""For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
"""
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue
mag, _ = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n
# If brightest is not tag _0, then switch them.
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf('{}_{}'.format(s,0))
n_other.tag = n0.tag
n0.tag = 0
def get_system(self, ind):
system = []
for l in self.leaves:
try:
if l.index==ind:
system.append(l)
except AttributeError:
pass
return system
@property
def observations(self):
return self._observations
def select_observations(self, name):
"""Returns nodes whose instrument-band matches 'name'
"""
return [n for n in self.get_obs_nodes() if n.obsname==name]
def clear_models(self):
for n in self:
if isinstance(n, ModelNode):
n.parent.remove_child(n.label)
self._clear_all_leaves()
def trim(self):
"""
Trims leaves from tree that are not observed at highest-resolution level
        This is a bit hacky -- the loop below would prune leaves found at
        lower-resolution levels, but it is currently disabled by the early return.
        """
        # Only allow leaves to stay on the last (highest-resolution) level
return
for l in self._levels[-2::-1]:
for n in l:
if n.is_leaf:
n.parent.remove_child(n.label)
self._clear_all_leaves() #clears cached list of leaves
def p2pardict(self, p):
"""
Given leaf labels, turns parameter vector into pardict
"""
d = {}
N = self.Nstars
i = 0
for s in self.systems:
age, feh, dist, AV = p[i+N[s]:i+N[s]+4]
for j in xrange(N[s]):
l = '{}_{}'.format(s,j)
mass = p[i+j]
d[l] = [mass, age, feh, dist, AV]
i += N[s] + 4
return d
@property
def param_description(self):
N = self.Nstars
pars = []
for s in self.systems:
for j in xrange(N[s]):
pars.append('mass_{}_{}'.format(s,j))
for p in ['age', 'feh', 'distance', 'AV']:
pars.append('{}_{}'.format(p,s))
return pars
@property
def Nstars(self):
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
# fix this! make sure it is unique!!!
lst = list(chain(*[c.systems for c in self.children]))
return sorted(set(lst))
def print_ascii(self, fout=None, p=None):
pardict = None
if p is not None:
pardict = self.p2pardict(p)
super(ObservationTree, self).print_ascii(fout, pardict)
def lnlike(self, p, use_cache=True):
"""
takes parameter vector, constructs pardict, returns sum of lnlikes of non-leaf nodes
"""
if use_cache and self._cache_key is not None and np.all(p==self._cache_key):
return self._cache_val
self._cache_key = p
pardict = self.p2pardict(p)
# lnlike from photometry
lnl = 0
for n in self:
if n is not self:
lnl += n.lnlike(pardict, use_cache=use_cache)
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
# lnlike from spectroscopy
for l in self.spectroscopy:
for prop,(val,err) in self.spectroscopy[l].items():
mod = self.get_leaf(l).evaluate(pardict[l], prop)
lnl += -0.5*(val - mod)**2/err**2
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
# enforce limits
for l in self.limits:
for prop,(vmin,vmax) in self.limits[l].items():
mod = self.get_leaf(l).evaluate(pardict[l], prop)
if mod < vmin or mod > vmax or not np.isfinite(mod):
self._cache_val = -np.inf
return -np.inf
# lnlike from parallax
for s,(val,err) in self.parallax.items():
dist = pardict['{}_0'.format(s)][3]
mod = 1./dist * 1000.
lnl += -0.5*(val-mod)**2/err**2
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
self._cache_val = lnl
return lnl
def _find_closest(self, n0):
"""returns the node in the tree that is closest to n0, but not
in the same observation
"""
dmin = np.inf
nclose = None
ds = []
nodes = []
ds.append(np.inf)
nodes.append(self)
for n in self:
if n is n0:
continue
try:
if n._in_same_observation(n0):
continue
ds.append(n.distance(n0))
nodes.append(n)
except AttributeError:
pass
inds = np.argsort(ds)
ds = [ds[i] for i in inds]
nodes = [nodes[i] for i in inds]
for d,n in zip(ds, nodes):
try:
if d < n.resolution or n.resolution==-1:
return n
except AttributeError:
pass
# If nothing else works
return self
def _build_tree(self):
#reset leaf cache, children
self._clear_all_leaves()
self.children = []
for i,o in enumerate(self._observations):
s0 = o.brightest
ref_node = ObsNode(o, s0)
for s in o.sources:
if s.relative and not s.is_reference:
node = ObsNode(o, s, ref_node=ref_node)
elif s.relative and s.is_reference:
node = ref_node
else:
node = ObsNode(o, s)
# For first level, no need to choose parent
if i==0:
parent = self
else:
# Find parent (closest node in tree)
parent = self._find_closest(node)
parent.add_child(node)
# If after all this, there are no `ObsNode` nodes,
# then add a dummy.
if len(self.get_obs_nodes())==0:
self.add_child(DummyObsNode())
@classmethod
def synthetic(cls, stars, surveys):
pass
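# Sketch of building a tree from a photometry table (values are invented; the
# columns follow the from_df docstring above):
#
#   import pandas as pd
#   df = pd.DataFrame([
#       dict(name='2MASS', band='J', resolution=4.0, mag=9.10, e_mag=0.02,
#            separation=0.0, pa=0.0, relative=False),
#       dict(name='Keck',  band='K', resolution=0.1, mag=8.70, e_mag=0.05,
#            separation=0.0, pa=0.0, relative=True),
#   ])
#   tree = ObservationTree.from_df(df)
#   tree.define_models(get_ichrone('mist'), N=1, index=0)
#   tree.add_spectroscopy('0_0', Teff=(5770, 80), logg=(4.44, 0.08))
#   tree.print_ascii()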
```
#### File: isochrones/tests/test_fits.py
```python
import os, glob
import numpy as np
import tempfile
from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.mist import MIST_Isochrone
from isochrones import StarModel
mnest = True
try:
import pymultinest
except:
import logging
logging.warning('No PyMultiNest; fits will use emcee')
mnest = False
chainsdir = tempfile.gettempdir()
props = dict(Teff=(5800, 100), logg=(4.5, 0.1),
B=(5.7,0.05), V=(5.0, 0.05))
def test_fitting():
mod_dar = _check_fitting(StarModel(Dartmouth_Isochrone, **props))
mod_mist = _check_fitting(StarModel(MIST_Isochrone, **props))
_check_saving(mod_dar)
_check_saving(mod_mist)
###############
def _check_saving(mod):
filename = os.path.join(chainsdir, '{}.h5'.format(np.random.randint(1000000)))
mod.save_hdf(filename)
newmod = StarModel.load_hdf(filename)
assert np.allclose(mod.samples, newmod.samples)
assert mod.ic.bands == newmod.ic.bands
os.remove(filename)
def _check_fitting(mod):
_fit_emcee(mod)
if mnest:
_fit_mnest(mod)
return mod
def _fit_mnest(mod):
basename = '{}/{}-'.format(chainsdir,np.random.randint(1000000))
mod.fit_multinest(n_live_points=5, max_iter=50,basename=basename,
verbose=False)
foo = mod.mnest_analyzer
files = glob.glob('{}*'.format(basename))
for f in files:
os.remove(f)
def _fit_emcee(mod):
mod.use_emcee = True
mod.fit_mcmc(nburn=20, niter=10, ninitial=10)
mod.samples
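# Standalone sketch mirroring the emcee path above (observed values are the
# illustrative `props` defined at module level):
#
#   mod = StarModel(MIST_Isochrone, **props)
#   mod.use_emcee = True
#   mod.fit_mcmc(nburn=20, niter=10, ninitial=10)
#   samples = mod.samples   # posterior samples from the (very short) test run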
```
#### File: isochrones/yapsi/isochrone.py
```python
import os
import pickle
from ..isochrone import Isochrone
from ..config import ISOCHRONES
from .grid import YAPSIModelGrid
class YAPSI_Isochrone(Isochrone):
name = 'dartmouth'
default_bands = YAPSIModelGrid.default_bands
tri_file = os.path.join(ISOCHRONES, 'yapsi.tri')
def __init__(self, **kwargs):
df = YAPSIModelGrid().df
with open(self.tri_file, 'rb') as f:
tri = pickle.load(f)
mags = {b:df[b].values for b in self.default_bands}
Isochrone.__init__(self,df['mass'].values, df['age'].values,
df['feh'].values,df['mass'].values, df['logL'].values,
10**df['logTeff'].values,df['logg'].values,mags,tri=tri,
**kwargs)
``` |
{
"source": "joeld42/darkstar",
"score": 2
} |
#### File: darkstar/scripts/asset_export.py
```python
import os, sys
import string
import subprocess
import shutil
# FIXME NOW: Make one gamedata directory per target platform
PROJECT_DIR = '/Users/joeld/oprojects/darkstar'
CRUNCH_TOOL = '/Users/joeld/Toolkits/crunch-osx/bin_osx/crunch'
TEXTURETOOL_TOOL = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/texturetool'
#BLENDER_TOOL = '/Applications/Blender/blender.app/Contents/MacOS/blender'
#BLENDER_EXPORT_SCRIPT = 'export_scene.py'
#-b srcart/forest.blend -P script/export_scene.py -- ld33_export.cfg
LDJAM_TOOL = '/Users/joeld/oprojects/fips-deploy/darkstar/osx-xcode-debug/ldjamtool'
#'./assets/TestLevelSF.ogex ./gamedata/TestLevelSF.ldjam'
#ASSET_DIR = "assets"
# TMP
ASSET_DIR = "assets"
OGEX_DIR = "assets"
RUNTIME_DATA_DIR = "gamedata"
TEXTURES = {
'TestTile_basecolor.png' : 'DXT5',
'TestTile_metallic.png' : 'DXT5',
'TestTile_normal.png' : 'DXT5',
'TestTile_roughness.png' : 'DXT5',
}
OGEX_FILES = {
'TEST_Stuff.ogex',
}
def fileNeedsUpdate( srcFile, destFile ):
if not os.path.exists( destFile ):
print "DEST", destFile, "does not exist"
return True
destmtime = os.path.getmtime( destFile )
srcmtime = os.path.getmtime( srcFile )
if srcmtime >= destmtime:
return True
# file is up to date
print destFile[ len(PROJECT_DIR)+1: ], "up to date."
return False
if __name__=='__main__':
# -----------------------------------
# OGEX Scene Files
# -----------------------------------
# TODO: Extract list of textures automatically from scenes
for ogex in OGEX_FILES:
srcFile = os.path.join( PROJECT_DIR, OGEX_DIR, ogex )
destFile = os.path.join( PROJECT_DIR, RUNTIME_DATA_DIR, os.path.splitext( ogex )[0] + ".ldjam" )
if fileNeedsUpdate(srcFile, destFile ):
cmd = [ LDJAM_TOOL,
srcFile,
destFile ]
print string.join(cmd, ' ' )
subprocess.call( cmd )
# -----------------------------------
# Textures
# -----------------------------------
for tex, fmt in TEXTURES.iteritems():
srcFile = os.path.join( PROJECT_DIR, ASSET_DIR, tex )
texfile = os.path.split( tex )[-1]
destFile = os.path.join( PROJECT_DIR, RUNTIME_DATA_DIR, os.path.splitext( texfile )[0] + ".dds" )
if fileNeedsUpdate(srcFile, destFile ):
cmd = [ CRUNCH_TOOL,
'-file', srcFile,
'-out', destFile,
'-gamma', '1.0',
'-' + fmt
]
print string.join(cmd, ' ' )
subprocess.call( cmd )
# # Also make pvr version for ios
# destFilePVR = os.path.join( PROJECT_DIR, RUNTIME_DATA_DIR, os.path.splitext( tex )[0] + ".pvr" )
# # TODO: better params
# cmd2 = [TEXTURETOOL_TOOL,
# '-m', # Generate mipmap chain
# '-e', 'PVRTC',
# '-f', 'PVR',
# '--bits-per-pixel-4',
# '-o', destFilePVR,
# srcFile
# ]
# if fileNeedsUpdate(srcFile, destFilePVR ):
# print string.join(cmd2, ' ' )
# subprocess.call( cmd2 )
```
#### File: tools/addons/TK_OpenGex_LDJAM.py
```python
bl_info = {
"name": "Tapnik OpenGEX [LDJAM] (.ogex)",
"description": "Terathon Software OpenGEX Exporter",
"author": "<NAME>",
"version": (2, 0, 0, 0),
"location": "File > Import-Export",
"wiki_url": "http://opengex.org/",
"category": "Import-Export"}
import bpy
import math
from bpy_extras.io_utils import ExportHelper
kNodeTypeNode = 0
kNodeTypeBone = 1
kNodeTypeGeometry = 2
kNodeTypeLight = 3
kNodeTypeCamera = 4
kAnimationSampled = 0
kAnimationLinear = 1
kAnimationBezier = 2
kExportEpsilon = 1.0e-6
structIdentifier = [B"Node $", B"BoneNode $", B"GeometryNode $", B"LightNode $", B"CameraNode $"]
subtranslationName = [B"xpos", B"ypos", B"zpos"]
subrotationName = [B"xrot", B"yrot", B"zrot"]
subscaleName = [B"xscl", B"yscl", B"zscl"]
deltaSubtranslationName = [B"dxpos", B"dypos", B"dzpos"]
deltaSubrotationName = [B"dxrot", B"dyrot", B"dzrot"]
deltaSubscaleName = [B"dxscl", B"dyscl", B"dzscl"]
axisName = [B"x", B"y", B"z"]
class ExportVertex:
__slots__ = ("hash", "vertexIndex", "faceIndex", "position", "normal", "color", "texcoord0", "texcoord1")
def __init__(self):
self.color = [1.0, 1.0, 1.0]
self.texcoord0 = [0.0, 0.0]
self.texcoord1 = [0.0, 0.0]
def __eq__(self, v):
if (self.hash != v.hash):
return (False)
if (self.position != v.position):
return (False)
if (self.normal != v.normal):
return (False)
if (self.color != v.color):
return (False)
if (self.texcoord0 != v.texcoord0):
return (False)
if (self.texcoord1 != v.texcoord1):
return (False)
return (True)
def Hash(self):
h = hash(self.position[0])
h = h * 21737 + hash(self.position[1])
h = h * 21737 + hash(self.position[2])
h = h * 21737 + hash(self.normal[0])
h = h * 21737 + hash(self.normal[1])
h = h * 21737 + hash(self.normal[2])
h = h * 21737 + hash(self.color[0])
h = h * 21737 + hash(self.color[1])
h = h * 21737 + hash(self.color[2])
h = h * 21737 + hash(self.texcoord0[0])
h = h * 21737 + hash(self.texcoord0[1])
h = h * 21737 + hash(self.texcoord1[0])
h = h * 21737 + hash(self.texcoord1[1])
self.hash = h
class OpenGexExporter(bpy.types.Operator, ExportHelper):
"""Export to OpenGEX format"""
bl_idname = "export_scene.ogex"
bl_label = "Export OpenGEX [LDJAM]"
filename_ext = ".ogex"
option_export_selection = bpy.props.BoolProperty(name = "Export Selection Only", description = "Export only selected objects", default = False)
option_sample_animation = bpy.props.BoolProperty(name = "Force Sampled Animation", description = "Always export animation as per-frame samples", default = False)
def Write(self, text):
self.file.write(text)
def IndentWrite(self, text, extra = 0, newline = False):
if (newline):
self.file.write(B"\n")
for i in range(self.indentLevel + extra):
self.file.write(B"\t")
self.file.write(text)
def WriteInt(self, i):
self.file.write(bytes(str(i), "UTF-8"))
def WriteFloat(self, f):
if ((math.isinf(f)) or (math.isnan(f))):
self.file.write(B"0.0")
else:
self.file.write(bytes(str(f), "UTF-8"))
def WriteMatrix(self, matrix):
self.IndentWrite(B"{", 1)
self.WriteFloat(matrix[0][0])
self.Write(B", ")
self.WriteFloat(matrix[1][0])
self.Write(B", ")
self.WriteFloat(matrix[2][0])
self.Write(B", ")
self.WriteFloat(matrix[3][0])
self.Write(B",\n")
self.IndentWrite(B" ", 1)
self.WriteFloat(matrix[0][1])
self.Write(B", ")
self.WriteFloat(matrix[1][1])
self.Write(B", ")
self.WriteFloat(matrix[2][1])
self.Write(B", ")
self.WriteFloat(matrix[3][1])
self.Write(B",\n")
self.IndentWrite(B" ", 1)
self.WriteFloat(matrix[0][2])
self.Write(B", ")
self.WriteFloat(matrix[1][2])
self.Write(B", ")
self.WriteFloat(matrix[2][2])
self.Write(B", ")
self.WriteFloat(matrix[3][2])
self.Write(B",\n")
self.IndentWrite(B" ", 1)
self.WriteFloat(matrix[0][3])
self.Write(B", ")
self.WriteFloat(matrix[1][3])
self.Write(B", ")
self.WriteFloat(matrix[2][3])
self.Write(B", ")
self.WriteFloat(matrix[3][3])
self.Write(B"}\n")
def WriteMatrixFlat(self, matrix):
self.IndentWrite(B"{", 1)
self.WriteFloat(matrix[0][0])
self.Write(B", ")
self.WriteFloat(matrix[1][0])
self.Write(B", ")
self.WriteFloat(matrix[2][0])
self.Write(B", ")
self.WriteFloat(matrix[3][0])
self.Write(B", ")
self.WriteFloat(matrix[0][1])
self.Write(B", ")
self.WriteFloat(matrix[1][1])
self.Write(B", ")
self.WriteFloat(matrix[2][1])
self.Write(B", ")
self.WriteFloat(matrix[3][1])
self.Write(B", ")
self.WriteFloat(matrix[0][2])
self.Write(B", ")
self.WriteFloat(matrix[1][2])
self.Write(B", ")
self.WriteFloat(matrix[2][2])
self.Write(B", ")
self.WriteFloat(matrix[3][2])
self.Write(B", ")
self.WriteFloat(matrix[0][3])
self.Write(B", ")
self.WriteFloat(matrix[1][3])
self.Write(B", ")
self.WriteFloat(matrix[2][3])
self.Write(B", ")
self.WriteFloat(matrix[3][3])
self.Write(B"}")
def WriteColor(self, color):
self.Write(B"{")
self.WriteFloat(color[0])
self.Write(B", ")
self.WriteFloat(color[1])
self.Write(B", ")
self.WriteFloat(color[2])
self.Write(B"}")
def WriteFileName(self, filename):
length = len(filename)
if (length != 0):
if ((length > 2) and (filename[1] == ":")):
self.Write(B"//")
self.Write(bytes(filename[0], "UTF-8"))
self.Write(bytes(filename[2:length].replace("\\", "/"), "UTF-8"))
else:
self.Write(bytes(filename.replace("\\", "/"), "UTF-8"))
def WriteIntArray(self, valueArray):
count = len(valueArray)
k = 0
lineCount = count >> 6
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(63):
self.WriteInt(valueArray[k])
self.Write(B", ")
k += 1
self.WriteInt(valueArray[k])
k += 1
if (i * 64 < count - 64):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 63
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteInt(valueArray[k])
self.Write(B", ")
k += 1
self.WriteInt(valueArray[k])
self.Write(B"\n")
def WriteFloatArray(self, valueArray):
count = len(valueArray)
k = 0
lineCount = count >> 4
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(15):
self.WriteFloat(valueArray[k])
self.Write(B", ")
k += 1
self.WriteFloat(valueArray[k])
k += 1
if (i * 16 < count - 16):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 15
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteFloat(valueArray[k])
self.Write(B", ")
k += 1
self.WriteFloat(valueArray[k])
self.Write(B"\n")
def WriteVector2D(self, vector):
self.Write(B"{")
self.WriteFloat(vector[0])
self.Write(B", ")
self.WriteFloat(vector[1])
self.Write(B"}")
def WriteVector3D(self, vector):
self.Write(B"{")
self.WriteFloat(vector[0])
self.Write(B", ")
self.WriteFloat(vector[1])
self.Write(B", ")
self.WriteFloat(vector[2])
self.Write(B"}")
def WriteVector4D(self, vector):
self.Write(B"{")
self.WriteFloat(vector[0])
self.Write(B", ")
self.WriteFloat(vector[1])
self.Write(B", ")
self.WriteFloat(vector[2])
self.Write(B", ")
self.WriteFloat(vector[3])
self.Write(B"}")
def WriteQuaternion(self, quaternion):
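# Blender stores quaternions as (w, x, y, z); the components are reordered
# here to (x, y, z, w), the layout used by the OpenGEX quaternion rotation.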
self.Write(B"{")
self.WriteFloat(quaternion[1])
self.Write(B", ")
self.WriteFloat(quaternion[2])
self.Write(B", ")
self.WriteFloat(quaternion[3])
self.Write(B", ")
self.WriteFloat(quaternion[0])
self.Write(B"}")
def WriteVertexArray2D(self, vertexArray, attrib):
count = len(vertexArray)
k = 0
lineCount = count >> 3
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(7):
self.WriteVector2D(getattr(vertexArray[k], attrib))
self.Write(B", ")
k += 1
self.WriteVector2D(getattr(vertexArray[k], attrib))
k += 1
if (i * 8 < count - 8):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 7
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteVector2D(getattr(vertexArray[k], attrib))
self.Write(B", ")
k += 1
self.WriteVector2D(getattr(vertexArray[k], attrib))
self.Write(B"\n")
def WriteVertexArray3D(self, vertexArray, attrib):
count = len(vertexArray)
k = 0
lineCount = count >> 3
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(7):
self.WriteVector3D(getattr(vertexArray[k], attrib))
self.Write(B", ")
k += 1
self.WriteVector3D(getattr(vertexArray[k], attrib))
k += 1
if (i * 8 < count - 8):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 7
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteVector3D(getattr(vertexArray[k], attrib))
self.Write(B", ")
k += 1
self.WriteVector3D(getattr(vertexArray[k], attrib))
self.Write(B"\n")
def WriteMorphPositionArray3D(self, vertexArray, meshVertexArray):
count = len(vertexArray)
k = 0
lineCount = count >> 3
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(7):
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].co)
self.Write(B", ")
k += 1
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].co)
k += 1
if (i * 8 < count - 8):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 7
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].co)
self.Write(B", ")
k += 1
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].co)
self.Write(B"\n")
def WriteMorphNormalArray3D(self, vertexArray, meshVertexArray, tessFaceArray):
count = len(vertexArray)
k = 0
lineCount = count >> 3
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(7):
face = tessFaceArray[vertexArray[k].faceIndex]
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].normal if (face.use_smooth) else face.normal)
self.Write(B", ")
k += 1
face = tessFaceArray[vertexArray[k].faceIndex]
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].normal if (face.use_smooth) else face.normal)
k += 1
if (i * 8 < count - 8):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 7
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
face = tessFaceArray[vertexArray[k].faceIndex]
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].normal if (face.use_smooth) else face.normal)
self.Write(B", ")
k += 1
face = tessFaceArray[vertexArray[k].faceIndex]
self.WriteVector3D(meshVertexArray[vertexArray[k].vertexIndex].normal if (face.use_smooth) else face.normal)
self.Write(B"\n")
def WriteTriangle(self, triangleIndex, indexTable):
i = triangleIndex * 3
self.Write(B"{")
self.WriteInt(indexTable[i])
self.Write(B", ")
self.WriteInt(indexTable[i + 1])
self.Write(B", ")
self.WriteInt(indexTable[i + 2])
self.Write(B"}")
def WriteTriangleArray(self, count, indexTable):
triangleIndex = 0
lineCount = count >> 4
for i in range(lineCount):
self.IndentWrite(B"", 1)
for j in range(15):
self.WriteTriangle(triangleIndex, indexTable)
self.Write(B", ")
triangleIndex += 1
self.WriteTriangle(triangleIndex, indexTable)
triangleIndex += 1
if (i * 16 < count - 16):
self.Write(B",\n")
else:
self.Write(B"\n")
count &= 15
if (count != 0):
self.IndentWrite(B"", 1)
for j in range(count - 1):
self.WriteTriangle(triangleIndex, indexTable)
self.Write(B", ")
triangleIndex += 1
self.WriteTriangle(triangleIndex, indexTable)
self.Write(B"\n")
def WriteNodeTable(self, objectRef):
first = True
for node in objectRef[1]["nodeTable"]:
if (first):
self.Write(B"\t\t// ")
else:
self.Write(B", ")
self.Write(bytes(node.name, "UTF-8"))
first = False
@staticmethod
def GetNodeType(node):
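# Meshes with no polygons and lamp types other than SUN, POINT, and SPOT
# (e.g. HEMI or AREA) fall through and are exported as plain nodes.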
if (node.type == "MESH"):
if (len(node.data.polygons) != 0):
return (kNodeTypeGeometry)
elif (node.type == "LAMP"):
type = node.data.type
if ((type == "SUN") or (type == "POINT") or (type == "SPOT")):
return (kNodeTypeLight)
elif (node.type == "CAMERA"):
return (kNodeTypeCamera)
return (kNodeTypeNode)
@staticmethod
def GetShapeKeys(mesh):
shapeKeys = mesh.shape_keys
if ((shapeKeys) and (len(shapeKeys.key_blocks) > 1)):
return (shapeKeys)
return (None)
def FindNode(self, name):
for nodeRef in self.nodeArray.items():
if (nodeRef[0].name == name):
return (nodeRef)
return (None)
@staticmethod
def DeindexMesh(mesh, materialTable):
# This function deindexes all vertex positions, colors, and texcoords.
# Three separate ExportVertex structures are created for each triangle.
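# Quads are split into two triangles using vertices (0, 1, 2) and (0, 2, 3),
# so a quad contributes six ExportVertex entries and two material table entries.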
vertexArray = mesh.vertices
exportVertexArray = []
faceIndex = 0
for face in mesh.tessfaces:
k1 = face.vertices[0]
k2 = face.vertices[1]
k3 = face.vertices[2]
v1 = vertexArray[k1]
v2 = vertexArray[k2]
v3 = vertexArray[k3]
exportVertex = ExportVertex()
exportVertex.vertexIndex = k1
exportVertex.faceIndex = faceIndex
exportVertex.position = v1.co
exportVertex.normal = v1.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
exportVertex = ExportVertex()
exportVertex.vertexIndex = k2
exportVertex.faceIndex = faceIndex
exportVertex.position = v2.co
exportVertex.normal = v2.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
exportVertex = ExportVertex()
exportVertex.vertexIndex = k3
exportVertex.faceIndex = faceIndex
exportVertex.position = v3.co
exportVertex.normal = v3.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
materialTable.append(face.material_index)
if (len(face.vertices) == 4):
k1 = face.vertices[0]
k2 = face.vertices[2]
k3 = face.vertices[3]
v1 = vertexArray[k1]
v2 = vertexArray[k2]
v3 = vertexArray[k3]
exportVertex = ExportVertex()
exportVertex.vertexIndex = k1
exportVertex.faceIndex = faceIndex
exportVertex.position = v1.co
exportVertex.normal = v1.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
exportVertex = ExportVertex()
exportVertex.vertexIndex = k2
exportVertex.faceIndex = faceIndex
exportVertex.position = v2.co
exportVertex.normal = v2.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
exportVertex = ExportVertex()
exportVertex.vertexIndex = k3
exportVertex.faceIndex = faceIndex
exportVertex.position = v3.co
exportVertex.normal = v3.normal if (face.use_smooth) else face.normal
exportVertexArray.append(exportVertex)
materialTable.append(face.material_index)
faceIndex += 1
colorCount = len(mesh.tessface_vertex_colors)
if (colorCount > 0):
colorFace = mesh.tessface_vertex_colors[0].data
vertexIndex = 0
faceIndex = 0
for face in mesh.tessfaces:
cf = colorFace[faceIndex]
exportVertexArray[vertexIndex].color = cf.color1
vertexIndex += 1
exportVertexArray[vertexIndex].color = cf.color2
vertexIndex += 1
exportVertexArray[vertexIndex].color = cf.color3
vertexIndex += 1
if (len(face.vertices) == 4):
exportVertexArray[vertexIndex].color = cf.color1
vertexIndex += 1
exportVertexArray[vertexIndex].color = cf.color3
vertexIndex += 1
exportVertexArray[vertexIndex].color = cf.color4
vertexIndex += 1
faceIndex += 1
texcoordCount = len(mesh.tessface_uv_textures)
if (texcoordCount > 0):
texcoordFace = mesh.tessface_uv_textures[0].data
vertexIndex = 0
faceIndex = 0
for face in mesh.tessfaces:
tf = texcoordFace[faceIndex]
exportVertexArray[vertexIndex].texcoord0 = tf.uv1
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord0 = tf.uv2
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord0 = tf.uv3
vertexIndex += 1
if (len(face.vertices) == 4):
exportVertexArray[vertexIndex].texcoord0 = tf.uv1
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord0 = tf.uv3
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord0 = tf.uv4
vertexIndex += 1
faceIndex += 1
if (texcoordCount > 1):
texcoordFace = mesh.tessface_uv_textures[1].data
vertexIndex = 0
faceIndex = 0
for face in mesh.tessfaces:
tf = texcoordFace[faceIndex]
exportVertexArray[vertexIndex].texcoord1 = tf.uv1
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord1 = tf.uv2
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord1 = tf.uv3
vertexIndex += 1
if (len(face.vertices) == 4):
exportVertexArray[vertexIndex].texcoord1 = tf.uv1
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord1 = tf.uv3
vertexIndex += 1
exportVertexArray[vertexIndex].texcoord1 = tf.uv4
vertexIndex += 1
faceIndex += 1
for ev in exportVertexArray:
ev.Hash()
return (exportVertexArray)
@staticmethod
def FindExportVertex(bucket, exportVertexArray, vertex):
for index in bucket:
if (exportVertexArray[index] == vertex):
return (index)
return (-1)
@staticmethod
def UnifyVertices(exportVertexArray, indexTable):
# This function looks for identical vertices having exactly the same position, normal,
# color, and texcoords. Duplicate vertices are unified, and a new index table is returned.
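# The bucket count is rounded down to a power of two so that a vertex hash can
# be mapped to a bucket with a simple mask (hash & (bucketCount - 1)). Each
# bucket stores indices into exportVertexArray, and indexTable ends up mapping
# every deindexed vertex to its position in the returned unified array.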
bucketCount = len(exportVertexArray) >> 3
if (bucketCount > 1):
# Round down to nearest power of two.
while True:
count = bucketCount & (bucketCount - 1)
if (count == 0):
break
bucketCount = count
else:
bucketCount = 1
hashTable = [[] for i in range(bucketCount)]
unifiedVertexArray = []
for i in range(len(exportVertexArray)):
ev = exportVertexArray[i]
bucket = ev.hash & (bucketCount - 1)
index = OpenGexExporter.FindExportVertex(hashTable[bucket], exportVertexArray, ev)
if (index < 0):
indexTable.append(len(unifiedVertexArray))
unifiedVertexArray.append(ev)
hashTable[bucket].append(i)
else:
indexTable.append(indexTable[index])
return (unifiedVertexArray)
def ProcessBone(self, bone):
if ((self.exportAllFlag) or (bone.select)):
self.nodeArray[bone] = {"nodeType" : kNodeTypeBone, "structName" : bytes("node" + str(len(self.nodeArray) + 1), "UTF-8")}
for subnode in bone.children:
self.ProcessBone(subnode)
def ProcessNode(self, node):
if ((self.exportAllFlag) or (node.select)):
type = OpenGexExporter.GetNodeType(node)
self.nodeArray[node] = {"nodeType" : type, "structName" : bytes("node" + str(len(self.nodeArray) + 1), "UTF-8")}
if (node.parent_type == "BONE"):
boneSubnodeArray = self.boneParentArray.get(node.parent_bone)
if (boneSubnodeArray):
boneSubnodeArray.append(node)
else:
self.boneParentArray[node.parent_bone] = [node]
if (node.type == "ARMATURE"):
skeleton = node.data
if (skeleton):
for bone in skeleton.bones:
if (not bone.parent):
self.ProcessBone(bone)
for subnode in node.children:
self.ProcessNode(subnode)
def ProcessSkinnedMeshes(self):
for nodeRef in self.nodeArray.items():
if (nodeRef[1]["nodeType"] == kNodeTypeGeometry):
armature = nodeRef[0].find_armature()
if (armature):
for bone in armature.data.bones:
boneRef = self.FindNode(bone.name)
if (boneRef):
# If a node is used as a bone, then we force its type to be a bone.
boneRef[1]["nodeType"] = kNodeTypeBone
@staticmethod
def ClassifyAnimationCurve(fcurve):
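# A curve is classified as linear when every keyframe uses LINEAR interpolation
# and as Bezier when every keyframe uses BEZIER; mixed or any other
# interpolation mode forces the curve to be exported as per-frame samples.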
linearCount = 0
bezierCount = 0
for key in fcurve.keyframe_points:
interp = key.interpolation
if (interp == "LINEAR"):
linearCount += 1
elif (interp == "BEZIER"):
bezierCount += 1
else:
return (kAnimationSampled)
if (bezierCount == 0):
return (kAnimationLinear)
elif (linearCount == 0):
return (kAnimationBezier)
return (kAnimationSampled)
@staticmethod
def AnimationKeysDifferent(fcurve):
keyCount = len(fcurve.keyframe_points)
if (keyCount > 0):
key1 = fcurve.keyframe_points[0].co[1]
for i in range(1, keyCount):
key2 = fcurve.keyframe_points[i].co[1]
if (math.fabs(key2 - key1) > kExportEpsilon):
return (True)
return (False)
@staticmethod
def AnimationTangentsNonzero(fcurve):
keyCount = len(fcurve.keyframe_points)
if (keyCount > 0):
key = fcurve.keyframe_points[0].co[1]
left = fcurve.keyframe_points[0].handle_left[1]
right = fcurve.keyframe_points[0].handle_right[1]
if ((math.fabs(key - left) > kExportEpsilon) or (math.fabs(right - key) > kExportEpsilon)):
return (True)
for i in range(1, keyCount):
key = fcurve.keyframe_points[i].co[1]
left = fcurve.keyframe_points[i].handle_left[1]
right = fcurve.keyframe_points[i].handle_right[1]
if ((math.fabs(key - left) > kExportEpsilon) or (math.fabs(right - key) > kExportEpsilon)):
return (True)
return (False)
@staticmethod
def AnimationPresent(fcurve, kind):
if (kind != kAnimationBezier):
return (OpenGexExporter.AnimationKeysDifferent(fcurve))
return ((OpenGexExporter.AnimationKeysDifferent(fcurve)) or (OpenGexExporter.AnimationTangentsNonzero(fcurve)))
@staticmethod
def MatricesDifferent(m1, m2):
for i in range(4):
for j in range(4):
if (math.fabs(m1[i][j] - m2[i][j]) > kExportEpsilon):
return (True)
return (False)
@staticmethod
def CollectBoneAnimation(armature, name):
path = "pose.bones[\"" + name + "\"]."
curveArray = []
if (armature.animation_data):
action = armature.animation_data.action
if (action):
for fcurve in action.fcurves:
if (fcurve.data_path.startswith(path)):
curveArray.append(fcurve)
return (curveArray)
def ExportKeyTimes(self, fcurve):
self.IndentWrite(B"Key {float {")
keyCount = len(fcurve.keyframe_points)
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
time = fcurve.keyframe_points[i].co[0] - self.beginFrame
self.WriteFloat(time * self.frameTime)
self.Write(B"}}\n")
def ExportKeyTimeControlPoints(self, fcurve):
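# The "-control" and "+control" keys hold the incoming and outgoing Bezier
# handle times (handle_left / handle_right), shifted by the export start frame
# and converted to seconds with frameTime, matching the plain Key times written
# by ExportKeyTimes.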
self.IndentWrite(B"Key (kind = \"-control\") {float {")
keyCount = len(fcurve.keyframe_points)
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
ctrl = fcurve.keyframe_points[i].handle_left[0] - self.beginFrame
self.WriteFloat(ctrl * self.frameTime)
self.Write(B"}}\n")
self.IndentWrite(B"Key (kind = \"+control\") {float {")
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
ctrl = fcurve.keyframe_points[i].handle_right[0] - self.beginFrame
self.WriteFloat(ctrl * self.frameTime)
self.Write(B"}}\n")
def ExportKeyValues(self, fcurve):
self.IndentWrite(B"Key {float {")
keyCount = len(fcurve.keyframe_points)
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
value = fcurve.keyframe_points[i].co[1]
self.WriteFloat(value)
self.Write(B"}}\n")
def ExportKeyValueControlPoints(self, fcurve):
self.IndentWrite(B"Key (kind = \"-control\") {float {")
keyCount = len(fcurve.keyframe_points)
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
ctrl = fcurve.keyframe_points[i].handle_left[1]
self.WriteFloat(ctrl)
self.Write(B"}}\n")
self.IndentWrite(B"Key (kind = \"+control\") {float {")
for i in range(keyCount):
if (i > 0):
self.Write(B", ")
ctrl = fcurve.keyframe_points[i].handle_right[1]
self.WriteFloat(ctrl)
self.Write(B"}}\n")
def ExportAnimationTrack(self, fcurve, kind, target, newline):
# This function exports a single animation track. The curve types for the
# Time and Value structures are given by the kind parameter.
self.IndentWrite(B"Track (target = %", 0, newline)
self.Write(target)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
if (kind != kAnimationBezier):
self.IndentWrite(B"Time\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.ExportKeyTimes(fcurve)
self.IndentWrite(B"}\n\n", -1)
self.IndentWrite(B"Value\n", -1)
self.IndentWrite(B"{\n", -1)
self.ExportKeyValues(fcurve)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
else:
self.IndentWrite(B"Time (curve = \"bezier\")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.ExportKeyTimes(fcurve)
self.ExportKeyTimeControlPoints(fcurve)
self.IndentWrite(B"}\n\n", -1)
self.IndentWrite(B"Value (curve = \"bezier\")\n", -1)
self.IndentWrite(B"{\n", -1)
self.ExportKeyValues(fcurve)
self.ExportKeyValueControlPoints(fcurve)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportNodeSampledAnimation(self, node, scene):
# This function exports animation as full 4x4 matrices for each frame.
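# The frame range is scanned first; if matrix_local never changes by more than
# kExportEpsilon, no Animation structure is written at all. Otherwise one key is
# written per frame from beginFrame up to (but not including) endFrame, plus a
# final key at endFrame, and the scene's current frame is restored afterwards.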
currentFrame = scene.frame_current
currentSubframe = scene.frame_subframe
animationFlag = False
m1 = node.matrix_local.copy()
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
m2 = node.matrix_local
if (OpenGexExporter.MatricesDifferent(m1, m2)):
animationFlag = True
break
if (animationFlag):
self.IndentWrite(B"Animation\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Track (target = %transform)\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Time\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Key {float {")
for i in range(self.beginFrame, self.endFrame):
self.WriteFloat((i - self.beginFrame) * self.frameTime)
self.Write(B", ")
self.WriteFloat(self.endFrame * self.frameTime)
self.Write(B"}}\n")
self.IndentWrite(B"}\n\n", -1)
self.IndentWrite(B"Value\n", -1)
self.IndentWrite(B"{\n", -1)
self.IndentWrite(B"Key\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
self.WriteMatrixFlat(node.matrix_local)
self.Write(B",\n")
scene.frame_set(self.endFrame)
self.WriteMatrixFlat(node.matrix_local)
self.IndentWrite(B"}\n", 0, True)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
scene.frame_set(currentFrame, currentSubframe)
def ExportBoneSampledAnimation(self, poseBone, scene):
# This function exports bone animation as full 4x4 matrices for each frame.
currentFrame = scene.frame_current
currentSubframe = scene.frame_subframe
animationFlag = False
m1 = poseBone.matrix.copy()
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
m2 = poseBone.matrix
if (OpenGexExporter.MatricesDifferent(m1, m2)):
animationFlag = True
break
if (animationFlag):
self.IndentWrite(B"Animation\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Track (target = %transform)\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Time\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Key {float {")
for i in range(self.beginFrame, self.endFrame):
self.WriteFloat((i - self.beginFrame) * self.frameTime)
self.Write(B", ")
self.WriteFloat(self.endFrame * self.frameTime)
self.Write(B"}}\n")
self.IndentWrite(B"}\n\n", -1)
self.IndentWrite(B"Value\n", -1)
self.IndentWrite(B"{\n", -1)
self.IndentWrite(B"Key\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
parent = poseBone.parent
if (parent):
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
if (math.fabs(parent.matrix.determinant()) > kExportEpsilon):
self.WriteMatrixFlat(parent.matrix.inverted() * poseBone.matrix)
else:
self.WriteMatrixFlat(poseBone.matrix)
self.Write(B",\n")
scene.frame_set(self.endFrame)
if (math.fabs(parent.matrix.determinant()) > kExportEpsilon):
self.WriteMatrixFlat(parent.matrix.inverted() * poseBone.matrix)
else:
self.WriteMatrixFlat(poseBone.matrix)
self.IndentWrite(B"}\n", 0, True)
else:
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
self.WriteMatrixFlat(poseBone.matrix)
self.Write(B",\n")
scene.frame_set(self.endFrame)
self.WriteMatrixFlat(poseBone.matrix)
self.IndentWrite(B"}\n", 0, True)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
scene.frame_set(currentFrame, currentSubframe)
def ExportMorphWeightSampledAnimationTrack(self, block, target, scene, newline):
currentFrame = scene.frame_current
currentSubframe = scene.frame_subframe
self.IndentWrite(B"Track (target = %", 0, newline)
self.Write(target)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Time\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"Key {float {")
for i in range(self.beginFrame, self.endFrame):
self.WriteFloat((i - self.beginFrame) * self.frameTime)
self.Write(B", ")
self.WriteFloat(self.endFrame * self.frameTime)
self.Write(B"}}\n")
self.IndentWrite(B"}\n\n", -1)
self.IndentWrite(B"Value\n", -1)
self.IndentWrite(B"{\n", -1)
self.IndentWrite(B"Key {float {")
for i in range(self.beginFrame, self.endFrame):
scene.frame_set(i)
self.WriteFloat(block.value)
self.Write(B", ")
scene.frame_set(self.endFrame)
self.WriteFloat(block.value)
self.Write(B"}}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
scene.frame_set(currentFrame, currentSubframe)
def ExportNodeTransform(self, node, scene):
posAnimCurve = [None, None, None]
rotAnimCurve = [None, None, None]
sclAnimCurve = [None, None, None]
posAnimKind = [0, 0, 0]
rotAnimKind = [0, 0, 0]
sclAnimKind = [0, 0, 0]
deltaPosAnimCurve = [None, None, None]
deltaRotAnimCurve = [None, None, None]
deltaSclAnimCurve = [None, None, None]
deltaPosAnimKind = [0, 0, 0]
deltaRotAnimKind = [0, 0, 0]
deltaSclAnimKind = [0, 0, 0]
positionAnimated = False
rotationAnimated = False
scaleAnimated = False
posAnimated = [False, False, False]
rotAnimated = [False, False, False]
sclAnimated = [False, False, False]
deltaPositionAnimated = False
deltaRotationAnimated = False
deltaScaleAnimated = False
deltaPosAnimated = [False, False, False]
deltaRotAnimated = [False, False, False]
deltaSclAnimated = [False, False, False]
mode = node.rotation_mode
sampledAnimation = ((self.sampleAnimationFlag) or (mode == "QUATERNION") or (mode == "AXIS_ANGLE"))
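# Quaternion and axis-angle rotation modes are always exported as sampled
# transforms rather than per-axis tracks, presumably because interpolating
# their components independently would not reproduce Blender's rotation.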
if ((not sampledAnimation) and (node.animation_data)):
action = node.animation_data.action
if (action):
for fcurve in action.fcurves:
kind = OpenGexExporter.ClassifyAnimationCurve(fcurve)
if (kind != kAnimationSampled):
if (fcurve.data_path == "location"):
for i in range(3):
if ((fcurve.array_index == i) and (not posAnimCurve[i])):
posAnimCurve[i] = fcurve
posAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
posAnimated[i] = True
elif (fcurve.data_path == "delta_location"):
for i in range(3):
if ((fcurve.array_index == i) and (not deltaPosAnimCurve[i])):
deltaPosAnimCurve[i] = fcurve
deltaPosAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
deltaPosAnimated[i] = True
elif (fcurve.data_path == "rotation_euler"):
for i in range(3):
if ((fcurve.array_index == i) and (not rotAnimCurve[i])):
rotAnimCurve[i] = fcurve
rotAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
rotAnimated[i] = True
elif (fcurve.data_path == "delta_rotation_euler"):
for i in range(3):
if ((fcurve.array_index == i) and (not deltaRotAnimCurve[i])):
deltaRotAnimCurve[i] = fcurve
deltaRotAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
deltaRotAnimated[i] = True
elif (fcurve.data_path == "scale"):
for i in range(3):
if ((fcurve.array_index == i) and (not sclAnimCurve[i])):
sclAnimCurve[i] = fcurve
sclAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
sclAnimated[i] = True
elif (fcurve.data_path == "delta_scale"):
for i in range(3):
if ((fcurve.array_index == i) and (not deltaSclAnimCurve[i])):
deltaSclAnimCurve[i] = fcurve
deltaSclAnimKind[i] = kind
if (OpenGexExporter.AnimationPresent(fcurve, kind)):
deltaSclAnimated[i] = True
elif ((fcurve.data_path == "rotation_axis_angle") or (fcurve.data_path == "rotation_quaternion") or (fcurve.data_path == "delta_rotation_quaternion")):
sampledAnimation = True
break
else:
sampledAnimation = True
break
positionAnimated = posAnimated[0] | posAnimated[1] | posAnimated[2]
rotationAnimated = rotAnimated[0] | rotAnimated[1] | rotAnimated[2]
scaleAnimated = sclAnimated[0] | sclAnimated[1] | sclAnimated[2]
deltaPositionAnimated = deltaPosAnimated[0] | deltaPosAnimated[1] | deltaPosAnimated[2]
deltaRotationAnimated = deltaRotAnimated[0] | deltaRotAnimated[1] | deltaRotAnimated[2]
deltaScaleAnimated = deltaSclAnimated[0] | deltaSclAnimated[1] | deltaSclAnimated[2]
if ((sampledAnimation) or ((not positionAnimated) and (not rotationAnimated) and (not scaleAnimated) and (not deltaPositionAnimated) and (not deltaRotationAnimated) and (not deltaScaleAnimated))):
# If there's no keyframe animation at all, then write the node transform as a single 4x4 matrix.
# We might still be exporting sampled animation below.
self.IndentWrite(B"Transform")
if (sampledAnimation):
self.Write(B" %transform")
self.IndentWrite(B"{\n", 0, True)
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
self.WriteMatrix(node.matrix_local)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
if (sampledAnimation):
self.ExportNodeSampledAnimation(node, scene)
else:
structFlag = False
deltaTranslation = node.delta_location
if (deltaPositionAnimated):
# When the delta location is animated, write the x, y, and z components separately
# so they can be targeted by different tracks having different sets of keys.
for i in range(3):
pos = deltaTranslation[i]
if ((deltaPosAnimated[i]) or (math.fabs(pos) > kExportEpsilon)):
self.IndentWrite(B"Translation %", 0, structFlag)
self.Write(deltaSubtranslationName[i])
self.Write(B" (kind = \"")
self.Write(axisName[i])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(pos)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
elif ((math.fabs(deltaTranslation[0]) > kExportEpsilon) or (math.fabs(deltaTranslation[1]) > kExportEpsilon) or (math.fabs(deltaTranslation[2]) > kExportEpsilon)):
self.IndentWrite(B"Translation\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[3] {", 1)
self.WriteVector3D(deltaTranslation)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
translation = node.location
if (positionAnimated):
# When the location is animated, write the x, y, and z components separately
# so they can be targeted by different tracks having different sets of keys.
for i in range(3):
pos = translation[i]
if ((posAnimated[i]) or (math.fabs(pos) > kExportEpsilon)):
self.IndentWrite(B"Translation %", 0, structFlag)
self.Write(subtranslationName[i])
self.Write(B" (kind = \"")
self.Write(axisName[i])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(pos)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
elif ((math.fabs(translation[0]) > kExportEpsilon) or (math.fabs(translation[1]) > kExportEpsilon) or (math.fabs(translation[2]) > kExportEpsilon)):
self.IndentWrite(B"Translation\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[3] {", 1)
self.WriteVector3D(translation)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
if (deltaRotationAnimated):
# When the delta rotation is animated, write three separate Euler angle rotations
# so they can be targeted by different tracks having different sets of keys.
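# ord(mode[2 - i]) - 0x58 maps the axis letters of the rotation mode string
# (e.g. "XYZ") to indices 0-2 ('X' is 0x58); iterating over mode[2 - i] walks
# the axes from the last letter to the first, presumably so the rotations
# compose in the same order in which Blender applies them.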
for i in range(3):
axis = ord(mode[2 - i]) - 0x58
angle = node.delta_rotation_euler[axis]
if ((deltaRotAnimated[axis]) or (math.fabs(angle) > kExportEpsilon)):
self.IndentWrite(B"Rotation %", 0, structFlag)
self.Write(deltaSubrotationName[axis])
self.Write(B" (kind = \"")
self.Write(axisName[axis])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(angle)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
else:
# When the delta rotation is not animated, write it in the representation given by
# the node's current rotation mode. (There is no axis-angle delta rotation.)
if (mode == "QUATERNION"):
quaternion = node.delta_rotation_quaternion
if ((math.fabs(quaternion[0] - 1.0) > kExportEpsilon) or (math.fabs(quaternion[1]) > kExportEpsilon) or (math.fabs(quaternion[2]) > kExportEpsilon) or (math.fabs(quaternion[3]) > kExportEpsilon)):
self.IndentWrite(B"Rotation (kind = \"quaternion\")\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[4] {", 1)
self.WriteQuaternion(quaternion)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
else:
for i in range(3):
axis = ord(mode[2 - i]) - 0x58
angle = node.delta_rotation_euler[axis]
if (math.fabs(angle) > kExportEpsilon):
self.IndentWrite(B"Rotation (kind = \"", 0, structFlag)
self.Write(axisName[axis])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(angle)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
if (rotationAnimated):
# When the rotation is animated, write three separate Euler angle rotations
# so they can be targeted by different tracks having different sets of keys.
for i in range(3):
axis = ord(mode[2 - i]) - 0x58
angle = node.rotation_euler[axis]
if ((rotAnimated[axis]) or (math.fabs(angle) > kExportEpsilon)):
self.IndentWrite(B"Rotation %", 0, structFlag)
self.Write(subrotationName[axis])
self.Write(B" (kind = \"")
self.Write(axisName[axis])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(angle)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
else:
# When the rotation is not animated, write it in the representation given by
# the node's current rotation mode.
if (mode == "QUATERNION"):
quaternion = node.rotation_quaternion
if ((math.fabs(quaternion[0] - 1.0) > kExportEpsilon) or (math.fabs(quaternion[1]) > kExportEpsilon) or (math.fabs(quaternion[2]) > kExportEpsilon) or (math.fabs(quaternion[3]) > kExportEpsilon)):
self.IndentWrite(B"Rotation (kind = \"quaternion\")\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[4] {", 1)
self.WriteQuaternion(quaternion)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
elif (mode == "AXIS_ANGLE"):
if (math.fabs(node.rotation_axis_angle[0]) > kExportEpsilon):
self.IndentWrite(B"Rotation (kind = \"axis\")\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[4] {", 1)
self.WriteVector4D(node.rotation_axis_angle)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
else:
for i in range(3):
axis = ord(mode[2 - i]) - 0x58
angle = node.rotation_euler[axis]
if (math.fabs(angle) > kExportEpsilon):
self.IndentWrite(B"Rotation (kind = \"", 0, structFlag)
self.Write(axisName[axis])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(angle)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
deltaScale = node.delta_scale
if (deltaScaleAnimated):
# When the delta scale is animated, write the x, y, and z components separately
# so they can be targeted by different tracks having different sets of keys.
for i in range(3):
scl = deltaScale[i]
if ((deltaSclAnimated[i]) or (math.fabs(scl) > kExportEpsilon)):
self.IndentWrite(B"Scale %", 0, structFlag)
self.Write(deltaSubscaleName[i])
self.Write(B" (kind = \"")
self.Write(axisName[i])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(scl)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
elif ((math.fabs(deltaScale[0] - 1.0) > kExportEpsilon) or (math.fabs(deltaScale[1] - 1.0) > kExportEpsilon) or (math.fabs(deltaScale[2] - 1.0) > kExportEpsilon)):
self.IndentWrite(B"Scale\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[3] {", 1)
self.WriteVector3D(deltaScale)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
scale = node.scale
if (scaleAnimated):
# When the scale is animated, write the x, y, and z components separately
# so they can be targeted by different tracks having different sets of keys.
for i in range(3):
scl = scale[i]
if ((sclAnimated[i]) or (math.fabs(scl) > kExportEpsilon)):
self.IndentWrite(B"Scale %", 0, structFlag)
self.Write(subscaleName[i])
self.Write(B" (kind = \"")
self.Write(axisName[i])
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"float {", 1)
self.WriteFloat(scl)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
elif ((math.fabs(scale[0] - 1.0) > kExportEpsilon) or (math.fabs(scale[1] - 1.0) > kExportEpsilon) or (math.fabs(scale[2] - 1.0) > kExportEpsilon)):
self.IndentWrite(B"Scale\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.IndentWrite(B"float[3] {", 1)
self.WriteVector3D(scale)
self.Write(B"}")
self.IndentWrite(B"}\n", 0, True)
structFlag = True
# Export the animation tracks.
self.IndentWrite(B"Animation (begin = ", 0, True)
self.WriteFloat((action.frame_range[0] - self.beginFrame) * self.frameTime)
self.Write(B", end = ")
self.WriteFloat((action.frame_range[1] - self.beginFrame) * self.frameTime)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
structFlag = False
if (positionAnimated):
for i in range(3):
if (posAnimated[i]):
self.ExportAnimationTrack(posAnimCurve[i], posAnimKind[i], subtranslationName[i], structFlag)
structFlag = True
if (rotationAnimated):
for i in range(3):
if (rotAnimated[i]):
self.ExportAnimationTrack(rotAnimCurve[i], rotAnimKind[i], subrotationName[i], structFlag)
structFlag = True
if (scaleAnimated):
for i in range(3):
if (sclAnimated[i]):
self.ExportAnimationTrack(sclAnimCurve[i], sclAnimKind[i], subscaleName[i], structFlag)
structFlag = True
if (deltaPositionAnimated):
for i in range(3):
if (deltaPosAnimated[i]):
self.ExportAnimationTrack(deltaPosAnimCurve[i], deltaPosAnimKind[i], deltaSubtranslationName[i], structFlag)
structFlag = True
if (deltaRotationAnimated):
for i in range(3):
if (deltaRotAnimated[i]):
self.ExportAnimationTrack(deltaRotAnimCurve[i], deltaRotAnimKind[i], deltaSubrotationName[i], structFlag)
structFlag = True
if (deltaScaleAnimated):
for i in range(3):
if (deltaSclAnimated[i]):
self.ExportAnimationTrack(deltaSclAnimCurve[i], deltaSclAnimKind[i], deltaSubscaleName[i], structFlag)
structFlag = True
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportBoneTransform(self, armature, bone, scene):
curveArray = self.CollectBoneAnimation(armature, bone.name)
animation = ((len(curveArray) != 0) or (self.sampleAnimationFlag))
transform = bone.matrix_local.copy()
parentBone = bone.parent
if ((parentBone) and (math.fabs(parentBone.matrix_local.determinant()) > kExportEpsilon)):
transform = parentBone.matrix_local.inverted() * transform
poseBone = armature.pose.bones.get(bone.name)
if (poseBone):
transform = poseBone.matrix.copy()
parentPoseBone = poseBone.parent
if ((parentPoseBone) and (math.fabs(parentPoseBone.matrix.determinant()) > kExportEpsilon)):
transform = parentPoseBone.matrix.inverted() * transform
self.IndentWrite(B"Transform")
if (animation):
self.Write(B" %transform")
self.IndentWrite(B"{\n", 0, True)
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
self.WriteMatrix(transform)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
if ((animation) and (poseBone)):
self.ExportBoneSampledAnimation(poseBone, scene)
def ExportMaterialRef(self, material, index):
if (not material in self.materialArray):
self.materialArray[material] = {"structName" : bytes("material" + str(len(self.materialArray) + 1), "UTF-8")}
self.IndentWrite(B"MaterialRef (index = ")
self.WriteInt(index)
self.Write(B") {ref {$")
self.Write(self.materialArray[material]["structName"])
self.Write(B"}}\n")
def ExportMorphWeights(self, node, shapeKeys, scene):
action = None
curveArray = []
indexArray = []
if (shapeKeys.animation_data):
action = shapeKeys.animation_data.action
if (action):
for fcurve in action.fcurves:
if ((fcurve.data_path.startswith("key_blocks[")) and (fcurve.data_path.endswith("].value"))):
# Strip the surrounding "key_blocks[" and "].value" characters from the data
# path, leaving either a quoted shape-key name or a numeric index.
keyName = fcurve.data_path.strip("abcdehklopstuvy[]_.")
if ((keyName[0] == "\"") or (keyName[0] == "'")):
index = shapeKeys.key_blocks.find(keyName.strip("\"'"))
if (index >= 0):
curveArray.append(fcurve)
indexArray.append(index)
else:
curveArray.append(fcurve)
indexArray.append(int(keyName))
if ((not action) and (node.animation_data)):
action = node.animation_data.action
if (action):
for fcurve in action.fcurves:
if ((fcurve.data_path.startswith("data.shape_keys.key_blocks[")) and (fcurve.data_path.endswith("].value"))):
# Same stripping as above, for curves stored on the object's animation data.
keyName = fcurve.data_path.strip("abcdehklopstuvy[]_.")
if ((keyName[0] == "\"") or (keyName[0] == "'")):
index = shapeKeys.key_blocks.find(keyName.strip("\"'"))
if (index >= 0):
curveArray.append(fcurve)
indexArray.append(index)
else:
curveArray.append(fcurve)
indexArray.append(int(keyName))
animated = (len(curveArray) != 0)
referenceName = shapeKeys.reference_key.name if (shapeKeys.use_relative) else ""
for k in range(len(shapeKeys.key_blocks)):
self.IndentWrite(B"MorphWeight", 0, (k == 0))
if (animated):
self.Write(B" %mw")
self.WriteInt(k)
self.Write(B" (index = ")
self.WriteInt(k)
self.Write(B") {float {")
block = shapeKeys.key_blocks[k]
self.WriteFloat(block.value if (block.name != referenceName) else 1.0)
self.Write(B"}}\n")
if (animated):
self.IndentWrite(B"Animation (begin = ", 0, True)
self.WriteFloat((action.frame_range[0] - self.beginFrame) * self.frameTime)
self.Write(B", end = ")
self.WriteFloat((action.frame_range[1] - self.beginFrame) * self.frameTime)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
structFlag = False
for a in range(len(curveArray)):
k = indexArray[a]
target = bytes("mw" + str(k), "UTF-8")
fcurve = curveArray[a]
kind = OpenGexExporter.ClassifyAnimationCurve(fcurve)
if ((kind != kAnimationSampled) and (not self.sampleAnimationFlag)):
self.ExportAnimationTrack(fcurve, kind, target, structFlag)
else:
self.ExportMorphWeightSampledAnimationTrack(shapeKeys.key_blocks[k], target, scene, structFlag)
structFlag = True
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportBone(self, armature, bone, scene):
nodeRef = self.nodeArray.get(bone)
if (nodeRef):
self.IndentWrite(structIdentifier[nodeRef["nodeType"]], 0, True)
self.Write(nodeRef["structName"])
self.IndentWrite(B"{\n", 0, True)
self.indentLevel += 1
name = bone.name
if (name != ""):
self.IndentWrite(B"Name {string {\"")
self.Write(bytes(name, "UTF-8"))
self.Write(B"\"}}\n\n")
self.ExportBoneTransform(armature, bone, scene)
for subnode in bone.children:
self.ExportBone(armature, subnode, scene)
# Export any ordinary nodes that are parented to this bone.
boneSubnodeArray = self.boneParentArray.get(bone.name)
if (boneSubnodeArray):
poseBone = None
if (not bone.use_relative_parent):
poseBone = armature.pose.bones.get(bone.name)
for subnode in boneSubnodeArray:
self.ExportNode(subnode, scene, poseBone)
if (nodeRef):
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportNode(self, node, scene, poseBone = None):
# This function exports a single node in the scene and includes its name,
# object reference, material references (for geometries), and transform.
# Subnodes are then exported recursively.
nodeRef = self.nodeArray.get(node)
if (nodeRef):
type = nodeRef["nodeType"]
self.IndentWrite(structIdentifier[type], 0, True)
self.Write(nodeRef["structName"])
if (type == kNodeTypeGeometry):
if (node.hide_render):
self.Write(B" (visible = false)")
self.IndentWrite(B"{\n", 0, True)
self.indentLevel += 1
structFlag = False
# Export the node's name if it has one.
name = node.name
if (name != ""):
self.IndentWrite(B"Name {string {\"")
self.Write(bytes(name, "UTF-8"))
self.Write(B"\"}}\n")
structFlag = True
# Export the object reference and material references.
object = node.data
if (type == kNodeTypeGeometry):
if (not object in self.geometryArray):
# Sanitize the mesh name for use as a structure identifier.
geomName = object.name.replace(' ', '_')
geomName = geomName.replace('.', '_').lower()
print("MESH NAME: " + geomName)
# old method
#geomName = "geometry" + str(len(self.geometryArray) + 1)
self.geometryArray[object] = {"structName" : bytes(geomName, "UTF-8"), "nodeTable" : [node]}
else:
self.geometryArray[object]["nodeTable"].append(node)
self.IndentWrite(B"ObjectRef {ref {$")
self.Write(self.geometryArray[object]["structName"])
self.Write(B"}}\n")
for i in range(len(node.material_slots)):
self.ExportMaterialRef(node.material_slots[i].material, i)
shapeKeys = OpenGexExporter.GetShapeKeys(object)
if (shapeKeys):
self.ExportMorphWeights(node, shapeKeys, scene)
structFlag = True
elif (type == kNodeTypeLight):
if (not object in self.lightArray):
self.lightArray[object] = {"structName" : bytes("light" + str(len(self.lightArray) + 1), "UTF-8"), "nodeTable" : [node]}
else:
self.lightArray[object]["nodeTable"].append(node)
self.IndentWrite(B"ObjectRef {ref {$")
self.Write(self.lightArray[object]["structName"])
self.Write(B"}}\n")
structFlag = True
elif (type == kNodeTypeCamera):
if (not object in self.cameraArray):
self.cameraArray[object] = {"structName" : bytes("camera" + str(len(self.cameraArray) + 1), "UTF-8"), "nodeTable" : [node]}
else:
self.cameraArray[object]["nodeTable"].append(node)
self.IndentWrite(B"ObjectRef {ref {$")
self.Write(self.cameraArray[object]["structName"])
self.Write(B"}}\n")
structFlag = True
if (structFlag):
self.Write(B"\n")
if (poseBone):
# If the node is parented to a bone and is not relative, then undo the bone's transform.
if (math.fabs(poseBone.matrix.determinant()) > kExportEpsilon):
self.IndentWrite(B"Transform\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
self.WriteMatrix(poseBone.matrix.inverted())
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Export the transform. If the node is animated, then animation tracks are exported here.
self.ExportNodeTransform(node, scene)
if (node.type == "ARMATURE"):
skeleton = node.data
if (skeleton):
for bone in skeleton.bones:
if (not bone.parent):
self.ExportBone(node, bone, scene)
for subnode in node.children:
if (subnode.parent_type != "BONE"):
self.ExportNode(subnode, scene)
if (nodeRef):
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportSkin(self, node, armature, exportVertexArray):
# This function exports all skinning data, which includes the skeleton
# and per-vertex bone influence data.
self.IndentWrite(B"Skin\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
# Write the skin bind pose transform.
self.IndentWrite(B"Transform\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
self.WriteMatrix(node.matrix_world)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Export the skeleton, which includes an array of bone node references
# and an array of per-bone bind pose transforms.
self.IndentWrite(B"Skeleton\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
# Write the bone node reference array.
self.IndentWrite(B"BoneRefArray\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
boneArray = armature.data.bones
boneCount = len(boneArray)
self.IndentWrite(B"ref\t\t\t// ")
self.WriteInt(boneCount)
self.IndentWrite(B"{\n", 0, True)
self.IndentWrite(B"", 1)
for i in range(boneCount):
boneRef = self.FindNode(boneArray[i].name)
if (boneRef):
self.Write(B"$")
self.Write(boneRef[1]["structName"])
else:
self.Write(B"null")
if (i < boneCount - 1):
self.Write(B", ")
else:
self.Write(B"\n")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Write the bind pose transform array.
self.IndentWrite(B"Transform\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\t// ")
self.WriteInt(boneCount)
self.IndentWrite(B"{\n", 0, True)
for i in range(boneCount):
self.WriteMatrixFlat(armature.matrix_world * boneArray[i].matrix_local)
if (i < boneCount - 1):
self.Write(B",\n")
self.IndentWrite(B"}\n", 0, True)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Export the per-vertex bone influence data.
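# groupRemap maps each Blender vertex-group index to the index of the armature
# bone with the same name, or -1 when no bone matches (the for/else appends -1
# only if the inner loop never breaks). Per-vertex weights for the matched
# bones are then renormalized so that they sum to 1.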
groupRemap = []
for group in node.vertex_groups:
groupName = group.name
for i in range(boneCount):
if (boneArray[i].name == groupName):
groupRemap.append(i)
break
else:
groupRemap.append(-1)
boneCountArray = []
boneIndexArray = []
boneWeightArray = []
meshVertexArray = node.data.vertices
for ev in exportVertexArray:
boneCount = 0
totalWeight = 0.0
for element in meshVertexArray[ev.vertexIndex].groups:
boneIndex = groupRemap[element.group]
boneWeight = element.weight
if ((boneIndex >= 0) and (boneWeight != 0.0)):
boneCount += 1
totalWeight += boneWeight
boneIndexArray.append(boneIndex)
boneWeightArray.append(boneWeight)
boneCountArray.append(boneCount)
if (totalWeight != 0.0):
normalizer = 1.0 / totalWeight
for i in range(-boneCount, 0):
boneWeightArray[i] *= normalizer
# Write the bone count array. There is one entry per vertex.
self.IndentWrite(B"BoneCountArray\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"unsigned_int16\t\t// ")
self.WriteInt(len(boneCountArray))
self.IndentWrite(B"{\n", 0, True)
self.WriteIntArray(boneCountArray)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Write the bone index array. The number of entries is the sum of the bone counts for all vertices.
self.IndentWrite(B"BoneIndexArray\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"unsigned_int16\t\t// ")
self.WriteInt(len(boneIndexArray))
self.IndentWrite(B"{\n", 0, True)
self.WriteIntArray(boneIndexArray)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Write the bone weight array. The number of entries is the sum of the bone counts for all vertices.
self.IndentWrite(B"BoneWeightArray\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float\t\t// ")
self.WriteInt(len(boneWeightArray))
self.IndentWrite(B"{\n", 0, True)
self.WriteFloatArray(boneWeightArray)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportGeometry(self, objectRef, scene):
# This function exports a single geometry object.
self.Write(B"\nGeometryObject $")
self.Write(objectRef[1]["structName"])
self.WriteNodeTable(objectRef)
self.Write(B"\n{\n")
self.indentLevel += 1
node = objectRef[1]["nodeTable"][0]
mesh = objectRef[0]
structFlag = False
# Save the morph state if necessary.
activeShapeKeyIndex = node.active_shape_key_index
showOnlyShapeKey = node.show_only_shape_key
currentMorphValue = []
shapeKeys = OpenGexExporter.GetShapeKeys(mesh)
if (shapeKeys):
node.active_shape_key_index = 0
node.show_only_shape_key = True
baseIndex = 0
relative = shapeKeys.use_relative
if (relative):
morphCount = 0
baseName = shapeKeys.reference_key.name
for block in shapeKeys.key_blocks:
if (block.name == baseName):
baseIndex = morphCount
break
morphCount += 1
morphCount = 0
for block in shapeKeys.key_blocks:
currentMorphValue.append(block.value)
block.value = 0.0
if (block.name != ""):
self.IndentWrite(B"Morph (index = ", 0, structFlag)
self.WriteInt(morphCount)
if ((relative) and (morphCount != baseIndex)):
self.Write(B", base = ")
self.WriteInt(baseIndex)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"Name {string {\"", 1)
self.Write(bytes(block.name, "UTF-8"))
self.Write(B"\"}}\n")
self.IndentWrite(B"}\n")
structFlag = True
morphCount += 1
shapeKeys.key_blocks[0].value = 1.0
mesh.update()
self.IndentWrite(B"Mesh (primitive = \"triangles\")\n", 0, structFlag)
self.IndentWrite(B"{\n")
self.indentLevel += 1
armature = node.find_armature()
applyModifiers = (not armature)
# Apply all modifiers to create a new mesh with tessfaces.
# We don't apply modifiers for a skinned mesh because we need the vertex positions
# before they are deformed by the armature modifier in order to export the proper
# bind pose. This does mean that modifiers preceding the armature modifier are ignored,
# but the Blender API does not provide a reasonable way to retrieve the mesh at an
# arbitrary stage in the modifier stack.
exportMesh = node.to_mesh(scene, applyModifiers, "RENDER", True, False)
# Triangulate mesh and remap vertices to eliminate duplicates.
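# DeindexMesh produces three ExportVertex entries per triangle (quads are split
# in two) plus one material index per triangle; UnifyVertices then collapses
# duplicate vertices and fills indexTable, which drives the index arrays
# written further below.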
materialTable = []
exportVertexArray = OpenGexExporter.DeindexMesh(exportMesh, materialTable)
triangleCount = len(materialTable)
indexTable = []
unifiedVertexArray = OpenGexExporter.UnifyVertices(exportVertexArray, indexTable)
vertexCount = len(unifiedVertexArray)
# Write the position array.
self.IndentWrite(B"VertexArray (attrib = \"position\")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[3]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteVertexArray3D(unifiedVertexArray, "position")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Write the normal array.
self.IndentWrite(B"VertexArray (attrib = \"normal\")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[3]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteVertexArray3D(unifiedVertexArray, "normal")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
# Write the color array if it exists.
colorCount = len(exportMesh.tessface_vertex_colors)
if (colorCount > 0):
self.IndentWrite(B"VertexArray (attrib = \"color\")\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[3]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteVertexArray3D(unifiedVertexArray, "color")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
# Write the texcoord arrays.
texcoordCount = len(exportMesh.tessface_uv_textures)
if (texcoordCount > 0):
self.IndentWrite(B"VertexArray (attrib = \"texcoord\")\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[2]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteVertexArray2D(unifiedVertexArray, "texcoord0")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
if (texcoordCount > 1):
self.IndentWrite(B"VertexArray (attrib = \"texcoord[1]\")\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[2]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteVertexArray2D(unifiedVertexArray, "texcoord1")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
# If there are multiple morph targets, export them here.
if (shapeKeys):
shapeKeys.key_blocks[0].value = 0.0
for m in range(1, len(currentMorphValue)):
shapeKeys.key_blocks[m].value = 1.0
mesh.update()
node.active_shape_key_index = m
morphMesh = node.to_mesh(scene, applyModifiers, "RENDER", True, False)
# Write the morph target position array.
self.IndentWrite(B"VertexArray (attrib = \"position\", morph = ", 0, True)
self.WriteInt(m)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[3]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteMorphPositionArray3D(unifiedVertexArray, morphMesh.vertices)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n\n")
# Write the morph target normal array.
self.IndentWrite(B"VertexArray (attrib = \"normal\", morph = ")
self.WriteInt(m)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[3]\t\t// ")
self.WriteInt(vertexCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteMorphNormalArray3D(unifiedVertexArray, morphMesh.vertices, morphMesh.tessfaces)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
bpy.data.meshes.remove(morphMesh)
# Write the index arrays.
maxMaterialIndex = 0
for i in range(len(materialTable)):
index = materialTable[i]
if (index > maxMaterialIndex):
maxMaterialIndex = index
if (maxMaterialIndex == 0):
# There is only one material, so write a single index array.
self.IndentWrite(B"IndexArray\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"unsigned_int32[3]\t\t// ")
self.WriteInt(triangleCount)
self.IndentWrite(B"{\n", 0, True)
self.WriteTriangleArray(triangleCount, indexTable)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
else:
# If there are multiple material indexes, then write a separate index array for each one.
materialTriangleCount = [0 for i in range(maxMaterialIndex + 1)]
for i in range(len(materialTable)):
materialTriangleCount[materialTable[i]] += 1
for m in range(maxMaterialIndex + 1):
if (materialTriangleCount[m] != 0):
materialIndexTable = []
for i in range(len(materialTable)):
if (materialTable[i] == m):
k = i * 3
materialIndexTable.append(indexTable[k])
materialIndexTable.append(indexTable[k + 1])
materialIndexTable.append(indexTable[k + 2])
self.IndentWrite(B"IndexArray (material = ", 0, True)
self.WriteInt(m)
self.Write(B")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"unsigned_int32[3]\t\t// ")
self.WriteInt(materialTriangleCount[m])
self.IndentWrite(B"{\n", 0, True)
self.WriteTriangleArray(materialTriangleCount[m], materialIndexTable)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
# If the mesh is skinned, export the skinning data here.
if (armature):
self.ExportSkin(node, armature, unifiedVertexArray)
# Restore the morph state.
if (shapeKeys):
node.active_shape_key_index = activeShapeKeyIndex
node.show_only_shape_key = showOnlyShapeKey
for m in range(len(currentMorphValue)):
shapeKeys.key_blocks[m].value = currentMorphValue[m]
mesh.update()
# Delete the new mesh that we made earlier.
bpy.data.meshes.remove(exportMesh)
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.Write(B"}\n")
def ExportLight(self, objectRef):
# This function exports a single light object.
self.Write(B"\nLightObject $")
self.Write(objectRef[1]["structName"])
object = objectRef[0]
type = object.type
self.Write(B" (type = ")
pointFlag = False
spotFlag = False
if (type == "SUN"):
self.Write(B"\"infinite\"")
elif (type == "POINT"):
self.Write(B"\"point\"")
pointFlag = True
else:
self.Write(B"\"spot\"")
pointFlag = True
spotFlag = True
if (not object.use_shadow):
self.Write(B", shadow = false")
self.Write(B")")
self.WriteNodeTable(objectRef)
self.Write(B"\n{\n")
self.indentLevel += 1
# Export the light's color, and include a separate intensity if necessary.
self.IndentWrite(B"Color (attrib = \"light\") {float[3] {")
self.WriteColor(object.color)
self.Write(B"}}\n")
intensity = object.energy
if (intensity != 1.0):
self.IndentWrite(B"Param (attrib = \"intensity\") {float {")
self.WriteFloat(intensity)
self.Write(B"}}\n")
if (pointFlag):
# Export a separate attenuation function for each type that's in use.
falloff = object.falloff_type
if (falloff == "INVERSE_LINEAR"):
self.IndentWrite(B"Atten (curve = \"inverse\")\n", 0, True)
self.IndentWrite(B"{\n")
self.IndentWrite(B"Param (attrib = \"scale\") {float {", 1)
self.WriteFloat(object.distance)
self.Write(B"}}\n")
self.IndentWrite(B"}\n")
elif (falloff == "INVERSE_SQUARE"):
self.IndentWrite(B"Atten (curve = \"inverse_square\")\n", 0, True)
self.IndentWrite(B"{\n")
self.IndentWrite(B"Param (attrib = \"scale\") {float {", 1)
self.WriteFloat(math.sqrt(object.distance))
self.Write(B"}}\n")
self.IndentWrite(B"}\n")
elif (falloff == "LINEAR_QUADRATIC_WEIGHTED"):
if (object.linear_attenuation != 0.0):
self.IndentWrite(B"Atten (curve = \"inverse\")\n", 0, True)
self.IndentWrite(B"{\n")
self.IndentWrite(B"Param (attrib = \"scale\") {float {", 1)
self.WriteFloat(object.distance)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"constant\") {float {", 1)
self.WriteFloat(1.0)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"linear\") {float {", 1)
self.WriteFloat(object.linear_attenuation)
self.Write(B"}}\n")
self.IndentWrite(B"}\n\n")
if (object.quadratic_attenuation != 0.0):
self.IndentWrite(B"Atten (curve = \"inverse_square\")\n")
self.IndentWrite(B"{\n")
self.IndentWrite(B"Param (attrib = \"scale\") {float {", 1)
self.WriteFloat(object.distance)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"constant\") {float {", 1)
self.WriteFloat(1.0)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"quadratic\") {float {", 1)
self.WriteFloat(object.quadratic_attenuation)
self.Write(B"}}\n")
self.IndentWrite(B"}\n")
if (object.use_sphere):
self.IndentWrite(B"Atten (curve = \"linear\")\n", 0, True)
self.IndentWrite(B"{\n")
self.IndentWrite(B"Param (attrib = \"end\") {float {", 1)
self.WriteFloat(object.distance)
self.Write(B"}}\n")
self.IndentWrite(B"}\n")
if (spotFlag):
# Export additional angular attenuation for spot lights.
self.IndentWrite(B"Atten (kind = \"angle\", curve = \"linear\")\n", 0, True)
self.IndentWrite(B"{\n")
endAngle = object.spot_size * 0.5
beginAngle = endAngle * (1.0 - object.spot_blend)
self.IndentWrite(B"Param (attrib = \"begin\") {float {", 1)
self.WriteFloat(beginAngle)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"end\") {float {", 1)
self.WriteFloat(endAngle)
self.Write(B"}}\n")
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.Write(B"}\n")
def ExportCamera(self, objectRef):
# This function exports a single camera object.
self.Write(B"\nCameraObject $")
self.Write(objectRef[1]["structName"])
self.WriteNodeTable(objectRef)
self.Write(B"\n{\n")
self.indentLevel += 1
object = objectRef[0]
self.IndentWrite(B"Param (attrib = \"fov\") {float {")
self.WriteFloat(object.angle_x)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"near\") {float {")
self.WriteFloat(object.clip_start)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"far\") {float {")
self.WriteFloat(object.clip_end)
self.Write(B"}}\n")
self.indentLevel -= 1
self.Write(B"}\n")
def ExportObjects(self, scene):
for objectRef in self.geometryArray.items():
self.ExportGeometry(objectRef, scene)
for objectRef in self.lightArray.items():
self.ExportLight(objectRef)
for objectRef in self.cameraArray.items():
self.ExportCamera(objectRef)
def ExportTexture(self, textureSlot, attrib):
# This function exports a single texture from a material.
self.IndentWrite(B"Texture (attrib = \"", 0, True)
self.Write(attrib)
self.Write(B"\")\n")
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"string {\"")
self.WriteFileName(textureSlot.texture.image.filepath)
self.Write(B"\"}\n")
# If the texture has a scale and/or offset, then export a coordinate transform.
uscale = textureSlot.scale[0]
vscale = textureSlot.scale[1]
uoffset = textureSlot.offset[0]
voffset = textureSlot.offset[1]
if ((uscale != 1.0) or (vscale != 1.0) or (uoffset != 0.0) or (voffset != 0.0)):
matrix = [[uscale, 0.0, 0.0, 0.0], [0.0, vscale, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [uoffset, voffset, 0.0, 1.0]]
self.IndentWrite(B"Transform\n", 0, True)
self.IndentWrite(B"{\n")
self.indentLevel += 1
self.IndentWrite(B"float[16]\n")
self.IndentWrite(B"{\n")
self.WriteMatrix(matrix)
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
self.indentLevel -= 1
self.IndentWrite(B"}\n")
def ExportMaterials(self):
# This function exports all of the materials used in the scene.
for materialRef in self.materialArray.items():
material = materialRef[0]
self.Write(B"\nMaterial $")
self.Write(materialRef[1]["structName"])
self.Write(B"\n{\n")
self.indentLevel += 1
if (material.name != ""):
self.IndentWrite(B"Name {string {\"")
self.Write(bytes(material.name, "UTF-8"))
self.Write(B"\"}}\n\n")
intensity = material.diffuse_intensity
diffuse = [material.diffuse_color[0] * intensity, material.diffuse_color[1] * intensity, material.diffuse_color[2] * intensity]
self.IndentWrite(B"Color (attrib = \"diffuse\") {float[3] {")
self.WriteColor(diffuse)
self.Write(B"}}\n")
intensity = material.specular_intensity
specular = [material.specular_color[0] * intensity, material.specular_color[1] * intensity, material.specular_color[2] * intensity]
if ((specular[0] > 0.0) or (specular[1] > 0.0) or (specular[2] > 0.0)):
self.IndentWrite(B"Color (attrib = \"specular\") {float[3] {")
self.WriteColor(specular)
self.Write(B"}}\n")
self.IndentWrite(B"Param (attrib = \"specular_power\") {float {")
self.WriteFloat(material.specular_hardness)
self.Write(B"}}\n")
emission = material.emit
if (emission > 0.0):
self.IndentWrite(B"Color (attrib = \"emission\") {float[3] {")
self.WriteColor([emission, emission, emission])
self.Write(B"}}\n")
diffuseTexture = None
specularTexture = None
emissionTexture = None
transparencyTexture = None
normalTexture = None
for textureSlot in material.texture_slots:
if ((textureSlot) and (textureSlot.use) and (textureSlot.texture.type == "IMAGE")):
if (((textureSlot.use_map_color_diffuse) or (textureSlot.use_map_diffuse)) and (not diffuseTexture)):
diffuseTexture = textureSlot
elif (((textureSlot.use_map_color_spec) or (textureSlot.use_map_specular)) and (not specularTexture)):
specularTexture = textureSlot
elif ((textureSlot.use_map_emit) and (not emissionTexture)):
emissionTexture = textureSlot
elif ((textureSlot.use_map_translucency) and (not transparencyTexture)):
transparencyTexture = textureSlot
elif ((textureSlot.use_map_normal) and (not normalTexture)):
normalTexture = textureSlot
if (diffuseTexture):
self.ExportTexture(diffuseTexture, B"diffuse")
if (specularTexture):
self.ExportTexture(specularTexture, B"specular")
if (emissionTexture):
self.ExportTexture(emissionTexture, B"emission")
if (transparencyTexture):
self.ExportTexture(transparencyTexture, B"transparency")
if (normalTexture):
self.ExportTexture(normalTexture, B"normal")
self.indentLevel -= 1
self.Write(B"}\n")
def ExportMetrics(self, scene):
scale = scene.unit_settings.scale_length
if (scene.unit_settings.system == "IMPERIAL"):
scale *= 0.3048
self.Write(B"Metric (key = \"distance\") {float {")
self.WriteFloat(scale)
self.Write(B"}}\n")
self.Write(B"Metric (key = \"angle\") {float {1.0}}\n")
self.Write(B"Metric (key = \"time\") {float {1.0}}\n")
self.Write(B"Metric (key = \"up\") {string {\"z\"}}\n")
def execute(self, context):
self.file = open(self.filepath, "wb")
self.indentLevel = 0
scene = context.scene
self.ExportMetrics(scene)
originalFrame = scene.frame_current
originalSubframe = scene.frame_subframe
self.restoreFrame = False
self.beginFrame = scene.frame_start
self.endFrame = scene.frame_end
self.frameTime = 1.0 / (scene.render.fps_base * scene.render.fps)
self.nodeArray = {}
self.geometryArray = {}
self.lightArray = {}
self.cameraArray = {}
self.materialArray = {}
self.boneParentArray = {}
self.exportAllFlag = not self.option_export_selection
self.sampleAnimationFlag = self.option_sample_animation
for object in scene.objects:
if (not object.parent):
self.ProcessNode(object)
self.ProcessSkinnedMeshes()
for object in scene.objects:
if (not object.parent):
self.ExportNode(object, scene)
self.ExportObjects(scene)
self.ExportMaterials()
if (self.restoreFrame):
scene.frame_set(originalFrame, originalSubframe)
self.file.close()
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(OpenGexExporter.bl_idname, text = "OpenGEX [LDJAM] (.ogex)")
def register():
bpy.utils.register_class(OpenGexExporter)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.types.INFO_MT_file_export.remove(menu_func)
bpy.utils.unregister_class(OpenGexExporter)
if __name__ == "__main__":
register()
``` |
{
"source": "joeld42/oryol-samples",
"score": 3
} |
#### File: oryol-samples/fips-verbs/webpage.py
```python
import os
import yaml
import shutil
import subprocess
import glob
from string import Template
from mod import log, util, project, emscripten, android
GitHubSamplesURL = 'https://github.com/floooh/oryol-samples/tree/master/src/'
BuildEmscripten = True
BuildWasm = True
ExportAssets = True
EmscConfig = 'webgl2-emsc-ninja-release'
WasmConfig = 'webgl2-wasm-ninja-release'
#-------------------------------------------------------------------------------
def deploy_webpage(fips_dir, proj_dir, webpage_dir) :
"""builds the final webpage under under fips-deploy/oryol-samples-webpage"""
ws_dir = util.get_workspace_dir(fips_dir)
# load the websamples.yml file, should have been created during the last build
with open(webpage_dir + '/websamples.yml', 'r') as f :
samples = yaml.load(f.read())
# create directories
for platform in ['asmjs', 'wasm'] :
platform_dir = '{}/{}'.format(webpage_dir, platform)
if not os.path.isdir(platform_dir) :
os.makedirs(platform_dir)
# link to the Core Samples
content = '<div class="thumb">\n'
content += ' <div class="thumb-title">To Core Samples...</div>\n'
content += ' <div class="img-frame"><a href="http://floooh.github.com/oryol/index.html"><img class="image" src="core_samples.jpg"></img></a></div>\n'
content += '</div>\n'
# build the thumbnail gallery
for sample in samples :
if sample['name'] != '__end__' :
name = sample['name']
imgPath = sample['image']
types = sample['type']
desc = sample['desc']
head, tail = os.path.split(imgPath)
if tail == 'none' :
imgFileName = 'dummy.jpg'
else :
imgFileName = tail
content += '<div class="thumb">\n'
content += ' <div class="thumb-title">{}</div>\n'.format(name)
content += ' <div class="img-frame"><a href="asmjs/{}.html"><img class="image" src="{}" title="{}"></img></a></div>\n'.format(name,imgFileName,desc)
content += ' <div class="thumb-bar">\n'
content += ' <ul class="thumb-list">\n'
if BuildEmscripten and 'emscripten' in types :
content += ' <li class="thumb-item"><a class="thumb-link" href="asmjs/{}.html">asm.js</a></li>\n'.format(name)
if BuildWasm and 'emscripten' in types :
content += ' <li class="thumb-item"><a class="thumb-link" href="wasm/{}.html">wasm</a></li>\n'.format(name)
content += ' </ul>\n'
content += ' </div>\n'
content += '</div>\n'
# populate the html template, and write to the build directory
with open(proj_dir + '/web/index.html', 'r') as f :
templ = Template(f.read())
html = templ.safe_substitute(samples=content)
with open(webpage_dir + '/index.html', 'w') as f :
f.write(html)
# copy other required files
for name in ['style.css', 'dummy.jpg', 'emsc.js', 'wasm.js', 'about.html', 'favicon.png', 'core_samples.jpg'] :
log.info('> copy file: {}'.format(name))
shutil.copy(proj_dir + '/web/' + name, webpage_dir + '/' + name)
# generate emscripten HTML pages
if BuildEmscripten and emscripten.check_exists(fips_dir) :
emsc_deploy_dir = '{}/fips-deploy/oryol-samples/{}'.format(ws_dir, EmscConfig)
for sample in samples :
name = sample['name']
if name != '__end__' and 'emscripten' in sample['type'] :
log.info('> generate emscripten HTML page: {}'.format(name))
for ext in ['js', 'html.mem'] :
src_path = '{}/{}.{}'.format(emsc_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/asmjs/'.format(webpage_dir))
with open(proj_dir + '/web/emsc.html', 'r') as f :
templ = Template(f.read())
src_url = GitHubSamplesURL + sample['src'];
html = templ.safe_substitute(name=name, source=src_url)
with open('{}/asmjs/{}.html'.format(webpage_dir, name), 'w') as f :

f.write(html)
# generate WebAssembly HTML pages
if BuildWasm and emscripten.check_exists(fips_dir) :
wasm_deploy_dir = '{}/fips-deploy/oryol-samples/{}'.format(ws_dir, WasmConfig)
for sample in samples :
name = sample['name']
if name != '__end__' and 'emscripten' in sample['type'] :
log.info('> generate wasm HTML page: {}'.format(name))
for ext in ['js', 'wasm.mappedGlobals'] :
src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/wasm/'.format(webpage_dir))
for ext in ['html.mem', 'wasm'] :
src_path = '{}/{}.{}'.format(wasm_deploy_dir, name, ext)
if os.path.isfile(src_path) :
shutil.copy(src_path, '{}/wasm/{}.{}.txt'.format(webpage_dir, name, ext))
with open(proj_dir + '/web/wasm.html', 'r') as f :
templ = Template(f.read())
src_url = GitHubSamplesURL + sample['src'];
html = templ.safe_substitute(name=name, source=src_url)
with open('{}/wasm/{}.html'.format(webpage_dir, name), 'w') as f :
f.write(html)
# copy the screenshots
for sample in samples :
if sample['name'] != '__end__' :
img_path = sample['image']
head, tail = os.path.split(img_path)
if tail != 'none' :
log.info('> copy screenshot: {}'.format(tail))
shutil.copy(img_path, webpage_dir + '/' + tail)
#-------------------------------------------------------------------------------
def export_assets(fips_dir, proj_dir, webpage_dir) :
data_src_dir = '{}/data/'.format(proj_dir)
data_dst_dir = '{}/data/'.format(webpage_dir)
if not os.path.exists(data_dst_dir) :
os.makedirs(data_dst_dir)
for ext in ['txt', 'dump', 'kcc', 'tap', 'orb'] :
for data_file in glob.glob('{}/*.{}'.format(data_src_dir, ext)) :
shutil.copy(data_file, data_dst_dir)
tbui_from = '{}/tbui'.format(data_src_dir)
tbui_to = '{}/tbui'.format(data_dst_dir)
shutil.copytree(tbui_from, tbui_to)
nkui_from = '{}/nkui'.format(data_src_dir)
nkui_to = '{}/nkui'.format(data_dst_dir)
shutil.copytree(nkui_from, nkui_to)
#-------------------------------------------------------------------------------
def build_deploy_webpage(fips_dir, proj_dir) :
# if webpage dir exists, clear it first
ws_dir = util.get_workspace_dir(fips_dir)
webpage_dir = '{}/fips-deploy/oryol-samples-webpage'.format(ws_dir)
if os.path.isdir(webpage_dir) :
shutil.rmtree(webpage_dir)
os.makedirs(webpage_dir)
if BuildEmscripten and emscripten.check_exists(fips_dir) :
project.gen(fips_dir, proj_dir, EmscConfig)
project.build(fips_dir, proj_dir, EmscConfig)
if BuildWasm and emscripten.check_exists(fips_dir) :
project.gen(fips_dir, proj_dir, WasmConfig)
project.build(fips_dir, proj_dir, WasmConfig)
# export sample assets
if ExportAssets :
export_assets(fips_dir, proj_dir, webpage_dir)
# deploy the webpage
deploy_webpage(fips_dir, proj_dir, webpage_dir)
log.colored(log.GREEN, 'Generated Samples web page under {}.'.format(webpage_dir))
#-------------------------------------------------------------------------------
def serve_webpage(fips_dir, proj_dir) :
ws_dir = util.get_workspace_dir(fips_dir)
webpage_dir = '{}/fips-deploy/oryol-samples-webpage'.format(ws_dir)
p = util.get_host_platform()
if p == 'osx' :
try :
subprocess.call(
'open http://localhost:8000 ; python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt :
pass
elif p == 'win':
try:
subprocess.call(
'cmd /c start http://localhost:8000 && python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt:
pass
elif p == 'linux':
try:
subprocess.call(
'xdg-open http://localhost:8000; python {}/mod/httpserver.py'.format(fips_dir),
cwd = webpage_dir, shell=True)
except KeyboardInterrupt:
pass
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
if len(args) > 0 :
if args[0] == 'build' :
build_deploy_webpage(fips_dir, proj_dir)
elif args[0] == 'serve' :
serve_webpage(fips_dir, proj_dir)
else :
log.error("Invalid param '{}', expected 'build' or 'serve'".format(args[0]))
else :
log.error("Param 'build' or 'serve' expected")
#-------------------------------------------------------------------------------
def help() :
log.info(log.YELLOW +
'fips webpage build\n' +
'fips webpage serve\n' +
log.DEF +
' build oryol samples webpage')
``` |
{
"source": "joeld42/tk_build",
"score": 2
} |
#### File: tk_build/tkbuild/artifact.py
```python
import os, sys
import datetime
import pytz
from enum import Enum
import logging
from firebase_admin.firestore import SERVER_TIMESTAMP
DEFAULT_ARTIFACT_DATE = datetime.datetime(2020, 12, 1, tzinfo=pytz.UTC)
MANIFEST_PLIST_TEMPLATE = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>items</key>
<array>
<dict>
<key>assets</key>
<array>
<dict>
<key>kind</key>
<string>software-package</string>
<key>url</key>
<string>{ipaFileURL}</string>
</dict>
</array>
<key>metadata</key>
<dict>
<key>bundle-identifier</key>
<string>{bundleId}</string>
<key>bundle-version</key>
<string>{version} ({buildNum})</string>
<key>kind</key>
<string>software</string>
<key>title</key>
<string>{appTitle}</string>
</dict>
</dict>
</array>
</dict>
</plist>
"""
class TKArtifact(object ):
def __init__(self, id = "0000000"):
self.id = id
self.project = None
self.jobKey = None
self.builtFile = None
self.commitVer = None
self.timestamp = None
self.manifest = None
def shortFilename(self):
return os.path.split( self.builtFile )[-1]
# Adds a manifest for .ipa (ios ad hoc) apps
# TODO support a custom icon that the web server can display
def addManifestInfo(self, appTitle, bundleIdentifier, version, buildNum, ipaFileURL ):
self.manifest = {
"appTitle" : appTitle,
"bundleId" : bundleIdentifier,
"version" : version,
"buildNum" : buildNum,
"ipaFileURL" : ipaFileURL,
"manifestURL" : "unknown" # build agent has to set this after uploading the manifest
}
def toFirebaseDict(self):
fireDict = {
"project" : self.project,
"jobKey" : self.jobKey,
"commitVer" : self.commitVer,
"builtFile" : self.builtfile,
"timestamp" : SERVER_TIMESTAMP if self.timestamp is None else self.timestamp
}
if self.manifest:
fireDict.update({ "manifest" : self.manifest })
return fireDict
def generateManifestFile(self ):
"""Returns a string containing a text plist manifest for this artifact"""
if not self.manifest:
return None
print("Manifest is:")
print( self.manifest )
return MANIFEST_PLIST_TEMPLATE.format( **self.manifest )
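# Illustrative usage sketch (hypothetical values, not from the original file):
#   art = TKArtifact("abc123")
#   art.addManifestInfo("MyApp", "com.example.myapp", "1.2.0", 37,
#                       "https://builds.example.com/MyApp.ipa")
#   plist_text = art.generateManifestFile()
# The returned plist embeds bundle-version "1.2.0 (37)" and the given IPA URL;
# the build agent is expected to fill in manifest["manifestURL"] after upload.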
@classmethod
def createFromFirebaseDict(cls, id, dataDict ):
artifact = cls( id )
artifact.jobKey = dataDict.get( 'jobKey' )
artifact.project = dataDict.get('project' )
artifact.commitVer = dataDict.get( 'commitVer' )
artifact.builtFile = dataDict.get( 'builtFile' )
artifact.timestamp = dataDict.get( 'timestamp', DEFAULT_ARTIFACT_DATE )
artifact.manifest = dataDict.get( 'manifest' )
return artifact
``` |
{
"source": "JoelDan192/TemporalReconstruction_SE",
"score": 3
} |
#### File: JoelDan192/TemporalReconstruction_SE/create_votes.py
```python
import pandas as pd
questions = pd.DataFrame.from_csv('question_simple.csv', index_col=None)
a_questions = pd.DataFrame.from_csv('question_votes.csv', index_col=None)
get_votes_qv = lambda df: pd.Series((df.VoteType==2).cumsum() + (df.VoteType==3).cumsum(),name='QVotes')
get_score_qv = lambda df: pd.Series((df.VoteType==2).cumsum() - (df.VoteType==3).cumsum(),name='QScore')
predictors_qvotes = ['QuestionId','QuestionCreation','QuestionLastActivity','AcceptedAnsId','AcceptedDate','QVoteCreation']
f_q = lambda df: pd.concat([df[cname] for cname in df.columns.values.tolist() if cname in predictors_qvotes]+[get_score_qv(df),get_votes_qv(df)],axis=1)
a_questions = a_questions.sort_values(by='QVoteCreation').groupby(['QuestionId']).apply(f_q)
a_votes = pd.DataFrame.from_csv('votes-answers.csv', index_col=None)
a_votes = pd.merge(a_votes, a_questions, how='inner', on=['QuestionId'],suffixes=['_v', '_q'])
predictors_raw_votans =['VoteId','VoteCreation','AnsCreation','VoteType','AnsId','QuestionId','AnsWordCount','QuestionCreation','AcceptedAnsId','AcceptedDate']
valid_qavotes = lambda df: df[df.VoteCreation>=df.QVoteCreation]
#Use twice valid_qavotes, could use once to improve efficiency, but check correctness of index selection
get_max_qv = lambda df: valid_qavotes(df).loc[valid_qavotes(df).QVotes.idxmax(),['QScore','QVotes']].squeeze()
get_latest_qv = lambda df : pd.Series([0,0],index=['QScore','QVotes']) if not (df.VoteCreation>=df.QVoteCreation).any() else get_max_qv(df)
get_head = lambda df: [df[cname].iloc[0] for cname in df.columns.values.tolist() if cname in predictors_raw_votans]
get_qv = lambda df : pd.Series(get_head(df),index=predictors_raw_votans).append(get_latest_qv(df)).to_frame()
a_votes = a_votes.sort_values(by='VoteCreation').groupby(['VoteId']).apply(get_qv).unstack(level=-1).reset_index(level=[0],drop=True)
a_votes.drop(a_votes.columns[[0]], axis=1, inplace=True)
a_votes.columns = a_votes.columns.droplevel()
date_placeholder = '2016-07-20T00:00:00.000' #Date After Data Set Collection
#a_votes.loc[a_votes.AcceptedDate == 'None','AcceptedDate'] = pd.to_datetime(date_placeholder)
a_votes['AcceptedDate'].fillna(pd.to_datetime(date_placeholder),inplace=True)
a_votes['AcceptedAge'] = (pd.to_datetime(a_votes.AcceptedDate,format='%Y-%m-%d %H:%M:%S.%f')
-pd.to_datetime(a_votes.QuestionCreation,format='%Y-%m-%d %H:%M:%S.%f')).apply(lambda x: x.astype('timedelta64[D]').item().days)
a_votes['AcceptedAge'] = a_votes['AcceptedAge'] + 1
a_votes.loc[a_votes.AcceptedDate == pd.to_datetime(date_placeholder), 'AcceptedAge'] = -1
a_votes['Age'] = (pd.to_datetime(a_votes.VoteCreation,format='%Y-%m-%d %H:%M:%S.%f')
-pd.to_datetime(a_votes.QuestionCreation,format='%Y-%m-%d %H:%M:%S.%f')).apply(lambda x: x.astype('timedelta64[D]').item().days)
a_votes['Age'] = a_votes['Age'] + 1
a_votes.drop(a_votes.columns[[0, 1, 6, 8]], axis=1, inplace=True)
get_score = lambda df: sum(df.VoteType==2) - sum(df.VoteType==3)
get_votes = lambda df: sum(df.VoteType==2) + sum(df.VoteType==3)
predictors = ['QuestionId','AnsWordCount','AcceptedAnsId','AcceptedAge','QScore',
'QVotes','Score','Votes','Upvotes','Downvotes']
f = lambda df: pd.Series([df.QuestionId.iloc[0],df.AnsWordCount.iloc[0],df.AcceptedAnsId.iloc[0],df.AcceptedAge.iloc[0],
df.QScore.iloc[0],df.QVotes.iloc[0],get_score(df),get_votes(df),sum(df.VoteType==2),sum(df.VoteType==3)],index = predictors)
a_groups = a_votes.sort_values(by='Age').groupby(['AnsId','Age']).apply(f)
a_groups = a_groups.reset_index(level=[0,1],drop=False)
cum_votes = lambda df: pd.Series(df['Votes'].cumsum(),name='CumVotes')
cum_score = lambda df: pd.Series(df['Score'].cumsum(),name='CumScore')
get_cumulative =lambda df: pd.concat([df[cname] for cname in df.columns.values.tolist()] + [cum_votes(df),cum_score(df)],axis=1)
ff = lambda df: get_cumulative(df.sort_values(by='Age'))
a_groups_c = a_groups.groupby(['AnsId']).apply(ff).reset_index(level=[0],drop=True)
prior_quality = float(a_groups_c['Upvotes'].sum())/(a_groups_c['Upvotes'].sum() + a_groups_c['Downvotes'].sum())
a_groups_c['ReScore'] = (a_groups_c['CumScore']+prior_quality)/(a_groups_c['CumVotes']+1.0)
a_groups_c['QReScore'] = a_groups_c['QScore']/(a_groups_c['QVotes']+1.0)
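# Descriptive note (added for clarity): ReScore is additive smoothing of the
# cumulative score by the corpus-wide upvote fraction. For example, if
# prior_quality were 0.75, an answer with CumScore = 3 and CumVotes = 5 would
# get ReScore = (3 + 0.75) / (5 + 1.0) = 0.625.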
votes_com_f = a_groups_c
from itertools import izip
def rank_ans(df,score_only,re_score):
rk_name = "ReScore_rank" if re_score else "AnsRank"
def rank_iter():
cache = {}
accepted = 0
for row in df.itertuples():
if re_score:
cache[row.AnsId] = row.ReScore
else :
cache[row.AnsId] = row.Score
# rank, nb_ans
if (not score_only) and row.AcceptedAge>-1 and (row.AnsId == row.AcceptedAnsId) and row.Age >=row.AcceptedAge:
accepted = 1
if row.AnsId in cache:
del cache[row.AnsId]
yield (1,len(cache)+accepted,row.Index)
else :
rank = sorted(cache, key= lambda k:cache[k],reverse=True).index(row.AnsId) + 1 + accepted
yield (rank,len(cache)+accepted,row.Index)
ranks, ans_counts, indices = izip(*list(rank_iter())) #TODO: optimize for the future
return [pd.Series(ranks,name=rk_name, index=indices), pd.Series(ans_counts,name="Ans_count", index=indices)]
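# Descriptive note (added for clarity): rank_ans returns two Series aligned to
# df's index: the rank of each answer at the time of each vote (1 = highest
# cached score, with the accepted answer pinned to rank 1 once Age reaches
# AcceptedAge unless score_only is set) and the number of answers visible at
# that point (Ans_count).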
predictors = ['QuestionId','AnsId','AnsWordCount','AcceptedAnsId','Age',
'Score','Votes','Upvotes','Downvotes','CumScore','CumVotes','QScore'
,'QVotes','ReScore','QReScore','AnsRank','ReScore_rank']
get_ranks = lambda df,score_only=False,re_score=False: pd.concat(
[df[cname] for cname in df.columns.values.tolist() if cname in predictors] + rank_ans(df,score_only,re_score),axis=1)
sort_age_score = lambda df: df.sort_values(by=['Age','Score'],ascending=[True,False])
votes_com_f = votes_com_f.groupby(['QuestionId']).apply(
lambda df: get_ranks(sort_age_score(df))).reset_index(drop=True)
votes_com_f = votes_com_f.groupby(['QuestionId']).apply(
lambda df: get_ranks(sort_age_score(df),score_only=True,re_score=True)).reset_index(drop=True)
votes_com_f['Pbias'] = 1.0/votes_com_f['AnsRank']
votes_com_f['DRank'] = votes_com_f['AnsRank'] - votes_com_f['ReScore_rank']
#AnsRank and Ans_count define unique EPbias
sum_by_rank = lambda df: df.groupby('AnsRank').apply(
lambda df: pd.Series([df.Votes.sum()],name='EPbias').to_frame()).unstack(level=-1).reset_index(level=0,drop=False)
get_ratio = lambda df: sum_by_rank(df).EPbias/(sum_by_rank(df).EPbias.sum())
ratio_per_rank = lambda df: pd.concat([sum_by_rank(df).AnsRank, get_ratio(df)],axis=1)
get_position_bias = lambda df: pd.merge(df,ratio_per_rank(df),how='inner',on=['AnsRank'])
votes = votes_com_f.groupby(['Ans_count']).apply(get_position_bias).reset_index(level=[0,1],drop=True)
votes.columns.values[-1] = "EPbias"
test_epbias = votes.groupby(['Ans_count','AnsRank']).first().reset_index(
level=[0,1],drop=False)[['Ans_count','AnsRank','EPbias']]
test_epbias.to_csv('EPbiasbyAnsCountRank.csv')
votes.to_csv(path_or_buf='AnsVotes_TSeries.csv')
```
#### File: JoelDan192/TemporalReconstruction_SE/makedb.py
```python
import sqlite3
import os
import xml.etree.cElementTree as etree
import logging
ANATHOMY = {
'badges': {
'Id': 'INTEGER',
'UserId': 'INTEGER',
'Name': 'TEXT',
'Date': 'DATETIME',
'Class': 'INTEGER', #1: Gold, 2: Silver, 3:Bronze.
'TagBased': 'INTEGER'#1(True) or 0(False). Based on tags like Java,Python etc.
},
'comments': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'Score': 'INTEGER',
'Text': 'TEXT',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT'
},
'posts': {
'Id': 'INTEGER',
'PostTypeId': 'INTEGER', # 1: Question, 2: Answer
'ParentID': 'INTEGER', # (only present if PostTypeId is 2)
'AcceptedAnswerId': 'INTEGER', # (only present if PostTypeId is 1)
'CreationDate': 'DATETIME',
'Score': 'INTEGER',
'ViewCount': 'INTEGER',
'Body': 'TEXT',
'OwnerUserId': 'INTEGER', # (present only if user has not been deleted)
'OwnerDisplayName': 'TEXT',
'LastEditorUserId': 'INTEGER',
'LastEditorDisplayName': 'TEXT', # ="<NAME>"
'LastEditDate': 'DATETIME', #="2009-03-05T22:28:34.823"
'LastActivityDate': 'DATETIME', #="2009-03-11T12:51:01.480"
'CommunityOwnedDate': 'DATETIME', #(present only if post is community wikied)
'Title': 'TEXT',
'Tags': 'TEXT',
'AnswerCount': 'INTEGER',
'CommentCount': 'INTEGER',
'FavoriteCount': 'INTEGER',
'ClosedDate': 'DATETIME'
},
'votes': {
'Id': 'INTEGER',
'PostId': 'INTEGER',
'UserId': 'INTEGER',
'VoteTypeId': 'INTEGER',
# - 1: AcceptedByOriginator
# - 2: UpMod
# - 3: DownMod
# - 4: Offensive
# - 5: Favorite
# - 6: Close
# - 7: Reopen
# - 8: BountyStart
# - 9: BountyClose
# - 10: Deletion
# - 11: Undeletion
# - 12: Spam
# - 13: InformModerator
'CreationDate': 'DATETIME',
'BountyAmount': 'INTEGER'
},
'posthistory': {
'Id': 'INTEGER',
'PostHistoryTypeId': 'INTEGER',
'PostId': 'INTEGER',
'RevisionGUID': 'INTEGER',
'CreationDate': 'DATETIME',
'UserId': 'INTEGER',
'UserDisplayName': 'TEXT',
'Comment': 'TEXT',
'Text': 'TEXT'
},
'postlinks': {
'Id': 'INTEGER',
'CreationDate': 'DATETIME',
'PostId': 'INTEGER',
'RelatedPostId': 'INTEGER',
'PostLinkTypeId': 'INTEGER',
'LinkTypeId': 'INTEGER'
},
'users': {
'Id': 'INTEGER',
'Reputation': 'INTEGER',
'CreationDate': 'DATETIME',
'DisplayName': 'TEXT',
'LastAccessDate': 'DATETIME',
'WebsiteUrl': 'TEXT',
'Location': 'TEXT',
'Age': 'INTEGER',
'AboutMe': 'TEXT',
'Views': 'INTEGER',
'UpVotes': 'INTEGER',
'DownVotes': 'INTEGER',
'EmailHash': 'TEXT',
'AccountId': 'INTEGER',
'ProfileImageUrl': 'TEXT'
},
'tags': {
'Id': 'INTEGER',
'TagName': 'TEXT',
'Count': 'INTEGER',
'ExcerptPostId': 'INTEGER',
'WikiPostId': 'INTEGER'
}
}
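# Illustrative note (added, not part of the original script): for the 'tags'
# entry above, dump_files builds a statement of the form
#   CREATE TABLE IF NOT EXISTS tags (Id INTEGER, TagName TEXT, Count INTEGER,
#                                    ExcerptPostId INTEGER, WikiPostId INTEGER)
# with the column order following the dict's iteration order.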
def dump_files(file_names, anathomy,
dump_path='.',
dump_database_name='so-dump.db',
create_query='CREATE TABLE IF NOT EXISTS {table} ({fields})',
insert_query='INSERT INTO {table} ({columns}) VALUES ({values})',
log_filename='so-parser.log'):
logging.basicConfig(filename=os.path.join(dump_path, log_filename), level=logging.INFO)
db = sqlite3.connect(os.path.join(dump_path, dump_database_name))
for file in file_names:
print "Opening {0}.xml".format(file)
with open(os.path.join(dump_path, file + '.xml')) as xml_file:
tree = etree.iterparse(xml_file)
table_name = file
sql_create = create_query.format(
table=table_name,
fields=", ".join(['{0} {1}'.format(name, type) for name, type in anathomy[table_name].items()]))
print('Creating table {0}'.format(table_name))
try:
logging.info(sql_create)
db.execute(sql_create)
except Exception, e:
logging.warning(e)
for events, row in tree:
try:
if row.attrib.values():
logging.debug(row.attrib.keys())
query = insert_query.format(
table=table_name,
columns=', '.join(row.attrib.keys()),
values=('?, ' * len(row.attrib.keys()))[:-2])
db.execute(query, row.attrib.values())
print ".",
except Exception, e:
logging.warning(e)
print "x",
finally:
row.clear()
print "\n"
db.commit()
del (tree)
if __name__ == '__main__':
dump_files(ANATHOMY.keys(), ANATHOMY)
``` |
{
"source": "JoelDapello/rnnManifolds",
"score": 2
} |
#### File: rnnManifolds/tnn/reciprocalgated_IMNET.py
```python
import os, time, glob, argparse
import numpy as np
from scipy.io import loadmat, savemat
import tensorflow as tf
from tnn import main
from tnn.reciprocalgaternn import tnn_ReciprocalGateCell
import warnings
warnings.filterwarnings("ignore")
host = os.uname()[1]
if host.startswith('braintree'):
DATA_PATH = '/braintree/data2/active/users/qbilius/datasets/imagenet2012_tf_256px'
elif host.startswith('node'):
DATA_PATH = '/om/user/qbilius/imagenet2012_tf_256px'
parser = argparse.ArgumentParser()
parser.add_argument('--train', default=0, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--test_batch_size', default=64, type=int)
parser.add_argument('--gpus', default=['1'], nargs='*')
parser.add_argument('--ntimes', default=5, type=int)
parser.add_argument('--nsteps', default=int(4e5), type=lambda x: int(float(x)))
FLAGS, _ = parser.parse_known_args()
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(FLAGS.gpus)
batch_size = FLAGS.batch_size
NUM_TIMESTEPS = 12 # number of timesteps we are predicting on
NETWORK_DEPTH = 5 # number of total layers in our network
# we always unroll num_timesteps after the first output of the model
TOTAL_TIMESTEPS = NETWORK_DEPTH + NUM_TIMESTEPS
BASE_NAME = './json/5L_imnet128_recip345sig_noBN'
def model_func(input_images, ntimes=TOTAL_TIMESTEPS,
batch_size=batch_size, edges_arr=[],
base_name=BASE_NAME,
tau=0.0, train=False, trainable_flag=False):
# model_name = 'my_model'
model_name = base_name.split('/')[-1]
with tf.variable_scope(model_name, reuse=tf.AUTO_REUSE):
base_name += '.json'
print('Using model {} from {}'.format(model_name, base_name))
# creates the feedforward network graph from json
G = main.graph_from_json(base_name)
for node, attr in G.nodes(data=True):
memory_func, memory_param = attr['kwargs']['memory']
if 'cell_depth' in memory_param:
# this is where you add your custom cell
attr['cell'] = tnn_ReciprocalGateCell
else:
# default to not having a memory cell
# tau = 0.0, trainable = False
attr['kwargs']['memory'][1]['memory_decay'] = tau
attr['kwargs']['memory'][1]['trainable'] = trainable_flag
# add any non feedforward connections here: e.g. [('L2', 'L1')]
G.add_edges_from(edges_arr)
# initialize network to infer the shapes of all the parameters
main.init_nodes(G, input_nodes=['L1'], batch_size=batch_size)
# unroll the network through time
main.unroll(G, input_seq={'L1': input_images}, ntimes=ntimes)
outputs = {}
# start from the final output of the model and 4 timesteps beyond that
for t in range(ntimes-NUM_TIMESTEPS, ntimes):
idx = t - (ntimes - NUM_TIMESTEPS) # keys start at timepoint 0
outputs[idx] = G.node['readout']['outputs'][t]
return outputs
def basenet2(inputs, train=False, conv_only=False):
x = model_func(inputs, ntimes=TOTAL_TIMESTEPS,
batch_size=batch_size, edges_arr=[],
base_name=BASE_NAME, tau=0.0, trainable_flag=False)
return x[NUM_TIMESTEPS-1]
def parse_image(im):
im = tf.decode_raw(im, np.uint8)
im = tf.image.convert_image_dtype(im, dtype=tf.float32)
im = tf.reshape(im, [256, 256, 3])
return im
class Train(object):
def __init__(self, arch, kind):
self.kind = kind
self.train = kind == 'train'
self.name = self.kind
targets = {}
with tf.name_scope(self.kind):
inputs = self.data()
logits = arch(inputs['images'], train=self.train)
targets['softmax_loss'] = self.softmax_loss(inputs['labels'], logits)
targets['loss'] = targets['softmax_loss']
#targets['loss'] = targets['softmax_loss'] + self.reg_loss()
if self.train:
targets['learning_rate'] = self.learning_rate()
self.optimizer = self.get_optimizer(targets['learning_rate'], targets['loss'])
targets['top1'] = self.top_k(inputs['labels'], logits, 1)
targets['top5'] = self.top_k(inputs['labels'], logits, 5)
self.targets = targets
def data(self, num_parallel_calls=5):
filenames = glob.glob(os.path.join(DATA_PATH, '{}_*.tfrecords'.format(self.kind)))
batch_size = FLAGS.batch_size if self.train else FLAGS.test_batch_size
ds = tf.data.Dataset.from_tensor_slices(filenames)
if self.train:
ds = ds.shuffle(buffer_size=20)
ds = ds.flat_map(tf.data.TFRecordDataset)
ds = ds.map(self.parse_data, num_parallel_calls=num_parallel_calls)
ds = ds.prefetch(batch_size)
if self.train:
ds = ds.shuffle(buffer_size=1250 + 2 * batch_size)
ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
ds = ds.repeat(None if self.train else 1)
self.iterator = ds.make_initializable_iterator()
inputs = self.iterator.get_next()
return inputs
def parse_data(self, example_proto):
feats = {'images': tf.FixedLenFeature((), tf.string),
'labels': tf.FixedLenFeature((), tf.int64),
'ids': tf.FixedLenFeature((), tf.string)}
feats = tf.parse_single_example(example_proto, feats)
im = parse_image(feats['images'])
if self.train:
im = tf.random_crop(im, size=(224, 224, 3))
im.set_shape([224, 224, 3]) # otherwise fails in tf 1.4
im = tf.image.random_flip_left_right(im)
im = tf.image.resize_images(im, (128, 128))
else:
# im = tf.image.resize_images(im, (224, 224))
im = tf.image.resize_images(im, (128, 128))
feats['images'] = im
return feats
def softmax_loss(self, labels, logits):
softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
softmax_loss = tf.reduce_mean(softmax_loss)
return softmax_loss
def reg_loss(self):
return tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
def top_k(self, labels, logits, k=1):
top = tf.nn.in_top_k(logits, labels, k)
top = tf.reduce_mean(tf.cast(top, tf.float32))
return top
def learning_rate(self):
learning_rate = tf.train.polynomial_decay(learning_rate=5e-3,
global_step=tf.train.get_global_step(),
decay_steps=FLAGS.nsteps,
end_learning_rate=5e-5)
return learning_rate
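# Descriptive note (added for clarity): tf.train.polynomial_decay with its
# default power of 1.0 interpolates linearly, so the learning rate starts at
# 5e-3 at step 0 and reaches the end value 5e-5 at step FLAGS.nsteps.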
def get_optimizer(self, learning_rate, loss):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.MomentumOptimizer(learning_rate, .9, use_nesterov=True)
# optimizer = tf.train.AdagradOptimizer(learning_rate)
# optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(loss)
capped_grads_and_vars = []
for grad, var in grads_and_vars:
if grad is not None:
# gradient clipping. Some gradients returned are 'None' because
# no relation between the variable and loss; so we skip those.
capped_grad = tf.clip_by_value(grad, -1., 1.)
# capped_grad, _ = tf.clip_by_global_norm(grad, -1., 1.)
capped_grads_and_vars.append((capped_grad, var))
opt_op = optimizer.apply_gradients(capped_grads_and_vars,
global_step=tf.train.get_global_step())
return opt_op
def __call__(self, sess):
if self.train:
start = time.time()
rec, _ = sess.run([self.targets, self.optimizer])
rec['dur'] = time.time() - start
return rec
else:
results = {k:[] for k in self.targets}
durs = []
sess.run(self.iterator.initializer)
while True:
start = time.time()
try:
res = sess.run(self.targets)
except tf.errors.OutOfRangeError:
break
durs.append(time.time() - start)
for k, v in res.items():
results[k].append(v)
rec = {k: np.mean(v) for k,v in results.items()}
rec['dur'] = np.mean(durs)
return rec
def train(restore=True,
save_train_steps=500,
save_val_steps=5000,
save_model_steps=1000,
):
tf.Variable(0, trainable=False, name='global_step')
train = Train(basenet2, 'train')
val = [Train(basenet2, 'val')]
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if restore:
saver.restore(sess, save_path='./recipgated_imnet_saved/model.ckpt-196000')
sess.run(train.iterator.initializer)
step = sess.run(tf.train.get_global_step())
while step <= FLAGS.nsteps:
step = sess.run(tf.train.get_global_step())
results = {'step': step}
if step % save_val_steps == save_val_steps-1:
for v in val:
results[v.name] = v(sess)
if step % save_model_steps == 0:
saver.save(sess=sess,
save_path='./imnet.ckpt/model.ckpt',
global_step=tf.train.get_global_step())
if step % save_train_steps == 0:
results['train'] = train(sess)
else:
sess.run(train.optimizer)
if len(results) > 1: # not only step is available
print(results)
def get_features(ims):
n_batches = (len(ims) - 1) // FLAGS.test_batch_size + 1
stack_depth = ims.shape[0] // n_batches  # integer division so the placeholder gets an int batch dimension
placeholder = tf.placeholder(shape=(stack_depth, ims[0].shape[0], ims[0].shape[1], 3), dtype=tf.float32)
# placeholder = tf.placeholder(shape=(None, ims[0].shape[0], ims[0].shape[1], 3), dtype=tf.float32)
# ims = tf.tensor
# placeholder = tf.placeholder(shape=tf.shape(ims), dtype=tf.float32)
print('placeholder', placeholder)
basenet2(placeholder, conv_only=True)
ops = tf.get_default_graph().get_operations()
layers = [op.name for op in ops if 'output' in op.name]
print('target layers = ', layers)
# target = tf.get_default_graph().get_tensor_by_name('{}/output:0'.format(layer))
targets = [tf.get_default_graph().get_tensor_by_name(layer+':0') for layer in layers]
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path='./recipgated_imnet_saved/model.ckpt-285000')
n_batches = (len(ims) - 1) // FLAGS.test_batch_size + 1
out = []
for i in range(n_batches):
print('running batch {} of {}'.format(i+1, n_batches))
batch = ims[FLAGS.test_batch_size * i: FLAGS.test_batch_size * (i + 1)]
batch_out = sess.run(targets, feed_dict={placeholder: batch})
# batch_out = sess.run(target, feed_dict={placeholder: batch})
out.append(batch_out)
#out = np.row_stack(out)
return layers, out
def load_HvM_images():
HvM_file = loadmat('../imageData/HvM_128px.mat')
imgs = HvM_file['imgs']
return imgs
def save_HvM_features():
ims = load_HvM_images()
layers, out = get_features(ims)
#import code
#code.interact(local=locals())
import h5py
out_idx = range(len(out[0]))
print(out_idx)
for i in out_idx:
features = np.row_stack([j[i] for j in out])
print('saving features for layer:{}, shaped:{}'.format(layers[i],features.shape))
path = '../featureData/recipgatedt17_HvM_{}_features.mat'.format(layers[i].replace('/','-'))
hf = h5py.File(path, 'w')
hf.create_dataset('features', data=features)
hf.close()
del features
#savemat('../featureData/recipgatedt17_HvM_{}_features.mat'.format(layers[i].replace('/','-')), {
# 'features':features
#})
print("saved HvM features!")
if __name__ == '__main__':
if FLAGS.train:
print('>>> TRAIN MODEL')
train()
else:
print('>>> GET MODEL FEATURES')
save_HvM_features()
``` |
{
"source": "joelday/ml-rescore-es-plugin",
"score": 2
} |
#### File: ml-rescore-es-plugin/contextual_item_ranker/build_model.py
```python
import tensorflow as tf
from typing import Dict
import math
import logging as log
import sys
log.basicConfig(
level=log.DEBUG,
stream=sys.stdout,
)
log.info(f"tensorflow version: {tf.__version__}")
if __name__ == '__main__':
valid_context_keys = [
"key1",
"key2",
"key3",
"key4",
]
item_id = tf.keras.Input(shape=(None,), dtype=tf.string, name="item_id")
context_values = [
tf.keras.Input(shape=(None,), dtype=tf.string, name=k)
for k in valid_context_keys
]
flattened_context_keys = tf.concat(context_values, axis=0)
context_lengths = tf.strings.length(flattened_context_keys)
total_context_characters = tf.reduce_sum(context_lengths)
reshaped_item_id = tf.reshape(item_id, (-1, 1))
item_id_lengths = tf.strings.length(reshaped_item_id)
total_lengths = item_id_lengths + total_context_characters
multiplied_ids = math.pi * tf.cast(total_lengths, tf.float32)
scores = tf.reshape(multiplied_ids, (-1,))
model = tf.keras.Model(
inputs={
**{"item_id": item_id},
**dict(zip(valid_context_keys, context_values))
},
outputs=scores
)
known_keys = ["item_id"] + valid_context_keys
input_signature = {
k: tf.TensorSpec(shape=(None,), dtype=tf.string, name=k)
for k in known_keys
}
@tf.function(input_signature=[input_signature])
def serving_fn(inputs: Dict[str, tf.Tensor]):
return {"scores": model([inputs[k] for k in known_keys])}
model.save(
"model_dir",
signatures={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: serving_fn,
tf.saved_model.PREDICT_METHOD_NAME: serving_fn,
}
)
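# Sanity-check sketch (assumption, not part of the original build script): per
# the graph above, each score is pi times the total character count, e.g. for
# item_id "abc" (3 chars) and "xy" in each of key1..key4 (8 chars total) the
# expected score is pi * 11, roughly 34.56.
#
#   loaded = tf.saved_model.load("model_dir")
#   fn = loaded.signatures["serving_default"]
#   out = fn(item_id=tf.constant(["abc"]),
#            key1=tf.constant(["xy"]), key2=tf.constant(["xy"]),
#            key3=tf.constant(["xy"]), key4=tf.constant(["xy"]))
#   print(out["scores"])  # ~[34.557]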
```
#### File: ml-rescore-es-plugin/tests/test_with_bad_fields.py
```python
from uuid import uuid4 as uuid
import argparse
import requests as r
import logging as log
import random
import string
import sys
import helpers
log.basicConfig(level=log.DEBUG, stream=sys.stdout)
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument("--es-host", required=True)
p.add_argument("--index", required=True)
p.add_argument("--model-domain", default="item_ranker:8500")
args = p.parse_args()
es_host = args.es_host
index = args.index
model_domain = args.model_domain
def generate_request(**kwargs):
return {
"query": {"wildcard": {"name": f"*"}},
"rescore": {
"window_size": 600,
"mlrescore-v1": {
"score_mode": "replace",
"type": "ranking",
"name": "item_ranker",
"model_name": "item_ranker_testing_model",
"domain": model_domain,
"itemid_field": "itemId1",
**kwargs,
}
}
}
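# Note (added for clarity): each request below starts from a known-good
# rescore body and overrides exactly one field with an invalid value via
# kwargs, so the plugin is expected to reject every one of them with a 4xx.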
request_bodies = [
generate_request(domain="unknown:1234"),
generate_request(name="unknown"),
generate_request(type="unknown"),
generate_request(itemid_field="unknown"),
generate_request(model_name="unknown"),
generate_request(itemid_field="unknown"),
generate_request(score_mode="unknown"),
]
for req in request_bodies:
response = r.post(
f"http://{es_host}:9200/{index}/_search",
json=req
)
assert response.status_code // 100 == 4, f"""
Failed to recieved a 400 response with bad input
parameters!
request:
{req}
status_code:
{response.status_code}
body:
{response.json()}
"""
``` |
{
"source": "joeldcosta/mouse_position_application_RealTime",
"score": 4
} |
#### File: joeldcosta/mouse_position_application_RealTime/mouse_position_application_RealTime_with_IMAGE_V2.py
```python
from tkinter import *
import pyautogui
from PIL import Image, ImageTk
root=Tk()
root.title("Mouse Position Application in Real Time")
# Getting Your Screen Size
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
print(ws,hs)
i_size = 30
img = Image.open("dot.png")
img = img.resize((img.width//i_size,img.height//i_size))
img= ImageTk.PhotoImage(img)
def mouse_position(event):
x, y = pyautogui.position()
m = ("X: "+str(x)+'\n'+ "Y: "+str(y))
x1=event.x
y1=event.y
x2=event.x
y2=event.y
# Draw an oval in the given co-ordinates
#canvas.create_oval(x1,y1,x2,y2,fill="black", width=5)
# Using PNG Image
canvas.create_image(x2,y2,image=img)
# In simple format
#canvas.create_text(x2+0,y2+15,text=f"{m}")
# Same as above but with font, size, color
# Anchor East = e, west = w, north = n, south = s
canvas.create_text(x2+0,y2-25,text=f"{m}", anchor='e', font=("Courier", 10), fill='red')
# Create a canvas widget
canvas = Canvas(root, width=f"{ws}", height=f"{hs}", bg = '#F6F6F6', highlightthickness = 0)
canvas.pack()
# To Make Full Screen
#canvas.master.overrideredirect(True)
# Make screen transparent using alpha
canvas.master.wm_attributes("-alpha",0.4)
# Keep it above all other window screens
canvas.master.wm_attributes("-topmost",True)
# To show result after release use <ButtonRelease-1>
canvas.bind('<ButtonPress>', mouse_position)
canvas.mainloop()
``` |
{
"source": "joeldeaguero/blinkie",
"score": 2
} |
#### File: joeldeaguero/blinkie/main.py
```python
from __future__ import unicode_literals
import prompt_toolkit
from prompt_toolkit.application import Application
from prompt_toolkit.eventloop import use_asyncio_event_loop
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import Layout, VSplit, HSplit
from prompt_toolkit.widgets import Box, Button, Frame, TextArea
import asyncio
import time
lineCount = 6
debugAxis = 2
debugSleep = 0.2
bytesSentText = TextArea(text="")
bytesReceivedText = TextArea(text="")
def handleHome():
# time.sleep(debugSleep)
sendHome(debugAxis)
def goAway():
goAway(debugAxis)
def debugAxisX():
debugAxisX()
def debugAxisY():
debugAxisY()
def debugAxisZ():
debugAxisZ()
def enableAxis():
enableAxis(debugAxis)
def setVelocity():
setVelocity(debugAxis)
def handleStatus():
checkStatus(debugAxis)
def moveIncrement():
moveIncrement(debugAxis)
def handleClear():
global bytesSentText
global bytesReceivedText
bytesSentText.text = ""
bytesReceivedText.text = ""
# Key bindings.
kb = KeyBindings()
@kb.add('h')
def keyHome(event):
handleHome()
@kb.add('a')
def keyAway(event):
goAway()
@kb.add('X')
def keyX(event):
debugAxisX()
@kb.add('Y')
def keyY(event):
debugAxisY()
@kb.add('Z')
def keyZ(event):
debugAxisZ()
@kb.add('s')
def keyStatus(event):
handleStatus()
@kb.add('q')
def keyEnable(event):
enableAxis()
@kb.add('v')
def keyVelocity(event):
setVelocity()
@kb.add('I')
def keyIncrement(event):
moveIncrement()
@kb.add('c')
def keyClear(event):
handleClear()
# Layout for displaying hello world.
# (The frame creates the border, the box takes care of the margin/padding.)
testCommands = HSplit([
Button("X", handler=debugAxisX),
Button("Y", handler=debugAxisY),
Button("Z", handler=debugAxisZ), #-default
Button("Status", handler=handleStatus),
Button("Home", handler=handleHome),
Button("Away", handler=goAway),
Button("Qenable", handler=enableAxis),
Button("Velocity",handler=setVelocity),
Button("Increment",handler=moveIncrement),
Button("Clear", handler=handleClear)
])
testCommandContainer = Box(
Frame(testCommands, width=20, height=20)
)
sendMonitor = Box(
Frame(bytesSentText, width=56, height=10)
)
recvMonitor = Box(
Frame(bytesReceivedText, width=56, height=10)
)
bytesMonitor = Box(
HSplit([
TextArea(text="bytes sent"),
sendMonitor,
TextArea(text="bytes received"),
recvMonitor
])
)
clientArea = Box(
VSplit([testCommands, bytesMonitor])
)
layout = Layout(container=clientArea)
@kb.add("c-c")
def _(event):
" Quit when control-c is pressed. "
event.app.exit()
# Build a main application object.
application = Application(layout=layout, key_bindings=kb, full_screen=True, mouse_support=True)
def main():
init()
# Tell prompt_toolkit to use asyncio.
use_asyncio_event_loop()
# Run application async.
asyncio.get_event_loop().run_until_complete(
application.run_async().to_asyncio_future()
)
import argparse
import serial
import serial.threaded
import sys
import RPi.GPIO as GPIO # import RPi.GPIO module
from time import sleep # lets us have a delay
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD
GPIO.setup(24, GPIO.OUT) # set GPIO24 as temp RTS
# docs only specify 15, but that makes no sense
responseSizeExpected = 16
ser = None
buf = ""
def twos_comp(val, bits):
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val # return positive value as is
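# Illustrative examples (added, not in the original file):
#   twos_comp(0x7F, 8) -> 127   (sign bit clear, value unchanged)
#   twos_comp(0xFF, 8) -> -1    (sign bit set, 255 - 256)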
class SerialToGui(serial.threaded.Protocol):
def __init__(self):
pass
def __call__(self):
return self
def connection_lost(self, exc):
global bytesReceivedText
prev = bytesReceivedText.text
oldLines = prev.split("\n")
num=len(oldLines)
if (num > lineCount):
newLines = "\n".join(oldLines[num-lineCount:])
else:
newLines = prev
newLines += "connection lost\n"
#print("connection_lost: {0}\n".format(exc))
bytesReceivedText.text = newLines
def connection_made(self, transport):
global bytesReceivedText
prev = bytesReceivedText.text
oldLines = prev.split("\n")
num=len(oldLines)
if (num > lineCount):
newLines = "\n".join(oldLines[num-lineCount:])
else:
newLines = prev
newLines += "connection made\n"
#print("connection_made: {0}\n".format(transport))
bytesReceivedText.text = newLines
def data_received(self, data):
global bytesReceivedText
bytesReceivedText.text = bytesReceivedText.text[-32:].replace("\n", "") + "\n" + data.hex()
def init():
global ser
parser = argparse.ArgumentParser(description="Simple serial terminal")
parser.add_argument(
"--serial-port",
help="serial port name",
default="/dev/serial0",
dest="serialPort",
)
parser.add_argument(
"--baud-rate",
type=int,
nargs="?",
help="set baud rate, default: %(default)s",
default=38400,
dest="baudRate",
)
group = parser.add_argument_group("serial port")
group.add_argument(
"--parity",
choices=["N", "E", "O", "S", "M"],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default="N",
)
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=True,
)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=None,
)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=None,
)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=None,
)
group = parser.add_argument_group("network settings")
args = parser.parse_args()
if args.rts is not None:
serial.rts(args.rts)
if args.dtr is not None:
serial.dtr(args.dtr)
try:
ser = serial.Serial(
port=args.serialPort,
baudrate=args.baudRate,
parity=args.parity,
xonxoff=args.xonxoff,
rtscts=args.rtscts,
timeout=3.0
)
except serial.SerialException as e:
errMsg = "Could not open serial port {}: {}\n".format(ser.name, e)
sys.stderr.write(errMsg)
sys.exit(1)
#print("open")
ser_to_gui = SerialToGui()
serial_worker = serial.threaded.ReaderThread(ser, ser_to_gui)
serial_worker.start()
#print("listening")
# BCC lsb calculator to check communication integrity
def bcc_calc(bcc_int):
bcc = (~bcc_int & 0xFFF) + 1 # 2's complement calculation (= one's complement + 1)
for i in range(11, 7, -1):
if bcc > 2**i:
bcc -= 2**i # takes the LSB of the integer
bcc = hex(bcc).upper()[2:] # converts the integer to hex characters
if len(bcc) == 1: bcc = '0' + bcc # protocol needs BCC to be two characters
return bcc
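# Illustrative example (added, not in the original file): for the frame used
# in bccChecker() below, "1Q3010600000", the character codes sum to 620, and
# bcc_calc(620) returns '94'.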
def makeCommand(a, b, c, d, e, f, g, h, i, j, k, l):
bcc_int = 0
bcc_int += ord(a)
bcc_int += ord(b)
bcc_int += ord(c)
bcc_int += ord(d)
bcc_int += ord(e)
bcc_int += ord(f)
bcc_int += ord(g)
bcc_int += ord(h)
bcc_int += ord(i)
bcc_int += ord(j)
bcc_int += ord(k)
bcc_int += ord(l)
bcc = bcc_calc(bcc_int)
return bytes([
ord(a[0]),
ord(b[0]),
ord(c[0]),
ord(d[0]),
ord(e[0]),
ord(f[0]),
ord(g[0]),
ord(h[0]),
ord(i[0]),
ord(j[0]),
ord(k[0]),
ord(l[0]),
ord(bcc[0]),
ord(bcc[1])
])
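# Illustrative example (added, not in the original file):
#   makeCommand("1","Q","3","0","1","0","6","0","0","0","0","0")
#   -> b'1Q301060000094'  (12 command characters followed by the 2-char BCC)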
# axis: 0-15
def checkStatus(axis):
sendStringCommand(
makeCommand(
chr(ord("0") + axis), "n", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0"
)
)
return True
def send(str):
global bytesSentText
global ser
prev = bytesSentText.text
oldLines = prev.split("\n")
num=len(oldLines)
if (num > lineCount):
newLines = "\n".join(oldLines[num-lineCount:])
else:
newLines = prev
my_str_as_bytes = str
newLines += "{0}\n".format(my_str_as_bytes.hex())
bytesSentText.text = newLines
GPIO.output(24, 1) # set GPIO24 to 1/GPIO.HIGH/True
ser.write(my_str_as_bytes)
sleep(0.0045) # wait 5mS
GPIO.output(24, 0) # set GPIO24 to 0
def sendStringCommand(cmd):
myBytes = bytes([2])
for i,c in enumerate(cmd):
myBytes += bytes([c])
myBytes += bytes([3])
send(myBytes)
def debugAxisX():
global debugAxis
debugAxis = 0
def debugAxisY():
global debugAxis
debugAxis = 1
def debugAxisZ():
global debugAxis
debugAxis = 2
def sendHome(axis):
sendStringCommand(
makeCommand(chr(ord("0") + axis), "o", "0", "7", "0","0","0","0","0","0","0","0")
)
def goAway(axis):
sendStringCommand(
makeCommand(chr(ord("0") + axis), "o", "0", "8", "0","0","0","0","0","0","0","0")
)
def moveIncrement(axis):
sendStringCommand(
makeCommand(chr(ord("0") + axis), "m", "0", "0", "0","2","1","d","9","5","0","0")
)
def enableAxis(axis):
sendStringCommand(
makeCommand(chr(ord("0") + axis), "q", "1", "0", "0","0","0","0","0","0","0","0")
)
def setVelocity(axis):
sendStringCommand(
makeCommand(chr(ord("0") + axis), "v", "2", "2", "7","1","0","0","1","8","8","0")
)
def bccChecker():
print(makeCommand("1","Q","3","0","1","0","6","0","0","0","0","0"))
bccChecker()
main()
``` |
{
"source": "JoelDeen820/Table_To_VHDL_Converter",
"score": 2
} |
#### File: source/Gates/SingleInputOperator.py
```python
class SingleInputGate:
def __init__(self, value1):
self.value1 = value1.copy()
def copy(self):
return SingleInputGate(self.value1.copy())
def toVHDL(self):
pass
``` |
{
"source": "joeldentici/gitm",
"score": 3
} |
#### File: src/transports/bundle.py
```python
import uuid
class Bundle:
def __init__(self, data):
self.data = data
@staticmethod
def load(path):
with open(path, 'rb') as f:
return Bundle.fromBytes(f.read())
@staticmethod
def fromBytes(data):
return Bundle(data)
@staticmethod
def path():
return '/tmp/' + str(uuid.uuid4()) + '.bundle'
def tmp(self):
path = Bundle.path()
with open(path, 'wb') as f:
for byte in self.data:
f.write(byte)
return path
def bytes(self):
return self.data
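# Illustrative usage sketch (hypothetical path, not from the original file):
#   bundle = Bundle.load('/path/to/some.bundle')
#   tmp_path = bundle.tmp()   # writes the bytes to /tmp/<uuid>.bundle
#   raw = bundle.bytes()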
```
#### File: src/transports/inputs.py
```python
def textInput(field, msg, value):
newValue = raw_input('Enter ' + field + ': ' + msg)
return newValue if newValue else value
import getpass
def passInput(field, msg, value):
newValue = getpass.getpass('Enter ' + field + ': ' + msg)
if len(newValue) == 0:
return value
confirmValue = getpass.getpass('Confirm ' + field + ': ')
if confirmValue != newValue:
print('Passwords do not match')
return passInput(field, msg, value)
return newValue if newValue else value
def csvInput(field, msg, value):
newValue = raw_input('Enter ' + field + ' (comma separated): ' + msg)
if newValue:
return map(lambda x: x.strip(), newValue.split(','))
else:
return value if value else []
def textOutput(value):
return '(' + value + ') ' if value else ''
def passOutput(value):
return textOutput('****' if value else '')
def csvOutput(value):
return textOutput(", ".join(value))
def intInput(field, msg, value):
newValue = textInput(field, msg, value)
try:
return int(newValue)
except Exception as e:
print(str(e))
return intInput(field, msg, value)
def boolInput(field, msg, value):
newValue = textInput(field, msg, value)
if (type(newValue) == bool):
return newValue
if newValue.lower() == 'y':
return True
elif newValue.lower() == 'n':
return False
else:
print('Must be y/n')
return boolInput(field, msg, value)
def boolOutput(value):
return textOutput({
True: 'y',
False: 'n'
}.get(value, ''))
def intOutput(value):
return textOutput(str(value))
inputMethod = {
'text': textInput,
'pass': passInput,
'csv': csvInput,
'int': intInput,
'bool': boolInput
}
outputMethod = {
'text': textOutput,
'pass': passOutput,
'csv': csvOutput,
'int': intOutput,
'bool': boolOutput
}
'''
update :: (string, Dict string any, string, string, any) -> ()
Updates the value of the specified option in the specified dictionary.
The user is prompted to enter the specified field value
The user's response is stored into the specified option
If a default option to take the default value from is specified, the
value of that option is used when the specified option value (current) is null
'''
def update(inputType, options, field, option, defaultOption = None):
#get current value and message
defaultValue = options[defaultOption] if defaultOption else ''
value = options.get(option, defaultValue)
curMsg = outputMethod[inputType](value)
#prompt for input and update option value
options[option] = inputMethod[inputType](field, curMsg, value)
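# Illustrative usage sketch (hypothetical values, not from the original file):
#   options = {'host': 'example.com'}
#   update('text', options, 'Host', 'host')
# prompts "Enter Host: (example.com) " and keeps 'example.com' when the user
# just presses enter, otherwise stores the new response in options['host'].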
``` |
{
"source": "joeldentici/python_stepper",
"score": 3
} |
#### File: python_stepper/examples/lambda.py
```python
def apply(f, x):
return f(x)
apply(lambda x: x + 1, 5)
```
#### File: python_stepper/examples/one.py
```python
def double(x):
return x * 2
def apply(f, x):
return f(x)
def double_add(a, b):
print('in double_add', a)
return double(a) + double(b)
double_add('h', 'e')
double_add(5, 10 + 2)
apply(double, 5)
```
#### File: src/include/expr_statement.py
```python
from reducible import Reducible
class ExprStatement(Reducible):
def __init__(self, program, expr):
super().__init__(program, 1)
self.expr = self.program.wrap(expr)
self.state = 'initial'
def do_reduce(self):
self.report()
result = self.expr.reduce()
self.report()
return result
def do_show(self):
if self.state == 'initial':
return {
"type": "statement",
"value": self.expr.show()
}
```
#### File: src/include/function_app_scope.py
```python
from name_model import NameScope
class FunctionAppScope(NameScope):
def __init__(self, program, info, args = [], is_closure = False):
super().__init__(program)
self.info = info
self.displays = {}
self.parent_scope = info.parent_scope
self.is_closure = is_closure
for b in self.info.as_bindings:
self.create_binding(b)
# set values for parameter bindings
for i,b in enumerate(info.params):
arg = args[i] if i < len(args) else None
self.bind(b, arg)
def resolve_scope(self, name):
if name in self.info.gl_bindings:
return self.program.name_model.scopes[0].resolve_scope(name)
elif name in self.info.nl_bindings:
return self.parent_scope.resolve_scope(name)
elif name in self.info.as_bindings:
return self
elif name in self.info.params:
return self
else:
return self.parent_scope.resolve_scope(name)
def show_name(self, name):
if self.is_closure:
return name
else:
return super().show_name(name)
```
#### File: src/include/statement_group.py
```python
from report_state import rename_statements
'''
StatementGroup
written by <NAME>
on 02/15/2018
Represents a group of statements that run in sequence.
'''
class StatementGroup:
'''
StatementGroup class definition
'''
def __init__(self, program, stmts):
'''
__init__ :: (StatementGroup, Program, [string]) -> ()
Initializes the StatementGroup.
'''
self.program = program
self.set_statements(stmts)
self.active = []
def activate_statement(self, stmt):
'''
activate_statement :: (StatementGroup a, Reducible b) -> ()
Activates the next statement in the group.
'''
self.freeze_last()
self.active.append(stmt)
def show(self):
'''
show :: StatementGroup a -> ProgramState
Gets the state of this group, which changes as its statements run.
'''
raise NotImplementedError("No show method implemented for this StatementGroup")
def base_show(self):
'''
base_show :: StatementGroup a -> ProgramState
Returns a list of ProgramState
'''
active = self.show_active()
original = self.show_original()
return {
"type": "statement_group",
"statements": active + original
}
def enter(self):
self.program.push_statement_group(self)
def exit(self):
self.freeze_last()
# handle early returns!
while (self.program.active_statement_group() != self):
self.program.pop_statement_group()
self.program.pop_statement_group()
def reset(self):
self.active = []
self.rename_statements()
def freeze_last(self):
if len(self.active):
self.active[-1] = StrStmt(self.active[-1].show())
def set_statements(self, stmts):
self.original = stmts
self.renamed = self.original
def has_statements(self):
return len(self.original) > 0
def ignore_stmt(self):
cur_stmt = self.renamed[len(self.active)]
self.activate_statement(StrStmt(cur_stmt))
def show_active(self):
return [x.show() for x in self.active[-1:]]
def show_original(self):
return self.renamed[len(self.active):]
def rename_statements(self):
self.renamed = rename_statements(self.program.name_model.current_scope, self.original)
class StrStmt:
def __init__(self, stmt):
self.stmt = stmt
def show(self):
return self.stmt
class RootStatementGroup(StatementGroup):
'''
RootStatementGroup class definition
'''
def __init__(self, program):
'''
__init__ :: (RootStatementGroup a, Program) -> ()
Initializes the RootStatementGroup.
'''
super().__init__(program, [])
self.ended = False
self.is_renamed = False
def show(self):
'''
show :: RootStatementGroup a -> ProgramState
Gets the state of the root statement group.
'''
if not self.is_renamed:
self.rename_statements()
self.is_renamed = True
active = self.show_active()
original = self.show_original()
bindings = self.program.name_model.show()
#ran_stmts = active[0:-1] if not self.ended else active
#ran = self.show_boundary('ran') + ran_stmts
ran = []
memory = self.show_boundary('memory') + bindings
current = self.show_boundary('running') + active[-1:]
future = original
rest = current + future if not self.ended else ['# Your Program Finished']
return {
"type": "statement_group",
"statements": ran + memory + rest
}
def show_boundary(self, text):
total_len = 50
text = ' ' + text + ' '
hashes = int((total_len - len(text)) / 2)
hash_text = '#' * hashes
return [hash_text + text + hash_text]
def set_ended(self):
self.ended = True
```
#### File: src/include/stepper_lib.py
```python
from binary_operation import BinaryOperation
from program import Program
from function_def import FunctionDef
from function_call import FunctionCall
from lambda_expression import LambdaExpression
from return_statement import ReturnStatement
from expr_statement import ExprStatement
from identifier import Identifier
from commandline_reporter import CommandlineReporter
from if_expr import IfExpression
from if_stmt import IfStatement
from while_loop import WhileLoop
from assignment_statement import AssignmentStatement
from attribute import Attribute
context = None
def initialize(reporter):
global context
'''
initialize :: Reporter -> ()
Initialize the program context with a
reporter, which is used to interact with
the user.
'''
context = Program(reporter, 1)
def function_def(name, initial_src, params, fn, named_src, as_bindings, nl_bindings, gl_bindings):
'''
function_def :: (string, [string], [string], Function) -> ()
Occurs when a function definition is finished
'''
context.evaluate_statement(FunctionDef(context, name, initial_src, params, fn,\
named_src, as_bindings, nl_bindings, gl_bindings))
def assignment_statement(lval, value):
'''
assignment_statement :: (Ref, Expression a) -> a
Occurs before an assignment happens (ie, this is the expression of an
assignment)
'''
#return context.evaluate_statement(AssignmentStatement(context, lval, value))
return context.evaluate_statement(AssignmentStatement(context, lval, value))
def return_statement(value):
'''
return_statement :: Expression a -> a
Occurs before a return happens (ie, this is the expression of a return)
'''
return context.evaluate_statement(ReturnStatement(context, value))
def lambda_expression(initial_src, params, fn, named_src):
'''
lambda_expression :: Function a -> LambdaExpression a
Wraps a lambda expression
'''
return LambdaExpression(context, initial_src, params, fn, named_src)
def function_call(fn, *args):
'''
function_call :: (Function a, [any]) -> FunctionCall a
Wraps a function call
'''
return FunctionCall(context, fn, args)
def binary_operation(left, op, right):
'''
binary_operation :: (Expression a, string, Expression a) -> BinaryOperation a
Wraps a binary operation
'''
return BinaryOperation(context, op, left, right)
def expr_stmt(expr):
'''
expr_statement :: Expression a -> ()
Wraps an expression as statement
'''
context.evaluate_statement(ExprStatement(context, expr))
def ref(id, value):
'''
ref :: (string, any) -> Identifier
Wraps an identifier reference.
'''
return Identifier(context, id, value)
def if_expr(test, t, f):
return IfExpression(context, test, t, f)
def begin_if(test, t, f):
return context.evaluate_statement(IfStatement(context, test, t, f))
def end_group():
context.active_statement_group().cleanup()
def loop_continue(explicit = False):
context.active_statement_group().loop_continue(explicit)
def loop_break():
context.active_statement_group().loop_break()
def begin_while(t, f):
context.evaluate_statement(WhileLoop(context, t, f))
def while_test(test):
return context.active_statement_group().while_test(test)
def ignore_stmt():
context.active_statement_group().ignore_stmt()
def module_statements(stmts, bindings):
context.active_statement_group().set_statements(stmts)
for b in bindings:
context.name_model.current_scope.create_binding(b)
def end_program():
context.report_clear(1)
context.active_statement_group().set_ended()
context.report(1, True)
def report(granularity):
context.report(granularity)
context.reducible_stack.pop()
def attribute(left, ident):
return Attribute(context, left, ident)
# This should be done by the instrumenter at a later time
# to allow specifying the reporter as cmd line argument
initialize(CommandlineReporter())
```
#### File: python_stepper/src/run_stepper.py
```python
import sys
import tempfile
import instrumenter
import os
from shutil import copy2
'''
run_stepper.py
written by <NAME>
on 01/22/2018
Evaluate a python script with the
stepper.
Usage:
./run_stepper.py <script> ...<command-line-args>
The script is instrumented, and
placed into a temporary directory with the
stepper_lib runtime, then executed with the
specified command line arguments. It will have
its stdin and stdout appropriately piped to the
parent process stdin and stdout.
'''
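# Example invocation (hypothetical script path and arguments):
#   ./run_stepper.py examples/one.py arg1 arg2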
def copy_lib(tmp):
location = os.path.dirname(os.path.abspath(__file__))
# TODO: read this list from the filesystem
files = [
'assignment_statement.py',
'attribute.py',
'binary_operation.py',
'commandline_reporter.py',
'expr_statement.py',
'function_app_scope.py',
'function_call.py',
'function_def.py',
'identifier.py',
'if_expr.py',
'if_stmt.py',
'lambda_expression.py',
'name_model.py',
'program.py',
'reducible.py',
'report_state.py',
'return_statement.py',
'statement_group.py',
'stepper_lib.py',
'value.py',
'while_loop.py',
]
# TODO: Use platform specific paths
paths = (location + '/include/' + file for file in files)
for path in paths:
copy2(path, tmp)
def run_stepper(script, src, args):
# instrument source code
instrumented = instrumenter.instrument(src)
# created temporary directory to run in
with tempfile.TemporaryDirectory() as tmp:
# path to instrumented script
script_path = tmp + '/' + os.path.basename(script)
copy_lib(tmp)
# TODO: Use platform specific paths
# write instrumented script
with open(script_path, 'w') as f:
f.write(instrumented)
# TODO: use subprocess and set up piped IO streams
# run instrumented script
os.system('python3 ' + script_path + ' ' + ' '.join(args))
# get command line arguments
if len(sys.argv) < 2:
print("""Usage:
./run_stepper.py <script> ...<command-line-args>""")
args = sys.argv[2:] if len(sys.argv) > 2 else []
script = sys.argv[1]
with open(script, 'r') as f:
src = f.read()
run_stepper(script, src, args)
```
#### File: src/test_instrumenter/if_expr.py
```python
import instrumenter
import unittest
class TestIfExpr(unittest.TestCase):
def test_if_expr(self):
src = """
5 if x > 7 else 10
""".strip()
expected = """
stepper_lib.if_expr(x > 7, 5, 10)
""".strip()
actual = instrumenter.instrument(src, "ifexpr").strip()
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
```
#### File: src/test_instrumenter/return_statement.py
```python
import instrumenter
import unittest
class TestReturnStatement(unittest.TestCase):
def test_return_statement(self):
src = """
def add(x, y):
if x > y:
return x + y
else:
return x - y
""".strip()
expected = """
def add(x, y):
if x > y:
return stepper_lib.return_statement(x + y)
else:
return stepper_lib.return_statement(x - y)
""".strip()
actual = instrumenter.instrument(src, "return_statement").strip()
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joeldierkes/documentation",
"score": 3
} |
#### File: documentation/tools/generate_release_notes_index.py
```python
import os
import pathlib
import re
from collections import defaultdict
from typing import Dict, List, Optional, TypeVar
import jinja2
MAJOR_VERSION = re.compile(r"(\d+)\.\d+\.\d+")
MINOR_VERSION = re.compile(r"\d+\.(\d+)\.\d+")
PATCH_VERSION = re.compile(r"\d+\.\d+\.(\d+)")
MAJOR_MINOR_VERSION = re.compile(r"(\d+\.\d+)\.\d+")
T = TypeVar("T")
def ensure_not_none(inp: Optional[T]) -> T:
"""
This function unwraps an Optional type and returns the content. It fails if
the provided value is None.
:param inp: The Optional type with the content to return.
:returns: The content of the inp.
"""
assert inp, "The input should not be None!"
return inp
def map_post_version_sort_to_number(stem: str) -> int:
"""
Maps the post version string (e.g. "post1" in "1.27.4.post1") to a
number. This defines the sorting of the releases.
"""
if re.match(r".*rc\d+", stem):
return 0
if re.match(r".*\.post\d+", stem):
return 2
return 1
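# For illustration, the sort keys produced for a few assumed stems:
#   map_post_version_sort_to_number("1.27.4rc1") -> 0 (release candidate)
#   map_post_version_sort_to_number("1.27.4") -> 1 (regular release)
#   map_post_version_sort_to_number("1.27.4.post1") -> 2 (post release)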
def render_templates(templates_dir: str, output_path: pathlib.Path) -> None:
def minor_release_get_title(path: str, version: str) -> str:
"""
Returns the title for a minor release, prefixed with the version string.
Raises an exception if no title is known for the given version.
:param path: The path to the folder containing the release information.
:param version: A major and minor version of a release. (e.g. "1.27")
:returns: The corresponding release title.
"""
# We hardcode the minor release titles for now, since there is no easy
# way to automatically get them.
HARD_CODED_RELEASE_NOTE_TITLE = {
"1.30": "The Donkeynator",
"1.29": "Into the Donkeyverse",
"1.28": "Teenage Mutant Ninja Donkeys",
"1.27": "Batdonkey v Superdonkey",
"1.26": "Donkey League of La Mancha",
"1.25": "Rat-Donkey",
"1.24": "Aquadonkey",
"1.23": "The incredible Donkey",
"1.22": "Green Donkey",
"1.21": "Donkeys of the Galaxy",
"1.20": "Wonder Donkey",
"1.19": "Fantastic Donkeys",
"1.18": "Invisible Donkey",
"1.17": "Donkey Surfer",
"1.16": "<NAME>",
"1.15": "Daredonkey",
"1.14": "<NAME>",
"1.13": "Donkerine",
"1.12": "Captain Donkey",
"1.11": "Batdonkey",
"1.10": "Irondonkey",
"1.9": "Superdonkey",
"1.8": "Spiderdonkey",
"1.7": "Donkey One",
"1.6": "The Donkey awakens",
"1.5": "Return of the Donkey",
"1.4": "The Donkey strikes back",
}
if version in HARD_CODED_RELEASE_NOTE_TITLE:
return version + " " + HARD_CODED_RELEASE_NOTE_TITLE[version]
raise Exception(
f"The minor version {version} is not present in the hardcoded list of minor release titles!" # noqa: E501
)
def index_item(path: pathlib.Path) -> dict:
return {"stem": path.stem, "path": str(path.relative_to(output_path))}
def index_func(path: str) -> Dict[str, List[Dict]]:
relative_path = output_path / path
if not str(relative_path).startswith(str(output_path)):
raise ValueError("path may not escape the output path")
if not relative_path.exists():
raise ValueError(f"cannot index: {path} does not exist")
items = relative_path.iterdir()
mapped_items = defaultdict(list)
for i in items:
mapped_items[
ensure_not_none(MAJOR_MINOR_VERSION.match(i.stem)).group(1)
].append(
{
"major_version_number": int(
ensure_not_none(MAJOR_VERSION.match(i.stem)).group(1)
),
"minor_version_number": int(
ensure_not_none(MINOR_VERSION.match(i.stem)).group(1)
),
"patch_version_number": int(
ensure_not_none(PATCH_VERSION.match(i.stem)).group(1)
),
"post_version_sort": map_post_version_sort_to_number(i.stem),
"stem": i.stem,
"path": str(i.relative_to(output_path)),
}
)
return mapped_items
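# Illustrative result (hypothetical directory "released" containing
# "1.27.0.md" and "1.27.1.post1.md"): index_func("released") maps "1.27" to
# two entries, each holding the parsed major/minor/patch numbers, the
# post-version sort key, the stem and the path relative to the output path.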
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir))
jinja_env.globals["index"] = index_func
jinja_env.globals["minor_release_get_title"] = minor_release_get_title
for tpl in pathlib.Path(templates_dir).iterdir():
# render all templates with .md as the first suffix and only non-hidden files
if tpl.suffixes[0] == ".md" and not tpl.name.startswith("."):
jinja_template = jinja_env.get_template(str(tpl.relative_to(templates_dir)))
tpl_out_path = output_path / tpl.name[: tpl.name.find(".md") + 3]
tpl_out_path.write_text(jinja_template.render())
if __name__ == "__main__":
templates_dir: str = os.path.join(os.path.dirname(__file__), "templates")
assert os.path.exists(templates_dir)
render_templates(
templates_dir,
pathlib.Path(os.path.join(os.path.dirname(__file__), "../docs")).resolve(),
)
```
#### File: tools/run_in_docker/rucio_processor.py
```python
import dataclasses
import logging
import typing as t
import docspec
import docstring_parser
from pydoc_markdown.interfaces import Processor, Resolver
logger = logging.getLogger(__name__)
def sanitize(s: t.Optional[str]) -> str:
if not s:
return ""
character_map = {
r"<": r"\<",
r">": r"\>",
}
for (before, after) in character_map.items():
s = s.replace(before, after)
return s
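# For illustration: sanitize escapes angle brackets so docstring text is not
# interpreted as JSX/HTML tags in the generated markdown, e.g.
# sanitize("List<int>") returns "List\<int\>" and sanitize(None) returns "".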
@dataclasses.dataclass
class _ParamLine:
"""
Helper data class for holding details of Sphinx arguments.
"""
name: str
docs: str
type: t.Optional[str] = None
def generate_sections_markdown(sections):
ret = []
ret.append("<table style={{border: 'none'}}><tbody>\n")
for key, section in sections.items():
if section:
ret.append("\n<tr style={{border: 'none'}}>\n")
ret.append(
"\n<td style={{border: 'none', backgroundColor: 'white', 'verticalAlign': 'top'}}>\n" # noqa: E501
)
ret.append(f"**{key}**:")
ret.append("\n</td>\n")
ret.append(
"\n<td style={{border: 'none', backgroundColor: 'white', 'verticalAlign': 'top'}}>\n" # noqa: E501
)
ret.extend(section)
ret.append("\n</td>\n")
ret.append("\n</tr>\n")
ret.append("\n</tbody></table>\n")
return ret
@dataclasses.dataclass
class RucioProcessor(Processor):
_KEYWORDS = {
"Arguments": [
"arg",
"argument",
"param",
"parameter",
"type",
],
"Returns": [
"return",
"returns",
"rtype",
],
"Raises": [
"raises",
"raise",
],
}
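# For illustration, check_docstring_format() below returns True for a
# REST-style docstring (hypothetical text) such as ":param name: the user
# name" or ":returns: a greeting", since those field names appear in
# _KEYWORDS.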
def check_docstring_format(self, docstring: str) -> bool:
return any(
f":{k}" in docstring for _, value in self._KEYWORDS.items() for k in value
)
def process(
self, modules: t.List[docspec.Module], resolver: t.Optional[Resolver]
) -> None:
docspec.visit(modules, self._process)
def _convert_raises(
self, raises: t.List[docstring_parser.common.DocstringRaises]
) -> list:
"""Convert a list of DocstringRaises from docstring_parser to markdown lines
:return: A list of markdown formatted lines
"""
converted_lines = []
for entry in raises:
converted_lines.append(
"`{}`: {}\n".format(
sanitize(entry.type_name), sanitize(entry.description)
)
)
return converted_lines
def _convert_params(
self, params: t.List[docstring_parser.common.DocstringParam]
) -> list:
"""Convert a list of DocstringParam to markdown lines.
:return: A list of markdown formatted lines
"""
converted = []
for param in params:
if param.type_name is None:
converted.append(
"`{name}`: {description}\n".format(
name=sanitize(param.arg_name),
description=sanitize(param.description),
)
)
else:
converted.append(
"`{name}` (`{type}`): {description}\n".format(
name=sanitize(param.arg_name),
type=param.type_name,
description=sanitize(param.description),
)
)
return converted
def _convert_returns(
self, returns: t.Optional[docstring_parser.common.DocstringReturns]
) -> str:
"""Convert a DocstringReturns object to a markdown string.
:return: A markdown formatted string
"""
if not returns:
return ""
if returns.type_name:
type_data = "`{}`: ".format(returns.type_name)
else:
type_data = ""
return " " + type_data + (sanitize(returns.description) or "") + "\n"
def _process(self, node: docspec.ApiObject) -> None:
if not node.docstring:
return
lines = []
components: t.Dict[str, t.List[str]] = {}
parsed_docstring = docstring_parser.parse(
node.docstring.content, docstring_parser.DocstringStyle.REST
)
components["Arguments"] = self._convert_params(parsed_docstring.params)
components["Raises"] = self._convert_raises(parsed_docstring.raises)
return_doc = self._convert_returns(parsed_docstring.returns)
if return_doc:
components["Returns"] = [return_doc]
if parsed_docstring.short_description:
lines.append(sanitize(parsed_docstring.short_description))
lines.append("")
if parsed_docstring.long_description:
lines.append(sanitize(parsed_docstring.long_description))
lines.append("")
lines.extend(generate_sections_markdown(components))
node.docstring.content = "\n".join(lines)
``` |
{
"source": "joeldierkes/persistent_cache",
"score": 3
} |
#### File: persistent_cache/examples/download_file.py
```python
import time
import requests
from persistent_cache import persistent_cache
@persistent_cache
def fetch_data():
return requests.get(
"https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2019-01.csv"
).content
print("Downloading...")
start = time.time()
fetch_data() # Downloads the data from the web
print(f"Time first call : {time.time() - start:.3f}s")
start = time.time()
fetch_data() # Uses the cached data, much faster now
print(f"Time second call: {time.time() - start:.3f}s")
```
#### File: persistent_cache/tests/test_persistent_cache_dir.py
```python
import os
import persistent_cache
from . import CacheTestCase
@persistent_cache.persistent_cache
def f():
return 0
class TestPersistentCacheDir(CacheTestCase):
def test_persistent_cache_dir(self):
f()
assert os.path.exists(".persistent_cache")
``` |
{
"source": "JoelDuring/PASTAQ",
"score": 3
} |
#### File: python-bindings/pastaq/__init__.py
```python
from .pastaq import *
import pastaq
import os
import json
import logging
import time
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
def find_sequence_consensus(annotations, sequence_column, min_consensus_count):
consensus = annotations[["cluster_id", "file_id", sequence_column]]
consensus = consensus.drop_duplicates()
consensus.columns = ["cluster_id", "file_id", "consensus_sequence"]
consensus = consensus.groupby(["cluster_id", "consensus_sequence"]).agg(
consensus_count=('consensus_sequence', 'count'))
max_count = consensus.groupby("cluster_id").max().reset_index()
max_count.columns = ["cluster_id", "consensus_count_max"]
consensus = pd.merge(consensus.reset_index(), max_count, on="cluster_id")
consensus = consensus[consensus["consensus_count"]
== consensus["consensus_count_max"]]
# NOTE: Should we ignore annotations when the consensus is ambiguous? For
# example, if we have sequence A for 3 samples and sequence B for another 3,
# the consensus is "unclear". Furthermore, we might choose to ignore it or
# to choose a suitable one. For now we are keeping all annotations even if
# that means having ambiguity about the sequence being assigned to a given
# isotope/feature cluster.
consensus = consensus[consensus["consensus_count"] >= min_consensus_count]
consensus = consensus.drop(["consensus_count_max"], axis=1)
return consensus
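# Illustrative example (hypothetical data): if cluster 1 was annotated with
# sequence "PEPTIDEA" in three files and "PEPTIDEB" in one file, the
# consensus sequence for cluster 1 is "PEPTIDEA", provided
# min_consensus_count is at most 3.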
def find_protein_groups(
feature_data,
feature_annotations,
sequence_col,
protein_col,
protein_description_col,
min_peptides_number,
remove_subset_proteins,
ignore_ambiguous_peptides,
protein_quant_type,
):
# Create a list of annotations only with the info we need for protein groups.
protein_annotations = feature_annotations[['cluster_id', sequence_col, protein_col, protein_description_col]].copy()
protein_annotations = protein_annotations.drop_duplicates().dropna()
# Assign a unique numeric id for peptide and proteins for faster comparisons.
peptide_map = dict(zip(range(0, len(protein_annotations[sequence_col].unique())), protein_annotations[sequence_col].unique()))
protein_map = dict(zip(range(0, len(protein_annotations[protein_col].unique())), protein_annotations[protein_col].unique()))
peptide_map_df = pd.DataFrame({'peptide_id': peptide_map.keys(), sequence_col: peptide_map.values()})
protein_map_df = pd.DataFrame({'protein_id': protein_map.keys(), protein_col: protein_map.values()})
protein_annotations = pd.merge(protein_annotations, peptide_map_df)
protein_annotations = pd.merge(protein_annotations, protein_map_df)
# Create a copy of the feature data for aggregation.
protein_group_data = feature_data.copy()
protein_group_data['protein_group_id'] = -1
protein_group_metadata = protein_annotations.copy()
protein_group_metadata['protein_group_id'] = -1
# Initialize graph of protein-peptide nodes.
protein_nodes = {} # protein_id -> [peptide_1, peptide_2...]
peptide_nodes = {} # peptide_id -> [protein_1, protein_2...]
for index, row in protein_annotations.iterrows():
if row.protein_id not in protein_nodes:
protein_nodes[row.protein_id] = set()
protein_nodes[row.protein_id].add(row.peptide_id)
if row.peptide_id not in peptide_nodes:
peptide_nodes[row.peptide_id] = set()
peptide_nodes[row.peptide_id].add(row.protein_id)
def remove_proteins_from_graph(to_be_removed, protein_nodes, peptide_nodes):
for protein_id in to_be_removed:
for peptide_id in protein_nodes[protein_id]:
peptide_nodes[peptide_id] = set([
this_id
for this_id in peptide_nodes[peptide_id]
if this_id != protein_id
])
if len(peptide_nodes[peptide_id]) == 0:
del peptide_nodes[peptide_id]
del protein_nodes[protein_id]
# Filter out proteins that don't meet the minimum number of peptides requirement.
if min_peptides_number > 1:
to_be_removed = []
for protein_id, peptide_ids in protein_nodes.items():
if len(peptide_ids) < min_peptides_number:
to_be_removed += [protein_id]
remove_proteins_from_graph(to_be_removed, protein_nodes, peptide_nodes)
# Remove fully subset proteins.
if remove_subset_proteins:
subset_proteins = []
for protein_id, peptide_ids in protein_nodes.items():
# Find proteins that share some peptide with this one.
target_proteins = set()
for peptide_id in peptide_ids:
for target_protein in peptide_nodes[peptide_id]:
if target_protein != protein_id:
target_proteins.add(target_protein)
# Check target proteins to check if peptides are fully contained within
# another group.
for target_protein in target_proteins:
target_peptides = protein_nodes[target_protein]
if set(peptide_ids).issubset(target_peptides) and len(peptide_ids) < len(target_peptides):
subset_proteins += [protein_id]
break
remove_proteins_from_graph(subset_proteins, protein_nodes, peptide_nodes)
#
# Identify the type of each peptide (unique vs. shared) and, if the razor
# principle is applied, which protein(s) it will be assigned to.
# - Unique: Only appears in a single protein.
# - Shared: The peptide appears in more than one protein.
# - Razor: Appears in multiple proteins; assigned to the protein with the
# largest number of peptides.
# - Ambiguous: Multiple proteins have the exact same number of peptides, so
# the peptide has more than one assigned protein.
#
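# Illustrative example (hypothetical peptides/proteins): if peptide P1 maps
# only to protein A, it is unique to A. If P2 maps to A and B and A has more
# peptides, P2 is shared and razor-assigned to A. If A and B have the same
# number of peptides, P2 is shared and ambiguous between A and B.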
# Initialize protein_peptides
protein_peptides = {}
for protein_id in protein_nodes.keys():
protein_peptides[protein_id] = {
'unique': set(),
'shared': set(),
'razor': set(),
'ambiguous': set(),
}
for peptide_id, protein_ids in peptide_nodes.items():
if len(protein_ids) == 1:
protein_id = list(protein_ids)[0]
if protein_id not in protein_peptides:
protein_peptides[protein_id] = {
'unique': set([peptide_id]),
'shared': set(),
'razor': set(),
'ambiguous': set(),
}
else:
protein_peptides[protein_id]['unique'].add(peptide_id)
continue
cur_assigned_proteins = []
cur_assigned_peptide_count = 0
for protein_id in protein_ids:
protein_peptides[protein_id]['shared'].add(peptide_id)
if len(protein_nodes[protein_id]) == cur_assigned_peptide_count:
cur_assigned_proteins += [protein_id]
if len(protein_nodes[protein_id]) > cur_assigned_peptide_count:
cur_assigned_proteins = [protein_id]
cur_assigned_peptide_count = len(protein_nodes[protein_id])
for protein_id in cur_assigned_proteins:
if len(cur_assigned_proteins) > 1:
protein_peptides[protein_id]['ambiguous'].add(peptide_id)
else:
protein_peptides[protein_id]['razor'].add(peptide_id)
# Find protein groups that contain unique peptides
protein_groups = {}
protein_group_counter = 0
unique_protein_ids = []
non_unique_protein_ids = []
for protein_id, peptide_ids in protein_nodes.items():
unique_found = False
for peptide_id in peptide_ids:
if len(peptide_nodes[peptide_id]) == 1:
unique_protein_ids += [protein_id]
unique_found = True
protein_groups[protein_group_counter] = set([protein_id])
protein_group_counter += 1
break
if not unique_found:
non_unique_protein_ids += [protein_id]
# Remove unique protein groups from the graph.
remove_proteins_from_graph(unique_protein_ids, protein_nodes, peptide_nodes)
# Group proteins with shared peptides only.
explored_proteins = set()
for protein_id in non_unique_protein_ids:
if protein_id in explored_proteins:
continue
previous_protein_group_size = 0
protein_group = set()
protein_group.add(protein_id)
while previous_protein_group_size != len(protein_group):
previous_protein_group_size = len(protein_group)
proteins_to_explore = [id for id in protein_group if id not in explored_proteins]
# Find all proteins associated with all peptides for unexplored proteins.
for id in proteins_to_explore:
for peptide_id in protein_nodes[id]:
for new_protein in peptide_nodes[peptide_id]:
protein_group.add(new_protein)
explored_proteins.add(id)
# Update protein group list and remove them from the available list.
protein_groups[protein_group_counter] = protein_group
protein_group_counter += 1
remove_proteins_from_graph(list(protein_group), protein_nodes, peptide_nodes)
# Depending on the selected quantification type, find which peptides should be
# used for quantification for each protein group.
for protein_group_id, protein_ids in protein_groups.items():
selected_peptides = set()
for protein_id in protein_ids:
for peptide_id in protein_peptides[protein_id]['unique']:
selected_peptides.add(peptide_id)
if protein_quant_type == 'all':
for peptide_id in protein_peptides[protein_id]['shared']:
selected_peptides.add(peptide_id)
elif protein_quant_type == 'razor':
for peptide_id in protein_peptides[protein_id]['razor']:
selected_peptides.add(peptide_id)
if ignore_ambiguous_peptides:
for peptide_id in protein_peptides[protein_id]['ambiguous']:
selected_peptides.add(peptide_id)
selected_cluster_ids = protein_annotations.cluster_id[protein_annotations.peptide_id.isin(selected_peptides)].unique()
protein_group_data.loc[protein_group_data.cluster_id.isin(selected_cluster_ids), 'protein_group_id'] = protein_group_id
protein_group_metadata.loc[protein_group_metadata.peptide_id.isin(selected_peptides) & protein_group_metadata.protein_id.isin(protein_ids) , 'protein_group_id'] = protein_group_id
def aggregate_protein_group_annotations(x):
ret = {}
if "consensus_sequence" in x:
ret["consensus_sequence"] = ".|.".join(
np.unique(x['consensus_sequence'].dropna())).strip(".|.")
if "consensus_protein_name" in x:
ret["consensus_protein_name"] = ".|.".join(
np.unique(x['consensus_protein_name'].dropna())).strip(".|.")
if "consensus_protein_description" in x:
ret["consensus_protein_description"] = ".|.".join(
np.unique(x['consensus_protein_description'].dropna())).strip(".|.")
return pd.Series(ret)
protein_group_data = protein_group_data[protein_group_data.protein_group_id != -1]
del protein_group_data['cluster_id']
protein_group_data = protein_group_data.groupby('protein_group_id').sum().reset_index()
protein_group_metadata = protein_group_metadata[protein_group_metadata.protein_group_id != -1]
del protein_group_metadata['cluster_id']
del protein_group_metadata['peptide_id']
del protein_group_metadata['protein_id']
protein_group_metadata = protein_group_metadata.drop_duplicates()
protein_group_metadata = protein_group_metadata.groupby('protein_group_id')
protein_group_metadata = protein_group_metadata.apply(aggregate_protein_group_annotations)
protein_group_metadata = protein_group_metadata.reset_index()
return protein_group_data, protein_group_metadata
def plot_mesh(mesh, transform='sqrt', figure=None):
plt.style.use('dark_background')
if figure is None:
figure = plt.figure()
img = mesh.data
img = np.reshape(img, (mesh.m, mesh.n))
bins_rt = mesh.bins_rt
bins_mz = mesh.bins_mz
plt.figure(figure.number)
plt.clf()
gs = gridspec.GridSpec(5, 5)
mz_plot = plt.subplot(gs[0, :-1])
mz_plot.clear()
mz_plot.plot(bins_mz, img.sum(axis=0))
mz_plot.margins(x=0)
mz_plot.set_xticks([])
mz_plot.set_ylabel("Intensity")
rt_plot = plt.subplot(gs[1:, -1])
rt_plot.plot(img.sum(axis=1), bins_rt)
rt_plot.margins(y=0)
rt_plot.set_yticks([])
rt_plot.set_xlabel("Intensity")
img_plot = plt.subplot(gs[1:, :-1])
offset_rt = (np.array(mesh.bins_rt).max() -
np.array(mesh.bins_rt).min())/mesh.m / 2
offset_mz = (np.array(mesh.bins_mz).max() -
np.array(mesh.bins_mz).min())/mesh.n / 2
if transform == 'sqrt':
img_plot.pcolormesh(
np.array(mesh.bins_mz) - offset_mz,
np.array(mesh.bins_rt) - offset_rt,
img,
snap=True,
norm=colors.PowerNorm(gamma=1./2.))
elif transform == 'cubic':
img_plot.pcolormesh(
np.array(mesh.bins_mz) - offset_mz,
np.array(mesh.bins_rt) - offset_rt,
img,
norm=colors.PowerNorm(gamma=1./3.))
elif transform == 'log':
img_plot.pcolormesh(
np.array(mesh.bins_mz) - offset_mz,
np.array(mesh.bins_rt) - offset_rt,
img,
norm=colors.LogNorm(vmin=img.min()+1e-8, vmax=img.max()))
else:
img_plot.pcolormesh(mesh.bins_mz, mesh.bins_rt, img)
img_plot.set_xlim([np.array(mesh.bins_mz).min() - offset_mz,
np.array(mesh.bins_mz).max() - offset_mz])
img_plot.set_ylim([np.array(mesh.bins_rt).min() - offset_rt,
np.array(mesh.bins_rt).max() - offset_rt])
img_plot.set_xlabel("m/z")
img_plot.set_ylabel("retention time (s)")
return({
"img_plot": img_plot,
"mz_plot": mz_plot,
"rt_plot": rt_plot,
})
def plot_xic(peak, raw_data, figure=None, method="max"):
xic = peak.xic(raw_data, method=method)
x = xic.retention_time
y = xic.intensity
plt.style.use('dark_background')
if not figure:
figure = plt.figure()
plt.plot(x, y, label='peak_id = {}'.format(peak.id))
plt.xlabel('Retention time (s)')
plt.ylabel('Intensity')
plt.legend()
return figure
def plot_peak_raw_points(
peak,
raw_data,
img_plot=None,
rt_plot=None,
mz_plot=None,
xic_method="max"):
data_points = raw_data.raw_points(
peak.roi_min_mz,
peak.roi_max_mz,
peak.roi_min_rt,
peak.roi_max_rt,
)
rts = data_points.rt
mzs = data_points.mz
intensities = data_points.intensity
# Calculate min/max values for the given peak.
min_mz = peak.roi_min_mz
max_mz = peak.roi_max_mz
min_rt = peak.roi_min_rt
max_rt = peak.roi_max_rt
if not img_plot and not rt_plot and not mz_plot:
plt.style.use('dark_background')
plt.figure()
plt.clf()
gs = gridspec.GridSpec(5, 5)
mz_plot = plt.subplot(gs[0, :-1])
mz_plot.margins(x=0)
mz_plot.set_xticks([])
mz_plot.set_ylabel("Intensity")
rt_plot = plt.subplot(gs[1:, -1])
rt_plot.margins(y=0)
rt_plot.set_yticks([])
rt_plot.set_xlabel("Intensity")
img_plot = plt.subplot(gs[1:, :-1])
# Set the min/max limits for mz/rt.
mz_plot.set_xlim([min_mz, max_mz])
rt_plot.set_ylim([min_rt, max_rt])
img_plot.set_xlim([min_mz, max_mz])
img_plot.set_ylim([min_rt, max_rt])
# NOTE: Adding 200 for a more pleasant color map on the first peaks; this
# number was found by trial and error.
np.random.seed(peak.id + 200)
color = np.append(np.random.rand(3, 1).flatten(), 0.5)
np.random.seed(None)
if img_plot:
img_plot.scatter(
mzs, rts,
c=np.sqrt(intensities),
edgecolor=color,
)
if rt_plot:
xic = peak.xic(raw_data, method=xic_method)
x = xic.retention_time
y = xic.intensity
rt_plot.plot(y, x, color=color)
if mz_plot:
sort_idx_mz = np.argsort(mzs)
markerline, stemlines, baseline = mz_plot.stem(
np.array(mzs)[sort_idx_mz],
np.array(intensities)[sort_idx_mz],
markerfmt=' ',
)
plt.setp(baseline, color=color, alpha=0.5)
plt.setp(stemlines, color=color, alpha=0.5)
# Set x/y limits if necessary.
lim_min_mz, lim_max_mz = img_plot.get_xlim()
lim_min_rt, lim_max_rt = img_plot.get_ylim()
if min_mz < lim_min_mz:
lim_min_mz = min_mz
if min_rt < lim_min_rt:
lim_min_rt = min_rt
if max_mz > lim_max_mz:
lim_max_mz = max_mz
if max_rt > lim_max_rt:
lim_max_rt = max_rt
mz_plot.set_xlim([lim_min_mz, lim_max_mz])
rt_plot.set_ylim([lim_min_rt, lim_max_rt])
img_plot.set_xlim([lim_min_mz, lim_max_mz])
img_plot.set_ylim([lim_min_rt, lim_max_rt])
return({
"img_plot": img_plot,
"mz_plot": mz_plot,
"rt_plot": rt_plot,
})
# TODO: Probably we don't want avg_fwhm_rt to be a parameter passed to this
# function. Just set it to a reasonable level for the default parameters and
# modify it later as needed.
def default_parameters(instrument, avg_fwhm_rt):
if instrument == 'orbitrap':
pastaq_parameters = {
#
# Instrument configuration.
#
'instrument_type': 'orbitrap',
'resolution_ms1': 70000,
'resolution_msn': 30000,
'reference_mz': 200,
'avg_fwhm_rt': avg_fwhm_rt,
#
# Meshing.
#
'num_samples_mz': 5,
'num_samples_rt': 5,
'smoothing_coefficient_mz': 0.4,
'smoothing_coefficient_rt': 0.4,
#
# Warp2D.
#
'warp2d_slack': 30,
'warp2d_window_size': 100,
'warp2d_num_points': 2000,
'warp2d_rt_expand_factor': 0.2,
'warp2d_peaks_per_window': 100,
#
# MetaMatch.
#
'metamatch_fraction': 0.7,
'metamatch_n_sig_mz': 1.5,
'metamatch_n_sig_rt': 1.5,
#
# Feature detection.
#
'feature_detection_charge_states': [5, 4, 3, 2, 1],
#
# Other.
#
'max_peaks': 1000000,
'polarity': 'both',
'min_mz': 0,
'max_mz': 100000,
'min_rt': 0,
'max_rt': 100000,
#
# Annotation linking.
#
'link_n_sig_mz': 3,
'link_n_sig_rt': 3,
#
# Identification.
#
# Keep only the max rank PSM.
'ident_max_rank_only': True,
# Whether to filter out PSMs that don't pass the FDR threshold.
'ident_require_threshold': True,
# Ignore PSMs that have been marked as decoy.
'ident_ignore_decoy': True,
#
# Quality.
#
'similarity_num_peaks': 2000,
# Options: Any 'seaborn' supported palette style, like:
# 'husl', 'crest', 'Spectral', 'flare', 'mako', etc.
'qc_plot_palette': 'husl',
# Options: 'png', 'pdf', 'eps'...
'qc_plot_extension': 'png',
# Options: 'dynamic', [0.0-1.0]
'qc_plot_fill_alpha': 'dynamic',
'qc_plot_line_alpha': 0.5,
'qc_plot_scatter_alpha': 0.3,
'qc_plot_scatter_size': 2,
'qc_plot_min_dynamic_alpha': 0.1,
'qc_plot_per_file': False,
# Options: 'fill', 'line'
'qc_plot_line_style': 'fill',
# Plot style config.
'qc_plot_dpi': 300,
'qc_plot_font_family': 'sans-serif',
'qc_plot_font_size': 7,
'qc_plot_fig_size_x': 7.08661,
'qc_plot_fig_size_y': 7.08661/1.618034,
'qc_plot_fig_legend': False,
'qc_plot_mz_vs_sigma_mz_max_peaks': 200000,
#
# Quantitative table generation.
#
# Options: 'height', 'volume'
'quant_isotopes': 'height',
# Options: 'monoisotopic_height', 'monoisotopic_volume',
# 'total_height', 'total_volume',
# 'max_height', 'max_volume',
'quant_features': 'max_height',
# Whether to remove feature annotations where the charge state of
# the detected feature doesn't match the one given by the
# identification engine.
'quant_features_charge_state_filter': True,
# Options: 'theoretical_mz', 'msms_event'
'quant_ident_linkage': 'msms_event',
# Whether to obtain a consensus sequence and protein for identifications.
'quant_consensus': True,
# Demand a minimum number of files with identification per cluster.
'quant_consensus_min_ident': 2,
# Whether to store all the annotations prior to cluster aggregation.
'quant_save_all_annotations': True,
# Minimum number of peptides necessary to consider a protein for
# quantification.
'quant_proteins_min_peptides': 1,
# Whether to remove proteins whose peptides are entirely contained
# within another protein with a larger number of evidence peptides.
'quant_proteins_remove_subset_proteins': True,
# If a peptide can't be assigned to a single protein as 'razor', we can
# either keep using it in every protein group where it would otherwise
# count as razor, or ignore it altogether.
'quant_proteins_ignore_ambiguous_peptides': True,
# Protein inference method:
# - 'unique': Only unique peptides will be used for
# quantification.
# - 'razor': Unique peptides plus shared peptides assigned to their
# most likely protein through the Occam's razor constraint.
# - 'all': All peptides will be used for quantification for all
# protein groups. Thus shared peptides will be used more
# than once.
'quant_proteins_quant_type': 'razor',
}
return pastaq_parameters
def _custom_log(msg, logger):
if logger:
logger.info(msg)
print(msg)
def parse_raw_files(params, output_dir, logger=None, force_override=False):
_custom_log('Starting raw data conversion', logger)
time_start = time.time()
for file in params['input_files']:
raw_path = file['raw_path']
stem = file['stem']
# Check if file has already been processed.
out_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
if os.path.exists(out_path) and not force_override:
continue
# File extension.
file_extension = os.path.splitext(raw_path)[1]
# Read raw files (MS1).
_custom_log('Reading MS1: {}'.format(raw_path), logger)
if file_extension.lower() == '.mzxml':
raw_data = pastaq.read_mzxml(
raw_path,
min_mz=params['min_mz'],
max_mz=params['max_mz'],
min_rt=params['min_rt'],
max_rt=params['max_rt'],
instrument_type=params['instrument_type'],
resolution_ms1=params['resolution_ms1'],
resolution_msn=params['resolution_msn'],
reference_mz=params['reference_mz'],
fwhm_rt=params['avg_fwhm_rt'],
polarity=params['polarity'],
ms_level=1,
)
elif file_extension.lower() == '.mzml':
raw_data = pastaq.read_mzml(
raw_path,
min_mz=params['min_mz'],
max_mz=params['max_mz'],
min_rt=params['min_rt'],
max_rt=params['max_rt'],
instrument_type=params['instrument_type'],
resolution_ms1=params['resolution_ms1'],
resolution_msn=params['resolution_msn'],
reference_mz=params['reference_mz'],
fwhm_rt=params['avg_fwhm_rt'],
polarity=params['polarity'],
ms_level=1,
)
# Write raw_data to disk (MS1).
_custom_log('Writing MS1: {}'.format(out_path), logger)
raw_data.dump(out_path)
for file in params['input_files']:
raw_path = file['raw_path']
stem = file['stem']
# Check if file has already been processed.
out_path = os.path.join(output_dir, 'raw', "{}.ms2".format(stem))
if os.path.exists(out_path) and not force_override:
continue
# File extension.
file_extension = os.path.splitext(raw_path)[1]
# Read raw files (MS2).
_custom_log('Reading MS2: {}'.format(raw_path), logger)
if file_extension.lower() == '.mzxml':
raw_data = pastaq.read_mzxml(
raw_path,
min_mz=params['min_mz'],
max_mz=params['max_mz'],
min_rt=params['min_rt'],
max_rt=params['max_rt'],
instrument_type=params['instrument_type'],
resolution_ms1=params['resolution_ms1'],
resolution_msn=params['resolution_msn'],
reference_mz=params['reference_mz'],
fwhm_rt=params['avg_fwhm_rt'],
polarity=params['polarity'],
ms_level=2,
)
elif file_extension.lower() == '.mzml':
raw_data = pastaq.read_mzml(
raw_path,
min_mz=params['min_mz'],
max_mz=params['max_mz'],
min_rt=params['min_rt'],
max_rt=params['max_rt'],
instrument_type=params['instrument_type'],
resolution_ms1=params['resolution_ms1'],
resolution_msn=params['resolution_msn'],
reference_mz=params['reference_mz'],
fwhm_rt=params['avg_fwhm_rt'],
polarity=params['polarity'],
ms_level=2,
)
# Write raw_data to disk (MS2).
_custom_log('Writing MS2: {}'.format(out_path), logger)
raw_data.dump(out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished raw data parsing in {}'.format(elapsed_time), logger)
def detect_peaks(params, output_dir, save_grid=False, logger=None, force_override=False):
# Perform resampling/smoothing and peak detection and save results to disk.
_custom_log('Starting peak detection', logger)
time_start = time.time()
for file in params['input_files']:
stem = file['stem']
# Check if file has already been processed.
in_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
out_path = os.path.join(output_dir, 'peaks', "{}.peaks".format(stem))
if os.path.exists(out_path) and not force_override:
continue
_custom_log("Reading raw_data from disk: {}".format(stem), logger)
raw_data = pastaq.read_raw_data(in_path)
_custom_log("Resampling: {}".format(stem), logger)
grid = pastaq.resample(
raw_data,
params['num_samples_mz'],
params['num_samples_rt'],
params['smoothing_coefficient_mz'],
params['smoothing_coefficient_rt'],
)
if save_grid:
mesh_path = os.path.join(output_dir, 'grid', "{}.grid".format(stem))
_custom_log('Writing grid: {}'.format(mesh_path), logger)
grid.dump(mesh_path)
_custom_log("Finding peaks: {}".format(stem), logger)
peaks = pastaq.find_peaks(raw_data, grid, params['max_peaks'])
_custom_log('Writing peaks: {}'.format(out_path), logger)
pastaq.write_peaks(peaks, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished peak detection in {}'.format(elapsed_time), logger)
def calculate_similarity_matrix(params, output_dir, peak_dir, logger=None, force_override=False):
out_path = os.path.join(output_dir, 'quality', 'similarity_{}.csv'.format(peak_dir))
if os.path.exists(out_path) and not force_override:
return
_custom_log("Starting similarity matrix calculation for {}".format(peak_dir), logger)
time_start = time.time()
if not os.path.exists("{}.csv".format(out_path)) or force_override:
input_files = params['input_files']
n_files = len(input_files)
similarity_matrix = np.zeros(n_files ** 2).reshape(n_files, n_files)
for i in range(0, n_files):
stem_a = input_files[i]['stem']
peaks_a = pastaq.read_peaks(os.path.join(
output_dir, peak_dir, '{}.peaks'.format(stem_a)))
for j in range(i, n_files):
stem_b = input_files[j]['stem']
peaks_b = pastaq.read_peaks(os.path.join(output_dir, peak_dir, '{}.peaks'.format(stem_b)))
_custom_log("Calculating similarity of {} vs {}".format(stem_a, stem_b), logger)
similarity_matrix[j, i] = pastaq.find_similarity(
peaks_a, peaks_b,
params['similarity_num_peaks']).geometric_ratio
similarity_matrix[i, j] = similarity_matrix[j, i]
similarity_matrix = pd.DataFrame(similarity_matrix)
similarity_matrix_names = [input_file['stem'] for input_file in input_files]
similarity_matrix.columns = similarity_matrix_names
similarity_matrix.rename(index=dict(zip(range(0, len(similarity_matrix_names), 1), similarity_matrix_names)), inplace=True)
# Save similarity matrix to disk.
_custom_log("Saving similarity matrix for {}: {}".format(peak_dir, out_path), logger)
similarity_matrix.to_csv(out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished similarity matrix calculation from {} in {}'.format(peak_dir, elapsed_time), logger)
def perform_rt_alignment(params, output_dir, logger=None, force_override=False):
warped_sim_path = os.path.join(output_dir, 'quality', 'similarity_warped_peaks.csv')
if os.path.exists(warped_sim_path) and not force_override:
return
input_files = params['input_files']
# Find selected reference samples.
ref_candidates = []
for input_file in input_files:
if 'reference' in input_file and input_file['reference']:
ref_candidates += [input_file]
if len(ref_candidates) == 1:
# If only a reference sample is marked, it will be used.
ref = ref_candidates[0]
_custom_log("Using selected reference: {}".format(ref['stem']), logger)
else:
# If no samples are selected, exhaustive search will be performed.
if len(ref_candidates) == 0:
_custom_log("No reference selected, performing exhaustive search", logger)
ref_candidates = input_files
# Find optimal reference sample from the list of candidates.
_custom_log("Starting optimal reference search", logger)
time_start = time.time()
n_ref = len(ref_candidates)
n_files = len(input_files)
similarity_matrix = np.zeros(n_ref * n_files).reshape(n_ref, n_files)
for i in range(0, n_ref):
stem_a = ref_candidates[i]['stem']
peaks_a = pastaq.read_peaks(os.path.join(
output_dir, 'peaks', '{}.peaks'.format(stem_a)))
for j in range(0, n_files):
if i == j:
similarity_matrix[i, j] = 1
continue
stem_b = input_files[j]['stem']
peaks_b = pastaq.read_peaks(os.path.join(output_dir, 'peaks', '{}.peaks'.format(stem_b)))
_custom_log("Warping {} peaks to {}".format(stem_b, stem_a), logger)
time_map = pastaq.calculate_time_map(
peaks_a, peaks_b,
params['warp2d_slack'],
params['warp2d_window_size'],
params['warp2d_num_points'],
params['warp2d_rt_expand_factor'],
params['warp2d_peaks_per_window'])
peaks_b = pastaq.warp_peaks(peaks_b, time_map)
_custom_log("Calculating similarity of {} vs {} (warped)".format(stem_a, stem_b), logger)
similarity_matrix[i, j] = pastaq.find_similarity(
peaks_a, peaks_b,
params['similarity_num_peaks']).geometric_ratio
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished optimal reference search in {}'.format(elapsed_time), logger)
# Find the reference with maximum similarity overall.
ref_index = similarity_matrix.sum(axis=1).argmax()
ref = ref_candidates[ref_index]
_custom_log("Selected reference: {}".format(ref['stem']), logger)
_custom_log("Starting peak warping to reference", logger)
time_start = time.time()
ref_stem = ref['stem']
ref_peaks = pastaq.read_peaks(os.path.join(output_dir, 'peaks', '{}.peaks'.format(ref_stem)))
for input_file in input_files:
stem = input_file['stem']
# Check if file has already been processed.
in_path = os.path.join(output_dir, 'peaks', "{}.peaks".format(stem))
out_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
out_path_tmap = os.path.join(output_dir, 'time_map', "{}.tmap".format(stem))
peaks = pastaq.read_peaks(in_path)
if not os.path.exists(out_path_tmap) or force_override:
_custom_log("Calculating time_map for {}".format(stem), logger)
time_map = pastaq.calculate_time_map(
ref_peaks, peaks,
params['warp2d_slack'],
params['warp2d_window_size'],
params['warp2d_num_points'],
params['warp2d_rt_expand_factor'],
params['warp2d_peaks_per_window'])
pastaq.write_time_map(time_map, out_path_tmap)
if os.path.exists(out_path) and not force_override:
continue
if stem != ref_stem:
_custom_log("Warping {} peaks to reference {}".format(stem, ref_stem), logger)
peaks = pastaq.warp_peaks(peaks, time_map)
pastaq.write_peaks(peaks, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished peak warping to reference in {}'.format(elapsed_time), logger)
def perform_feature_detection(params, output_dir, logger=None, force_override=False):
_custom_log('Starting feature detection', logger)
time_start = time.time()
for input_file in params['input_files']:
stem = input_file['stem']
# Check if file has already been processed.
in_path_peaks = os.path.join(output_dir, 'warped_peaks', '{}.peaks'.format(stem))
out_path = os.path.join(output_dir, 'features', '{}.features'.format(stem))
if os.path.exists(out_path) and not force_override:
continue
_custom_log("Reading peaks from disk: {}".format(stem), logger)
peaks = pastaq.read_peaks(in_path_peaks)
_custom_log("Performing feature_detection: {}".format(stem), logger)
features = pastaq.detect_features(
peaks, params['feature_detection_charge_states'])
_custom_log('Writing features: {}'.format(out_path), logger)
pastaq.write_features(features, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished feature detection in {}'.format(elapsed_time), logger)
def parse_mzidentml_files(params, output_dir, logger=None, force_override=False):
_custom_log('Starting mzIdentML parsing', logger)
time_start = time.time()
for input_file in params['input_files']:
stem = input_file['stem']
in_path = input_file['ident_path']
out_path = os.path.join(output_dir, 'ident', "{}.ident".format(stem))
if in_path == 'none' or (os.path.exists(out_path) and not force_override):
continue
_custom_log('Reading mzIdentML: {}'.format(in_path), logger)
# TODO: We may want to add an option to pass a prefix for ignoring
# decoys when they are not properly annotated, for example in msfragger
# + idconvert
ident_data = pastaq.read_mzidentml(
in_path,
ignore_decoy=params['ident_ignore_decoy'],
require_threshold=params['ident_require_threshold'],
max_rank_only=params['ident_max_rank_only'],
min_mz=params['min_mz'],
max_mz=params['max_mz'],
min_rt=params['min_rt'],
max_rt=params['max_rt'],
)
_custom_log('Writing ident data: {}'.format(out_path), logger)
pastaq.write_ident_data(ident_data, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished mzIdentML parsing in {}'.format(elapsed_time), logger)
def link_peaks_msms_idents(params, output_dir, logger=None, force_override=False):
_custom_log('Starting ident/msms linkage', logger)
time_start = time.time()
for input_file in params['input_files']:
stem = input_file['stem']
# Check if file has already been processed.
in_path_raw = os.path.join(output_dir, 'raw', "{}.ms2".format(stem))
in_path_peaks = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
in_path_idents = os.path.join(output_dir, 'ident', "{}.ident".format(stem))
out_path_peak_ms2 = os.path.join(output_dir, 'linking', "{}.peak_ms2.link".format(stem))
out_path_ident_ms2 = os.path.join(output_dir, 'linking', "{}.ident_ms2.link".format(stem))
out_path_psm = os.path.join(output_dir, 'linking', "{}.ident_peak.link".format(stem))
raw_data = None
peaks = None
ident_data = None
if not os.path.exists(out_path_peak_ms2) or force_override:
_custom_log("Performing peaks-msms linkage: {}".format(stem), logger)
if raw_data is None:
raw_data = pastaq.read_raw_data(in_path_raw)
if peaks is None:
peaks = pastaq.read_peaks(in_path_peaks)
linked_msms = pastaq.link_peaks(
peaks,
raw_data,
params['link_n_sig_mz'],
params['link_n_sig_rt'],
)
_custom_log('Writing linked_msms: {}'.format(out_path_peak_ms2), logger)
pastaq.write_linked_msms(linked_msms, out_path_peak_ms2)
# Check that we had identification info.
if input_file['ident_path'] == 'none':
continue
if not os.path.exists(out_path_ident_ms2) or force_override:
_custom_log("Performing ident-msms linkage: {}".format(stem), logger)
if ident_data is None:
ident_data = pastaq.read_ident_data(in_path_idents)
if raw_data is None:
raw_data = pastaq.read_raw_data(in_path_raw)
linked_idents = pastaq.link_idents(
ident_data,
raw_data,
params['link_n_sig_mz'],
params['link_n_sig_rt'],
)
_custom_log('Writing linked_msms: {}'.format(out_path_ident_ms2), logger)
pastaq.write_linked_msms(linked_idents, out_path_ident_ms2)
if not os.path.exists(out_path_psm) or force_override:
_custom_log("Performing ident-peaks linkage: {}".format(stem), logger)
if ident_data is None:
ident_data = pastaq.read_ident_data(in_path_idents)
if raw_data is None:
raw_data = pastaq.read_raw_data(in_path_raw)
if peaks is None:
peaks = pastaq.read_peaks(in_path_peaks)
linked_psm = pastaq.link_psm(
ident_data,
peaks,
raw_data,
params['link_n_sig_mz'],
params['link_n_sig_rt'],
)
_custom_log('Writing linked_psm: {}'.format(out_path_psm), logger)
pastaq.write_linked_psm(linked_psm, out_path_psm)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished ident/msms linkage in {}'.format(elapsed_time), logger)
def match_peaks_and_features(params, output_dir, logger=None, force_override=False):
input_files = params['input_files']
# Transform groups from string into an integer list.
groups = []
group_map = {}
group_counter = 0
for input_file in input_files:
group = input_file['group']
if group in group_map:
groups += [group_map[group]]
continue
group_map[group] = group_counter
groups += [group_counter]
group_counter += 1
# Peak matching.
_custom_log('Starting peak matching', logger)
time_start = time.time()
in_path_peaks = os.path.join(output_dir, 'warped_peaks')
out_path = os.path.join(output_dir, 'metamatch', "peaks.clusters")
if (not os.path.exists(out_path) or force_override):
_custom_log("Reading peaks from disk", logger)
peaks = [
pastaq.read_peaks(os.path.join(in_path_peaks, "{}.peaks".format(input_file['stem'])))
for input_file in input_files
]
_custom_log("Finding peak clusters", logger)
peak_clusters = pastaq.find_peak_clusters(
groups,
peaks,
params["metamatch_fraction"],
params["metamatch_n_sig_mz"],
params["metamatch_n_sig_rt"])
_custom_log("Writing peak clusters to disk", logger)
pastaq.write_peak_clusters(peak_clusters, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished peak matching in {}'.format(elapsed_time), logger)
# Feature matching.
_custom_log('Starting feature matching', logger)
time_start = time.time()
in_path_features = os.path.join(output_dir, 'features')
out_path = os.path.join(output_dir, 'metamatch', "features.clusters")
if (not os.path.exists(out_path) or force_override):
_custom_log("Reading features from disk", logger)
features = [
pastaq.read_features(os.path.join(in_path_features, "{}.features".format(input_file['stem'])))
for input_file in input_files
]
_custom_log("Finding feature clusters", logger)
feature_clusters = pastaq.find_feature_clusters(
groups,
features,
params["metamatch_fraction"],
params["metamatch_n_sig_mz"],
params["metamatch_n_sig_rt"])
_custom_log("Writing feature clusters to disk", logger)
pastaq.write_feature_clusters(feature_clusters, out_path)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished feature matching in {}'.format(elapsed_time), logger)
# NOTE: This is a giant ball of spaghetti and could use some love.
def create_quantitative_tables(params, output_dir, logger=None, force_override=False):
input_files = params['input_files']
_custom_log('Starting creation of quantitative tables', logger)
time_start = time.time()
for input_file in input_files:
stem = input_file['stem']
# Peak quantification.
# ====================
in_path_peaks = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
in_path_peaks_link = os.path.join(output_dir, 'linking', "{}.peak_ms2.link".format(stem))
in_path_ident_link_msms = os.path.join(output_dir, 'linking', "{}.ident_ms2.link".format(stem))
in_path_ident_link_theomz = os.path.join(output_dir, 'linking', "{}.ident_peak.link".format(stem))
in_path_ident_data = os.path.join(output_dir, 'ident', "{}.ident".format(stem))
out_path_peaks = os.path.join(output_dir, 'quant',"{}_peaks.csv".format(stem))
out_path_peak_annotations = os.path.join(output_dir, 'quant',"{}_peak_annotations.csv".format(stem))
# TODO: This is probably not necessary or needs to be changed if we are
# doing all per-peak quantification in a single loop.
if os.path.exists(out_path_peaks) and not force_override:
continue
_custom_log("Reading peaks from disk: {}".format(stem), logger)
peaks = pastaq.read_peaks(in_path_peaks)
_custom_log("Generating peaks quantitative table", logger)
peaks_df = pd.DataFrame({
'peak_id': [peak.id for peak in peaks],
'mz': [peak.fitted_mz for peak in peaks],
'rt': [peak.fitted_rt for peak in peaks],
'rt_delta': [peak.rt_delta for peak in peaks],
'height': [peak.fitted_height for peak in peaks],
'sigma_mz': [peak.fitted_sigma_mz for peak in peaks],
'sigma_rt': [peak.fitted_sigma_rt for peak in peaks],
'volume': [peak.fitted_volume for peak in peaks],
'smooth_height': [peak.local_max_height for peak in peaks],
'smooth_mz': [peak.local_max_mz for peak in peaks],
'smooth_rt': [peak.local_max_rt for peak in peaks],
'roi_min_mz': [peak.roi_min_mz for peak in peaks],
'roi_max_mz': [peak.roi_max_mz for peak in peaks],
'roi_min_rt': [peak.roi_min_rt for peak in peaks],
'roi_max_rt': [peak.roi_max_rt for peak in peaks],
'raw_mean_mz': [peak.raw_roi_mean_mz for peak in peaks],
'raw_mean_rt': [peak.raw_roi_mean_rt for peak in peaks],
'raw_std_mz': [peak.raw_roi_sigma_mz for peak in peaks],
'raw_std_rt': [peak.raw_roi_sigma_rt for peak in peaks],
'raw_skewness_mz': [peak.raw_roi_skewness_mz for peak in peaks],
'raw_skewness_rt': [peak.raw_roi_skewness_rt for peak in peaks],
'raw_kurtosis_mz': [peak.raw_roi_kurtosis_mz for peak in peaks],
'raw_kurtosis_rt': [peak.raw_roi_kurtosis_rt for peak in peaks],
'raw_total_intensity': [peak.raw_roi_total_intensity for peak in peaks],
'num_points': [peak.raw_roi_num_points for peak in peaks],
'num_scans': [peak.raw_roi_num_scans for peak in peaks],
})
# Peak Annotations.
# =================
_custom_log("Reading linked peaks from disk: {}".format(stem), logger)
peak_annotations = peaks_df[["peak_id"]]
linked_peaks = pastaq.read_linked_msms(in_path_peaks_link)
linked_peaks = pd.DataFrame({
'peak_id': [linked_peak.entity_id for linked_peak in linked_peaks],
'msms_id': [linked_peak.msms_id for linked_peak in linked_peaks],
})
peak_annotations = pd.merge(
peak_annotations, linked_peaks, on="peak_id", how="left")
if os.path.isfile(in_path_ident_data):
_custom_log("Reading ident_data from disk: {}".format(stem), logger)
ident_data = pastaq.read_ident_data(in_path_ident_data)
psms = pd.DataFrame({
'psm_index': [i for i in range(0, len(ident_data.spectrum_matches))],
'psm_id': [psm.id for psm in ident_data.spectrum_matches],
'psm_pass_threshold': [psm.pass_threshold for psm in ident_data.spectrum_matches],
'psm_charge_state': [psm.charge_state for psm in ident_data.spectrum_matches],
'psm_theoretical_mz': [psm.theoretical_mz for psm in ident_data.spectrum_matches],
'psm_experimental_mz': [psm.experimental_mz for psm in ident_data.spectrum_matches],
'psm_retention_time': [psm.retention_time for psm in ident_data.spectrum_matches],
'psm_rank': [psm.rank for psm in ident_data.spectrum_matches],
'psm_peptide_id': [psm.match_id for psm in ident_data.spectrum_matches],
})
if not psms.empty:
if params["quant_ident_linkage"] == 'theoretical_mz':
_custom_log(
"Reading linked ident_peak from disk: {}".format(stem), logger)
linked_idents = pastaq.read_linked_psm(
in_path_ident_link_theomz)
linked_idents = pd.DataFrame({
'peak_id': [linked_ident.peak_id for linked_ident in linked_idents],
'psm_index': [linked_ident.psm_index for linked_ident in linked_idents],
'psm_link_distance': [linked_ident.distance for linked_ident in linked_idents],
})
linked_idents = pd.merge(
linked_idents, psms, on="psm_index")
peak_annotations = pd.merge(
peak_annotations, linked_idents, on="peak_id", how="left")
elif params["quant_ident_linkage"] == 'msms_event':
_custom_log(
"Reading linked ident_peak from disk: {}".format(stem), logger)
linked_idents = pastaq.read_linked_msms(
in_path_ident_link_msms)
linked_idents = pd.DataFrame({
'msms_id': [linked_ident.msms_id for linked_ident in linked_idents],
'psm_index': [linked_ident.entity_id for linked_ident in linked_idents],
'psm_link_distance': [linked_ident.distance for linked_ident in linked_idents],
})
linked_idents = pd.merge(
linked_idents, psms, on="psm_index")
peak_annotations = pd.merge(
peak_annotations, linked_idents, on="msms_id", how="left")
else:
raise ValueError("unknown quant_ident_linkage parameter")
# Get the peptide information per psm.
def format_modification(mod):
ret = "monoisotopic_mass_delta: {}, ".format(
mod.monoisotopic_mass_delta)
ret += "average_mass_delta: {}, ".format(
mod.average_mass_delta)
ret += "residues: {}, ".format(mod.residues)
ret += "location: {}, ".format(mod.location)
ret += "id: {}".format("; ".join(mod.id))
return ret
peptides = pd.DataFrame({
'psm_peptide_id': [pep.id for pep in ident_data.peptides],
'psm_sequence': [pep.sequence for pep in ident_data.peptides],
'psm_modifications_num': [len(pep.modifications) for pep in ident_data.peptides],
'psm_modifications_info': [" / ".join(map(format_modification, pep.modifications)) for pep in ident_data.peptides],
})
peak_annotations = pd.merge(
peak_annotations, peptides, on="psm_peptide_id", how="left")
# Get the protein information per peptide.
db_sequences = pd.DataFrame({
'db_seq_id': [db_seq.id for db_seq in ident_data.db_sequences],
'protein_name': [db_seq.accession for db_seq in ident_data.db_sequences],
'protein_description': [db_seq.description for db_seq in ident_data.db_sequences],
})
peptide_evidence = pd.DataFrame({
'db_seq_id': [pe.db_sequence_id for pe in ident_data.peptide_evidence],
'psm_peptide_id': [pe.peptide_id for pe in ident_data.peptide_evidence],
'psm_decoy': [pe.decoy for pe in ident_data.peptide_evidence],
})
peptide_evidence = pd.merge(
peptide_evidence, db_sequences, on="db_seq_id").drop(["db_seq_id"], axis=1)
# Get the protein information per psm.
peak_annotations = pd.merge(
peak_annotations, peptide_evidence, on="psm_peptide_id")
_custom_log("Saving peaks quantitative table to disk: {}".format(stem), logger)
peaks_df.to_csv(out_path_peaks, index=False)
_custom_log("Saving peaks annotations table to disk: {}".format(stem), logger)
if "msms_id" in peak_annotations:
peak_annotations["msms_id"] = peak_annotations["msms_id"].astype(
'Int64')
if "psm_charge_state" in peak_annotations:
peak_annotations["psm_charge_state"] = peak_annotations["psm_charge_state"].astype(
'Int64')
if "psm_rank" in peak_annotations:
peak_annotations["psm_rank"] = peak_annotations["psm_rank"].astype(
'Int64')
if "psm_modifications_num" in peak_annotations:
peak_annotations["psm_modifications_num"] = peak_annotations["psm_modifications_num"].astype(
'Int64')
peak_annotations = peak_annotations.sort_values("peak_id")
peak_annotations.to_csv(out_path_peak_annotations, index=False)
in_path_features = os.path.join(
output_dir, 'features', "{}.features".format(stem))
if os.path.isfile(in_path_features):
out_path_features = os.path.join(output_dir, 'quant',
"{}_features.csv".format(stem))
out_path_feature_annotations = os.path.join(output_dir, 'quant',
"{}_feature_annotations.csv".format(stem))
_custom_log("Reading features from disk: {}".format(stem), logger)
features = pastaq.read_features(in_path_features)
_custom_log("Generating features quantitative table", logger)
features_df = pd.DataFrame({
'feature_id': [feature.id for feature in features],
'average_mz': [feature.average_mz for feature in features],
'average_mz_sigma': [feature.average_mz_sigma for feature in features],
'average_rt': [feature.average_rt for feature in features],
'average_rt_sigma': [feature.average_rt_sigma for feature in features],
'average_rt_delta': [feature.average_rt_delta for feature in features],
'total_height': [feature.total_height for feature in features],
'monoisotopic_mz': [feature.monoisotopic_mz for feature in features],
'monoisotopic_height': [feature.monoisotopic_height for feature in features],
'charge_state': [feature.charge_state for feature in features],
'peak_id': [feature.peak_ids for feature in features],
})
# Find the peak annotations that belong to each feature.
feature_annotations = features_df[[
"feature_id", "peak_id"]].explode("peak_id")
# TODO: It's possible that we want to regenerate the feature table
# without doing the same with the peak tables.
feature_annotations = pd.merge(
feature_annotations, peak_annotations,
on="peak_id", how="left")
features_df.to_csv(out_path_features, index=False)
feature_annotations.to_csv(
out_path_feature_annotations, index=False)
# Matched Peaks
# =============
_custom_log("Reading peak clusters from disk", logger)
in_path_peak_clusters = os.path.join(
output_dir, 'metamatch', 'peaks.clusters')
out_path_peak_clusters_metadata = os.path.join(output_dir, 'quant',
"peak_clusters_metadata.csv")
out_path_peak_clusters_peaks = os.path.join(output_dir, 'quant',
"peak_clusters_peaks.csv")
out_path_peak_clusters_annotations = os.path.join(output_dir, 'quant',
"peak_clusters_annotations.csv")
# TODO: This is clearly suboptimal and will take a long time if the
# number of clusters is very high. Needs a rewrite for optimal
# performance.
def aggregate_cluster_annotations(x):
ret = {}
if "psm_sequence" in x:
ret["psm_sequence"] = ".|.".join(
np.unique(x['psm_sequence'].dropna())).strip(".|.")
if "psm_charge_state" in x:
ret["psm_charge_state"] = ".|.".join(
map(str, np.unique(x['psm_charge_state'].dropna()))).strip(".|.")
if "psm_modifications_num" in x:
ret["psm_modifications_num"] = ".|.".join(
map(str, np.unique(x['psm_modifications_num'].dropna()))).strip(".|.")
if "protein_name" in x:
ret["protein_name"] = ".|.".join(
np.unique(x['protein_name'].dropna())).strip(".|.")
if "protein_description" in x:
ret["protein_description"] = ".|.".join(
np.unique(x['protein_description'].dropna())).strip(".|.")
if "consensus_sequence" in x:
ret["consensus_sequence"] = ".|.".join(
np.unique(x['consensus_sequence'].dropna())).strip(".|.")
if "consensus_count" in x:
ret["consensus_count"] = ".|.".join(
map(str, np.unique(x['consensus_count'].dropna()))).strip(".|.")
if "consensus_protein_name" in x:
ret["consensus_protein_name"] = ".|.".join(
np.unique(x['consensus_protein_name'].dropna())).strip(".|.")
if "consensus_protein_description" in x:
ret["consensus_protein_description"] = ".|.".join(
np.unique(x['consensus_protein_description'].dropna())).strip(".|.")
if "protein_group" in x:
ret["protein_group"] = ".|.".join(
map(str, np.unique(x['protein_group'].dropna()))).strip(".|.")
return pd.Series(ret)
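    # Illustrative note (editor's addition): with the ".|." separator used above, a
    # cluster whose peaks carry the unique sequences 'PEPTIDER' and 'PEPTIDEK' is
    # aggregated into the single string 'PEPTIDER.|.PEPTIDEK', while single-valued
    # columns pass through unchanged.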
if (not os.path.exists(out_path_peak_clusters_metadata) or force_override):
peak_clusters = pastaq.read_peak_clusters(in_path_peak_clusters)
_custom_log("Generating peak clusters quantitative table", logger)
peak_clusters_metadata_df = pd.DataFrame({
'cluster_id': [cluster.id for cluster in peak_clusters],
'mz': [cluster.mz for cluster in peak_clusters],
'rt': [cluster.rt for cluster in peak_clusters],
'avg_height': [cluster.avg_height for cluster in peak_clusters],
})
_custom_log("Generating peak clusters quantitative table", logger)
peak_clusters_df = pd.DataFrame({
'cluster_id': [cluster.id for cluster in peak_clusters],
})
if params['quant_isotopes'] == 'volume':
out_path_peak_clusters = os.path.join(output_dir, 'quant',
"peak_clusters_volume.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
peak_clusters_df[stem] = [cluster.file_volumes[i]
for cluster in peak_clusters]
elif params['quant_isotopes'] == 'height':
out_path_peak_clusters = os.path.join(output_dir, 'quant',
"peak_clusters_height.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
peak_clusters_df[stem] = [cluster.heights[i]
for cluster in peak_clusters]
else:
raise ValueError("unknown quant_isotopes parameter")
_custom_log("Writing peaks quantitative table to disk", logger)
peak_clusters_df.to_csv(out_path_peak_clusters, index=False)
# Peak associations.
_custom_log("Generating peak clusters peak associations table", logger)
cluster_peaks = [(cluster.id, cluster.peak_ids)
for cluster in peak_clusters]
cluster_peaks = pd.DataFrame(cluster_peaks, columns=[
"cluster_id", "peak_ids"]).explode("peak_ids")
cluster_peaks["file_id"] = cluster_peaks["peak_ids"].map(
lambda x: input_files[x.file_id]['stem'])
cluster_peaks["peak_id"] = cluster_peaks["peak_ids"].map(
lambda x: x.peak_id)
cluster_peaks = cluster_peaks.drop(["peak_ids"], axis=1)
_custom_log("Writing cluster to peak table to disk", logger)
cluster_peaks.to_csv(out_path_peak_clusters_peaks, index=False)
# Cluster annotations.
_custom_log("Generating peak clusters annotations table", logger)
annotations = pd.DataFrame()
for input_file in input_files:
stem = input_file['stem']
_custom_log("Reading peak annotations for: {}".format(stem), logger)
in_path_peak_annotations = os.path.join(output_dir, 'quant',
"{}_peak_annotations.csv".format(stem))
peak_annotations = pd.read_csv(
in_path_peak_annotations, low_memory=False)
cluster_annotations = cluster_peaks[cluster_peaks["file_id"] == stem][[
"cluster_id", "peak_id"]]
cluster_annotations["file_id"] = stem
_custom_log(
"Merging peak/clusters annotations for: {}".format(stem), logger)
cluster_annotations = pd.merge(
cluster_annotations, peak_annotations, on="peak_id", how="left")
annotations = pd.concat(
[annotations, cluster_annotations]).reset_index(drop=True)
# Ensure these columns have the proper type.
if "msms_id" in annotations:
annotations["msms_id"] = annotations["msms_id"].astype(
'Int64')
if "psm_charge_state" in annotations:
annotations["psm_charge_state"] = annotations["psm_charge_state"].astype(
'Int64')
if "psm_rank" in annotations:
annotations["psm_rank"] = annotations["psm_rank"].astype(
'Int64')
if "psm_modifications_num" in annotations:
annotations["psm_modifications_num"] = annotations["psm_modifications_num"].astype(
'Int64')
if params['quant_consensus'] and 'psm_sequence' in annotations:
# Find a sequence consensus
consensus_sequence = find_sequence_consensus(
annotations, 'psm_sequence', params['quant_consensus_min_ident'])
annotations = pd.merge(
annotations,
consensus_sequence[[
"cluster_id",
"consensus_sequence",
"consensus_count",
]], on="cluster_id", how="left")
            # Find consensus proteins
proteins = annotations[annotations['psm_sequence']
== annotations['consensus_sequence']]
proteins = proteins[['cluster_id', 'protein_name',
'protein_description']].drop_duplicates()
proteins.columns = [
'cluster_id', 'consensus_protein_name', 'consensus_protein_description']
annotations = pd.merge(annotations, proteins,
on="cluster_id", how="left")
# Saving annotations before aggregation.
if params['quant_save_all_annotations']:
_custom_log("Writing annotations to disk", logger)
annotations = annotations.sort_values(by=["cluster_id"])
annotations.to_csv(out_path_peak_clusters_annotations, index=False)
_custom_log("Aggregating annotations", logger)
# TODO: This is clearly suboptimal and will take a long time if the
# number of clusters is very high. Needs a rewrite for optimal
# performance.
annotations = annotations.groupby(
'cluster_id').apply(aggregate_cluster_annotations)
# Metadata
_custom_log("Merging metadata with annotations", logger)
peak_clusters_metadata_df = pd.merge(
peak_clusters_metadata_df, annotations, how="left", on="cluster_id")
_custom_log("Writing metadata to disk", logger)
peak_clusters_metadata_df.to_csv(
out_path_peak_clusters_metadata, index=False)
# Matched Features
# ================
_custom_log("Reading feature clusters from disk", logger)
in_path_feature_clusters = os.path.join(
output_dir, 'metamatch', 'features.clusters')
out_path_feature_clusters_metadata = os.path.join(output_dir, 'quant',
"feature_clusters_metadata.csv")
out_path_feature_clusters_features = os.path.join(output_dir, 'quant',
"feature_clusters_features.csv")
out_path_feature_clusters_annotations = os.path.join(output_dir, 'quant',
"feature_clusters_annotations.csv")
if (not os.path.exists(out_path_feature_clusters_metadata) or force_override):
feature_clusters = pastaq.read_feature_clusters(
in_path_feature_clusters)
_custom_log("Generating feature clusters quantitative table", logger)
metadata = pd.DataFrame({
'cluster_id': [cluster.id for cluster in feature_clusters],
'mz': [cluster.mz for cluster in feature_clusters],
'rt': [cluster.rt for cluster in feature_clusters],
'avg_height': [cluster.avg_total_height for cluster in feature_clusters],
'charge_state': [cluster.charge_state for cluster in feature_clusters],
})
data = pd.DataFrame({
'cluster_id': [cluster.id for cluster in feature_clusters],
})
if params['quant_features'] == 'monoisotopic_height':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_monoisotopic_height.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.monoisotopic_heights[i]
for cluster in feature_clusters]
elif params['quant_features'] == 'monoisotopic_volume':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_monoisotopic_volume.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.monoisotopic_volumes[i]
for cluster in feature_clusters]
elif params['quant_features'] == 'total_height':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_total_height.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.total_heights[i]
for cluster in feature_clusters]
elif params['quant_features'] == 'total_volume':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_total_volume.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.total_volumes[i]
for cluster in feature_clusters]
elif params['quant_features'] == 'max_height':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_max_height.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.max_heights[i]
for cluster in feature_clusters]
elif params['quant_features'] == 'max_volume':
out_path_feature_clusters = os.path.join(output_dir, 'quant',
"feature_clusters_max_volume.csv")
for i, input_file in enumerate(input_files):
stem = input_file['stem']
data[stem] = [cluster.max_volumes[i]
for cluster in feature_clusters]
else:
raise ValueError("unknown quant_features parameter")
_custom_log("Writing feature clusters quantitative table to disk", logger)
data.to_csv(out_path_feature_clusters, index=False)
# Feature associations.
_custom_log("Generating feature clusters feature associations table", logger)
cluster_features = [(cluster.id, cluster.feature_ids)
for cluster in feature_clusters]
cluster_features = pd.DataFrame(cluster_features, columns=[
"cluster_id", "feature_ids"]).explode("feature_ids")
cluster_features["file_id"] = cluster_features["feature_ids"].map(
lambda x: input_files[x.file_id]['stem'])
cluster_features["feature_id"] = cluster_features["feature_ids"].map(
lambda x: x.feature_id)
cluster_features = cluster_features.drop(["feature_ids"], axis=1)
_custom_log("Writing cluster to feature table to disk", logger)
cluster_features.to_csv(
out_path_feature_clusters_features, index=False)
# Cluster annotations.
_custom_log("Generating peak clusters annotations table", logger)
annotations = pd.DataFrame()
for input_file in input_files:
stem = input_file['stem']
_custom_log("Reading features for: {}".format(stem), logger)
in_path_peak_features = os.path.join(output_dir, 'features',
"{}.features".format(stem))
in_path_peak_annotations = os.path.join(output_dir, 'quant',
"{}_peak_annotations.csv".format(stem))
features = pastaq.read_features(in_path_peak_features)
features = [(feature.id, feature.peak_ids, feature.charge_state)
for feature in features]
features = pd.DataFrame(
features, columns=["feature_id", "peak_id", "charge_state"]).explode("peak_id")
peak_annotations = pd.read_csv(
in_path_peak_annotations, low_memory=False)
cluster_annotations = cluster_features[cluster_features["file_id"] == stem][[
"cluster_id", "feature_id"]]
cluster_annotations["file_id"] = stem
_custom_log(
"Merging peak/clusters annotations for: {}".format(stem), logger)
cluster_annotations = pd.merge(
cluster_annotations, features, on="feature_id", how="left")
cluster_annotations = pd.merge(
cluster_annotations, peak_annotations, on="peak_id", how="left")
annotations = pd.concat([annotations, cluster_annotations])
# Ensure these columns have the proper type.
if "msms_id" in annotations:
annotations["msms_id"] = annotations["msms_id"].astype('Int64')
if "charge_state" in annotations:
annotations["charge_state"] = annotations["charge_state"].astype(
'Int64')
if "psm_charge_state" in annotations:
annotations["psm_charge_state"] = annotations["psm_charge_state"].astype(
'Int64')
if "psm_rank" in annotations:
annotations["psm_rank"] = annotations["psm_rank"].astype('Int64')
if "psm_modifications_num" in annotations:
annotations["psm_modifications_num"] = annotations["psm_modifications_num"].astype(
'Int64')
if params['quant_consensus'] and 'psm_sequence' in annotations:
# Find a sequence consensus
consensus_sequence = find_sequence_consensus(
annotations, 'psm_sequence', params['quant_consensus_min_ident'])
annotations = pd.merge(
annotations,
consensus_sequence[[
"cluster_id",
"consensus_sequence",
"consensus_count",
]], on="cluster_id", how="left")
            # Find consensus proteins
proteins = annotations[annotations['psm_sequence']
== annotations['consensus_sequence']]
proteins = proteins[['cluster_id', 'protein_name',
'protein_description']].drop_duplicates()
proteins.columns = [
'cluster_id', 'consensus_protein_name', 'consensus_protein_description']
annotations = pd.merge(annotations, proteins,
on="cluster_id", how="left")
# Calculate protein groups.
_custom_log("Calculating protein groups", logger)
sequence_column = 'psm_sequence'
protein_name_column = 'protein_name'
protein_description_column = 'protein_description'
if params['quant_consensus'] and 'psm_sequence' in annotations:
sequence_column = 'consensus_sequence'
protein_name_column = 'consensus_protein_name'
protein_description_column = 'consensus_protein_description'
        # Compute protein groups only when the required sequence and protein name/description columns are present.
if (sequence_column in annotations and
protein_name_column in annotations and
protein_description_column in annotations):
prot_data, prot_metadata = find_protein_groups(
data,
annotations,
sequence_column,
protein_name_column,
protein_description_column,
params['quant_proteins_min_peptides'],
params['quant_proteins_remove_subset_proteins'],
params['quant_proteins_ignore_ambiguous_peptides'],
params['quant_proteins_quant_type'],
)
out_path_protein_data = os.path.join(output_dir, 'quant',
"protein_groups.csv")
out_path_protein_metadata = os.path.join(output_dir, 'quant',
"protein_groups_metadata.csv")
_custom_log("Writing protein group data/metadata to disk", logger)
prot_data.to_csv(out_path_protein_data, index=False)
prot_metadata.to_csv(out_path_protein_metadata, index=False)
# Saving annotations before aggregation.
if params['quant_save_all_annotations']:
_custom_log("Writing annotations to disk", logger)
annotations = annotations.sort_values(by=["cluster_id"])
annotations.to_csv(
out_path_feature_clusters_annotations, index=False)
_custom_log("Aggregating annotations", logger)
if ("psm_charge_state" in annotations and
params['quant_features_charge_state_filter']):
annotations = annotations[annotations["psm_charge_state"]
== annotations["charge_state"]]
# TODO: This is clearly suboptimal and will take a long time if the
# number of clusters is very high. Needs a rewrite for optimal
# performance.
annotations_agg = annotations.groupby(
'cluster_id').apply(aggregate_cluster_annotations)
# Metadata.
_custom_log("Merging metadata with annotations", logger)
metadata = pd.merge(
metadata, annotations_agg, how="left", on="cluster_id")
_custom_log("Writing metadata to disk", logger)
metadata.to_csv(out_path_feature_clusters_metadata, index=False)
# Aggregate peptides.
sequence_column = 'psm_sequence'
if params['quant_consensus'] and 'psm_sequence' in annotations:
sequence_column = 'consensus_sequence'
if sequence_column in annotations:
_custom_log("Aggregating peptide charge states", logger)
def aggregate_peptide_annotations(x):
ret = {}
if "psm_sequence" in x:
ret["psm_sequence"] = ".|.".join(
np.unique(x['psm_sequence'].dropna())).strip(".|.")
if "protein_name" in x:
ret["protein_name"] = ".|.".join(
np.unique(x['protein_name'].dropna())).strip(".|.")
if "protein_description" in x:
ret["protein_description"] = ".|.".join(
np.unique(x['protein_description'].dropna())).strip(".|.")
if "consensus_sequence" in x:
ret["consensus_sequence"] = ".|.".join(
np.unique(x['consensus_sequence'].dropna())).strip(".|.")
if "consensus_protein_name" in x:
ret["consensus_protein_name"] = ".|.".join(
np.unique(x['consensus_protein_name'].dropna())).strip(".|.")
if "consensus_protein_description" in x:
ret["consensus_protein_description"] = ".|.".join(
np.unique(x['consensus_protein_description'].dropna())).strip(".|.")
return pd.Series(ret)
peptide_data = data.copy()
peptide_data = peptide_data.drop(["cluster_id"], axis=1)
peptide_data[sequence_column] = metadata[sequence_column]
peptide_data = peptide_data[~peptide_data[sequence_column].isna()]
peptide_data = peptide_data[peptide_data[sequence_column] != '']
            peptide_data = peptide_data[~peptide_data[sequence_column].str.contains(r'\.\|\.')]
            peptide_data = peptide_data.groupby(sequence_column).agg('sum')
peptide_data = peptide_data.reset_index()
peptide_metadata = annotations.copy()
peptide_metadata = peptide_metadata[peptide_metadata[sequence_column].isin(peptide_data[sequence_column])]
peptide_metadata = peptide_metadata.groupby(sequence_column)
peptide_metadata = peptide_metadata.apply(aggregate_peptide_annotations)
out_path_peptide_data = os.path.join(output_dir, 'quant',
"peptides_data.csv")
out_path_peptide_metadata = os.path.join(output_dir, 'quant',
"peptides_metadata.csv")
_custom_log("Writing peptide data/metadata to disk", logger)
peptide_data.to_csv(out_path_peptide_data, index=False)
peptide_metadata.to_csv(out_path_peptide_metadata, index=False)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished creation of quantitative tables in {}'.format(elapsed_time), logger)
def dda_pipeline_summary(params, output_dir, logger):
_custom_log("Starting summary stats", logger)
time_start = time.time()
input_files = params['input_files']
summary_log = logging.getLogger('summary')
summary_log.setLevel(logging.INFO)
summary_log_fh = logging.FileHandler(os.path.join(output_dir, 'summary.log'))
summary_log_fh.setLevel(logging.INFO)
summary_log_formatter = logging.Formatter('%(message)s')
summary_log_fh.setFormatter(summary_log_formatter)
summary_log.addHandler(summary_log_fh)
# Raw data
summary_log.info('Raw data')
for input_file in input_files:
stem = input_file['stem']
summary_log.info(' {}'.format(stem))
# MS1
in_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
if os.path.exists(in_path):
raw_data = pastaq.read_raw_data(in_path)
summary_log.info(' MS1')
summary_log.info(' number of scans: {}'.format(len(raw_data.scans)))
summary_log.info(' min_mz: {}'.format(raw_data.min_mz))
summary_log.info(' max_mz: {}'.format(raw_data.max_mz))
summary_log.info(' min_rt: {}'.format(raw_data.min_rt))
summary_log.info(' max_rt: {}'.format(raw_data.max_rt))
# MS2
in_path = os.path.join(output_dir, 'raw', "{}.ms2".format(stem))
if os.path.exists(in_path):
raw_data = pastaq.read_raw_data(in_path)
summary_log.info(' MS2')
summary_log.info(' number of scans: {}'.format(len(raw_data.scans)))
summary_log.info(' min_mz: {}'.format(raw_data.min_mz))
summary_log.info(' max_mz: {}'.format(raw_data.max_mz))
summary_log.info(' min_rt: {}'.format(raw_data.min_rt))
summary_log.info(' max_rt: {}'.format(raw_data.max_rt))
# Peaks
summary_log.info('Peaks detected')
avg_peak_heights = []
median_peak_heights = []
std_peak_heights = []
n_peaks = []
for input_file in input_files:
stem = input_file['stem']
summary_log.info(' {}'.format(stem))
in_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
if os.path.exists(in_path):
peaks = pastaq.read_peaks(in_path)
peak_heights = np.array([peak.fitted_height for peak in peaks])
n_peaks += [len(peaks)]
mean_height = peak_heights.mean()
median_height = np.median(peak_heights)
std_height = np.std(peak_heights)
avg_peak_heights += [mean_height]
median_peak_heights += [median_height]
std_peak_heights += [std_height]
summary_log.info(' Number of peaks: {}'.format(len(peaks)))
summary_log.info(' Fitted height')
summary_log.info(' mean: {}'.format(mean_height))
summary_log.info(' median: {}'.format(median_height))
summary_log.info(' std: {}'.format(std_height))
if len(n_peaks) != 0:
summary_log.info(' Overall average')
summary_log.info(' Number of peaks: {}'.format(np.mean(n_peaks)))
summary_log.info(' Fitted height')
summary_log.info(' mean: {}'.format(np.mean(avg_peak_heights)))
summary_log.info(' median: {}'.format(np.mean(median_peak_heights)))
summary_log.info(' std: {}'.format(np.mean(std_peak_heights)))
# Feature detection
summary_log.info('Feature detection')
avg_feature_monoisotopic_heights = []
median_feature_monoisotopic_heights = []
std_feature_monoisotopic_heights = []
avg_feature_max_heights = []
median_feature_max_heights = []
std_feature_max_heights = []
avg_feature_total_heights = []
median_feature_total_heights = []
std_feature_total_heights = []
n_features = []
for input_file in input_files:
stem = input_file['stem']
summary_log.info(' {}'.format(stem))
in_path = os.path.join(output_dir, 'features', "{}.features".format(stem))
if os.path.exists(in_path):
features = pastaq.read_features(in_path)
feature_max_heights = np.array([feature.max_height for feature in features])
feature_monoisotopic_heights = np.array([feature.monoisotopic_height for feature in features])
feature_total_heights = np.array([feature.total_height for feature in features])
feature_mean_max_height = feature_max_heights.mean()
feature_median_max_height = np.median(feature_max_heights)
feature_std_max_height = np.std(feature_max_heights)
feature_mean_monoisotopic_height = feature_monoisotopic_heights.mean()
feature_median_monoisotopic_height = np.median(feature_monoisotopic_heights)
feature_std_monoisotopic_height = np.std(feature_monoisotopic_heights)
feature_mean_total_height = feature_total_heights.mean()
feature_median_total_height = np.median(feature_total_heights)
feature_std_total_height = np.std(feature_total_heights)
n_features += [len(features)]
avg_feature_max_heights += [feature_mean_max_height]
median_feature_max_heights += [feature_median_max_height]
std_feature_max_heights += [feature_std_max_height]
avg_feature_monoisotopic_heights += [feature_mean_monoisotopic_height]
median_feature_monoisotopic_heights += [feature_median_monoisotopic_height]
std_feature_monoisotopic_heights += [feature_std_monoisotopic_height]
avg_feature_total_heights += [feature_mean_total_height]
median_feature_total_heights += [feature_median_total_height]
std_feature_total_heights += [feature_std_total_height]
summary_log.info(' Number of features: {}'.format(len(features)))
summary_log.info(' Max height')
summary_log.info(' mean: {}'.format(feature_mean_max_height))
summary_log.info(' median: {}'.format(feature_median_max_height))
summary_log.info(' std: {}'.format(feature_std_max_height))
summary_log.info(' Monoisotopic height')
summary_log.info(' mean: {}'.format(feature_mean_monoisotopic_height))
summary_log.info(' median: {}'.format(feature_median_monoisotopic_height))
summary_log.info(' std: {}'.format(feature_std_monoisotopic_height))
summary_log.info(' Total height')
summary_log.info(' mean: {}'.format(feature_mean_total_height))
summary_log.info(' median: {}'.format(feature_median_total_height))
summary_log.info(' std: {}'.format(feature_std_total_height))
if len(n_features) != 0:
summary_log.info(' Overall average')
summary_log.info(' Number of features: {}'.format(np.mean(n_features)))
summary_log.info(' Max height')
summary_log.info(' mean: {}'.format(np.mean(avg_feature_max_heights)))
summary_log.info(' median: {}'.format(np.mean(median_feature_max_heights)))
summary_log.info(' std: {}'.format(np.mean(std_feature_max_heights)))
summary_log.info(' Monoisotopic height')
        summary_log.info('        mean: {}'.format(np.mean(avg_feature_monoisotopic_heights)))
        summary_log.info('        median: {}'.format(np.mean(median_feature_monoisotopic_heights)))
        summary_log.info('        std: {}'.format(np.mean(std_feature_monoisotopic_heights)))
        summary_log.info('    Total height')
        summary_log.info('        mean: {}'.format(np.mean(avg_feature_total_heights)))
        summary_log.info('        median: {}'.format(np.mean(median_feature_total_heights)))
        summary_log.info('        std: {}'.format(np.mean(std_feature_total_heights)))
# Identifications and linkage
summary_log.info('Annotations and linkage')
for input_file in input_files:
stem = input_file['stem']
summary_log.info(' {}'.format(stem))
in_path_raw_data = os.path.join(output_dir, 'raw', "{}.ms2".format(stem))
in_path_linked_msms = os.path.join(output_dir, 'linking', "{}.peak_ms2.link".format(stem))
if os.path.exists(in_path_raw_data) and os.path.exists(in_path_linked_msms):
raw_data = pastaq.read_raw_data(in_path_raw_data)
linked_msms = pastaq.read_linked_msms(in_path_linked_msms)
summary_log.info(' MS/MS-Peaks linkage')
summary_log.info(' Number of ms/ms events: {}'.format(len(raw_data.scans)))
summary_log.info(' Number of ms/ms events linked to peaks: {}'.format(len(linked_msms)))
if len(raw_data.scans) > 0:
summary_log.info(' Linking efficiency (%): {}'.format(len(linked_msms)/len(raw_data.scans) * 100.0))
in_path_ident_data = os.path.join(output_dir, 'ident', "{}.ident".format(stem))
if os.path.exists(in_path_ident_data):
ident_data = pastaq.read_ident_data(in_path_ident_data)
in_path_ident_ms2 = os.path.join(output_dir, 'linking', "{}.ident_ms2.link".format(stem))
if os.path.exists(in_path_ident_ms2):
ident_ms2 = pastaq.read_linked_msms(in_path_ident_ms2)
summary_log.info(' MS/MS-Identification linkage')
summary_log.info(' Number of PSMs: {}'.format(len(ident_data.spectrum_matches)))
summary_log.info(' Number of PSMs linked to MS/MS events: {}'.format(len(ident_ms2)))
if len(ident_data.spectrum_matches) > 0:
                    summary_log.info('        PSM-MS/MS linking efficiency (%): {}'.format(len(ident_ms2)/len(ident_data.spectrum_matches) * 100.0))
in_path_peak_idents = os.path.join(output_dir, 'linking', "{}.ident_peak.link".format(stem))
if os.path.exists(in_path_peak_idents):
ident_peak = pastaq.read_linked_psm(in_path_peak_idents)
summary_log.info(' Peaks-Identification linkage')
summary_log.info(' Number of PSMs: {}'.format(len(ident_data.spectrum_matches)))
summary_log.info(' Number of PSMs linked to peaks: {}'.format(len(ident_peak)))
if len(ident_data.spectrum_matches) > 0:
summary_log.info(' PSM-peaks linking efficiency (%): {}'.format(len(ident_peak)/len(ident_data.spectrum_matches) * 100.0))
# TODO: Average identification linkage stats.
# TODO: Metamatch stats
# TODO: Peptide stats
# TODO: Protein group stats
summary_log.removeHandler(summary_log_fh)
summary_log_fh.close()
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished summary stats in {}'.format(elapsed_time), logger)
def generate_qc_plots(params, output_dir, logger=None, force_override=False):
input_files = params['input_files']
#
# General plot config.
#
# Font and figure size
plt.rcParams.update({
'font.family': params['qc_plot_font_family'],
'font.size': params['qc_plot_font_size'],
'figure.figsize': (params['qc_plot_fig_size_x'], params['qc_plot_fig_size_y']),
})
# Alpha parameters.
fill_alpha = params['qc_plot_fill_alpha']
line_alpha = params['qc_plot_line_alpha']
scatter_alpha = params['qc_plot_scatter_alpha']
if line_alpha == 'dynamic':
line_alpha = max(params['qc_plot_min_dynamic_alpha'], 1.0 / len(input_files))
if fill_alpha == 'dynamic':
fill_alpha = max(params['qc_plot_min_dynamic_alpha'], 1.0 / len(input_files))
# Colorscheme.
palette = sns.color_palette(params['qc_plot_palette'], len(input_files))
_custom_log("Starting quality control plotting", logger)
time_start = time.time()
#
# Peak sigma density
#
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'peak_sigma_mz_rt_density.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, (ax_left, ax_right) = plt.subplots(1, 2)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting density of sigma_mz/sigma_rt: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
sigma_mzs = np.array([peak.fitted_sigma_mz for peak in peaks])
sigma_rts = np.array([peak.fitted_sigma_rt for peak in peaks])
if params['qc_plot_line_style'] == 'fill':
sns.kdeplot(sigma_rts, label=stem, ax=ax_left, fill=True, linewidth=0, alpha=fill_alpha, color=color)
sns.kdeplot(sigma_mzs, label=stem, ax=ax_right, fill=True, linewidth=0, alpha=fill_alpha, color=color)
else:
sns.kdeplot(sigma_rts, label=stem, ax=ax_left, alpha=line_alpha, color=color)
sns.kdeplot(sigma_mzs, label=stem, ax=ax_right, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax_left.set_xlabel('$\\sigma_{rt}$')
ax_right.set_xlabel('$\\sigma_{mz}$')
ax_left.set_ylabel('Density')
ax_right.set_ylabel('')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_peak_sigma_mz_rt_density.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, (ax_left, ax_right) = plt.subplots(1, 2)
_custom_log("Plotting density of sigma_mz/sigma_rt: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
sigma_mzs = np.array([peak.fitted_sigma_mz for peak in peaks])
sigma_rts = np.array([peak.fitted_sigma_rt for peak in peaks])
if params['qc_plot_line_style'] == 'fill':
sns.kdeplot(sigma_rts, label=stem, ax=ax_left, fill=True, linewidth=0, alpha=fill_alpha, color=color)
sns.kdeplot(sigma_mzs, label=stem, ax=ax_right, fill=True, linewidth=0, alpha=fill_alpha, color=color)
else:
sns.kdeplot(sigma_rts, label=stem, ax=ax_left, alpha=line_alpha, color=color)
sns.kdeplot(sigma_mzs, label=stem, ax=ax_right, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax_left.set_xlabel('$\\sigma_{rt}$')
ax_right.set_xlabel('$\\sigma_{mz}$')
ax_left.set_ylabel('Density')
ax_right.set_ylabel('')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
#
# Peak rt vs rt_delta
#
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'peak_rt_vs_rt_delta.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting rt vs rt_delta: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
rts = np.array([peak.fitted_rt for peak in peaks])
rt_deltas = np.array([peak.rt_delta for peak in peaks])
idx = np.argsort(rts)
rts = rts[idx]
rt_deltas = rt_deltas[idx]
ax.plot(rts, rt_deltas, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Retention time delta (s)')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_peak_rt_vs_rt_delta.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting rt vs rt_delta: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
rts = np.array([peak.fitted_rt for peak in peaks])
rt_deltas = np.array([peak.rt_delta for peak in peaks])
idx = np.argsort(rts)
rts = rts[idx]
rt_deltas = rt_deltas[idx]
ax.plot(rts, rt_deltas, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Retention time delta (s)')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
#
# Peak sigma_mz vs m/z scatterplot.
#
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'peak_mz_vs_sigma_mz.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting mz vs sigma_mz: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
mz = np.array([peak.fitted_mz for peak in peaks])[0:params['qc_plot_mz_vs_sigma_mz_max_peaks']]
sigma_mz = np.array([peak.fitted_sigma_mz for peak in peaks])[0:params['qc_plot_mz_vs_sigma_mz_max_peaks']]
ax.scatter(mz, sigma_mz, s=params['qc_plot_scatter_size'], label=stem, edgecolors='none', alpha=scatter_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('m/z')
ax.set_ylabel('$\\sigma_{mz}$')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_peak_mz_vs_sigma_mz.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting mz vs sigma_mz: {}".format(stem), logger)
peaks_path = os.path.join(output_dir, 'warped_peaks', "{}.peaks".format(stem))
peaks = pastaq.read_peaks(peaks_path)
mz = np.array([peak.fitted_mz for peak in peaks])[0:params['qc_plot_mz_vs_sigma_mz_max_peaks']]
sigma_mz = np.array([peak.fitted_sigma_mz for peak in peaks])[0:params['qc_plot_mz_vs_sigma_mz_max_peaks']]
ax.scatter(mz, sigma_mz, s=params['qc_plot_scatter_size'], label=stem, edgecolors='none', alpha=scatter_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('m/z')
ax.set_ylabel('$\\sigma_{mz}$')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
#
# Extracted Ion Chromatogram (XIC) before and after alignment.
#
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'xic_unaligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting XIC (unaligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"sum"
)
x = xic.retention_time
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_xic_unaligned.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting XIC (unaligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"sum"
)
x = xic.retention_time
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'xic_aligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting XIC (aligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
tmap_path = os.path.join(output_dir, 'time_map', "{}.tmap".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
tmap = pastaq.read_time_map(tmap_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"sum"
)
x = [tmap.warp(rt) for rt in xic.retention_time]
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_xic_aligned.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting XIC (aligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
tmap_path = os.path.join(output_dir, 'time_map', "{}.tmap".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
tmap = pastaq.read_time_map(tmap_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"sum"
)
x = [tmap.warp(rt) for rt in xic.retention_time]
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
#
# Base Peak chromatogram before and after alignment.
#
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'bpc_unaligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting Base Peak Chromatogram (unaligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"max"
)
x = xic.retention_time
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_bpc_unaligned.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting Base Peak Chromatogram (unaligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"max"
)
x = xic.retention_time
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
if not params['qc_plot_per_file']:
out_path = os.path.join(output_dir, 'quality', 'bpc_aligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
_custom_log("Plotting Base Peak Chromatogram (aligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
tmap_path = os.path.join(output_dir, 'time_map', "{}.tmap".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
tmap = pastaq.read_time_map(tmap_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"max"
)
x = [tmap.warp(rt) for rt in xic.retention_time]
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
else:
for input_file, color in zip(input_files, palette):
stem = input_file['stem']
out_path = os.path.join(output_dir, 'quality', '{}_bpc_aligned.{}'.format(stem, params['qc_plot_extension']))
if os.path.exists(out_path) and not force_override:
continue
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting Base Peak Chromatogram (aligned): {}".format(stem), logger)
raw_data_path = os.path.join(output_dir, 'raw', "{}.ms1".format(stem))
tmap_path = os.path.join(output_dir, 'time_map', "{}.tmap".format(stem))
raw_data = pastaq.read_raw_data(raw_data_path)
tmap = pastaq.read_time_map(tmap_path)
xic = pastaq.xic(
raw_data,
raw_data.min_mz,
raw_data.max_mz,
raw_data.min_rt,
raw_data.max_rt,
"max"
)
x = [tmap.warp(rt) for rt in xic.retention_time]
y = xic.intensity
if params['qc_plot_line_style'] == 'fill':
ax.fill_between(x, 0, y, lw=0, color=color, alpha=fill_alpha, label=stem)
else:
ax.plot(x, y, label=stem, alpha=line_alpha, color=color)
if params['qc_plot_fig_legend']:
plt.legend()
ax.set_xlabel('Retention time (s)')
ax.set_ylabel('Intensity')
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
#
# Similarity matrix before/after alignment.
#
out_path = os.path.join(output_dir, 'quality', 'similarity_unaligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting similarity matrix before alignment", logger)
matrix_path = os.path.join(output_dir, 'quality', 'similarity_{}.csv'.format('peaks'))
similarity_matrix = pd.read_csv(matrix_path, index_col=0)
sns.heatmap(similarity_matrix, xticklabels=True, yticklabels=True, square=True, vmin=0, vmax=1)
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
out_path = os.path.join(output_dir, 'quality', 'similarity_aligned.{}'.format(params['qc_plot_extension']))
if not os.path.exists(out_path) or force_override:
fig, ax = plt.subplots(1, 1)
_custom_log("Plotting similarity matrix after alignment", logger)
matrix_path = os.path.join(output_dir, 'quality', 'similarity_{}.csv'.format('warped_peaks'))
similarity_matrix = pd.read_csv(matrix_path, index_col=0)
sns.heatmap(similarity_matrix, xticklabels=True, yticklabels=True, square=True, vmin=0, vmax=1)
_custom_log("Saving figure: {}".format(out_path), logger)
plt.savefig(out_path, dpi=params['qc_plot_dpi'])
plt.close(fig)
elapsed_time = datetime.timedelta(seconds=time.time()-time_start)
_custom_log('Finished quality control plotting in {}'.format(elapsed_time), logger)
def dda_pipeline(
pastaq_parameters,
input_files,
output_dir="pastaq",
force_override=False,
save_grid=False,
):
# TODO: Logger should have different levels and user can configure the
# verbosity of output.
# TODO: Sanitize parameters.
# TODO: Sanitize input/outputs.
# TODO: - Check if there are name conflicts.
    # Create the output directory and subdirectories if necessary.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(os.path.join(output_dir, 'raw')):
os.makedirs(os.path.join(output_dir, 'raw'))
if not os.path.exists(os.path.join(output_dir, 'quality')):
os.makedirs(os.path.join(output_dir, 'quality'))
if save_grid:
if not os.path.exists(os.path.join(output_dir, 'grid')):
os.makedirs(os.path.join(output_dir, 'grid'))
if not os.path.exists(os.path.join(output_dir, 'peaks')):
os.makedirs(os.path.join(output_dir, 'peaks'))
if not os.path.exists(os.path.join(output_dir, 'time_map')):
os.makedirs(os.path.join(output_dir, 'time_map'))
if not os.path.exists(os.path.join(output_dir, 'warped_peaks')):
os.makedirs(os.path.join(output_dir, 'warped_peaks'))
if not os.path.exists(os.path.join(output_dir, 'metamatch')):
os.makedirs(os.path.join(output_dir, 'metamatch'))
if not os.path.exists(os.path.join(output_dir, 'linking')):
os.makedirs(os.path.join(output_dir, 'linking'))
if not os.path.exists(os.path.join(output_dir, 'ident')):
os.makedirs(os.path.join(output_dir, 'ident'))
if not os.path.exists(os.path.join(output_dir, 'features')):
os.makedirs(os.path.join(output_dir, 'features'))
if not os.path.exists(os.path.join(output_dir, 'quant')):
os.makedirs(os.path.join(output_dir, 'quant'))
# Initialize logger.
# TODO: Log to file and cout simultaneously if the user asks for it.
    class DeltaTimeFilter(logging.Filter):
        def __init__(self):
            super().__init__()
            self.prev_time = time.time()
        def filter(self, record):
            current_time = time.time()
            record.delta_time = datetime.timedelta(
                seconds=current_time - self.prev_time)
            self.prev_time = current_time
            return True
logger = logging.getLogger('pipeline')
logger.addFilter(DeltaTimeFilter())
logger.setLevel(logging.INFO)
logger_fh = logging.FileHandler(os.path.join(output_dir, 'info.log'))
logger_fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(delta_time)s | %(message)s')
logger_fh.setFormatter(formatter)
logger.addHandler(logger_fh)
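    # Illustrative note (editor's addition): with the formatter above, each record in
    # info.log looks roughly like
    #   2021-01-01 12:00:00,000 | 0:00:01.234567 | <message>
    # where the middle field is the time elapsed since the previous log record.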
# Prepare input files.
for file in input_files:
# TODO: Check if the input path for raw files exist.
# TODO: Check if the input path for identifications exist.
# If there is no identification, make sure it is set as none.
if 'ident_path' not in file:
file['ident_path'] = 'none'
# Obtain the stem for this file if not manually specified.
if 'stem' not in file:
base_name = os.path.basename(file['raw_path'])
base_name = os.path.splitext(base_name)
file['stem'] = base_name[0]
# Check that all files contain a group, if not, assign the default
# 'none' group.
if 'group' not in file:
file['group'] = 'none'
# Make sure the input files are in the parameters list before saving them to
# disk.
pastaq_parameters['input_files'] = input_files
# Save parameters file.
parameters_file_name = os.path.join(output_dir, 'parameters.json')
with open(parameters_file_name, 'w') as json_file:
json.dump(pastaq_parameters, json_file)
# Store current time for logging the total elapsed time for the entire run.
time_pipeline_start = time.time()
parse_raw_files(pastaq_parameters, output_dir, logger, force_override)
detect_peaks(pastaq_parameters, output_dir, save_grid, logger, force_override)
calculate_similarity_matrix(pastaq_parameters, output_dir, 'peaks', logger, force_override)
perform_rt_alignment(pastaq_parameters, output_dir, logger, force_override)
calculate_similarity_matrix(pastaq_parameters, output_dir, 'warped_peaks', logger, force_override)
perform_feature_detection(pastaq_parameters, output_dir, logger, force_override)
parse_mzidentml_files(pastaq_parameters, output_dir, logger, force_override)
link_peaks_msms_idents(pastaq_parameters, output_dir, logger, force_override)
match_peaks_and_features(pastaq_parameters, output_dir, logger, force_override)
create_quantitative_tables(pastaq_parameters, output_dir, logger, force_override)
generate_qc_plots(pastaq_parameters, output_dir, logger, force_override)
dda_pipeline_summary(pastaq_parameters, output_dir, logger)
logger.info('Total time elapsed: {}'.format(
datetime.timedelta(seconds=time.time()-time_pipeline_start)))
# Stop logger.
logger.removeHandler(logger_fh)
logger_fh.close()
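# --- Hypothetical usage sketch (editor's addition, not part of the original module).
# --- The call matches the dda_pipeline() signature above; the file paths and the
# --- `my_params` dictionary are placeholders, and the full set of required parameter
# --- keys (metamatch_*, quant_*, qc_plot_*, ...) is assumed to come from a
# --- project-specific defaults helper.
#
#   input_files = [
#       {'raw_path': '/data/run_01.mzXML', 'ident_path': '/data/run_01.mzid', 'group': 'a'},
#       {'raw_path': '/data/run_02.mzXML', 'group': 'b'},
#   ]
#   dda_pipeline(my_params, input_files, output_dir='pastaq_dda')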
``` |
{
"source": "JoelEager/AsyncTerminal",
"score": 3
} |
#### File: AsyncTerminal/AsyncTerminal/GenericSupport.py
```python
class GenericSupport():
OSEnvironment = "Generic"
@classmethod
def setup(cls):
"""
Configures the support logic
:returns: A reference to the support class for this environment
"""
return cls
@classmethod
def cleanup(cls):
"""
Cleans up on exit of AsyncTerminal
Automatically called by Python on exit thanks to the registration done on package initialization
"""
raise NotImplementedError()
@classmethod
async def getInputChar(cls):
"""
        Awaits the next character typed and returns it as a string
        Implementations should wait asynchronously rather than block the event loop
"""
raise NotImplementedError()
@classmethod
def print(cls, message):
"""
Writes the given message to the terminal
Does not append a newline
"""
raise NotImplementedError()
```
#### File: AsyncTerminal/AsyncTerminal/UnixSupport.py
```python
import sys
import termios
import tty
import select
import asyncio
from .GenericSupport import GenericSupport
class UnixSupport(GenericSupport):
"""
See docs in GenericSupport.py
"""
OSEnvironment = "Linux/Mac"
__oldSettings = None
@classmethod
def setup(cls):
stdinFile = sys.stdin.fileno()
cls.__oldSettings = termios.tcgetattr(stdinFile)
tty.setraw(stdinFile)
return cls
@classmethod
def cleanup(cls):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, cls.__oldSettings)
@classmethod
async def getInputChar(cls):
while True:
pendingInput = select.select([sys.stdin], [], [], 0.0)[0]
# Make sure key is pressed before reading it
if pendingInput:
return pendingInput[0].buffer.read(1).decode("utf8")
else:
await asyncio.sleep(0.1)
@classmethod
def print(cls, message):
sys.stdout.write(message)
if message.endswith("\n"):
# Return the cursor to the left side of the screen
sys.stdout.write(u"\u001b[1000D")
``` |
{
"source": "JoelEager/Functional-Python",
"score": 4
} |
#### File: Functional-Python/code/generators.py
```python
from random import randint
def random_gen(max_int=100, min_int=0, num_entries=None):
"""
A generator-based take on the RandomIter class from the custom iterators example
"""
while num_entries != 0:
if num_entries is not None:
num_entries -= 1
yield randint(min_int, max_int) # Execution resumes here when next() is called again
# The Python interpreter will automatically raise StopIteration when the function exits
def run_demo():
print("Generators are another way to create custom iterators")
print(list(random_gen(num_entries=10)), "\n")
print("Often times they're far more concise than the class-based approach", "\n")
print("Additionally, generators can be created using comprehension syntax")
rand_comp = (randint(0, 100) for _ in range(10))
print(list(rand_comp))
if __name__ == "__main__":
run_demo()
``` |
{
"source": "JoelEager/packet",
"score": 2
} |
#### File: packet/packet/context_processors.py
```python
import hashlib
import urllib
from functools import lru_cache
from datetime import datetime
from packet.ldap import ldap_get_member
from packet.models import Freshman
from packet import app
# pylint: disable=bare-except
@lru_cache(maxsize=128)
def get_csh_name(username):
try:
member = ldap_get_member(username)
return member.cn + ' (' + member.uid + ')'
except:
return username
def get_roles(sig):
"""
Converts a signature's role fields to a dict for ease of access.
:return: A dictionary of role short names to role long names
"""
out = {}
if sig.eboard:
out['eboard'] = sig.eboard
if sig.active_rtp:
out['rtp'] = 'RTP'
if sig.three_da:
out['three_da'] = '3DA'
if sig.webmaster:
out['webmaster'] = 'Webmaster'
if sig.c_m:
out['cm'] = 'Constitutional Maintainer'
if sig.drink_admin:
out['drink'] = 'Drink Admin'
return out
# pylint: disable=bare-except
@lru_cache(maxsize=128)
def get_rit_name(username):
try:
freshman = Freshman.query.filter_by(rit_username=username).first()
return freshman.name + ' (' + username + ')'
except:
return username
# pylint: disable=bare-except
@lru_cache(maxsize=128)
def get_rit_image(username):
if username:
addresses = [username + '@rit.edu', username + '@g.rit.edu']
for addr in addresses:
url = 'https://gravatar.com/avatar/' + hashlib.md5(addr.encode('utf8')).hexdigest() + '.jpg?d=404&s=250'
try:
gravatar = urllib.request.urlopen(url)
if gravatar.getcode() == 200:
return url
except:
continue
return 'https://www.gravatar.com/avatar/freshmen?d=mp&f=y'
def log_time(label):
"""
Used during debugging to log timestamps while rendering templates
"""
print(label, datetime.now())
@app.context_processor
def utility_processor():
return dict(
get_csh_name=get_csh_name, get_rit_name=get_rit_name, get_rit_image=get_rit_image, log_time=log_time,
get_roles=get_roles
)
```
#### File: packet/packet/utils.py
```python
from functools import wraps, lru_cache
import requests
from flask import session, redirect
from packet import auth, app
from packet.models import Freshman
from packet.ldap import ldap_get_member, ldap_is_intromember
INTRO_REALM = 'https://sso.csh.rit.edu/auth/realms/intro'
def before_request(func):
"""
Credit to <NAME> and <NAME>
https://github.com/liam-middlebrook/gallery
"""
@wraps(func)
def wrapped_function(*args, **kwargs):
uid = str(session['userinfo'].get('preferred_username', ''))
if session['id_token']['iss'] == INTRO_REALM:
info = {
'realm': 'intro',
'uid': uid,
'onfloor': is_freshman_on_floor(uid)
}
else:
info = {
'realm': 'csh',
'uid': uid
}
kwargs['info'] = info
return func(*args, **kwargs)
return wrapped_function
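# Usage sketch (hypothetical route, not taken from the packet codebase): `before_request` injects
# an `info` dict describing the authenticated realm and user into the wrapped view's kwargs.
#
#   @app.route('/example')
#   @packet_auth
#   @before_request
#   def example(info):
#       return 'hello ' + info['uid']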
@lru_cache(maxsize=128)
def is_freshman_on_floor(rit_username):
"""
Checks if a freshman is on floor
"""
freshman = Freshman.query.filter_by(rit_username=rit_username).first()
if freshman is not None:
return freshman.onfloor
else:
return False
def packet_auth(func):
"""
Decorator for easily configuring oidc
"""
@auth.oidc_auth('app')
@wraps(func)
def wrapped_function(*args, **kwargs):
if app.config['REALM'] == 'csh':
username = str(session['userinfo'].get('preferred_username', ''))
if ldap_is_intromember(ldap_get_member(username)):
app.logger.warn('Stopped intro member {} from accessing upperclassmen packet'.format(username))
return redirect(app.config['PROTOCOL'] + app.config['PACKET_INTRO'], code=301)
return func(*args, **kwargs)
return wrapped_function
def notify_slack(name: str):
"""
Sends a congratulate on sight decree to Slack
"""
if app.config['SLACK_WEBHOOK_URL'] is None:
app.logger.warn('SLACK_WEBHOOK_URL not configured, not sending message to slack.')
return
msg = f':pizza-party: {name} got :100: on packet! :pizza-party:'
requests.put(app.config['SLACK_WEBHOOK_URL'], json={'text':msg})
app.logger.info('Posted 100% notification to slack for ' + name)
``` |
{
"source": "JoelEager/pyTanks-Server",
"score": 4
} |
#### File: pyTanks-Server/dataModels/tank.py
```python
import datetime
import math
import copy
import config
class tank:
"""
Stores the state data for a tank
"""
def __init__(self):
self.x = -100 # Current x position of the tank's center
self.y = -100 # Current y position of the tank's center
self.heading = 0 # Current heading in radians from the +x axis
self.moving = False # Boolean for whether or not this tank is moving
self.alive = False # Boolean for whether or not this tank is alive
# The datetime of this tank's last shot
self.__lastShotTime = datetime.datetime.now() - datetime.timedelta(seconds=config.game.tank.reloadTime)
self.kills = 0 # Kills in the current round
self.wins = 0 # Rounds won
# The string identifying this player's author or other info
self.info = "This player has not provided any info."
def spawn(self):
"""
Resets the tank's per-game variables (other than position)
"""
self.heading = 0
self.moving = False
self.alive = True
self.__lastShotTime = datetime.datetime.now() - datetime.timedelta(seconds=config.game.tank.reloadTime)
self.kills = 0
def canShoot(self):
"""
Used to cap rate of fire
:return: True if the tank can shoot, False if not
"""
return datetime.timedelta(seconds=config.game.tank.reloadTime) <= datetime.datetime.now() - self.__lastShotTime
def didShoot(self):
"""
Called whenever a tank shoots so its lastShotTime can be updated
"""
self.__lastShotTime = datetime.datetime.now()
def move(self, distance):
"""
Moves the tank the given distance along its current heading
"""
self.x += math.cos(self.heading) * distance
self.y -= math.sin(self.heading) * distance
def toDict(self, doClean):
"""
:param doClean: True/False to indicate if the dict should be cleaned for sending to players
:return: A dictionary of the tank's data
"""
myDict = copy.copy(vars(self))
# The lastShotTime should never be in a gameState update
del myDict["_tank__lastShotTime"]
# Remove info that should be hidden from players
if doClean:
del myDict["kills"]
del myDict["wins"]
del myDict["info"]
return myDict
def toPoly(self, margin=0):
"""
:param margin: If set the polygon will have a padding of margin pixels in every direction
:return: The tank's polygon as a list of points as tuples
"""
sin = math.sin(self.heading)
cos = math.cos(self.heading)
def rotateVector(x, y):
return x * cos - y * sin, x * sin + y * cos
halfWidth = (config.game.tank.width / 2) + margin
halfHeight = (config.game.tank.height / 2) + margin
poly = [rotateVector(-halfWidth, -halfHeight),
rotateVector(halfWidth, -halfHeight),
rotateVector(halfWidth, halfHeight),
rotateVector(-halfWidth, halfHeight)]
for count in range(0, len(poly)):
vector = poly[count]
poly[count] = (vector[0] + self.x, vector[1] + self.y)
return poly
```
#### File: pyTanks-Server/dataModels/wall.py
```python
from random import randint
import config
class wall:
"""
Stores the state data for a wall on the map
"""
def __init__(self):
"""
Randomly generates a wall using the bounding values in config.py
"""
# Set lengths for the long and short sides of the wall
longSide = randint(config.game.wall.longSideBounds[0], config.game.wall.longSideBounds[1])
shortSide = randint(config.game.wall.shortSideBounds[0], config.game.wall.shortSideBounds[1])
# Decide if this is going to be a tall or long wall
if randint(0, 2) == 0:
self.width, self.height = longSide, shortSide
self.x = randint(config.game.wall.placementPadding, config.game.map.width -
config.game.wall.placementPadding - config.game.wall.longSideBounds[0])
self.y = randint(config.game.wall.placementPadding, config.game.map.height -
config.game.wall.placementPadding - config.game.wall.shortSideBounds[0])
else:
self.height, self.width = longSide, shortSide
self.y = randint(config.game.wall.placementPadding, config.game.map.height -
config.game.wall.placementPadding - config.game.wall.longSideBounds[0])
self.x = randint(config.game.wall.placementPadding, config.game.map.width -
config.game.wall.placementPadding - config.game.wall.shortSideBounds[0])
# Check to make sure the wall doesn't go too far
if self.x + self.width > config.game.map.width - config.game.wall.placementPadding:
self.width = config.game.map.width - config.game.wall.placementPadding - self.x
elif self.y + self.height > config.game.map.height - config.game.wall.placementPadding:
self.height = config.game.map.height - config.game.wall.placementPadding - self.y
# Correct x and y to be the center of the wall instead of top-left corner
self.x += self.width / 2
self.y += self.height / 2
def toPoly(self, margin=0):
"""
:param margin: If set the polygon will have a padding of margin pixels in every direction
:return: The wall's polygon as a list of points as tuples
"""
halfWidth = (self.width / 2) + margin
halfHeight = (self.height / 2) + margin
return [(self.x - halfWidth, self.y - halfHeight),
(self.x + halfWidth, self.y - halfHeight),
(self.x + halfWidth, self.y + halfHeight),
(self.x - halfWidth, self.y + halfHeight)]
```
#### File: pyTanks-Server/gameLogic/collisionDetector.py
```python
import math
import config
def hasCollided(poly1, poly2, maxDist=None):
"""
Checks for a collision between two convex 2D polygons using separating axis theorem (SAT)
:param poly1, poly2: The two polygons described as lists of points as tuples
Example: [(x1, y1), (x2, y2), (x3, y3)]
Note: The points list must go in sequence around the polygon
:param maxDist: The maximum distance between any two points of any two polygons that can be touching
If this is left off the optimization check that uses it will be skipped
:return: The boolean result
"""
def edgeVector(point1, point2):
"""
:return: A vector going from point1 to point2
"""
return point2[0] - point1[0], point2[1] - point1[1]
def polyToEdges(poly):
"""
Runs edgeVector() on each point paired with the point after it in the poly
:return: A list of the edges of the poly as vectors
"""
return [edgeVector(poly[i], poly[(i + 1) % len(poly)]) for i in range(len(poly))]
def orthogonal(vector):
"""
:return: A new vector which is orthogonal to the given vector
"""
return vector[1], - vector[0]
def dotProduct(vector1, vector2):
"""
:return: The dot (or scalar) product of the two vectors
"""
return vector1[0] * vector2[0] + vector1[1] * vector2[1]
def project(poly, axis):
"""
:return: A vector showing how much of the poly lies along the axis
"""
dots = [dotProduct(point, axis) for point in poly]
return min(dots), max(dots)
def overlap(projection1, projection2):
"""
:return: Boolean indicating if the two projections overlap
"""
return min(projection1) <= max(projection2) and min(projection2) <= max(projection1)
def runSAT(poly1, poly2):
"""
:return: The boolean result of running separating axis theorem on the two polys
"""
edges = polyToEdges(poly1) + polyToEdges(poly2)
axes = [orthogonal(edge) for edge in edges]
for axis in axes:
overlapping = overlap(project(poly1, axis), project(poly2, axis))
if not overlapping:
# The polys don't overlap on this axis so they can't be touching
return False
# The polys overlap on all axes so they must be touching
return True
# Do an optimization check using the maxDist
if maxDist is not None:
if (poly1[1][0] - poly2[0][0]) ** 2 + (poly1[1][1] - poly2[0][1]) ** 2 <= maxDist ** 2:
# Collision is possible so run SAT on the polys
return runSAT(poly1, poly2)
else:
return False
else:
# No maxDist so run SAT on the polys
return runSAT(poly1, poly2)
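# Usage sketch (added for illustration, not part of the original module): two axis-aligned unit
# squares, one shifted half a unit to the right, overlap; shifted two units, they do not.
#
#   square_a = [(0, 0), (1, 0), (1, 1), (0, 1)]
#   square_b = [(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)]
#   square_c = [(2, 0), (3, 0), (3, 1), (2, 1)]
#   hasCollided(square_a, square_b)   # True - projections overlap on every axis
#   hasCollided(square_a, square_c)   # False - separated along the x axis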
def getMaxDist(rect1, rect2):
"""
Finds the maxDist for two rectangles that can be fed into hasCollided()
    To do so, this function finds the maximum distance by which any two corners of the two rectangles can be
    separated while the rectangles are still touching.
:param rect1, rect2: Objects or classes representing rectangles with width and height fields
"""
rect1Size = math.sqrt(rect1.width ** 2 + rect1.height ** 2)
rect2Size = math.sqrt(rect2.width ** 2 + rect2.height ** 2)
return rect1Size + rect2Size
class maxDistValues:
"""
Pre-calculated maxDist values for use when checking collisions between two objects with sizes set by config.py
"""
tankShell = getMaxDist(config.game.tank, config.game.shell)
tankTank = getMaxDist(config.game.tank, config.game.tank)
def perfTest(iterations):
"""
Runs a speed benchmark on hasCollided() using a tank and shell and prints the results
:param iterations: The number of times to repeat each test
"""
import datetime
from dataModels import tank, shell
from serverLogic.logging import round
def runTrials(maxDist=None):
"""
Runs the number of trials set by iterations
:return: The time taken in seconds
"""
# Set up the objects
aTank = tank()
aTank.x = 200
aTank.y = 100
aShell = shell(0, aTank, 0)
# Run the trials
start = datetime.datetime.now()
for count in range(0, iterations):
aShell.x = 100
while not hasCollided(aTank.toPoly(), aShell.toPoly(), maxDist=maxDist):
aShell.move(1)
return (datetime.datetime.now() - start).total_seconds()
print("Benchmarking hasCollided() using a shell and tank...")
print("Using " + str(iterations) + " iterations\n")
timeWith = runTrials(maxDist=maxDistValues.tankShell)
timeWithout = runTrials()
print("Time with maxDist: " + str(round(timeWith, 5)) + " secs")
print("Time without: " + str(round(timeWithout, 5)) + " secs\n")
print("maxDist is " + str(round(timeWithout / timeWith, 2)) + " times faster")
# If this file is run just launch perfTest()
if __name__ == "__main__":
import sys
try:
perfTest(int(sys.argv[1]))
except (ValueError, IndexError):
print("Usage: python collisionDetector.py <numOfTrials>")
```
#### File: pyTanks-Server/gameLogic/gameManager.py
```python
from random import randint
import config
from . import collisionDetector, gameData
import dataModels
from serverLogic import serverData
from serverLogic.logging import logPrint
def startGame():
"""
Starts a new game
"""
gameData.shells = list()
gameData.walls = list()
# Create the walls
for count in range(0, randint(config.game.wall.wallCountBounds[0], config.game.wall.wallCountBounds[1])):
isValidLocation = False
aWall = None
while not isValidLocation:
aWall = dataModels.wall()
isValidLocation = True
# Check for overlap with the other walls
for otherWall in gameData.walls:
if collisionDetector.hasCollided(aWall.toPoly(), otherWall.toPoly(
margin=config.game.wall.placementPadding)):
isValidLocation = False
break
gameData.walls.append(aWall)
# Spawn the tanks
halfWidth = (config.game.map.width / 2) - config.game.tank.width
halfHeight = (config.game.map.height / 2) - config.game.tank.height
tanksSpawned = list()
for clientID in serverData.clients.keys():
if serverData.clients[clientID].isPlayer():
tank = serverData.clients[clientID].tank
tank.spawn()
isValidLocation = False
while not isValidLocation:
tank.x = (config.game.map.width / 2) + randint(-halfWidth, halfWidth)
tank.y = (config.game.map.height / 2) + randint(-halfHeight, halfHeight)
isValidLocation = True
# Check for collisions with the walls
for wall in gameData.walls:
if collisionDetector.hasCollided(tank.toPoly(), wall.toPoly()):
isValidLocation = False
break
# Check for collisions with the other tanks
for otherTank in tanksSpawned:
if collisionDetector.hasCollided(tank.toPoly(),
otherTank.toPoly(margin=config.game.tank.spawnPadding)):
isValidLocation = False
break
tanksSpawned.append(tank)
# Start the game
gameData.ongoingGame = True
logPrint("New game started with " + str(gameData.playerCount) + " players", 1)
def gameTick(elapsedTime):
"""
Runs the logic to maintain the game state and applies commands from players
Called once every frame by gameClock.py
:param elapsedTime: The time elapsed, in seconds, since the last frame
"""
# Temporary, per-frame lists
players = list() # A complete list of the clientIDs of players with alive tanks
otherTanks = list() # The list of stopped tanks and already moved tanks used by checkTankLocation()
# Checks a tank's location against the map bounds, the otherTanks list, and the list of walls
# If the tank has collided with any of those it is moved back and the moving property is set to False
def checkTankLocation(tankToCheck):
def didCollide():
tankToCheck.move(-config.game.tank.speed * elapsedTime)
tankToCheck.moving = False
# Check for collisions with map bounds
for point in tankToCheck.toPoly():
if (point[0] > config.game.map.width or point[0] < 0 or point[1] > config.game.map.height
or point[1] < 0):
didCollide()
return
# Check for collisions with other tanks
for otherTank in otherTanks:
if collisionDetector.hasCollided(tankToCheck.toPoly(), otherTank.toPoly(),
maxDist=collisionDetector.maxDistValues.tankTank):
didCollide()
return
# Check for collisions with walls
for wall in gameData.walls:
if collisionDetector.hasCollided(tankToCheck.toPoly(), wall.toPoly()):
didCollide()
return
# Move the shells and check for collisions with the map bounds
outOfBoundsShells = list()
for index in range(0, len(gameData.shells)):
gameData.shells[index].move(config.game.shell.speed * elapsedTime)
# Discard any shells that fly off the map
if (gameData.shells[index].x > config.game.map.width or gameData.shells[index].x < 0 or
gameData.shells[index].y > config.game.map.height or gameData.shells[index].y < 0):
outOfBoundsShells.insert(0, index)
continue
# Discard any shells that hit a wall
for wall in gameData.walls:
if collisionDetector.hasCollided(gameData.shells[index].toPoly(), wall.toPoly()):
outOfBoundsShells.insert(0, index)
break
for index in outOfBoundsShells:
del gameData.shells[index]
# Fill the per-frame lists, execute any commands, and create tanks for new players
for clientID in serverData.clients.keys():
if serverData.clients[clientID].isPlayer():
player = serverData.clients[clientID]
if player.tank.alive:
# Execute any commands
if len(player.incoming) != 0:
command = player.incoming.pop()
if command.action == config.server.commands.fire:
if player.tank.canShoot():
player.tank.didShoot()
gameData.shells.append(dataModels.shell(clientID, player.tank, command.arg))
elif command.action == config.server.commands.turn:
player.tank.heading = command.arg
elif command.action == config.server.commands.stop:
player.tank.moving = False
elif command.action == config.server.commands.go:
player.tank.moving = True
# If there's another queued command it'll be processed in the next frame
# Add stopped tanks to the list of otherTanks
if not player.tank.moving:
otherTanks.append(player.tank)
# Append the player's id to the list of players
players.append(clientID)
# Update positions for any moving tanks and check for collisions on all tanks
for clientID in players:
tank = serverData.clients[clientID].tank
# Move the tank if it is moving
if tank.moving:
tank.move(config.game.tank.speed * elapsedTime)
# Check if the tank is hit
for index in range(0, len(gameData.shells)):
shell = gameData.shells[index]
            # This if statement keeps a tank from being hit by its own shell on the same frame as it shot that shell
if shell.shooterId != clientID:
if collisionDetector.hasCollided(tank.toPoly(), shell.toPoly(),
maxDist=collisionDetector.maxDistValues.tankShell):
# Mark tank as dead, give the shooter a kill, and delete the shell
tank.alive = False
tank.moving = False
if shell.shooterId in serverData.clients:
serverData.clients[shell.shooterId].tank.kills += 1
del gameData.shells[index]
break
# Location checking is only needed for moving tanks
if tank.moving:
checkTankLocation(tank)
otherTanks.append(tank)
if len(players) <= 1:
# Game over
if len(players) == 1:
# We have a winner!
serverData.clients[players[0]].tank.wins += 1
serverData.clients[players[0]].tank.alive = False
gameData.ongoingGame = False
``` |
{
"source": "JoelEager/Python-seminar",
"score": 3
} |
#### File: JoelEager/Python-seminar/requestsDemo.py
```python
import requests
def getFoods():
resp = requests.get("http://localhost:5000/api/foods")
print(resp.json())
def postFood(newFood):
resp = requests.post("http://localhost:5000/api/foods", json=newFood)
print("HTTP status code: {}, Response text: {}".format(resp.status_code, resp.text))
def main():
getFoods()
while True:
postFood({
"name": input("Enter a food: ")
})
getFoods()
if __name__ == "__main__":
main()
``` |
{
"source": "joeledwardson/betfair-browser",
"score": 2
} |
#### File: mybrowser/session/config.py
```python
from dataclasses import dataclass
from typing import List, Dict, Callable, Any, Optional
from pydantic import (
BaseModel,
BaseSettings,
Field,
)
from datetime import datetime, timedelta
from mytrading.utils import dbfilter as dbf
from myutils.dashutilities import interface as comp
from . import formatters
@dataclass
class MarketFilter:
component_id: str
component: any
filter: dbf.DBFilter
def get_strategy_filters(date_format: str) -> List[dbf.DBFilter]:
return [
dbf.DBFilterMulti(
'strategy_id',
fmt_spec=date_format,
order_col='exec_time',
is_desc=True,
cols=['strategy_id', 'exec_time', 'name']
)
]
def get_market_filters(date_format: str) -> List[MarketFilter]:
id_sport = 'input-sport-type'
id_market_type = 'input-market-type'
id_bet_type = 'input-bet-type'
id_format = 'input-format'
id_country_code = 'input-country-code'
id_venue = 'input-venue'
id_date = 'input-date'
id_market = 'input-market-id'
return [
MarketFilter(
id_sport,
comp.select(id_sport, placeholder='Sport...'),
dbf.DBFilterJoin(
db_col='sport_id',
join_tbl_name='sportids',
join_id_col='sport_id',
join_name_col='sport_name'
)
),
MarketFilter(
id_market_type,
comp.select(id_market_type, placeholder='Market type...'),
dbf.DBFilter(db_col='market_type')
),
MarketFilter(
id_bet_type,
comp.select(id_bet_type, placeholder='Betting type...'),
dbf.DBFilter(db_col='betting_type')
),
MarketFilter(
id_format,
comp.select(id_format, placeholder='Format...'),
dbf.DBFilter(db_col='format')
),
MarketFilter(
id_country_code,
comp.select(id_country_code, placeholder='Country...'),
dbf.DBFilterJoin(
db_col="country_code",
join_tbl_name='countrycodes',
join_id_col='alpha_2_code',
join_name_col='name'
)
),
MarketFilter(
id_venue,
comp.select(id_venue, placeholder='Venue...'),
dbf.DBFilter(db_col='venue')
),
MarketFilter(
id_date,
comp.select(id_date, placeholder='Market date...'),
dbf.DBFilterDate(
db_col='market_time',
dt_fmt=date_format
)
),
MarketFilter(
id_market,
comp.input_component(id_market, placeholder='Market ID filter...'),
dbf.DBFilterText(db_col='market_id')
)
]
def default_datetime_formatter(value: datetime):
return formatters.format_datetime(value, "%Y-%m-%d %H:%M")
def default_timedelta_formatter(value: timedelta):
return formatters.format_timedelta(value, "{d}d {h:02}:{m:02}:{s:02}.{u:06}")
def default_money_formatter(value: Optional[float]):
return formatters.format_money(value, "£{value:+.2f}")
class DisplayConfig(BaseModel):
"""
Dashboard display
"""
cache: bool = Field(True, description="display interactions for reading/writing to cache")
libraries: bool = Field(True, description="display interactions for reloading libraries")
strategy_delete: bool = Field(True, description="display interaction for deleting strategy")
config_reloads: bool = Field(True, description="display interaction for reloading feature and plot configurations")
class DatabaseConfig(BaseModel):
"""
Database querying
"""
market_date_format: str = Field(
"%d %b %y",
description="market date format to present in market filter")
strategy_date_format: str = Field(
"{exec_time:%y-%m-%d %H:%M:%S} {name}",
description="strategy datetime format used in strategy filter"
)
max_rows: int = Field(100, description="maximum number of rows to return from a database query")
db_kwargs: Dict[str, Any] = Field(
{
'db_lang': 'postgresql',
'db_user': 'better',
'db_host': 'notrelevant.org.uk',
'db_port': 5432,
'db_name': 'betting',
'db_engine': 'psycopg2',
'cache_root': 'bf_cache'
},
description="kwargs passed when creating BettingDB"
)
class TableConfigs(BaseModel):
"""
Tabular displays
"""
market_rows: int = Field(12, description="number of rows to display in market table")
strategy_rows: int = Field(12, description="number of rows to display in strategy table")
runner_rows: int = Field(10, description="number of rows to display in runner table")
orders_rows: int = Field(15, description="number of rows to display in order table")
timings_rows: int = Field(15, description="number of rows to display in timings table")
market_table_cols: dict = Field(
{
"market_id": "Market ID",
"market_type": "Market Type",
"market_time": "Market Time",
"venue": "Venue",
"market_profit": "Profit"
},
description="""market column mappings
maps database column name -> display column title
n.b. in addition to the database columns, there are calculated columns available:
-> "market_profit" is calculated from strategy market profit
""")
market_sort_options: dict = Field({
"market_id": "Market ID",
"market_time": "Market Time"
})
market_table_formatters: Dict[str, Callable[[Any], Any]] = Field(
{
"market_time": default_datetime_formatter,
"market_profit": default_money_formatter
},
description='mappings of market table column name to formatter function'
)
strategy_table_cols: dict = Field(
{
'strategy_id': 'Strategy ID',
'type': 'Type',
'name': 'Name',
'exec_time': 'Execution Time',
'date_added': 'Date Added',
'n_markets': 'Market Count',
'total_profit': 'Total Profit'
},
description="""strategy column mappings
maps database column name -> display column title
n.b. in addition to the database columns, there are calculated columns available:
-> "n_markets" is the total markets available for the strategy
-> "total_profit" is the calculated total profit for the strategy
""")
strategy_table_formatters: Dict[str, Callable[[Any], Any]] = Field(
{
'exec_time': default_datetime_formatter,
'date_added': default_datetime_formatter,
'total_profit': default_money_formatter
},
description='mappings of strategy table column name to formatter function'
)
runner_table_cols: dict = Field(
{
'runner_id': 'Selection ID',
'runner_name': 'Name',
'starting_odds': 'Starting Odds',
'runner_profit': 'Profit'
},
description="""runner column mappings
maps database column name -> display column title
n.b. in addition to the database columns, there are calculated columns available:
-> "starting_odds" is the odds of the runner at the start of the race
-> "runner_profit" is the profit on the runner from the selected strategy
"""
)
runner_table_formatters: Dict[str, Callable[[Any], Any]] = Field(
{
'runner_profit': default_money_formatter,
},
description='mappings of runner table column name to formatter function'
)
timings_table_cols: dict = Field(
{
'function': 'Function',
'count': 'Count',
'mean': 'Mean',
'level': 'Level'
},
description="""timings table mappings
maps timing attribute -> display column title
timing attributes are from TimingRegistrar.get_timings_summary()
"""
)
timings_table_formatters: Dict[str, Callable[[Any], Any]] = Field(
{
"mean": default_timedelta_formatter
},
description='mappings of timings table column name to formatter function'
)
order_table_cols: dict = Field(
{
'date': 'Timestamp',
'trade': 'Trade Index',
'side': 'Side',
'price': 'Price',
'size': 'Size',
'm-price': 'Matched Price',
'matched': 'Matched',
'order-profit': 'Order',
'trade-profit': 'Trade',
't-start': 'Time to Start'
},
description="""order table mappings
maps order attribute -> display column title
""")
class PlotConfig(BaseModel):
"""
Chart plotting
"""
default_offset: str = Field("00:03:00", description="default time offset before start of event")
order_offset_secs: int = Field(2, description="number of seconds to plot either side of order update start/end")
cmp_buffer_secs: int = Field(10, description="additional seconds to add when computing features for plotting")
class Config(BaseSettings):
display_config: DisplayConfig = DisplayConfig()
database_config: DatabaseConfig = DatabaseConfig()
table_configs: TableConfigs = TableConfigs()
plot_config: PlotConfig = PlotConfig()
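# Minimal usage sketch (assumes the pydantic defaults above are acceptable; environment-variable
# overrides supported by BaseSettings are not shown):
#
#   cfg = Config()
#   cfg.table_configs.market_rows                          # -> 12
#   cfg.database_config.max_rows                           # -> 100
#   Config(plot_config=PlotConfig(order_offset_secs=5))    # override an individual field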
```
#### File: mybrowser/session/formatters.py
```python
from datetime import datetime, timedelta
from typing import Optional
from myutils import registrar, datetime as utils_datetime
def format_datetime(value: datetime, dt_format: str):
return value.strftime(dt_format)
def format_timedelta(value: timedelta, td_format: str):
return utils_datetime.format_timedelta(td=value, fmt=td_format)
def format_money(value: Optional[float], money_format: str):
if value is not None:
return money_format.format(value=value)
else:
return None
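# Illustrative examples (not part of the original module):
#   format_money(1.5, "£{value:+.2f}")                        -> "£+1.50"
#   format_money(None, "£{value:+.2f}")                       -> None
#   format_datetime(datetime(2021, 1, 1), "%Y-%m-%d %H:%M")   -> "2021-01-01 00:00"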
```
#### File: betfair-browser/mytrading/configs.py
```python
from typing import List, Dict, Optional
from myutils.registrar import Registrar
from myutils import dictionaries
import os, yaml
from os import path
import json
from .exceptions import FeatureConfigException
reg_plots = Registrar()
reg_features = Registrar()
KEY_SAMPLE = 'smp'
KEY_AVERAGE = 'avg'
KEY_TICKS = 'tck'
KEY_MAX_DIF = 'mdf'
KEY_COMPARE = 'cmp'
class ConfigGenerator:
CONFIG_SPEC = {
'name': {
'type': str
},
'kwargs': {
'type': dict
}
}
def __init__(self, cfg_dir: str, out_dir, reg: Registrar):
self._cfg_dir = path.abspath(path.expandvars(cfg_dir))
if not path.isdir(self._cfg_dir):
raise FeatureConfigException(f'configuration dir "{self._cfg_dir}" is not a directory')
self._out_dir = path.abspath(path.expandvars(out_dir))
if not path.isdir(self._out_dir):
raise FeatureConfigException(f'output dir "{self._out_dir}" is not a directory')
self._reg = reg
def reload(self):
_, _, filenames = next(os.walk(self._cfg_dir))
for fn in filenames:
p_in = path.join(self._cfg_dir, fn)
with open(p_in, 'r') as f:
data = f.read()
file_cfg = yaml.load(data, yaml.FullLoader)
dictionaries.validate_config(file_cfg, self.CONFIG_SPEC)
reg_nm = file_cfg['name']
reg_kwargs = file_cfg['kwargs']
ftr_cfg = self._reg[reg_nm](**reg_kwargs)
p_out = path.join(self._out_dir, fn)
with open(p_out, 'w') as f:
f.write(yaml.dump(ftr_cfg, sort_keys=False))
p_out_json = path.splitext(p_out)[0] + '.json'
with open(p_out_json, 'w') as f:
f.write(json.dumps(ftr_cfg, indent=2, sort_keys=False))
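# Illustrative input file for reload() (hypothetical file name and values, not from the source):
# a YAML file in the configuration directory such as
#
#   name: feature_configs_spike
#   kwargs:
#     n_ladder_elements: 3
#     n_wom_ticks: 5
#     ltp_window_width_s: 40
#     ltp_window_sampling_ms: 200
#     ltp_window_sampling_count: 10
#     spread_sampling_ms: 200
#     spread_sampling_count: 10
#
# is validated against CONFIG_SPEC, expanded by the matching registered generator function and
# written back out to the output directory as both YAML and JSON.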
def _plot_procs_lad(ftr_key, lad_key):
"""return processors to add ladder feature of price sizes to back/lay feature"""
return [
{
'name': 'prc_ftrstodf',
'kwargs': {
'ftr_keys': {
'y': ftr_key,
'text': lad_key
}
}
}, {
'name': 'prc_dffmtps',
'kwargs': {
'df_col': 'text'
}
}, {
'name': 'prc_dftodict'
}
]
def _plot_colorscale(color_0, color_1) -> Dict:
"""`chart_args` argument to set colorscale with lines+markers"""
return {
'mode': 'lines+markers',
'line_color': 'black',
'marker': {
'colorscale': [
[0, color_0],
[1, color_1]
],
'cmid': 0,
}
}
def _ftr_smooth(sample_ms, cache_count):
"""sub-features config for sampling and then moving average"""
return {
KEY_SAMPLE: {
'name': 'RFSample',
'kwargs': {
'periodic_ms': sample_ms,
'cache_count': cache_count,
'sub_features_config': {
KEY_AVERAGE: {
'name': 'RFMvAvg'
}
}
}
}
}
def _ftr_tick(sub_features_config=None):
"""sub-feature converting parent to tick"""
return {
'name': 'RunnerFeatureSub',
'kwargs': {
'value_processors_config': [{
'name': 'value_processor_to_tick',
}],
'sub_features_config': sub_features_config,
},
}
def _ftr_tvlad(window_s, sampling_ms, cache_count):
"""traded volume `TVLad` feature sub-config for creating max/min values over window, sampling then moving avg"""
return {
'cache_secs': window_s,
'cache_insidewindow': False,
'sub_features_config': {
'dif': {
'name': 'RFTVLadDif',
'kwargs': {
'sub_features_config': {
'max': {
'name': 'RFTVLadMax',
'kwargs': {
'sub_features_config': _ftr_smooth(sampling_ms, cache_count)
}
},
'min': {
'name': 'RFTVLadMin',
'kwargs': {
'sub_features_config': _ftr_smooth(sampling_ms, cache_count)
}
}
}
}
}
}
}
@reg_features.register_element
def feature_configs_spike(
n_ladder_elements,
n_wom_ticks,
ltp_window_width_s,
ltp_window_sampling_ms,
ltp_window_sampling_count,
spread_sampling_ms,
spread_sampling_count,
) -> Dict[str, Dict]:
"""
Get a dict of default runner features, where each entry is a dictionary of:
- key: feature usage name
- value: dict of
- 'name': class name of feature
- 'kwargs': dict of constructor arguments used when creating feature
"""
def ltp_win_kwargs(sample_ms, cache_count):
d = {
'sub_features_config': {
'smp': {
'name': 'RFSample',
'kwargs': {
'periodic_ms': sample_ms,
'cache_count': cache_count,
'sub_features_config': {
'avg': {
'name': 'RFMvAvg'
}
}
}
}
}
}
return d
return {
'best back': {
'name': 'RFBck',
},
'best lay': {
'name': 'RFLay',
},
'back ladder': {
'name': 'RFLadBck',
'kwargs': {
'n_elements': n_ladder_elements,
}
},
'lay ladder': {
'name': 'RFLadLay',
'kwargs': {
'n_elements': n_ladder_elements,
}
},
'wom': {
'name': 'RFWOM',
'kwargs': {
'wom_ticks': n_wom_ticks
},
},
'tvlad': {
'name': 'RFTVLad',
'kwargs': {
'cache_secs': ltp_window_width_s,
'cache_insidewindow': False,
'sub_features_config': {
'dif': {
'name': 'RFTVLadDif',
'kwargs': {
'sub_features_config': {
'max': {
'name': 'RFTVLadMax',
'kwargs': ltp_win_kwargs(ltp_window_sampling_ms, ltp_window_sampling_count)
},
'min': {
'name': 'RFTVLadMin',
'kwargs': ltp_win_kwargs(ltp_window_sampling_ms, ltp_window_sampling_count)
},
'spread': {
'name': 'RFTVLadSpread'
}
}
}
}
}
}
},
'ltp': {
'name': 'RFLTP',
},
'tv': {
'name': 'RFTVTot',
},
'spread': {
'name': 'RFLadSprd',
'kwargs': {
'sub_features_config': {
'smp': {
'name': 'RFSample',
'kwargs': {
'periodic_ms': spread_sampling_ms,
'cache_count': spread_sampling_count,
'sub_features_config': {
'avg': {
'name': 'RFMvAvg'
}
}
}
}
}
}
}
}
@reg_plots.register_element
def plot_configs_spike(
ltp_diff_opacity,
ltp_diff_s,
tv_width_ms
):
IGNORE = [
'back ladder',
'lay ladder',
'spread',
'spread.smp',
'spread.smp.avg',
'wom',
'tvlad',
'tvlad.dif',
'tvlad.dif.max',
'tvlad.dif.max.smp',
'tvlad.dif',
'tvlad.dif.spread',
'tvlad.dif.min',
'tvlad.dif.min.smp',
]
return {
k: {
'ignore': True
} for k in IGNORE
} | {
'best back': {
'value_processors': _plot_procs_lad('best back', 'back ladder'),
},
'best lay': {
'value_processors': _plot_procs_lad('best lay', 'lay ladder')
},
'ltp': {
'chart_args': {
'mode': 'lines+markers'
},
'value_processors': [{
'name': 'prc_ftrstodf',
'kwargs': {
'ftr_keys': {
'y': 'ltp',
'text': 'tv',
}
}
}, {
'name': 'prc_dffillna'
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text',
'fmt_spec': 'Traded Volume: £{0:.2f}'
}
}, {
'name': 'prc_dftodict',
}],
},
'tv': {
'chart': 'Bar',
'chart_args': {
'marker': {
'colorscale': [
[0, 'rgb(250,50,50)'],
[1, 'rgb(50,250,50)']
], # default plotly colours go white, so use a green to red scale
'cmid': 0, # with grey 0 scale
},
'opacity': ltp_diff_opacity,
            'width': tv_width_ms,  # width of each bar in milliseconds
'offset': 0, # end of bar to be aligned with timestamp
},
'trace_args': {
'secondary_y': True
},
'value_processors': [{
'name': 'prc_dfdiff'
}, {
'name': 'prc_getftr',
'keys': {
'key_out': 'key_1'
},
'kwargs': {
'ftr_key': 'wom',
}
}, {
'name': 'prc_buftodf',
'kwargs': {
'buf_cfg': {
'y': 'key_0',
'text': 'key_1'
},
}
}, {
'name': 'prc_dftypes',
'kwargs': {
'dtypes': {
'y': 'float',
'text': 'float',
}
}
}, {
'name': 'prc_resmp',
'kwargs': {
'n_seconds': tv_width_ms/1000,
'agg_function': {
'y': 'sum',
'text': 'mean',
}
}
}, {
'name': 'prc_dfcp',
'kwargs': {
'col_src': 'text',
'col_out': 'marker_color'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text',
'fmt_spec': 'Weight of Money: £{0:.2f}'
},
}, {
'name': 'prc_dftodict'
}],
},
'tvlad.dif.max.smp.avg': {
'rename': 'ltp max'
},
'tvlad.dif.min.smp.avg': {
'rename': 'ltp min'
}
}
@reg_features.register_element
def feature_configs_smooth(
spread_sampling_ms,
spread_sampling_count,
wom_ticks,
ltp_window_width_s,
ltp_window_sampling_ms,
ltp_window_sampling_count,
ladder_sampling_ms,
ladder_sampling_count,
ltp_sampling_ms,
ltp_sampling_count,
n_ladder_elements,
diff_s,
split_sum_s,
):
def side_kwargs(diff_s, ladder_sampling_ms, ladder_sampling_count) -> Dict:
return {
'cache_secs': diff_s,
'cache_insidewindow': False,
'sub_features_config': {
KEY_TICKS: {
'name': 'RFTick',
'kwargs': {
'cache_secs': diff_s,
'cache_insidewindow': False,
'sub_features_config': {
KEY_MAX_DIF: {
'name': 'RFMaxDif'
},
KEY_SAMPLE: {
'name': 'RFSample',
'kwargs': {
'periodic_ms': ladder_sampling_ms,
'cache_count': ladder_sampling_count,
'sub_features_config': {
KEY_AVERAGE: {
'name': 'RFMvAvg',
'kwargs': {
'cache_secs': diff_s,
'sub_features_config': {
KEY_COMPARE: {
'name': 'RFDif'
}
}
}
}
}
}
}
}
}
},
KEY_SAMPLE: {
'name': 'RFSample',
'kwargs': {
'periodic_ms': ladder_sampling_ms,
'cache_count': ladder_sampling_count,
'sub_features_config': {
KEY_AVERAGE: {
'name': 'RFMvAvg'
}
}
}
}
}
}
return {
'spread': {
'name': 'RFLadSprd',
'kwargs': {
'sub_features_config': _ftr_smooth(
spread_sampling_ms,
spread_sampling_count
)
}
},
'lay': {
'name': 'RFLay',
'kwargs': side_kwargs(diff_s, ladder_sampling_ms, ladder_sampling_count)
},
'bck': {
'name': 'RFBck',
'kwargs': side_kwargs(diff_s, ladder_sampling_ms, ladder_sampling_count)
},
'ltp': {
'name': 'RFLTP',
'kwargs': {
'sub_features_config': _ftr_smooth(
ltp_sampling_ms,
ltp_sampling_count
),
}
},
'tvlad': {
'name': 'RFTVLad',
'kwargs': _ftr_tvlad(
ltp_window_width_s,
ltp_window_sampling_ms,
ltp_window_sampling_count
)
},
'bcklad': {
'name': 'RFLadBck',
'kwargs': {
'n_elements': n_ladder_elements,
}
},
'laylad': {
'name': 'RFLadLay',
'kwargs': {
'n_elements': n_ladder_elements,
}
},
'wom': {
'name': 'RFWOM',
'kwargs': {
'wom_ticks': wom_ticks
},
},
'split': {
'name': 'RFBkSplit',
'kwargs': {
'cache_secs': split_sum_s,
'cache_insidewindow': False,
'sub_features_config': {
'sum': {
'name': 'RFSum'
},
'tot': {
'name': 'RFIncSum'
}
}
}
},
'tv': {
'name': 'RFTVTot',
},
}
@reg_plots.register_element
def plot_configs_smooth(bar_width_ms, tv_opacity):
IGNORE_LIST = [
'bcklad',
'laylad',
'wom',
'spread',
'spread.smp',
'spread.smp.avg',
'tv',
'bck.smp',
'lay.smp',
'bck.tck',
'lay.tck',
'bck.tck.mdf',
'lay.tck.mdf',
'bck.tck.smp',
'lay.tck.smp',
'bck.tck.smp.avg',
'lay.tck.smp.avg',
'bck.tck.smp.avg.cmp',
'lay.tck.smp.avg.cmp',
'ltp.smp',
'tvlad',
'tvlad.dif',
'tvlad.dif.max',
'tvlad.dif.max.smp',
'tvlad.dif',
'tvlad.dif.min',
'tvlad.dif.min.smp',
]
def prcs_ltp(ltp, tv, spread, split) -> List[Dict]:
return [{
'name': 'prc_ftrstodf',
'kwargs': {
'ftr_keys': {
'y': ltp,
'tv_text': tv,
'spread_text': spread,
'split_text': split,
'marker_color': 'wom',
'wom_text': 'wom',
}
}
}, {
'name': 'prc_dffillna',
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'spread_text',
'fmt_spec': 'Spread: {0}'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'tv_text',
'fmt_spec': 'Traded Volume: £{0:.2f}'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'split_text',
'fmt_spec': 'Book split: £{0:.2f}'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'wom_text',
'fmt_spec': 'WOM: £{0:.2f}'
}
}, {
'name': 'prc_dftxtjoin',
'kwargs': {
'dest_col': 'text',
'src_cols': [
'tv_text',
'spread_text',
'split_text',
'wom_text'
],
}
}, {
'name': 'prc_dfdrop',
'kwargs': {
'cols': [
'tv_text',
'spread_text',
'split_text',
'wom_text'
]
}
}, {
'name': 'prc_dftodict',
}]
def prcs_tvbar(tv, bar_width_ms):
return [{
'name': 'prc_getftr',
'keys': {
'key_out': 'key_tv'
},
'kwargs': {
'ftr_key': tv
}
}, {
'name': 'prc_dfdiff',
'keys': {
'key_in': 'key_tv',
'key_out': 'key_tv'
}
}, {
'name': 'prc_buftodf',
'kwargs': {
'buf_cfg': {
'y': 'key_0',
'text': 'key_tv'
}
}
}, {
'name': 'prc_resmp',
'kwargs': {
'n_seconds': int(bar_width_ms / 1000),
'agg_function': {
'y': 'sum',
'text': 'sum'
}
}
}, {
'name': 'prc_dfcp',
'kwargs': {
'col_src': 'text',
'col_out': 'marker_color'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text',
'fmt_spec': 'Traded volume: £{0:.2f}'
}
}, {
'name': 'prc_dftodict'
}]
def smooth_value_processors(ftr_src, ftr_tks, ftr_cmp, ftr_dif) -> List[Dict]:
return [{
'name': 'prc_ftrstodf',
'kwargs': {
'ftr_keys': {
'y': ftr_src,
'marker_color': ftr_cmp,
'text_ticks': ftr_tks,
'text_tick_comp': ftr_cmp,
'text_max_diff': ftr_dif
}
},
}, {
'name': 'prc_dffillna',
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text_ticks',
'fmt_spec': 'Tick: {0:.2f}'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text_tick_comp',
'fmt_spec': 'Tick difference: {0:.2f}'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text_max_diff',
'fmt_spec': 'Max tick difference: {0:.2f}',
}
}, {
'name': 'prc_dftxtjoin',
'kwargs': {
'dest_col': 'text',
'src_cols': [
'text_ticks',
'text_tick_comp',
'text_max_diff'
],
}
}, {
'name': 'prc_dfdrop',
'kwargs': {
'cols': [
'text_ticks',
'text_tick_comp',
'text_max_diff'
]
}
}, {
'name': 'prc_dftodict',
}]
return {
f: {
'ignore': True
} for f in IGNORE_LIST
} | {
'tvlad.dif.max.smp.avg': {
'rename': 'ltp max'
},
'tvlad.dif.min.smp.avg': {
'rename': 'ltp min'
},
'bck': {
'chart_args': {
'visible': 'legendonly',
},
'value_processors': _plot_procs_lad('bck', 'bcklad'),
},
'lay': {
'chart_args': {
'visible': 'legendonly',
},
'value_processors': _plot_procs_lad('lay', 'laylad'),
},
'ltp': {
'value_processors': prcs_ltp(
ltp='ltp',
tv='tv',
spread='spread',
split='split.sum',
),
'chart_args': {
'mode': 'lines+markers',
'visible': 'legendonly',
'line_color': 'black',
'marker': {
'colorscale': [
[0, 'rgb(255,0,0)'],
[1, 'rgb(0,255,0)']
],
'cmid': 0,
}
},
},
'split': {
'chart': 'Bar',
'chart_args': {
'marker': { # default plotly colours go white, so use a green to red scale
'colorscale': [
[0, 'rgb(250,50,50)'],
[1, 'rgb(50,250,50)']
],
'cmid': 0, # with grey 0 scale
},
'opacity': tv_opacity,
            'width': bar_width_ms,  # width of each bar in milliseconds
'offset': 0, # end of bar to be aligned with timestamp
},
'trace_args': {
'secondary_y': True
},
'value_processors': prcs_tvbar('tv', bar_width_ms),
},
'split.sum': {
'chart_args': {
'visible': 'legendonly',
},
'trace_args': {
'secondary_y': True
},
},
'split.tot': {
# 'trace_args': {
# 'secondary_y': True
# },
'ignore': True
},
'ltp.smp.avg': {
'chart_args': _plot_colorscale(
color_0='rgb(255,255,0)',
color_1='rgb(0,0,255)' # yellow to blue scale
),
'value_processors': [{
'name': 'prc_ftrstodf',
'kwargs': {
'ftr_keys': {
'y': 'ltp.smp.avg',
'text': 'split.sum',
},
},
}, {
'name': 'prc_dffillna'
}, {
'name': 'prc_dfcp',
'kwargs': {
'col_src': 'text',
'col_out': 'marker_color'
}
}, {
'name': 'prc_dffmtstr',
'kwargs': {
'df_col': 'text',
'fmt_spec': 'Book split: £{0:.2f}'
}
}, {
'name': 'prc_dftodict',
}],
'rename': 'ltp smoothed'
},
'bck.smp.avg': {
# use red to green scale
'chart_args': _plot_colorscale(
color_0='rgb(255,0,0)',
color_1='rgb(0,255,0)',
),
'value_processors': smooth_value_processors(
ftr_src='bck.smp.avg',
ftr_tks='bck.tck.smp.avg',
ftr_cmp='bck.tck.smp.avg.cmp',
ftr_dif='bck.tck.mdf',
),
'rename': 'back smoothed'
},
'lay.smp.avg': {
# use red to green scale
'chart_args': _plot_colorscale(
color_0='rgb(255,0,0)',
color_1='rgb(0,255,0)',
),
'value_processors': smooth_value_processors(
ftr_src='lay.smp.avg',
ftr_tks='lay.tck.smp.avg',
ftr_cmp='lay.tck.smp.avg.cmp',
ftr_dif='lay.tck.mdf',
),
'rename': 'lay smoothed'
},
}
```
#### File: mytrading/process/__init__.py
```python
from __future__ import annotations
import operator
from dataclasses import dataclass
import logging
from datetime import datetime
from typing import Dict, List, Union, Optional, Callable
from betfairlightweight.resources import MarketBook, RunnerBook, MarketDefinitionRunner
from betfairlightweight.resources.bettingresources import RunnerBookEX
from flumine.order.trade import Trade
import myutils.datetime
import myutils.dictionaries
from .ticks import LTICKS, LTICKS_DECODED, TICKS, TICKS_DECODED
from myutils import general, timing
from ..exceptions import BfProcessException
from . import oddschecker as oc
active_logger = logging.getLogger(__name__)
active_logger.setLevel(logging.INFO)
GETTER = {
# Use True to return dictionary getter, False to return attribute getter
    # betfairlightweight RunnerBookEx objects available_to_back, available_to_lay, traded_volume are inconsistent in
# appearing as lists of dicts with 'price' and 'size', and lists of PriceSize objects.
True: dict.get,
False: getattr
}
@dataclass
class BfLadderPoint:
"""single point of betfair ladder, on back/lay side, with associated price & size and index of price in complete
betfair tick ladder"""
price: float
size: float
tick_index: int
side: str
def __str__(self):
return f'{self.side} at {self.price} for £{self.size:.2f}, tick index {self.tick_index}'
@staticmethod
def get_ladder_point(price: float, size: float, side: str) -> BfLadderPoint:
"""get ladder point instance with tick index"""
# max decimal points is 2 for betfair prices
price = round(price, 2)
if price not in LTICKS_DECODED:
raise BfProcessException(f'failed to create ladder point at price {price}')
if side != 'BACK' and side != 'LAY':
raise BfProcessException(f'failed to create ladder point with side "{side}"')
return BfLadderPoint(
price=price,
size=size,
tick_index=LTICKS_DECODED.index(price),
side=side
)
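# Example (illustrative): BfLadderPoint.get_ladder_point(3.0, 25.0, 'BACK') gives a point with
# price=3.0, size=25.0 and tick_index set to the position of 3.0 in the betfair tick ladder,
# while a price that is not a valid tick (e.g. 3.01) raises BfProcessException.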
@dataclass
class MatchBetSums:
"""
Matched bet sums to record:
- total stakes of back bets
- total potential profits from back bets (minus stakes)
- total stakes of lay bets
- total exposure of lay bets
"""
back_stakes: float
back_profits: float
lay_stakes: float
lay_exposure: float
def outstanding_profit(self):
"""get difference between (profit/loss on selection win) minus (profit/loss on selection loss)"""
# selection win profit is (back bet profits - lay bet exposures)
# selection loss profit is (lay stakes - back stakes)
return (self.back_profits - self.lay_exposure) - (self.lay_stakes - self.back_stakes)
@staticmethod
def get_match_bet_sums(trade: Trade) -> MatchBetSums:
"""Get match bet sums from all orders in trade"""
back_stakes = sum([o.size_matched for o in trade.orders if o.side == 'BACK'])
back_profits = sum([
(o.average_price_matched - 1) * o.size_matched for o in trade.orders
# if o.status == OrderStatus.EXECUTABLE or o.status == OrderStatus.EXECUTION_COMPLETE
if o.side == 'BACK' and o.average_price_matched and o.size_matched
])
lay_stakes = sum([o.size_matched for o in trade.orders if o.side == 'LAY'])
lay_exposure = sum([
(o.average_price_matched - 1) * o.size_matched for o in trade.orders
# if o.status == OrderStatus.EXECUTABLE or o.status == OrderStatus.EXECUTION_COMPLETE
if o.side == 'LAY' and o.average_price_matched and o.size_matched
])
return MatchBetSums(
back_stakes=back_stakes,
back_profits=back_profits,
lay_stakes=lay_stakes,
lay_exposure=lay_exposure
)
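# Worked example (illustrative figures, not from the source): a trade holding a fully matched
# £10 BACK at 3.0 and a fully matched £5 LAY at 2.0 gives back_stakes=10, back_profits=20,
# lay_stakes=5 and lay_exposure=5, so
# outstanding_profit() = (20 - 5) - (5 - 10) = 20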
def get_runner_spread(book_ex: RunnerBookEX) -> float:
"""
get the number of ticks spread between the back and lay side of a runner book
returns 1000 if either side is empty
"""
atb = book_ex.available_to_back
atl = book_ex.available_to_lay
if atb and atl:
if atb[0]['price'] in LTICKS_DECODED and atl[0]['price'] in LTICKS_DECODED:
return LTICKS_DECODED.index(atl[0]['price']) - LTICKS_DECODED.index(atb[0]['price'])
return len(LTICKS_DECODED)
def get_names(market, name_attr='name', name_key=False) -> Dict[int, str]:
"""
Get dictionary of {runner ID: runner name} from a market definition
- name_attr: optional attribute name to retrieve runner name
- name_key: optional flag to return {runner name: runner ID} with name as key instead
"""
if not name_key:
return {
runner.selection_id: getattr(runner, name_attr)
for runner in market.runners
}
else:
return {
getattr(runner, name_attr): runner.selection_id
for runner in market.runners
}
def get_starting_odds(records: List[List[MarketBook]]) -> Dict:
"""get a dictionary of {selection ID: starting odds} from last record where market is open"""
for i in reversed(range(len(records))):
if not records[i][0].market_definition.in_play and records[i][0].status == 'OPEN':
runner_odds = {}
for runner in records[i][0].runners:
price = get_best_price(runner.ex.available_to_back)
if price is not None:
runner_odds[runner.selection_id] = price
return runner_odds
else:
return {}
def get_best_price(available: List, is_dict=True) -> float:
"""get best price from available ladder of price sizes, returning None if empty"""
return GETTER[is_dict](available[0], 'price') if available else None
def get_ltps(market_book: MarketBook) -> Dict[int, float]:
"""get dictionary of runner ID to last traded price if last traded price is not 0 (or None), sorting with
shortest LTP first"""
return myutils.dictionaries.dict_sort({
r.selection_id: r.last_price_traded
for r in market_book.runners if r.last_price_traded
})
def get_order_profit(sts: str, side: str, price: float, size: float) -> float:
"""
    Compute order profit from a dictionary of values retrieved from a line of a file written by TradeTracker.log_update
    Function is shamelessly stolen from `flumine.backtest.simulated.Simulated.profit`, but that requires an order
    instance, which cannot be created from the logged trade/strategy information alone
"""
if sts == "WINNER":
if side == "BACK":
return round((price - 1) * size, ndigits=2)
else:
return round((price - 1) * -size, ndigits=2)
elif sts == "LOSER":
if side == "BACK":
return -size
else:
return size
else:
return 0.0
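# Worked example (illustrative): a matched BACK of £2 at 4.0 returns 6.0 for "WINNER" and -2 for
# "LOSER"; the equivalent LAY returns -6.0 for "WINNER" and 2 for "LOSER".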
def get_runner_book(
runners: Union[List[RunnerBook],
List[MarketDefinitionRunner]],
selection_id
) -> Optional[RunnerBook]:
"""Get a runner book object by checking for match of "selection_id" attribute from a list of objects"""
for runner in runners:
if selection_id == runner.selection_id:
return runner
else:
return None
def get_side_operator(side, invert=False) -> Callable:
"""
generic operator selection when comparing odds
- if side is 'BACK', returns gt (greater than)
- if side is 'LAY', returns lt (less than)
- set invert=True to return the other operator
"""
if side == 'BACK':
greater_than = True
elif side == 'LAY':
greater_than = False
else:
raise BfProcessException(f'side "{side}" not recognised')
if invert:
greater_than = not greater_than
if greater_than:
return operator.gt
else:
return operator.lt
def get_side_ladder(book_ex: RunnerBookEX, side) -> List[Dict]:
"""
get selected side of runner book ex:
- if side is 'BACK', returns 'book_ex.available_to_back'
- if side is 'LAY', returns 'book.ex.available_to_lay'
"""
if side == 'BACK':
return book_ex.available_to_back
elif side == 'LAY':
return book_ex.available_to_lay
else:
raise BfProcessException(f'side "{side}" not recognised')
def side_invert(side: str) -> str:
"""
convert 'BACK' to 'LAY' and vice-versa
"""
if side == 'BACK':
return 'LAY'
elif side == 'LAY':
return 'BACK'
else:
raise BfProcessException(f'side "{side}" not recognised')
def closest_tick(value: float, return_index=False, round_down=False, round_up=False):
"""
    Convert a value to the nearest odds tick, e.g. 2.10000001 would be converted to 2.1
Specify return_index=True to get index instead of value
"""
return general.closest_value(
TICKS_DECODED,
value,
return_index=return_index,
round_down=round_down,
round_up=round_up
)
def tick_spread(value_0: float, value_1: float, check_values: bool) -> int:
"""
get tick spread between two odds values
- if `check_values` is True and both values don't correspond to tick
values, then 0 is returned
    - if `check_values` is False then the closest tick value is used for `value_0` and `value_1`
"""
if check_values:
# check that both values are valid odds
if value_0 in LTICKS_DECODED and value_1 in LTICKS_DECODED:
# get tick spread
return abs(LTICKS_DECODED.index(value_0) - LTICKS_DECODED.index(value_1))
else:
# both values are not valid odds
return 0
else:
        # don't check that the values are valid odds, just use the closest tick values
return abs(closest_tick(value_0, return_index=True) - closest_tick(value_1, return_index=True))
def traded_runner_vol(runner: RunnerBook, is_dict=True):
"""Get runner traded volume across all prices"""
return sum(e['size'] if is_dict else e.size for e in runner.ex.traded_volume)
def total_traded_vol(record: MarketBook):
"""Get traded volume across all runners at all prices"""
return sum(traded_runner_vol(runner) for runner in record.runners)
def get_record_tv_diff(
tv1: List[Dict],
tv0: List[Dict],
is_dict=True
) -> List[Dict]:
"""
Get difference between traded volumes from one tv ladder to another
use is_dict=False if `price` and `size` are object attributes, use is_dict=True if are dict keys
"""
traded_diffs = []
atr = GETTER[is_dict]
# loop items in second traded volume ladder
for y in tv1:
# get elements in first traded volume ladder if prices matches
m = [x for x in tv0 if atr(x, 'price') == atr(y, 'price')]
# first element that matches
n = next(iter(m), None)
        # get size difference, using 0 for the other value if the price doesn't exist in the first ladder
size_diff = atr(y, 'size') - (atr(n, 'size') if m else 0)
# only append if there is a difference
if size_diff:
traded_diffs.append({
'price': atr(y, 'price'),
'size': size_diff
})
return traded_diffs
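# Worked example (illustrative): with
#   tv0 = [{'price': 2.0, 'size': 10.0}]
#   tv1 = [{'price': 2.0, 'size': 15.0}, {'price': 2.02, 'size': 5.0}]
# get_record_tv_diff(tv1, tv0) returns
#   [{'price': 2.0, 'size': 5.0}, {'price': 2.02, 'size': 5.0}]
# i.e. only the prices whose traded size changed between the two snapshots.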
def event_time(dt: datetime, localise=True) -> str:
"""
Time of event in HH:MM, converted from betfair UTC to local
"""
if localise:
dt = myutils.datetime.localise(dt)
return dt.strftime("%H:%M")
def bf_dt(dt: datetime) -> str:
"""Datetime format to use with betfair API"""
    return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
```
#### File: mytrading/process/ticks.py
```python
import os
import numpy as np
import pandas as pd
def get_tick_increments() -> pd.DataFrame:
"""
Get list of tick increments in encoded integer format
Retrieves list of {'Start', 'Stop', 'Step'} objects from JSON file 'ticks.json'
"""
# generate file path based on current directory and filename "ticks.json"
    # when the library is imported, the current working directory is that of the active script, but "ticks.json"
    # is stored alongside this module, so build the file path from this module's directory instead
cur_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(cur_dir, "ticks.json")
# return file as pandas DataFrame
return pd.read_json(file_path)
def generate_ticks(tick_increments: pd.DataFrame) -> np.ndarray:
"""
Generate numpy list of ticks from list of {'Start', 'Stop', 'Step'} objects
Output list is complete: [1.00, 1.01, ..., 1000]
"""
return np.concatenate([
np.arange(row.Start, row.Stop, row.Step)
for index, row in tick_increments.iterrows()
])
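# Illustrative example (the real increments live in ticks.json, which is not shown here): a row of
# Start=1010, Stop=2000, Step=10 in the x1000 encoded scale expands via np.arange to the encoded
# ticks 1010, 1020, ..., 1990, i.e. decoded prices 1.01 to 1.99 in 0.01 steps.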
def float_encode(v):
"""Encode a floating point number to integer format with x1000 scale"""
return round(v*1000)
def int_decode(v):
"""decode an integer encoded x1000 scale number to floating point actual value"""
return v/1000
# numpy array of Betfair ticks in integer encoded form
TICKS: np.ndarray = generate_ticks(get_tick_increments())
# list of Betfair ticks in integer encoded form
LTICKS = TICKS.tolist()
# numpy array of Betfair ticks in actual floating values
TICKS_DECODED: np.ndarray = int_decode(TICKS)
# list of Betfair ticks in actual floating values
LTICKS_DECODED = TICKS_DECODED.tolist()
```
#### File: mytrading/strategy/messages.py
```python
from enum import Enum
from typing import Dict, Callable
import logging
from mytrading.exceptions import MessagerException
active_logger = logging.getLogger(__name__)
message_formatters: Dict[str, Callable] = {}
def format_message(msg_type: str, msg_attrs: Dict) -> str:
"""
convert a message type and attributes into a string message
where a formatter is not found, the message type and attributes dictionary will be returned
"""
if msg_type in message_formatters:
msg = message_formatters[msg_type](msg_attrs)
if type(msg) is not str:
raise MessagerException(f'message type "{msg_type}" formatting did not return string, instead: {msg}')
return msg
else:
msg = f'message type "{msg_type}"'
for k, v in msg_attrs.items():
msg = f'{msg}\n-> "{k}": {v}'
return msg
def register_formatter(key: Enum):
"""
    register a formatter(attrs: Dict) -> str function against an Enum member's name in the dictionary of
    formatters
"""
def decorator(func):
if key.name in message_formatters:
raise MessagerException(f'registering message type "{key.name}", but already exists!')
else:
message_formatters[key.name] = func
return func
return decorator
class MessageTypes(Enum):
"""Enumeration types for messages"""
MSG_TRACK_TRADE = 'tracking new trade'
MSG_TRACK_ORDER = 'tracking new order'
MSG_MATCHED_SIZE = 'order matched amount change'
MSG_STATUS_UPDATE = 'order status update'
MSG_TRADE_UPDATE = 'trade status update'
MSG_OPEN_PLACE = 'placing opening order'
MSG_OPEN_ERROR = 'error status open order'
MSG_MARKET_CLOSE = 'market closed'
MSG_HEDGE_NOT_MET = 'hedge minimum not met'
MSG_BOOKS_EMPTY = 'back/lay books are empty'
MSG_GREEN_INVALID = 'invalid green price'
MSG_GREEN_PLACE = 'placing greening order'
MSG_HEDGE_ERROR = 'error trying to hedge'
MSG_HEDGE_REPLACE = 'replacing hedge order'
MSG_HEDGE_UNKNOWN = 'unknown hedge order status'
MSG_TRADE_COMPLETE = 'trade complete'
MSG_STATE_CHANGE = 'state change'
MSG_ALLOW_REACHED = 'reached allowed trading point'
MSG_CUTOFF_REACHED = 'reached cutoff point for trading'
MSG_LAY_EMPTY = 'lay empty'
MSG_BACK_EMPTY = 'back empty'
MSG_PRICE_INVALID = 'price invalid'
MSG_CANCEL_ID_FAIL = 'cannot cancel'
@register_formatter(MessageTypes.MSG_LAY_EMPTY)
def formatter(attrs: Dict) -> str:
return f'could not place trade, lay ladder empty'
@register_formatter(MessageTypes.MSG_BACK_EMPTY)
def formatter(attrs: Dict) -> str:
return f'could not place trade, back ladder empty'
@register_formatter(MessageTypes.MSG_TRACK_TRADE)
def formatter(attrs: Dict) -> str:
return f'started tracking trade ID "{attrs.get("trade_id")}"'
@register_formatter(MessageTypes.MSG_TRACK_ORDER)
def formatter(attrs: Dict) -> str:
return f'started tracking order ID "{attrs.get("order_id")}"'
@register_formatter(MessageTypes.MSG_MATCHED_SIZE)
def formatter(attrs: Dict) -> str:
side = attrs.get("side")
price = attrs.get("price")
size = attrs.get("size", -1)
matched = attrs.get("size_matched", -1)
return f'order side {side} at {price} for £{size:.2f} now matched £{matched:.2f}'
@register_formatter(MessageTypes.MSG_STATUS_UPDATE)
def formatter(attrs: Dict) -> str:
order_id = attrs.get("order_id")
side = attrs.get("side")
price = attrs.get("price")
size = attrs.get("size", -1)
status = attrs.get("status")
    msg_str = f'order ID "{order_id}" side {side} at {price} for £{size:.2f}, now status {status}'
    msg = attrs.get("msg")
    if msg:
        msg_str += f', message: "{msg}"'
    return msg_str
@register_formatter(MessageTypes.MSG_TRADE_UPDATE)
def formatter(attrs: Dict) -> str:
return f'trade ID "{attrs.get("trade_id")}" now status: "{attrs.get("status")}"'
@register_formatter(MessageTypes.MSG_OPEN_PLACE)
def formatter(attrs: Dict) -> str:
side = attrs.get("side")
price = attrs.get("price")
size = attrs.get("size", -1)
return f'placing open order at {price} for £{size:.2f} on {side} side'
@register_formatter(MessageTypes.MSG_MARKET_CLOSE)
def formatter(attrs: Dict) -> str:
return f'market closed, order "{attrs.get("order_id")}" runner status "{attrs.get("runner_status")}"'
@register_formatter(MessageTypes.MSG_HEDGE_NOT_MET)
def formatter(attrs: Dict) -> str:
outstanding_profit = attrs.get("outstanding_profit", -1)
min_hedge = attrs.get("min_hedge", -1)
    return f"win/loss diff £{outstanding_profit:.2f} doesn't exceed required hedge amount £{min_hedge:.2f}"
@register_formatter(MessageTypes.MSG_BOOKS_EMPTY)
def formatter(attrs: Dict) -> str:
return 'one side of book is completely empty...'
@register_formatter(MessageTypes.MSG_GREEN_INVALID)
def formatter(attrs: Dict) -> str:
return f'invalid green price {attrs.get("green_price")}'
@register_formatter(MessageTypes.MSG_GREEN_PLACE)
def formatter(attrs: Dict) -> str:
close_side = attrs.get('close_side')
green_price = attrs.get('green_price')
green_size = attrs.get('green_size', -1)
order_id = attrs.get('order_id')
return f'greening active order ID {order_id} side {close_side} on {green_price} for £{green_size:.2f}'
@register_formatter(MessageTypes.MSG_HEDGE_ERROR)
def formatter(attrs: Dict) -> str:
return f'error trying to hedge: "{attrs.get("order_status")}"'
@register_formatter(MessageTypes.MSG_HEDGE_REPLACE)
def formatter(attrs: Dict) -> str:
return f'cancelling hedge at price {attrs.get("old_price")} for new price {attrs.get("new_price")}'
@register_formatter(MessageTypes.MSG_HEDGE_UNKNOWN)
def formatter(attrs: Dict) -> str:
return f'unexpected hedge order state reached {attrs.get("order_status")}'
@register_formatter(MessageTypes.MSG_TRADE_COMPLETE)
def formatter(attrs: Dict) -> str:
win_profit = attrs.get("win_profit", -1)
loss_profit = attrs.get("loss_profit", -1)
return f'trade complete, case win: £{win_profit:.2f}, case loss: £{loss_profit:.2f}'
@register_formatter(MessageTypes.MSG_OPEN_ERROR)
def formatter(attrs: Dict) -> str:
return f'open order status is erroneous: "{attrs.get("order_status")}"'
@register_formatter(MessageTypes.MSG_STATE_CHANGE)
def formatter(attrs: Dict) -> str:
return f'state machine changed from state "{attrs.get("old_state")}" to "{attrs.get("new_state")}"'
@register_formatter(MessageTypes.MSG_CUTOFF_REACHED)
def formatter(attrs: Dict) -> str:
cutoff_seconds = attrs.get('cutoff_seconds')
start_time = attrs.get('start_time')
    return f'cutoff point reached {cutoff_seconds}s before start time: {start_time}'
@register_formatter(MessageTypes.MSG_ALLOW_REACHED)
def formatter(attrs: Dict) -> str:
pre_seconds = attrs.get('pre_seconds')
start_time = attrs.get('start_time')
return f'allowed trading point reached {pre_seconds}s before start time: {start_time}'
@register_formatter(MessageTypes.MSG_PRICE_INVALID)
def formatter(attrs: Dict) -> str:
return f'price is not a valid tick: "{attrs.get("price")}"'
@register_formatter(MessageTypes.MSG_CANCEL_ID_FAIL)
def formatter(attrs: Dict) -> str:
return f'cannot cancel order "{attrs.get("order_id")}", bet_id is None'
```
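A brief usage sketch of the message registry above; `MyMessageTypes` below is a hypothetical enum added purely for illustration and is not part of the package:
```python
from enum import Enum
from mytrading.strategy.messages import MessageTypes, format_message, register_formatter

# built-in types are formatted via their registered formatter
print(format_message(MessageTypes.MSG_TRACK_TRADE.name, {'trade_id': 'abc-123'}))
# started tracking trade ID "abc-123"

# registering a custom message type with its own formatter
class MyMessageTypes(Enum):
    MSG_CUSTOM = 'custom message'

@register_formatter(MyMessageTypes.MSG_CUSTOM)
def fmt_custom(attrs: dict) -> str:
    return f'custom value is {attrs.get("value")}'

print(format_message(MyMessageTypes.MSG_CUSTOM.name, {'value': 42}))
# custom value is 42

# unknown types fall back to a generic dump of the attributes dictionary
print(format_message('MSG_UNKNOWN', {'a': 1}))
```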
#### File: mytrading/strategy/tradestates.py
```python
from enum import Enum
from typing import List, Union
from datetime import datetime, timedelta
from flumine.controls.clientcontrols import MaxTransactionCount
from flumine import BaseStrategy
from flumine.markets.market import Market
from flumine.order.order import BetfairOrder, OrderStatus
from flumine.order.ordertype import LimitOrder, OrderTypes
from flumine.order.trade import TradeStatus
from betfairlightweight.resources.bettingresources import RunnerBook
from ..process import MatchBetSums, get_order_profit, get_side_operator, get_side_ladder, side_invert, closest_tick, \
LTICKS_DECODED
from ..exceptions import TradeStateException
from mytrading.strategy.messages import MessageTypes
from .runnerhandler import RunnerHandler
from myutils import statemachine as stm
order_error_states = [
OrderStatus.EXPIRED,
OrderStatus.VIOLATION
]
order_pending_states = [
OrderStatus.PENDING,
OrderStatus.CANCELLING,
OrderStatus.UPDATING,
OrderStatus.REPLACING
]
class TradeStateTypes(Enum):
"""
Enumeration of trade state keys used for names in state instances
"""
BASE = 'unimplemented'
CREATE_TRADE = 'create trade'
IDLE = 'unplaced'
OPEN_PLACING = 'placing opening trade'
OPEN_MATCHING = 'waiting for opening trade to match'
BIN = 'bottling trade'
HEDGE_SELECT = 'selecting type of hedge'
HEDGE_TAKE_PLACE = 'place hedge trade at available price'
HEDGE_TAKE_MATCHING = 'waiting for hedge to match at available price'
HEDGE_QUEUE_PLACE = 'queue hedge trade'
HEDGE_QUEUE_MATCHING = 'wait for queue hedge to finish'
CLEANING = 'cleaning trade'
PENDING = 'pending state'
WAIT = 'wait for a set number of milliseconds'
class TradeStateBase(stm.State):
"""
base trading state for implementing sub-classes with run() defined
"""
# override default state name and next state without the need for sub-class
def __init__(self, name: Enum = None, next_state: Enum = None):
if name:
self.name = name
if next_state:
self.next_state = next_state
# set this value to false where entering into state should not be printed to info log in trademachine
print_change_message = True
# use enumerations for name of state for other states to refer to
name: TradeStateTypes = TradeStateTypes.BASE
# easily overridable state to progress to when state action is complete
next_state: TradeStateTypes = TradeStateTypes.BASE
def enter(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
"""
initialise a state
"""
pass
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
"""
called to operate state, options for return are:
- return None to remain in same state
- return TradeStates enum for new state
- return list of [TradeStates] for list of new states
- return True to continue in list of states queued
"""
raise NotImplementedError
def __str__(self):
return f'Trade state: {self.name}'
class TradeStateIntermediary(TradeStateBase):
"""
Intermediary state
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'next_state' in kwargs:
raise TradeStateException(f'next_state kwarg found in intermediary state')
class TradeStatePending(TradeStateIntermediary):
"""
intermediary state for waiting for trade to process
intermediary state: run() returns True when complete
Specifying `all_trade_orders=True` means all orders within active trade are checked, rather than just the active
order
"""
name = TradeStateTypes.PENDING
def __init__(self, all_trade_orders=False, delay_once=False, **kwargs):
super().__init__(**kwargs)
self.all_trade_orders = all_trade_orders
self.delay_once = delay_once
self.first_call = True
def enter(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
self.first_call = True
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
"""called to operate state - return None to remain in same state, or return string for new state"""
# hold for 1 state
if self.first_call:
self.first_call = False
if self.delay_once:
return False
# select either active order or all active trade orders
trk = runner_handler.trade_tracker
if not self.all_trade_orders:
orders = [trk.active_order]
else:
orders = trk.active_trade.orders if trk.active_trade else []
# loop orders
for order in orders:
# ignore, go to next order if doesn't exist
if order is None:
continue
# if order in pending states then not done yet, don't exit state
if order.status in order_pending_states:
return False
# exit state if all order(s) not pending
return True
class TradeStateBin(TradeStateIntermediary):
"""
intermediary state waits for active order to finish pending (if exist) and cancel it
intermediary state: run() returns True when complete
"""
name = TradeStateTypes.BIN
def __init__(self, all_trade_orders=False, **kwargs):
super().__init__(**kwargs)
self.all_trade_orders = all_trade_orders
    def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
trk = runner_handler.trade_tracker
# select either active order or all active trade orders
if not self.all_trade_orders:
orders = [trk.active_order]
else:
orders = [o for o in trk.active_trade.orders] if trk.active_trade else []
done = True
# loop orders
for order in orders:
# ignore, go to next order if doesn't exist
if order is None:
continue
# if order in pending states then not done yet, don't exit state
if order.status in order_pending_states:
done = False
elif order.status == OrderStatus.EXECUTABLE:
# check if order has been called to be cancelled but has gone back to EXECUTABLE before finishing
if len(order.status_log) >= 2 and order.status_log[-2] == OrderStatus.CANCELLING:
pass
else:
# cancel order (flumine checks order is EXECUTABLE before cancelling or throws error)
# also check bet ID is valid before cancelling
if order.bet_id is None:
trk.log_update(
msg_type=MessageTypes.MSG_CANCEL_ID_FAIL,
dt=market.market_book.publish_time,
msg_attrs={
'order_id': order.id,
}
)
else:
market.cancel_order(order)
done = False
return done
class TradeStateWait(TradeStateIntermediary):
"""
    Intermediary state that waits for a designated number of milliseconds before continuing
"""
name = TradeStateTypes.WAIT
def __init__(self, wait_ms, **kwargs):
super().__init__(**kwargs)
self.wait_ms = wait_ms
self.td = timedelta(milliseconds=wait_ms)
self.start_time = datetime.now()
def enter(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
self.start_time = market.market_book.publish_time
    def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
return (market.market_book.publish_time - self.start_time) >= self.td
# core states
class TradeStateCreateTrade(TradeStateBase):
"""
Create trade instance and move to next state
"""
name = TradeStateTypes.CREATE_TRADE
next_state = TradeStateTypes.IDLE
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
runner_handler.trade_tracker.create_trade(
handicap=market.market_book.runners[runner_index].handicap
)
return self.next_state
class TradeStateIdle(TradeStateBase):
"""
idle state, waiting to open trade as specified by implementing in sub-classes trade_criteria() function
Once trade_criteria() returns True, will move to next state
"""
name = TradeStateTypes.IDLE
next_state = TradeStateTypes.OPEN_PLACING
# on entering IDLE state dont print update message
print_change_message = False
def __init__(self, trade_transactions_cutoff, *args, **kwargs):
super().__init__(*args, **kwargs)
self.trade_transactions_cutoff = trade_transactions_cutoff
# return true to move to next state opening trade, false to remain idle
def trade_criteria(self, market: Market, runner_index: int, runner_handler: RunnerHandler) -> bool:
raise NotImplementedError
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
max_order_count: MaxTransactionCount = market.flumine.client.trading_controls[0]
if self.trade_transactions_cutoff and max_order_count.transaction_count >= self.trade_transactions_cutoff:
return None
if self.trade_criteria(market, runner_index, runner_handler):
return self.next_state
class TradeStateOpenPlace(TradeStateBase):
"""
place an opening trade
"""
name = TradeStateTypes.OPEN_PLACING
next_state = TradeStateTypes.OPEN_MATCHING
def place_order(self, market: Market, runner_index: int, runner_handler: RunnerHandler) -> Union[None, BetfairOrder]:
raise NotImplementedError
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
limit_order = self.place_order(market, runner_index, runner_handler)
if not limit_order:
return [TradeStateTypes.PENDING, self.next_state]
else:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_OPEN_PLACE,
msg_attrs={
'side': limit_order.side,
'price': limit_order.order_type.price,
'size': limit_order.order_type.size,
},
dt=market.market_book.publish_time,
display_odds=limit_order.order_type.price,
order=limit_order
)
runner_handler.trade_tracker.active_order = limit_order
runner_handler.trade_tracker.open_side = limit_order.side
return [TradeStateTypes.PENDING, self.next_state]
class TradeStateOpenMatching(TradeStateBase):
"""
wait for open trade to match
"""
name = TradeStateTypes.OPEN_MATCHING
next_state = TradeStateTypes.HEDGE_SELECT
def __init__(self, move_on_complete=True, *args, **kwargs):
"""
Parameters
----------
move_on_complete : specifies whether to move to next state once active order status becomes EXECUTION_COMPLETE
"""
self.move_on_complete = move_on_complete
super().__init__(*args, **kwargs)
def open_order_processing(
self, market: Market, runner_index: int, runner_handler: RunnerHandler
) -> Union[None, List[Enum]]:
"""return new state(s) if different action required, otherwise None"""
raise NotImplementedError
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
new_states = self.open_order_processing(market, runner_index, runner_handler)
if new_states:
return new_states
else:
sts = runner_handler.trade_tracker.active_order.status
if sts == OrderStatus.EXECUTION_COMPLETE and self.move_on_complete:
return self.next_state
elif sts in order_error_states:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_OPEN_ERROR,
msg_attrs={
'order_status': str(sts),
},
dt=market.market_book.publish_time
)
return self.next_state
class TradeStateHedgeSelect(TradeStateBase):
"""
proceed to hedge placement state, defined by `next_state`
"""
name = TradeStateTypes.HEDGE_SELECT
next_state = TradeStateTypes.HEDGE_TAKE_PLACE
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
return self.next_state
class TradeStateHedgePlaceBase(TradeStateBase):
"""
base class of placing a hedging order, but not defining how to get the price to place hedge at
- checks if outstanding profit between win/loss meets minimum requirement to place hedge trade, if fail go to clean
    - check that back/lay ladder is available and that unimplemented method get_hedge_price() doesn't return 0 before
      placing the trade
"""
def __init__(self, min_hedge_price, *args, **kwargs):
super().__init__(*args, **kwargs)
self.min_hedge_price = min_hedge_price
def get_hedge_price(
self, market: Market, runner_index: int, runner_handler: RunnerHandler, outstanding_profit: float
) -> float:
raise NotImplementedError
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
        # get outstanding profit on trade (difference between profit in win/loss case)
match_bet_sums = MatchBetSums.get_match_bet_sums(runner_handler.trade_tracker.active_trade)
outstanding_profit = match_bet_sums.outstanding_profit()
# abort if below minimum required to hedge
if abs(outstanding_profit) <= self.min_hedge_price:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_HEDGE_NOT_MET,
msg_attrs={
'outstanding_profit': outstanding_profit,
'min_hedge': self.min_hedge_price
},
dt=market.market_book.publish_time
)
return TradeStateTypes.CLEANING
# check that ladders not empty
runner = market.market_book.runners[runner_index]
if not runner.ex.available_to_lay or not runner.ex.available_to_back:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_BOOKS_EMPTY,
dt=market.market_book.publish_time
)
# wait for ladder to populate
return None
if outstanding_profit > 0:
# value is positive: trade is "underlayed", i.e. needs more lay money
close_side = 'LAY'
close_ladder = runner.ex.available_to_lay
else:
            # value is negative: trade is "overlayed", i.e. needs more back money
close_side = 'BACK'
close_ladder = runner.ex.available_to_back
# get green price for hedging, round to 2dp
green_price = self.get_hedge_price(market, runner_index, runner_handler, outstanding_profit)
green_price = round(green_price, ndigits=2)
# if function returns 0 or invalid then error
        if green_price <= 0 or green_price not in LTICKS_DECODED:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_GREEN_INVALID,
msg_attrs={
'green_price': green_price
},
dt=market.market_book.publish_time
)
# use best available
green_price = close_ladder[0]['price']
# compute size from outstanding profit and price, round to 2dp
green_size = abs(outstanding_profit) / green_price
green_size = round(green_size, 2)
# TODO (temporary fix so that orders are processed)
runner_handler.trade_tracker.active_trade._update_status(TradeStatus.PENDING)
# TODO - handle order errors
# place order
green_order = runner_handler.trade_tracker.active_trade.create_order(
side=close_side,
order_type=LimitOrder(
price=green_price,
size=green_size
)
)
market.place_order(green_order)
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_GREEN_PLACE,
msg_attrs={
'close_side': close_side,
'green_price': green_price,
'green_size': green_size,
'order_id': str(green_order.id),
},
dt=market.market_book.publish_time,
display_odds=green_price,
)
runner_handler.trade_tracker.active_order = green_order
return [TradeStateTypes.PENDING, self.next_state]
class TradeStateHedgeWaitBase(TradeStateBase):
"""
base class for waiting for hedge trade to match
price_moved() provides unimplemented method to detect whether price has moved and need to move hedging price
"""
def __init__(self, hedge_place_state, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hedge_place_state = hedge_place_state
def price_moved(self, market: Market, runner_index: int, runner_handler: RunnerHandler) -> float:
"""
determines whether a new hedge price is available and should be taken
if new hedge price available, return its price otherwise 0
"""
raise NotImplementedError
def compare_price(self, new_price, current_price, order_side, runner: RunnerBook) -> bool:
"""
Compare suggested new price for hedging order to current price of hedging order. Return True if current hedge
order should be replaced with new price or False to leave as is
"""
return new_price != current_price
def run(self, market: Market, runner_index: int, runner_handler):
order = runner_handler.trade_tracker.active_order
# check if there has been an error with the order
if order.status in order_error_states:
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_HEDGE_ERROR,
msg_attrs={
'order_status': order.violation_msg,
},
dt=market.market_book.publish_time
)
# try to hedge again
return TradeStateTypes.HEDGE_SELECT
elif order.status == OrderStatus.EXECUTION_COMPLETE:
# hedge done, move on
return self.next_state
elif order.status == OrderStatus.CANCELLING:
# hedge cancelling, try again
return TradeStateTypes.HEDGE_SELECT
elif order.status == OrderStatus.EXECUTABLE:
# hedge matching, get new price
new_price = self.price_moved(market, runner_index, runner_handler)
# non-zero value indicates price has moved
runner = market.market_book.runners[runner_index]
if new_price and self.compare_price(new_price, order.order_type.price, order.side, runner):
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_HEDGE_REPLACE,
msg_attrs={
'old_price': order.order_type.price,
'new_price': new_price
},
dt=market.market_book.publish_time,
display_odds=new_price,
)
# bin active hedge and hedge again with new price
return [
TradeStateTypes.BIN,
self.hedge_place_state
]
# replacing doesn't seem to work in back-test mode
# order.replace(available[0]['price'])
else:
# theoretically should never reach here - pending states covered, error states, EXECUTABLE and
# EXECUTION_COMPLETE
runner_handler.trade_tracker.log_update(
msg_type=MessageTypes.MSG_HEDGE_UNKNOWN,
msg_attrs={
'order_status': order.status.value
},
dt=market.market_book.publish_time,
)
return [
TradeStateTypes.BIN,
TradeStateTypes.HEDGE_SELECT
]
class TradeStateHedgePlaceTake(TradeStateHedgePlaceBase):
"""
place an order to hedge active trade orders at the available price
"""
name = TradeStateTypes.HEDGE_TAKE_PLACE
next_state = TradeStateTypes.HEDGE_TAKE_MATCHING
def get_hedge_price(
self, market: Market, runner_index: int, runner_handler: RunnerHandler, outstanding_profit: float
) -> float:
ex = market.market_book.runners[runner_index].ex
if outstanding_profit > 0:
return ex.available_to_lay[0]['price']
else:
return ex.available_to_back[0]['price']
class TradeStateHedgeWaitTake(TradeStateHedgeWaitBase):
"""
sees if available price on ladder has moved since placement
"""
name = TradeStateTypes.HEDGE_TAKE_MATCHING
next_state = TradeStateTypes.CLEANING
def price_moved(self, market: Market, runner_index: int, runner_handler: RunnerHandler) -> float:
# check active order exists
order = runner_handler.trade_tracker.active_order
if not order:
return 0
# get ladder on close side for hedging
available = get_side_ladder(
market.market_book.runners[runner_index].ex,
order.side
)
# get operator for comparing available price and current hedge price
op = get_side_operator(
order.side,
invert=True
)
# get available price for hedging if not empty
new_price = available[0]['price'] if available else 0
# if current price is not the same as order price then move
if new_price and op(new_price, order.order_type.price):
return new_price
else:
return 0
class TradeStateHedgePlaceQueue(TradeStateHedgePlaceBase):
"""
Queue a hedge order at best price available on opposite side of the book
e.g. if hedging on back side, best back is 4.1 and best lay is 4.5 then queue back order at 4.5
    Can specify tick offset for queue, e.g. with a 1 tick offset in the example above the back order would be queued at 4.4
"""
name = TradeStateTypes.HEDGE_QUEUE_PLACE
next_state = TradeStateTypes.HEDGE_QUEUE_MATCHING
def __init__(self, tick_offset=0, **kwargs):
super().__init__(**kwargs)
self.tick_offset = tick_offset
def get_hedge_price(
self, market: Market, runner_index: int, runner_handler: RunnerHandler, outstanding_profit: float
) -> float:
runner = market.market_book.runners[runner_index]
if outstanding_profit > 0:
# value is positive: trade is "underlayed", i.e. needs more lay money
close_side = 'LAY'
open_ladder = runner.ex.available_to_back
else:
            # value is negative: trade is "overlayed", i.e. needs more back money
close_side = 'BACK'
open_ladder = runner.ex.available_to_lay
price = open_ladder[0]['price']
if not self.tick_offset:
return price
index = closest_tick(price, return_index=True)
if close_side == 'BACK':
index = max(index - self.tick_offset, 0)
else:
index = min(index + self.tick_offset, len(LTICKS_DECODED) - 1)
return LTICKS_DECODED[index]
class TradeStateHedgeWaitQueue(TradeStateHedgeWaitBase):
"""
Wait for queued hedge to match, if price moves for given period of time then chase
"""
name = TradeStateTypes.HEDGE_QUEUE_MATCHING
next_state = TradeStateTypes.CLEANING
def __init__(self, hold_time_ms: int, tick_offset=0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hold_time_ms = hold_time_ms
self.tick_offset = tick_offset
self.reset_time: datetime = datetime.now()
self.moving = False
self.original_price = 0
def enter(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
self.reset_time = market.market_book.publish_time
self.moving = False
self.original_price = 0
def compare_price(self, new_price, current_price, order_side, runner: RunnerBook) -> bool:
"""
when placing queue hedges, in live mode any orders that are offset from queue price on opposite side of the
book will become best available price so could cause recursion
e.g. best back is 2.3 and best lay is 2.6, if back order is queued 2 ticks away at 2.56 then the market will
update that best lay is now 2.56 and this state would immediately replace the order with 2.54, so on and so
forth..
        Thus, when queueing updates and the back order shortens or the lay order drifts, only update the price if the
        available price is better than what we are offering.
e.g. best back is 2.3 and best lay is 2.6, back order queued 2 ticks away at 2.56 - if best back then updated to
2.54, then order would be updated to 2 ticks away at 2.50
"""
if order_side == 'BACK':
atl = runner.ex.available_to_lay
if atl:
best_lay = atl[0]['price']
if best_lay < current_price:
return True
elif order_side == 'LAY':
atb = runner.ex.available_to_back
if atb:
best_back = atb[0]['price']
if best_back > current_price:
return True
return False
def price_moved(self, market: Market, runner_index: int, runner_handler: RunnerHandler) -> float:
market_book = market.market_book
trade_tracker = runner_handler.trade_tracker
# check active order exists
order = trade_tracker.active_order
if not order:
return 0
# get ladder on open side for hedging
available = get_side_ladder(
market_book.runners[runner_index].ex,
side_invert(order.side)
)
# check not empty
if not available:
return 0
# get available price
new_price = available[0]['price']
price_index = closest_tick(new_price, return_index=True)
if order.side == 'BACK':
price_index = max(price_index - self.tick_offset, 0)
new_price = LTICKS_DECODED[price_index]
proceed = new_price < order.order_type.price
else:
price_index = min(price_index + self.tick_offset, len(LTICKS_DECODED) - 1)
new_price = LTICKS_DECODED[price_index]
proceed = new_price > order.order_type.price
if not self.moving:
if proceed:
self.moving = True
self.reset_time = market_book.publish_time
self.original_price = order.order_type.price
else:
if proceed:
if (market_book.publish_time - self.reset_time) > timedelta(milliseconds=self.hold_time_ms):
return new_price
else:
self.moving = False
# price not moved
return 0
class TradeStateClean(TradeStateBase):
name = TradeStateTypes.CLEANING
def enter(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
market_book = market.market_book
trade_tracker = runner_handler.trade_tracker
if trade_tracker.active_trade:
# filter to limit orders
orders = [o for o in trade_tracker.active_trade.orders if o.order_type.ORDER_TYPE == OrderTypes.LIMIT]
win_profit = sum(
get_order_profit(
'WINNER',
o.side,
o.average_price_matched,
o.size_matched)
for o in orders)
loss_profit = sum(
get_order_profit(
'LOSER',
o.side,
o.average_price_matched,
o.size_matched)
for o in orders)
trade_tracker.log_update(
msg_type=MessageTypes.MSG_TRADE_COMPLETE,
msg_attrs={
'win_profit': win_profit,
'loss_profit': loss_profit
},
dt=market_book.publish_time
)
def run(self, market: Market, runner_index: int, runner_handler: RunnerHandler):
pass
```
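The states above are intended to be composed by subclassing; as a hedged sketch (the class name and price-threshold criterion here are illustrative only, not part of the package), a concrete idle state might implement `trade_criteria()` like this:
```python
from flumine.markets.market import Market
from mytrading.strategy.tradestates import TradeStateIdle

class BackFavouriteIdle(TradeStateIdle):
    """example criterion: open a trade once the best available back price is at or below a threshold"""
    def __init__(self, trade_transactions_cutoff, max_price: float, *args, **kwargs):
        super().__init__(trade_transactions_cutoff, *args, **kwargs)
        self.max_price = max_price

    def trade_criteria(self, market: Market, runner_index: int, runner_handler) -> bool:
        runner = market.market_book.runners[runner_index]
        atb = runner.ex.available_to_back
        # only open once a back price exists and is short enough
        return bool(atb) and atb[0]['price'] <= self.max_price
```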
#### File: mytrading/strategy/tradetracker.py
```python
from __future__ import annotations
from flumine.order.order import BetfairOrder, OrderStatus
from flumine.order.trade import Trade, TradeStatus
from flumine.order.ordertype import LimitOrder
import logging
from enum import Enum
import pandas as pd
from datetime import datetime
from uuid import UUID
from typing import List, Dict, Optional
from os import path
import json
from dataclasses import dataclass, field
from mytrading.strategy.messages import MessageTypes, format_message
from ..exceptions import TradeTrackerException
from ..process import get_order_profit
active_logger = logging.getLogger(__name__)
active_logger.setLevel(logging.INFO)
@dataclass
class OrderTracker:
"""track the status and matched money of an order"""
matched: float
status: OrderStatus
@dataclass
class TradeFollower:
"""track the status of a trade"""
status: TradeStatus = field(default=None)
order_trackers: Dict[str, OrderTracker] = field(default_factory=dict)
class TradeTracker:
"""
Track trades for a runner, logging order updates
List of trades kept for all trades on a chosen runner (by `selection_id`) in one market. Active trade denoted by
`active_trade` (assumes that only 1 trade is happening on a runner at any given time)
Similarly, active order on active trade denoted by `active_order`
`open_side` indicates the side of the open order for the current trade
if `file_path` is specified, it is used as the path to log updates to as well as logging to stream
"""
def __init__(self, selection_id: int, strategy, market_id, file_path: Optional[str] = None):
active_logger.info(f'creating trade tracker with selection ID "{selection_id}" and file path "{file_path}"')
self.selection_id = selection_id
self.file_path = file_path
self._trades: List[Trade] = list()
self.active_trade: Optional[Trade] = None
self.active_order: Optional[BetfairOrder] = None
self.open_side: Optional[str] = None
self._prv_display_odds = 0
self._strategy = strategy
self.market_id = market_id
# indexed by trade ID
self._trade_followers: Dict[UUID, TradeFollower] = dict()
self._followed_orders = list()
def create_trade(self, handicap):
trade = Trade(
market_id=self.market_id,
selection_id=self.selection_id,
handicap=handicap,
strategy=self._strategy
)
self._trades.append(trade)
self._trade_followers[trade.id] = TradeFollower()
self.active_trade = trade
@staticmethod
def serializable_order_info(order: BetfairOrder) -> dict:
"""convert betfair order info to JSON serializable format"""
# copy order info so modifications don't change original object
info = order.info.copy()
# convert trade ID to string
info['trade']['id'] = str(info['trade']['id'])
# dont store strategy info
# convert strategy object in trade to dict of info
# info['trade']['strategy'] = info['trade']['strategy'].info
del info['trade']['strategy']
# convert strategy status to string
info['trade']['status'] = str(info['trade']['status'])
# add runner status to order
info['runner_status'] = str(order.runner_status)
# add datetime created
info['date_time_created'] = order.date_time_created.timestamp()
info['average_price_matched'] = order.average_price_matched
return info
@staticmethod
def get_runner_profits(updates_path: str) -> Dict:
df = TradeTracker.get_order_updates(updates_path)
active_logger.info(f'found {df.shape[0]} order updates in file "{updates_path}"')
if df.shape[0]:
df = df[df['msg_type'] == MessageTypes.MSG_MARKET_CLOSE.name]
df['profit'] = [TradeTracker.dict_order_profit(o) for o in df['order_info']]
return df.groupby(df['selection_id'])['profit'].sum().to_dict()
else:
return dict()
@staticmethod
def get_orders_from_buffer(buffer: str) -> pd.DataFrame:
lines = buffer.splitlines()
try:
order_data = [json.loads(line) for line in lines]
except (ValueError, TypeError) as e:
raise TradeTrackerException(f'Cannot json parse order updates: {e}')
order_df = pd.DataFrame(order_data)
if order_df.shape[0]:
order_df.index = order_df['dt'].apply(datetime.fromtimestamp)
return order_df
@staticmethod
def get_order_updates(file_path: str) -> pd.DataFrame:
"""get `TradeTracker` data written to file in dataframe format, with index set as `pt` converted to datetimes if
fail, return None"""
if not path.isfile(file_path):
raise TradeTrackerException(f'Cannot get order updates, path is not valid file: "{file_path}')
with open(file_path) as f:
data = f.read()
return TradeTracker.get_orders_from_buffer(data)
@staticmethod
def dict_order_profit(order_info: dict) -> float:
"""
Compute order profit from dictionary of values retrieved from a line of a file written to by TradeTracker.log_update
Function is shamelessly stolen from `flumine.backtest.simulated.Simulated.profit`, but that requires an order
        instance which is not possible to create without trade/strategy information etc
"""
try:
sts = order_info['runner_status']
side = order_info['info']['side']
price = order_info['info']['average_price_matched']
size = order_info['info']['size_matched']
return get_order_profit(sts, side, price, size)
except KeyError as e:
raise TradeTrackerException(f'failed to get profit elements: {e}')
def update_order_tracker(self, publish_time: datetime):
"""
loop orders in each trade instance, and log update message where order amount matched or status has changed
since last call of function
"""
tfs = self._trade_followers
# loop trades
for trade in self._trades:
# log trade status updates
tf = tfs[trade.id]
if tf.status != trade.status:
self.log_update(
msg_type=MessageTypes.MSG_TRADE_UPDATE,
dt=publish_time,
msg_attrs={
'trade_id': str(trade.id),
'status': trade.status.value
}
)
tf.status = trade.status
# loop limit orders in trade
for order in [o for o in trade.orders if type(o.order_type) == LimitOrder]:
# if order untracked, create order tracker and track
if order.id not in self._followed_orders:
self.log_update(
msg_type=MessageTypes.MSG_TRACK_ORDER,
dt=publish_time,
msg_attrs={
"order_id": order.id
},
order=order
)
tf.order_trackers[order.id] = OrderTracker(
matched=order.size_matched,
status=order.status
)
self._followed_orders.append(order.id)
continue
# check if size matched change
if order.size_matched != tf.order_trackers[order.id].matched:
self.log_update(
msg_type=MessageTypes.MSG_MATCHED_SIZE,
dt=publish_time,
msg_attrs={
"order_id": order.id,
"side": order.side,
"price": order.order_type.price,
"size": order.order_type.size,
"size_matched": order.size_matched
},
order=order,
display_odds=order.order_type.price,
)
# check for status change
if order.status != tf.order_trackers[order.id].status:
msg = ''
if order.status == OrderStatus.VIOLATION:
msg = order.violation_msg or ''
self.log_update(
msg_type=MessageTypes.MSG_STATUS_UPDATE,
dt=publish_time,
msg_attrs={
"order_id": order.id,
"side": order.side,
"price": order.order_type.price,
"size": order.order_type.size,
"status": order.status.value,
'msg': msg,
},
order=order,
display_odds=order.order_type.price,
)
# update cached order status and size matched values
tf.order_trackers[order.id].status = order.status
tf.order_trackers[order.id].matched = order.size_matched
def log_close(self, publish_time: datetime):
for trade in self._trades:
for order in trade.orders:
self.log_update(
msg_type=MessageTypes.MSG_MARKET_CLOSE,
msg_attrs={
'runner_status': order.runner_status,
'order_id': str(order.id)
},
dt=publish_time,
order=order,
)
def log_update(
self,
msg_type: Enum,
dt: datetime,
msg_attrs: dict = None,
level=logging.INFO,
to_file=True,
display_odds: float = 0.0,
order: BetfairOrder = None
):
"""
Log an update
- msg: verbose string describing the update
- dt: timestamp in race of update
- level: log level for stream logging
- to_file: set to False if update is not to be logged to file as well as stream
- display_odds: purely visual odds to be used when visualising order updates (actual order odds will be found in
`order` argument)
- order: instance of BetfairOrder which will be logged to file
"""
# print update to stream
active_logger.log(level, f'{dt} {self.selection_id} {format_message(msg_type.name, msg_attrs)}')
# use previous log odds if not given and update
if not display_odds and self._prv_display_odds:
display_odds = self._prv_display_odds
self._prv_display_odds = display_odds
# if order instance not given then assume current order/trade
if not order:
order = self.active_order
trade = self.active_trade
else:
trade = order.trade
# get trade ID if trade exists else None
trade_id = trade.id if trade else None
# write to file if path specified
if self.file_path and to_file:
# get order serialized info (if exist)
if order:
order_info = self.serializable_order_info(order)
else:
order_info = None
# convert message attrs to empty dict if not set
msg_attrs = msg_attrs or {}
data = {
'selection_id': self.selection_id,
'dt': dt.timestamp(),
'msg_type': msg_type.name,
'msg_attrs': msg_attrs,
'display_odds': display_odds,
'order_info': order_info,
'trade_id': str(trade_id)
}
with open(self.file_path, mode='a') as f:
try:
json_data = json.dumps(data)
except TypeError as e:
raise TradeTrackerException(f'failed to serialise data writing to file: "{self.file_path}"\n{e}')
f.writelines([json_data + '\n'])
```
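As a hedged sketch of reading back the order updates written by `log_update`, the sample line below is hand-built to mirror the keys written above rather than real output:
```python
import json
from mytrading.strategy.tradetracker import TradeTracker

sample = {
    'selection_id': 123, 'dt': 1609459200.0, 'msg_type': 'MSG_TRACK_TRADE',
    'msg_attrs': {'trade_id': 'abc'}, 'display_odds': 2.0, 'order_info': None, 'trade_id': 'abc',
}
buffer = json.dumps(sample) + '\n'

# parse newline-delimited JSON updates into a dataframe indexed by update timestamp
df = TradeTracker.get_orders_from_buffer(buffer)
print(df[['selection_id', 'msg_type']])
```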
#### File: mytrading/utils/bettingdb.py
```python
from __future__ import annotations
import shutil
from betfairlightweight.resources.streamingresources import MarketDefinition
from betfairlightweight.resources.bettingresources import MarketCatalogue, MarketBook
from betfairlightweight.streaming.listener import StreamListener
import sqlalchemy
from sqlalchemy.sql.expression import ColumnElement
from sqlalchemy.sql.selectable import CTE
from sqlalchemy import create_engine, func, DECIMAL
from sqlalchemy.orm import Session
from sqlalchemy.sql.schema import Table
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.dialects.postgresql import base as psqlbase
from sqlalchemy.dialects.postgresql import json as psqljson
from sqlalchemy.sql.functions import sum as sql_sum
from sqlalchemy_filters.filters import Operator as SqlOperator
from sqlalchemy.orm.query import Query
from queue import Queue
import logging
from typing import Optional, Dict, List, Callable, Any, Tuple, Union, Literal, TypedDict
from os import path
import os
from datetime import datetime, timedelta
import zlib
import yaml
import json
import sys
import dateparser
from myutils import dictionaries, registrar
from ..exceptions import DBException
from .dbfilter import DBFilterHandler
active_logger = logging.getLogger(__name__)
active_logger.setLevel(logging.INFO)
ProcessorKey = Literal['process_in', 'process_out', 'processors']
ProcessorMap = Dict[type, Dict[ProcessorKey, List[str]]]
Processor = Callable[[Any], Any]
db_processors = registrar.Registrar[Processor]()
DB_PROCESSORS: ProcessorMap = {
psqlbase.BYTEA: {
'process_in': [
'prc_compress'
],
'process_out': [
'prc_decompress',
]
},
}
CACHE_PROCESSORS: ProcessorMap = {
psqlbase.BYTEA: {
'process_in': [
'prc_str_encode',
],
'process_out': [
'prc_str_decode'
]
},
psqlbase.TIMESTAMP: {
'process_in': [
'prc_dt_from_str',
],
'process_out': [
'prc_dt_to_str'
]
},
psqlbase.INTERVAL: {
'process_in': [
'prc_td_from_float',
],
'process_out': [
'prc_td_to_float'
]
},
psqljson.JSON: {
'process_in': [
'prc_json_decode',
],
'process_out': [
'prc_json_encode'
]
}
}
@db_processors.register_element
def prc_str_to_dt(data):
return dateparser.parse(data, settings={'DATE_ORDER': 'DMY'}) # use UK day-month-year instead of US month-day-year
@db_processors.register_element
def prc_compress(data):
return zlib.compress(data)
@db_processors.register_element
def prc_decompress(data):
return zlib.decompress(data)
@db_processors.register_element
def prc_str_encode(data):
return data.encode()
@db_processors.register_element
def prc_str_decode(data):
return data.decode()
@db_processors.register_element
def prc_td_to_float(data: timedelta):
return data.total_seconds()
@db_processors.register_element
def prc_td_from_float(data):
return timedelta(seconds=data)
@db_processors.register_element
def prc_dt_from_str(data):
return datetime.fromisoformat(data)
@db_processors.register_element
def prc_dt_to_str(data):
return data.isoformat()
@db_processors.register_element
def prc_json_encode(data):
return json.dumps(data)
@db_processors.register_element
def prc_json_decode(data):
return json.loads(data)
class DBBase:
def __init__(
self,
db_lang=None,
db_user=None,
db_host=None,
db_port=None,
db_name=None,
        db_pwd=None,
db_engine=None,
col_processors=None,
engine_kwargs=None
):
self.col_prcs = col_processors or DB_PROCESSORS
self.Base = automap_base()
engine_kwargs = engine_kwargs or {} # TODO - remove?
engine_str = f'+{db_engine}' if db_engine else ''
url = f'{db_lang}{engine_str}://{db_user}:{db_pwd}@{db_host}:{db_port}/{db_name}'
# prioritise engine kwargs if provided - "url" key will override constructed if provided
engine_kwargs = {'url': url} | engine_kwargs
active_logger.info(f'connecting to database with kwargs:\n{engine_kwargs}')
self.engine = create_engine(**engine_kwargs)
self.Base.prepare(self.engine, reflect=True)
self.session = Session(self.engine)
self.tables: Dict[str, Table] = self.Base.metadata.tables
active_logger.info(f'tables found: {list(self.tables.keys())}')
def _validate_tbl(self, tbl_name: str):
if tbl_name not in self.tables:
raise DBException(f'error inserting row, table "{tbl_name}" not found in tables')
if tbl_name not in self.Base.classes:
raise DBException(f'error inserting row, table "{tbl_name}" not found in base')
def _validate_cols(self, tbl_name: str, cols: List[str]):
for col in cols:
if col not in self.tables[tbl_name].columns:
raise DBException(f'column "{col}" not found in table "{tbl_name}"')
def _validate_pkeys(self, tbl_nm: str, pkey_flts: Dict):
tbl_pkeys = tuple(x.name for x in self.tables[tbl_nm].primary_key)
flt_pkeys = tuple(pkey_flts.keys())
if tbl_pkeys != flt_pkeys:
raise DBException(
f'error writing cache, table primary keys "{tbl_pkeys}" does not match specified "{flt_pkeys}"'
)
def apply_basic_filters(self, tbl_nm: str, pkey_flts: Dict) -> Query:
return self.session.query(self.tables[tbl_nm]).filter(
*[self.tables[tbl_nm].columns[k] == v for k, v in pkey_flts.items()]
)
def row_exist(self, tbl_nm: str, pkey_flts: Dict) -> bool:
"""
Determine if row(s) exist in database for a given table
"""
return self.apply_basic_filters(tbl_nm, pkey_flts).count() >= 1
def _value_processors(self, value: Any, tbl_name: str, col: str, prcs: ProcessorMap, prc_type: ProcessorKey) -> Any:
col_type = type(self.tables[tbl_name].columns[col].type)
prc_nms = prcs.get(col_type, {}).get(prc_type)
if prc_nms:
if type(prc_nms) is not list:
raise DBException(f'processors "{prc_type}" for column "{col}" not list')
for i, prc_nm in enumerate(prc_nms):
prc_func = db_processors[prc_nm]
active_logger.info(f'running processor "{prc_type}" #{i}, "{prc_nm}" on column "{col}"')
value_out = prc_func(value)
value = value_out
return value
def _process_columns(self, data: Dict, tbl_name: str, prcs: ProcessorMap, prc_type: ProcessorKey) -> None:
self._validate_tbl(tbl_name)
self._validate_cols(tbl_name, list(data.keys()))
for col in data.keys():
val_in = data[col]
if val_in is None:
active_logger.warning(f'table "{tbl_name}", col "{col}" value is None, skipping processing')
else:
val_out = self._value_processors(val_in, tbl_name, col, prcs, prc_type)
data[col] = val_out
def insert_row(self, tbl_name: str, data: Dict):
active_logger.info(f'inserting row of information into table "{tbl_name}"')
active_logger.info(f'keys passed are:\n'
f'{yaml.dump([str(k) for k in data.keys()])}')
self._process_columns(data, tbl_name, self.col_prcs, 'process_in')
row = self.Base.classes[tbl_name](**data)
self.session.add(row)
self.session.commit()
def read_rows(self, tbl_nm: str, pkey_flts: Dict) -> List[Dict]:
active_logger.info(f'reading rows from table "{tbl_nm}" with filter "{pkey_flts}"')
self._validate_tbl(tbl_nm)
self._validate_pkeys(tbl_nm, pkey_flts)
if not self.row_exist(tbl_nm, pkey_flts):
raise DBException(f'row in table "{tbl_nm}" with filters "{pkey_flts}" does not exist')
sql_rows = self.apply_basic_filters(tbl_nm, pkey_flts).all()
rows = []
for row in sql_rows:
row_dict = {
str(k): v
for k, v in dict(row).items()
} # convert sqlalchemy key objects to str for yaml
self._process_columns(row_dict, tbl_nm, self.col_prcs, 'process_out')
rows.append(row_dict)
return rows
def read_row(self, tbl_nm: str, pkey_flts: Dict) -> Dict:
rows = self.read_rows(tbl_nm, pkey_flts)
if len(rows) != 1:
raise DBException(f'expected 1 row from table "{tbl_nm}" with filters "{pkey_flts}", got {len(rows)}')
return rows[0]
def delete_rows(self, tbl_nm: str, pkey_flts: Dict) -> int:
active_logger.info(f'deleting rows from table "{tbl_nm}" with filters: "{pkey_flts}"')
q = self.apply_basic_filters(tbl_nm, pkey_flts)
ret = q.delete(synchronize_session='fetch')
self.session.commit()
return ret
def order_query(self, query: Query, cols, order_col: str, order_asc: bool):
"""apply ordering based on column of cte"""
if order_col not in cols:
raise DBException(f'cannot order by column "{order_col}", does not exist in CTE')
order_func = sqlalchemy.asc if order_asc else sqlalchemy.desc
return query.order_by(order_func(cols[order_col]))
class DBCache(DBBase):
def __init__(self, cache_root, cache_processors=None, **kwargs):
super().__init__(**kwargs)
self.cache_root = path.abspath(path.expandvars(cache_root))
if not path.isdir(self.cache_root):
active_logger.info(f'creating cache root directory at: "{self.cache_root}"')
os.makedirs(self.cache_root)
else:
active_logger.info(f'existing cache root directory found at: "{self.cache_root}"')
self.cache_prcs = cache_processors or CACHE_PROCESSORS
def cache_tbl(self, tbl_nm) -> str:
return path.join(self.cache_root, tbl_nm)
def cache_dir(self, tbl_nm: str, pkey_flts: Dict) -> str:
return path.join(self.cache_tbl(tbl_nm), *pkey_flts.values())
def cache_col(self, tbl_nm: str, pkey_flts: Dict, col: str) -> str:
return path.join(self.cache_dir(tbl_nm, pkey_flts), col)
def clear_cache(self, tbl_nm: str, pkey_flts: Dict):
active_logger.info(f'clearing cache from table "{tbl_nm}" with filters "{pkey_flts}"')
p = self.cache_dir(tbl_nm, pkey_flts)
if not path.exists(p):
active_logger.info(f'path "{p}" does not exist, skipping')
else:
if not path.isdir(p):
raise DBException(f'path "{p}" is not a directory')
active_logger.info(f'removing cache dir: "{p}"')
os.rmdir(p)
def write_to_cache(self, tbl_nm: str, pkey_flts: Dict, data: Dict):
self._validate_pkeys(tbl_nm, pkey_flts)
self._validate_tbl(tbl_nm)
d = self.cache_dir(tbl_nm, pkey_flts)
active_logger.info(f'writing cache to path: "{d}"')
if path.exists(d):
active_logger.info('path already exists, exiting...')
return
os.makedirs(d, exist_ok=True)
self._process_columns(data, tbl_nm, self.cache_prcs, 'process_out')
for k in pkey_flts.keys():
data.pop(k, None)
for col in data.keys():
if data[col] is None:
active_logger.warning(f'column "{col}" value is none, skipping')
else:
p = self.cache_col(tbl_nm, pkey_flts, col)
active_logger.info(f'writing column "{col}" to file: "{p}"')
with open(p, 'w') as f:
f.write(data[col])
def read_to_cache(self, tbl_nm: str, pkey_flts: Dict):
active_logger.info(f'reading table "{tbl_nm}" row to cache with filters "{pkey_flts}"')
data = self.read_row(tbl_nm, pkey_flts)
self.write_to_cache(tbl_nm, pkey_flts, data)
def insert_from_cache(self, tbl_nm, pkey_flts: Dict):
active_logger.info(f'insert row to table "{tbl_nm}" from cache with filters "{pkey_flts}"')
self._validate_pkeys(tbl_nm, pkey_flts)
self._validate_tbl(tbl_nm)
d = self.cache_dir(tbl_nm, pkey_flts)
active_logger.info(f'getting files from cache directory: "{d}"')
if not path.isdir(d):
raise DBException(f'expected to be directory: "{d}"')
data = pkey_flts.copy()
_, _, files = next(os.walk(d))
self._validate_cols(tbl_nm, files) # files should match column names
for fnm in files:
fp = self.cache_col(tbl_nm, pkey_flts, fnm)
active_logger.info(f'reading column data from file: "{fp}"')
with open(fp, 'r') as f:
data[fnm] = f.read()
self._process_columns(data, tbl_nm, self.cache_prcs, 'process_in')
self.insert_row(tbl_nm, data)
def _cache_pkeys(self, tbl_nm: str):
"""
get list of primary key filters from nested dirs in cache
"""
pkey_names = tuple(x.name for x in self.tables[tbl_nm].primary_key)
def _get_pkeys(_dir: str, _base_pkey: Dict, _lvl) -> List:
if not path.isdir(_dir):
return []
_, dirnames, _ = next(os.walk(_dir))
return [_base_pkey | {pkey_names[_lvl]: d} for d in dirnames]
lvl = 0
flts = [{}]
while lvl < len(pkey_names):
flts_out = []
for f in flts:
d = self.cache_dir(tbl_nm, f)
flts_out += _get_pkeys(d, f, lvl)
flts = flts_out
lvl += 1
return flts
def scan_cache(self, tbl_nm: str, post_insert: Optional[Callable[[str, Dict], None]] = None) -> List[Dict]:
tbl_root = self.cache_tbl(tbl_nm)
active_logger.info(f'scanning for cached rows for table "{tbl_nm}" to insert in "{tbl_root}"')
flts = self._cache_pkeys(tbl_nm)
added_pkeys = []
for pkey_filters in flts:
if self.row_exist(tbl_nm, pkey_filters):
active_logger.info(f'row "{pkey_filters}" already exists in database, skipping...')
else:
self.insert_from_cache(tbl_nm, pkey_filters)
added_pkeys.append(pkey_filters)
if post_insert is not None:
post_insert(tbl_nm, pkey_filters)
return added_pkeys
def wipe_cache(self) -> Tuple[int, int]:
active_logger.info(f'clearing cache root at "{self.cache_root}"')
_, dirnames, filenames = next(os.walk(self.cache_root))
for fnm in filenames:
p = path.join(self.cache_root, fnm)
os.remove(p)
for dnm in dirnames:
p = path.join(self.cache_root, dnm)
shutil.rmtree(p)
return len(filenames), len(dirnames)
class QueryFilter(TypedDict):
value: object
field: str
op: str
def apply_filter_spec(tbl: Table, q: Query, filters_spec: List[QueryFilter]) -> Query:
"""sqlalchemy_filters `apply_filters` function doesn't work with Sqlalchemy V1.14 so i've bodged it myself until
they sort it out"""
conditions = [
SqlOperator.OPERATORS[f['op']](tbl.columns[f['field']], f['value'])
for f in filters_spec
]
return q.filter(*conditions)
class BettingDB:
"""
Betting database handler
    Manages session that connects to remote SQL database for querying
    "Historic" markets are files downloaded directly from betfair's historical data website
"Recorded" markets are files from betfair markets recorded through a python script locally, which are recorded
with the accompanying market catalogue file
"""
def __init__(self, **kwargs):
self._dbc = DBCache(**kwargs)
def read(self, tbl_nm: str, pkey_flts: Dict):
return self._dbc.read_row(tbl_nm, pkey_flts)
def close(self):
self._dbc.session.close()
def meta_serialise(self, market_info: Dict) -> None:
"""run caching serialisation on market information retrieved from 'marketmeta' database"""
self._dbc._process_columns(market_info, 'marketmeta', self._dbc.cache_prcs, 'process_out')
def meta_de_serialise(self, market_info: Dict) -> None:
"""run caching de-serialisation on market information that has been serialised"""
self._dbc._process_columns(market_info, 'marketmeta', self._dbc.cache_prcs, 'process_in')
@staticmethod
def get_meta(first_book: MarketBook, cat: MarketCatalogue = None) -> Dict:
"""
Get metadata corresponding to the "Meta" table in the betting database for a given betfair Market
Parameters
----------
first_book : first MarketBook for betfair Market
        cat : if market is recorded and not historic, this needs to be passed to get
            venue and runner names

        Returns
        -------
        dict of metadata
"""
mktdef: MarketDefinition = first_book.market_definition
mktid = first_book.market_id
init_time = first_book.publish_time
pre_off = mktdef.market_time - init_time
metadata = {
'market_id': mktid,
'sport_id': mktdef.event_type_id,
'market_time': mktdef.market_time,
'market_type': mktdef.market_type,
'betting_type': mktdef.betting_type,
'country_code': mktdef.country_code,
'event_id': mktdef.event_id,
'event_name': mktdef.event_name, # historical
'timezone': mktdef.timezone,
'venue': mktdef.venue,
'init_time': init_time,
'pre_off': pre_off,
'format': 'historic',
}
if cat is not None:
metadata['event_name'] = cat.event.name
metadata['venue'] = cat.event.venue
metadata['format'] = 'recorded'
return metadata
@staticmethod
def get_first_book(file_path: str) -> Optional[MarketBook]:
"""
read the first line in a historical/streaming file and get the MarketBook parsed object, without reading or
processing the rest of the file
"""
with open(file_path) as f:
l = f.readline()
q = Queue()
        # stop it whinging about stream latency by using infinity as max latency
listener = StreamListener(q, max_latency=sys.float_info.max)
listener.register_stream(0, 'marketSubscription')
listener.on_data(l)
return listener.output_queue.get()[0]
def insert_market_meta(self, market_id: str):
active_logger.info(f'creating metadata database entry for market "{market_id}"')
pkey_flts = {'market_id': market_id}
self._dbc.read_to_cache('marketstream', pkey_flts)
stream_path = self._dbc.cache_col('marketstream', pkey_flts, 'stream_updates')
bk = self.get_first_book(stream_path)
cat = None
cat_path = self._dbc.cache_col('marketstream', pkey_flts, 'catalogue')
if path.exists(cat_path):
if path.getsize(cat_path):
with open(cat_path, 'r') as f:
cat_dict = json.loads(f.read())
try:
cat = MarketCatalogue(**cat_dict)
except TypeError as e:
raise DBException(f'failed to create market catalogue: {e}')
if cat is None:
names = {r.selection_id: r.name for r in bk.market_definition.runners}
else:
names = {r.selection_id: r.runner_name for r in cat.runners}
for runner_id, name in names.items():
active_logger.info(f'creating row for market "{market_id}", runner "{runner_id}", name "{name}"')
self._dbc.insert_row('marketrunners', {
'market_id': market_id,
'runner_id': runner_id,
'runner_name': name
})
meta_data = self.get_meta(bk, cat)
self._dbc.insert_row('marketmeta', meta_data)
def insert_strategy_runners(self, pkey_filters, profit_func: Callable[[str], Dict]):
p = self._dbc.cache_col('strategyupdates', pkey_filters, 'strategy_updates')
if not path.isfile(p):
raise DBException(f'expected strategy update file at "{p}"')
runner_profits = profit_func(p)
for k, v in runner_profits.items():
self._dbc.insert_row('strategyrunners', pkey_filters | {
'runner_id': k,
'profit': v
})
def wipe_cache(self) -> Tuple[int, int]:
return self._dbc.wipe_cache()
def scan_mkt_cache(self) -> List[Dict]:
"""
        scan marketstream cache files - insert into database if they do not exist and add corresponding marketmeta and runner rows
"""
def mkt_post_insert(tbl_name, pkey_flts):
if tbl_name != 'marketstream':
raise DBException(f'expected "marketstream" table')
self.insert_market_meta(pkey_flts['market_id'])
return self._dbc.scan_cache('marketstream', mkt_post_insert)
def scan_strat_cache(self, profit_func: Callable[[str], Dict]) -> List[Dict]:
"""
        scan strategy cache files - insert into database if they do not exist
"""
def strat_post_insert(tbl_nm, pkey_flts):
self.insert_strategy_runners(pkey_flts, profit_func)
added_keys = self._dbc.scan_cache('strategymeta')
self._dbc.scan_cache('strategyupdates', strat_post_insert)
return added_keys
def write_strat_info(self, strategy_id, type: str, name: str, exec_time: datetime, info: dict):
data = {
'type': type,
'name': name,
'exec_time': exec_time,
'info': info
}
self._dbc.write_to_cache(
tbl_nm='strategymeta',
pkey_flts={
'strategy_id': str(strategy_id)
},
data=data
)
def path_mkt_usr_updates(self, market_id) -> str:
return self._dbc.cache_col(
tbl_nm='marketstream',
pkey_flts={
'market_id': market_id
},
col='user_data'
)
def path_mkt_cat(self, market_id) -> str:
return self._dbc.cache_col(
tbl_nm='marketstream',
pkey_flts={
'market_id': market_id
},
col='catalogue',
)
def path_mkt_updates(self, market_id) -> str:
return self._dbc.cache_col(
tbl_nm='marketstream',
pkey_flts={
'market_id': market_id
},
col='stream_updates',
)
def path_strat_features(self, market_id, strategy_id) -> str:
return self._dbc.cache_col(
tbl_nm='strategyupdates',
pkey_flts={
'strategy_id': str(strategy_id),
'market_id': market_id,
},
col='strategy_features'
)
def path_strat_updates(self, market_id, strategy_id) -> str:
return self._dbc.cache_col(
tbl_nm='strategyupdates',
pkey_flts={
'strategy_id': str(strategy_id),
'market_id': market_id
},
col='strategy_updates'
)
def paths_market_updates(self, filter_spec: List[QueryFilter], limit=200):
tbl = self._dbc.tables['marketmeta']
q = self._dbc.session.query(tbl)
q_flt = apply_filter_spec(tbl, q, filter_spec)
rows = q_flt.limit(limit).all()
update_paths = []
for row in rows:
mkt_flt = {'market_id': row.market_id}
self._dbc.read_to_cache('marketstream', mkt_flt)
p = self._dbc.cache_col('marketstream', mkt_flt, 'stream_updates')
if not path.isfile(p):
raise DBException(f'expected file at stream update path: "{p}"')
update_paths.append(p)
return update_paths
def rows_runners(self, market_id, strategy_id) -> List[Dict]:
"""
        get filtered rows of runners, joined with profit column from strategy
"""
sr = self._dbc.tables['strategyrunners']
cte_strat = self._dbc.session.query(
sr.columns['runner_id'],
sr.columns['profit'].label('runner_profit')
).filter(
sr.columns['strategy_id'] == strategy_id,
sr.columns['market_id'] == market_id
).cte()
rn = self._dbc.tables['marketrunners']
rows = self._dbc.session.query(
rn,
cte_strat.c['runner_profit'],
).join(
cte_strat,
rn.columns['runner_id'] == cte_strat.c['runner_id'],
isouter=True,
).filter(
rn.columns['market_id'] == market_id
).all()
return [dict(row) for row in rows]
def rows_market(self, cte, col_names, max_rows, order_col=None, order_asc=False) -> List[Dict]:
cols = [cte.c[nm] for nm in col_names]
q = self._dbc.session.query(*cols)
if order_col is not None:
q = self._dbc.order_query(q, cte.c, order_col, order_asc)
rows = q.limit(max_rows).all()
return [dict(row) for row in rows]
# TODO - implement in UI
def rows_strategy(self, max_rows) -> List[Dict]:
shn = self._dbc.session
sm = self._dbc.tables['strategymeta']
sr = self._dbc.tables['strategyrunners']
p_cte = shn.query(
sr.columns['strategy_id'],
func.sum(sr.columns['profit']).label('total_profit')
).group_by(sr.columns['strategy_id']).cte()
m_cte = shn.query(sr.c['strategy_id'], sr.c['market_id']).distinct().cte()
m_cte = shn.query(
m_cte.c['strategy_id'],
func.count(m_cte.c['market_id']).label('n_markets')
).group_by(m_cte.c['strategy_id']).cte()
q = shn.query(sm, p_cte.c['total_profit'], m_cte.c['n_markets']).join(
p_cte, sm.c['strategy_id'] == p_cte.c['strategy_id'], isouter=True
).join(
m_cte, sm.c['strategy_id'] == m_cte.c['strategy_id'], isouter=True
)
return [dict(row) for row in q.limit(max_rows).all()]
def filters_labels(self, filters: DBFilterHandler, cte) -> List[List[Dict[str, Any]]]:
return filters.filters_labels(self._dbc.session, self._dbc.tables, cte)
def cte_count(self, cte: CTE) -> int:
return self._dbc.session.query(cte).count()
def strategy_count(self) -> int:
return self._dbc.session.query(self._dbc.tables['strategymeta']).count()
    def strategy_delete(self, strategy_id) -> Tuple[int, int, int]:
strategy_id = str(strategy_id)
active_logger.info(f'attempting to delete strategy: "{strategy_id}"')
pkey_flt = {'strategy_id': strategy_id}
if not self._dbc.row_exist('strategymeta', pkey_flt):
raise DBException(f'strategy does not exist, using filters: "{pkey_flt}"')
if not strategy_id:
raise DBException(f'trying to delete strategy where ID passed is blank!')
rows = self._dbc.read_rows('strategymeta', pkey_flt)
if len(rows) != 1:
raise DBException(f'expected 1 strategy meta row with filter: "{pkey_flt}"')
n_runners = self._dbc.delete_rows('strategyrunners', pkey_flt)
active_logger.info(f'deleted {n_runners} rows from "strategyrunners" table')
n_mkts = self._dbc.delete_rows('strategyupdates', pkey_flt)
active_logger.info(f'deleted {n_mkts} rows from "strategyupdates" table')
n_meta = self._dbc.delete_rows('strategymeta', pkey_flt)
active_logger.info(f'deleted {n_meta} rows from "strategymeta" table')
return n_meta, n_mkts, n_runners
def filters_strat_cte(self, strat_filters: DBFilterHandler) -> CTE:
"""
get filtered database strategy common table expression (CTE)
"""
strat_meta = self._dbc.tables['strategymeta']
q = self._dbc.session.query(strat_meta).filter(
*strat_filters.filters_conditions(strat_meta)
)
return q.cte()
def filters_mkt_cte(self, strategy_id, column_filters: List[ColumnElement]) -> CTE:
meta = self._dbc.tables['marketmeta']
sr = self._dbc.tables['strategyrunners']
if strategy_id:
strat_cte = self._dbc.session.query(
sr.columns['market_id'],
sql_sum(sr.columns['profit']).label('market_profit')
).filter(
sr.columns['strategy_id'] == strategy_id
).group_by(
sr.columns['market_id']
).cte()
q = self._dbc.session.query(
meta,
strat_cte.c['market_profit']
).join(
strat_cte,
meta.columns['market_id'] == strat_cte.c['market_id']
)
else:
q = self._dbc.session.query(
meta,
sqlalchemy.null().label('market_profit')
)
q = q.filter(*column_filters)
return q.cte()
def cache_strat_updates(self, strategy_id, market_id):
pkey_flts = {
'strategy_id': str(strategy_id),
'market_id': market_id
}
self._dbc.read_to_cache('strategyupdates', pkey_flts)
def cache_strat_meta(self, strategy_id):
pkey_flt = {'strategy_id': strategy_id}
self._dbc.read_to_cache('strategymeta', pkey_flt)
def cache_mkt_stream(self, market_id):
pkey_flt = {'market_id': market_id}
self._dbc.read_to_cache('marketstream', pkey_flt)
def read_mkt_meta(self, market_id) -> Dict:
pkey_flt = {'market_id': market_id}
return self._dbc.read_row('marketmeta', pkey_flt)
def _lost_ids(self, t1: Table, t2, id_col: str):
"""
get a query for where table `t1` has rows that are not reflected in table `t2`, joined by a column with name
specified by `id_col`. table `t2` can be a 1-to-1 mapping of rows from `t1` or 1 to many.
E.g. if `t1` had an id column of 'sample_id_col' and some values [1,2,3], and `t2` had hundreds of rows but
only with 'sample_id_col' equal to 1 or 2, then the function would return the 'sample_id_col' value of 3
"""
cte = self._dbc.session.query(
t2.columns[id_col]
).group_by(t2.columns[id_col]).cte()
return self._dbc.session.query(
t1.columns[id_col],
cte.c[id_col]
).join(
cte,
t1.columns[id_col] == cte.c[id_col],
isouter=True
).filter(cte.c[id_col] == None)
def health_check(self):
mkt_stm = self._dbc.tables['marketstream']
mkt_met = self._dbc.tables['marketmeta']
mkt_run = self._dbc.tables['marketrunners']
# market stream/meta row counts
n_mkt = self._dbc.session.query(mkt_stm).count()
active_logger.info(f'{n_mkt} market stream rows')
n_met = self._dbc.session.query(mkt_met).count()
active_logger.info(f'{n_met} market meta rows')
# market stream rows without corresponding market meta row
q = self._lost_ids(mkt_stm, mkt_met, 'market_id')
for row in q.all():
active_logger.error(f'market "{row[0]}" does not have a meta row')
# market runner meta row count
nrun = self._dbc.session.query(mkt_run).count()
active_logger.info(f'{nrun} market runner rows')
# market stream rows without any corresponding runner rows
q = self._lost_ids(mkt_stm, mkt_run, 'market_id')
for row in q.all():
active_logger.error(f'market "{row[0]}" does not have any runner rows')
srt_met = self._dbc.tables['strategymeta']
srt_run = self._dbc.tables['strategyrunners']
srt_udt = self._dbc.tables['strategyupdates']
# strategy meta & strategy market update row counts
n_srtmet = self._dbc.session.query(srt_met).count()
active_logger.info(f'{n_srtmet} strategy meta rows found')
n_srtudt = self._dbc.session.query(srt_udt).count()
active_logger.info(f'{n_srtudt} strategy market update rows found')
# strategy meta rows without any strategy update rows
q = self._lost_ids(srt_met, srt_udt, 'strategy_id')
for row in q.all():
active_logger.error(f'strategy "{row[0]}" does not have any market updates')
# strategy runner row count
n_srtrun = self._dbc.session.query(srt_run).count()
active_logger.info(f'{n_srtrun} strategy runner rows found')
# strategy meta rows without any strategy runner rows
q = self._lost_ids(srt_met, srt_run, 'strategy_id')
for row in q.all():
active_logger.error(f'strategy "{row[0]}" does not have any runner rows')
```
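For orientation, a brief usage sketch of the database wrapper methods above; `db` is assumed to be an already-constructed instance of the wrapper class, whose constructor is not shown in this excerpt.
```python
# Illustrative only: `db` is assumed to be an instance of the betting database
# wrapper whose methods appear above; its construction is not shown here.
market_id = '1.123456789'                 # matches the expected market ID format
db.cache_mkt_stream(market_id)            # pull stream/catalogue files into the cache
db.insert_market_meta(market_id)          # create marketmeta and marketrunners rows
added = db.scan_mkt_cache()               # pick up any other cached markets
print(f'added {len(added)} markets from cache')
db.health_check()                         # log any orphaned meta/runner rows
```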
#### File: mytrading/utils/dbfilter.py
```python
from __future__ import annotations
from typing import Dict, List, Any, Type, TypeVar, ForwardRef
from sqlalchemy.engine.row import Row
from sqlalchemy import func, cast, Date, desc, asc, Table
from sqlalchemy.orm import Session
from sqlalchemy.sql.functions import coalesce
from sqlalchemy.sql import cte
from sqlalchemy.sql.expression import ColumnElement
import dateparser
from datetime import date, datetime
from ..exceptions import DBException
from myutils.registrar import Registrar
DBFilter = ForwardRef('DBFilter')
filters_reg: Registrar[DBFilter] = Registrar[DBFilter]()
@filters_reg.register_element
class DBFilter:
"""
database filter for a column of a table
designed to present a list of options with values that correspond to database value, and labels that can be
customised for display
"""
HAS_OPTIONS = True
def __init__(self, db_col: str):
"""
set db_col to the name of the database column on which it will apply a filter
"""
self.db_col = db_col
def db_filter(self, tbl: Table, value: Any) -> ColumnElement:
"""
return a SQLAlchemy filter of the specified column at initialisation of the table passed as 'tbl' filtered to
value
"""
return tbl.columns[self.db_col] == value
def get_options(self, session: Session, tables, db_cte: cte) -> List[Row]:
"""
get a list of distinct values from database
"""
return session.query(db_cte.c[self.db_col]).distinct().all()
def get_labels(self, opts: List[Row]) -> List[Dict[str, Any]]:
"""
get a list of dicts with 'label' and 'value' set (in accordance to plotly dash datatable)
"""
return [{
'label': row[0],
'value': row[0],
} for row in opts]
@filters_reg.register_element
class DBFilterDate(DBFilter):
"""
Date specific type of database filter, can set format of date printed as label
"""
def __init__(self, db_col, dt_fmt: str):
super().__init__(db_col)
self.dt_fmt = dt_fmt
def db_filter(self, tbl: Table, value: Any):
try:
dt = datetime.strptime(value, self.dt_fmt)
except ValueError:
raise DBException(f'cannot convert date "{value}" using formatter "{self.dt_fmt}"')
return cast(tbl.columns[self.db_col], Date) == dt
def get_options(self, session: Session, tables, db_cte: cte) -> List[Row]:
"""
get options starting from most recent date first
"""
date_col = cast(db_cte.c[self.db_col], Date)
return session.query(date_col).distinct().order_by(desc(date_col)).all()
def get_labels(self, opts: List[Row]) -> List[Dict[str, Any]]:
"""
format labels with datetime format passed to constructor
"""
return [{
'label': row[0].strftime(self.dt_fmt),
'value': row[0].strftime(self.dt_fmt),
} for row in opts]
@filters_reg.register_element
class DBFilterJoin(DBFilter):
"""
Join a table to another database filter, where `db_col` specified should map to another column in the database
whose table is `join_tbl_name` and whose name is `join_id_col`. `join_name_col` specified the column in the other
table that is used to present in labels.
"""
def __init__(self, db_col, join_tbl_name, join_id_col, join_name_col):
super().__init__(db_col)
self.join_tbl_name = join_tbl_name
self.join_id_col = join_id_col
self.join_name_col = join_name_col
self.output_col = 'TEMP_OUTPUT_NAME'
def get_options(self, session: Session, tables, db_cte: cte) -> List[Row]:
join_tbl = tables[self.join_tbl_name]
q = session.query(
db_cte.c[self.db_col],
coalesce(
join_tbl.columns[self.join_name_col],
db_cte.c[self.db_col]
).label(self.output_col)
).join(
join_tbl,
db_cte.c[self.db_col] == join_tbl.columns[self.join_id_col],
isouter=True
).distinct()
return q.all()
def get_labels(self, opts: List[Row]) -> List[Dict[str, Any]]:
return [{
'label': dict(row)[self.output_col],
'value': dict(row)[self.db_col]
} for row in opts]
@filters_reg.register_element
class DBFilterMulti(DBFilter):
"""
Filter to 1 column but use other columns in table to construct label, whereby `fmt_spec` is the string formatting
specifier used to construct the labels
    e.g. `fmt_spec`='{sport_name}, {sport_time}'
would mean for a sample row where 'sport_name'='football' and 'sport_time'='13:00'
the output label would be 'football, 13:00'
"""
def __init__(self, db_col: str, fmt_spec, order_col, is_desc: bool, cols: List[str]):
super().__init__(db_col)
self.fmt_spec = fmt_spec
self.order_col = order_col
        self.is_desc = is_desc
self.cols = cols
def get_options(self, session: Session, tables, db_cte: cte) -> List[Row]:
"""
get a list of distinct values from database
"""
if self.is_desc:
odr = desc
else:
odr = asc
return session.query(
*(db_cte.c[col] for col in self.cols)
).distinct().order_by(
odr(db_cte.c[self.order_col])
).all()
def get_labels(self, opts: List[Row]) -> List[Dict[str, Any]]:
return [{
'label': self.fmt_spec.format(**dict(row)),
'value': dict(row)[self.db_col]
} for row in opts]
@filters_reg.register_element
class DBFilterText(DBFilter):
"""filter to a text string"""
HAS_OPTIONS = False
def __init__(self, db_col: str, pre_str='%', post_str='%'):
super().__init__(db_col)
self.pre_str = pre_str
self.post_str = post_str
def db_filter(self, tbl: Table, value: str):
return tbl.columns[self.db_col].like(f'{self.pre_str}{value}{self.post_str}')
class DBFilterHandler:
def __init__(self, db_filters: List[DBFilter]):
self._db_filters = db_filters
# def filters_values(self) -> List[Any]:
# return [flt.value for flt in self._db_filters]
def filters_labels(self, session, tables, cte) -> List[List[Dict[str, Any]]]:
return [
flt.get_labels(flt.get_options(session, tables, cte))
for flt in self._db_filters
if flt.HAS_OPTIONS
]
# def update_filters(self, clear, args):
# if len(args) != len(self._db_filters):
# raise DBException(f'args has len {len(args)}, expected {len(self._db_filters)}')
# for val, flt in zip(args, self._db_filters):
# flt.set_value(val, clear)
def filters_conditions(self, tbl: Table, values: List[Any]) -> List[ColumnElement]:
if len(values) != len(self._db_filters):
raise DBException(f'args has len {len(values)}, expected {len(self._db_filters)}')
return [
f.db_filter(tbl, v)
for f, v in zip(self._db_filters, values)
if v
]
```
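A minimal usage sketch for the filter classes above; the column names, `tables` mapping and SQLAlchemy `session` are illustrative assumptions rather than values taken from the source.
```python
# Illustrative only: the column names, `tables` mapping and SQLAlchemy `session`
# are assumed to come from the surrounding database layer.
from mytrading.utils.dbfilter import (
    DBFilter, DBFilterDate, DBFilterText, DBFilterHandler
)

handler = DBFilterHandler([
    DBFilter('sport_id'),                      # exact match on a column value
    DBFilterDate('market_time', '%Y-%m-%d'),   # match on calendar date
    DBFilterText('market_name'),               # LIKE '%value%' text search
])

tbl = tables['marketmeta']
conditions = handler.filters_conditions(tbl, ['7', '2021-06-01', 'Hcap'])
rows = session.query(tbl).filter(*conditions).all()
```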
#### File: mytrading/utils/__init__.py
```python
import betfairlightweight
from betfairlightweight.filters import market_filter
from betfairlightweight.resources import MarketCatalogue
from betfairlightweight import APIClient
import logging
from datetime import datetime
from typing import Optional, List, Dict
import pandas as pd
import os
import keyring
from os import path
import shutil
import re
from queue import Queue
import sys
from myutils.general import dgetattr
from ..process import bf_dt
from .bettingdb import BettingDB
RE_EVENT = r'^\d{8}$'
RE_MARKET_ID = r'^\d\.\d{9}$'
EXT_CATALOGUE = '.bfcatalogue'
EXT_RECORDED = '.bfrecorded'
active_logger = logging.getLogger(__name__)
active_logger.setLevel(logging.INFO)
class APIHandler:
MAX_CATALOGUES = 100
DEF_CAT_ATTRS = {
'market ID': 'market_id',
'market name': 'market_name',
'event type ID': 'event_type.id',
'event type name': 'event_type.name',
'event ID': 'event.id',
'event country': 'event.country_code',
'event name': 'event.name',
'start time': 'market_start_time',
}
def __init__(self):
"""initialise from locally stored betfair certification, username and password"""
self._certs_path = os.environ['USERPROFILE'] + r'\OneDrive\Betfair\bf certs'
self._my_username = keyring.get_password('bf_username', '<PASSWORD>')
self._my_password = keyring.get_password('bf_password', '<PASSWORD>')
self._my_app_key = keyring.get_password('bf_app_key', 'joel')
self._api_client = APIClient(
username=self._my_username,
password=<PASSWORD>,
app_key=self._my_app_key,
certs=self._certs_path
)
@property
def API_client(self):
"""Get Betfair API client with credentials"""
return self._api_client
def list_market_catalogues(
self,
event_type_ids: Optional[List[str]] = None,
market_type_codes: Optional[List[str]] = None,
market_betting_types: Optional[List[str]] = None,
market_limit: int = 0,
market_countries: Optional[List[str]] = None,
from_datetime: Optional[datetime] = None,
to_datetime: Optional[datetime] = None,
sort='FIRST_TO_START',
) -> List[MarketCatalogue]:
"""list market catalogues"""
# get all info from market catalogues (except competitions)
market_projection = ['EVENT',
'EVENT_TYPE',
'MARKET_START_TIME',
'MARKET_DESCRIPTION',
'RUNNER_DESCRIPTION']
race_filter = market_filter(
event_type_ids=event_type_ids,
market_type_codes=market_type_codes,
market_betting_types=market_betting_types,
market_countries=market_countries,
market_start_time={
'from': bf_dt(from_datetime) if from_datetime else None,
'to': bf_dt(to_datetime) if to_datetime else None,
}
)
# get market catalogues
return self._api_client.betting.list_market_catalogue(
filter=race_filter,
market_projection=market_projection,
max_results=market_limit or self.MAX_CATALOGUES,
sort=sort
)
@classmethod
def bf_catalogues_to_df(
cls,
market_catalogues: List[MarketCatalogue],
attrs: Optional[Dict] = None
) -> pd.DataFrame:
"""convert list of market catalogues to dataframe, columns specified by `attrs` dict of (col => catalogue
attribute)"""
if attrs is None:
attrs = cls.DEF_CAT_ATTRS
data = [{
k: dgetattr(cat, v)
for k, v in attrs.items()
} for cat in market_catalogues]
return pd.DataFrame(data)
def get_historical(self, stream_path: str) -> Queue:
"""Get Queue object from historical Betfair data file"""
output_queue = Queue()
        # stop it whinging about stream latency by using infinity as max latency
listener = betfairlightweight.StreamListener(
output_queue=output_queue,
max_latency=sys.float_info.max
)
stream = self._api_client.streaming.create_historical_stream(
file_path=stream_path,
listener=listener
)
stream.start()
return output_queue
def migrate_mkt_cache(db: BettingDB, market_id: str, stream_path: str, cat_path: str = None):
"""
migrate a market stream (and optionally catalogue) file(s) to database cache location
    this is a utility tool for migrating from the old style of file storage, with hierarchical file structuring
    according to Betfair historical data (year => month => day => event) ...etc and .RECORDED file types
"""
d = db.cache_dir('marketstream', {'market_id': market_id})
os.makedirs(d, exist_ok=True)
stream_dest = path.join(d, 'stream_updates')
shutil.copy(stream_path, stream_dest)
    active_logger.info(f'migrating stream from "{stream_path}" to cache "{stream_dest}"')
if cat_path is not None:
cat_dest = path.join(d, 'catalogue')
active_logger.info(f'migrating catalogue from "{cat_path}" to cache "{cat_dest}"')
shutil.copy(cat_path, cat_dest)
def migrate_dir_cache(db: BettingDB, mkts_dir: str) -> None:
"""
Process a directory recursively, attempting to find historic market file(s) and recorded/catalogue market file
pair(s) and add them to the betting database
"""
for d, d_names, f_names in os.walk(mkts_dir):
for f_name in f_names:
f_path = path.join(d, f_name)
f_root, ext = path.splitext(f_path)
if re.match(RE_MARKET_ID, f_name):
active_logger.info(f'processing file "{f_path}"')
                migrate_mkt_cache(db, f_name, f_path)  # for historical, file name is market ID
elif ext == EXT_RECORDED:
cat_path = f_root + EXT_CATALOGUE
active_logger.info(f'processing file "{f_path}"')
if path.exists(cat_path):
migrate_mkt_cache(db, f_root, f_path, cat_path) # for recorded, market ID is file root
else:
active_logger.warning(f'"{f_path}" <- recorded file\n'
f'"{cat_path}" <- catalogue file not found')
continue
```
#### File: betfair-browser/myutils/betfair.py
```python
from betfairlightweight import StreamListener
from betfairlightweight.exceptions import ListenerError
from betfairlightweight.streaming import BaseListener
class BufferStream:
def __init__(
self, data: str, listener: BaseListener, operation: str, unique_id: int
):
self.data = data
self.listener = listener
self.operation = operation
self.unique_id = unique_id
self._running = False
@staticmethod
def generator(
data: str = None,
listener: BaseListener = None,
operation: str = "marketSubscription",
unique_id: int = 0,
) -> 'BufferStream':
listener = listener if listener else StreamListener()
return BufferStream(data, listener, operation, unique_id)
def start(self) -> None:
self._running = True
self._read_loop()
def stop(self) -> None:
self._running = False
def _read_loop(self) -> None:
self.listener.register_stream(self.unique_id, self.operation)
for update in self.data.splitlines():
if self.listener.on_data(update) is False:
# if on_data returns an error stop the stream and raise error
self.stop()
raise ListenerError("HISTORICAL", update)
if not self._running:
break
self.stop()
```
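A minimal sketch of replaying newline-delimited historical stream data through `BufferStream`; the file path is illustrative, and the import path follows the file layout above.
```python
# Minimal sketch: replay newline-delimited Betfair stream updates held in memory.
# The file path is illustrative; `raw_updates` is assumed to hold the contents of
# a historical stream file.
import sys
from queue import Queue
from betfairlightweight import StreamListener
from myutils.betfair import BufferStream

with open('stream_updates') as f:
    raw_updates = f.read()

out_q = Queue()
listener = StreamListener(output_queue=out_q, max_latency=sys.float_info.max)
stream = BufferStream.generator(data=raw_updates, listener=listener)
stream.start()                    # processes every line, pushing parsed books to out_q
market_books = out_q.get()        # first batch of MarketBook objects
```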
#### File: myutils/dashutilities/callbacks.py
```python
from typing import Union, List, Dict, Any, Optional, Callable
import dash
import logging
from dash.dependencies import DashDependency, Input, Output, State
from myutils.exceptions import DashUtilsException
Config = Union[DashDependency, List['Config'], Dict[str, 'Config']]
ConfigDict = Dict[str, Config]
T = Union[Any, List['T'], Dict[str, 'T']]
TDict = Dict[str, T]
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def flatten_to_list(cfg: Config) -> List[Any]:
"""
Flatten a recursive dictionary/list of values to a single-layer list
Examples:
flatten({'a': {'b': [1, 2, 3]}, 'c': [5]})
[1, 2, 3, 5]
flatten(1)
[1]
"""
_out = []
def inner(v):
if isinstance(v, list):
[inner(x) for x in v]
elif isinstance(v, dict):
[inner(x) for x in v.values()]
else:
_out.append(v)
inner(cfg)
return _out
def assign_value(spec: Dict[str, T], args: List[Any]) -> Dict[str, T]:
"""
Iterate through recursive specification format of dictionaries/lists and assign list of values from `args` in
the same format
Examples:
assign_value(
{
'a': {
'b': 'DUMMY',
'c': 'DUMMY'
},
'd': ['DUMMY']
},
['Arg X', 'Arg Y', 'Arg Z']
)
{'a': {'b': 'Arg X', 'c': 'Arg Y'}, 'd': ['Arg Z']}
"""
if isinstance(spec, list):
return [assign_value(v, args) for v in spec]
elif isinstance(spec, dict):
return {k: assign_value(v, args) for k, v in spec.items()}
else:
if not len(args):
raise DashUtilsException(f'cannot assign value for spec: "{spec}", args empty')
return args.pop(0)
def flatten_output(spec: Dict[str, T], values: Dict[str, T]) -> List[Any]:
"""
    Flatten a set of values in list/dictionary according to the specification designated by `spec`
Only flatten as far as the nested lists/dictionaries are constructed in `spec` - any further layers in
`values` will be left unchanged
Examples:
See below the example where keys 'a' and ('b', 'c' nested in 'x') are flattened but the list in 'c' is not
flattened
flatten_output(
{
'a': 'DUMMY',
'x': {
'b': 'DUMMY',
'c': 'DUMMY'
}
},
{
'a': 1,
'x':{
'b': 2,
'c': [3, 4]
}
}
)
[1, 2, [3, 4]]
"""
_out = []
def inner(_spec: T, _val: T):
if type(_spec) is list:
[inner(s, v) for s, v in zip(_spec, _val)]
elif type(_spec) is dict:
[inner(s, _val[k]) for k, s in _spec.items()]
else:
_out.append(_val)
inner(spec, values)
return _out
def dict_callback(
app: dash.Dash,
outputs_config: ConfigDict,
inputs_config: ConfigDict,
states_config: Optional[ConfigDict],
):
def outer(process: Callable[[TDict, TDict, TDict], None]):
inputs: List[Input] = flatten_to_list(inputs_config)
outputs: List[Output] = flatten_to_list(outputs_config)
states: List[State] = flatten_to_list(states_config)
logger.info(f'generating callback using process "{process.__name__}"')
get_info = lambda objs: '\n'.join([f'{x.__class__.__name__}: {repr(x)}' for x in objs])
logger.info(f'inputs:\n{get_info(inputs)}')
logger.info(f'outputs:\n{get_info(outputs)}')
logger.info(f'states:\n{get_info(states)}')
@app.callback(outputs, inputs, states)
def callback(*args):
args = list(args)
input_vars = assign_value(inputs_config, args)
state_vars = assign_value(states_config, args)
if len(args):
raise DashUtilsException(f'still have {len(args)} args remaining to process')
output_vars = assign_value(outputs_config, [None] * len(outputs))
process(output_vars, input_vars, state_vars)
callback_output = flatten_output(outputs_config, output_vars)
return callback_output
return outer
```
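A hedged sketch of wiring `dict_callback` to a Dash app; the component ids and properties are hypothetical, a recent Dash version is assumed, and the point is only to show the dictionary-shaped configs being flattened and re-assembled.
```python
# Hypothetical component ids ('go-btn', 'display') for illustration only.
import dash
from dash import html
from dash.dependencies import Input, Output, State
from myutils.dashutilities.callbacks import dict_callback

app = dash.Dash(__name__)
app.layout = html.Div([html.Button('go', id='go-btn'), html.Div(id='display')])

@dict_callback(
    app,
    outputs_config={'text': Output('display', 'children')},
    inputs_config={'clicks': Input('go-btn', 'n_clicks')},
    states_config={'current': State('display', 'children')},
)
def process(outputs, inputs, states):
    # assign into the output dict in place; dict_callback flattens it back out
    outputs['text'] = f"clicked {inputs['clicks'] or 0} times (was: {states['current']})"
```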
#### File: betfair-browser/myutils/dictionaries.py
```python
from typing import Iterable, Dict
import copy
from collections.abc import Mapping
from .exceptions import DictException
def validate_config(cfg: Dict, cfg_spec: Dict):
_cfg = copy.deepcopy(cfg)
for k, spec in cfg_spec.items():
exist = k in _cfg
val = _cfg.pop(k, None)
if not spec.get('optional'):
if not exist:
raise DictException(f'expected key "{k}" in configuration dict as per config spec: "{cfg_spec}"')
if exist:
# if 'type' in spec:
if not isinstance(val, spec['type']):
raise DictException(f'expected key "{k}" value to be type "{spec["type"]}", got "{type(val)}"')
if _cfg:
raise DictException(f'configuration dictionary has unexpected values: "{_cfg}"')
def is_dict_subset(x, y):
"""recursively determine if key value pairs in x are a subset of y"""
for k, v in x.items():
if k not in y:
return False
elif type(v) is dict:
if not isinstance(y[k], Iterable):
return False
elif not is_dict_subset(v, y[k]):
return False
elif v != y[k]:
return False
return True
def dict_update(updates: Mapping, base_dict: Mapping):
"""recursively update key value pairs of base_dict with updates"""
for k, v in updates.items():
if type(v) is not dict:
# value is not dict
base_dict[k] = v
continue
# value is dict
if k not in base_dict:
# value is dict & key not found in y
base_dict[k] = v
continue
# value is dict & key found in y
if isinstance(base_dict[k], Iterable):
# value is dict & key found in y & value in y is iterable
dict_update(v, base_dict[k])
continue
# value is dict & key found in y & value in y is not iterable
base_dict[k] = v
def dict_sort(d: dict, key=lambda item: item[1]) -> Dict:
"""sort a dictionary items"""
return {k: v for k, v in sorted(d.items(), key=key)}
``` |
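A small, self-contained worked example of the dictionary helpers above; the import path follows the file layout shown.
```python
# Self-contained worked example of the helpers above.
from myutils.dictionaries import validate_config, dict_update, is_dict_subset, dict_sort

spec = {'name': {'type': str}, 'retries': {'type': int, 'optional': True}}
validate_config({'name': 'job-1'}, spec)        # passes: 'retries' is optional

base = {'db': {'host': 'localhost', 'port': 5432}}
dict_update({'db': {'port': 5433}, 'debug': True}, base)
assert base == {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}

assert is_dict_subset({'db': {'port': 5433}}, base) is True
assert dict_sort({'b': 2, 'a': 1}) == {'a': 1, 'b': 2}
```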
{
"source": "joeledwardson/joelsutilities",
"score": 3
} |
#### File: joelsutilities/joelsutilities/edgedetector.py
```python
class EdgeDetector:
"""
    detect when a boolean value changes from True to False and vice-versa by comparing against the previous value
>>> detector.update(True)
>>> detector.update(False)
>>> detector.rising
False
>>> detector.falling
True
>>> detector.update(True)
>>> detector.rising
True
>>> detector.falling
False
>>> detector.update(True)
>>> detector.rising
False
"""
def __init__(self, initial_value: bool):
self._value: bool = initial_value
self._previous: bool = initial_value
self._rising: bool = False
self._falling: bool = False
def update(self, new_value: bool):
"""
update current state boolean value
.. _update:
"""
self._previous = self._value
self._value = new_value
self._rising = self._value and not self._previous
self._falling = self._previous and not self._value
@property
def current_value(self) -> bool:
"""most recent value set by :ref:`self.update <update>`"""
return self._value
@property
def rising(self) -> bool:
"""returns `True` if latest value set by :ref:`self.update <update>` is `True` but preceeding value `False` """
return self._rising
@property
def falling(self) -> bool:
"""returns `True` if latest value set by :ref:`self.update <update>` is `False` but preceeding value `True` """
return self._falling
if __name__ == '__main__':
import doctest
    doctest.testmod(extraglobs={'detector': EdgeDetector(initial_value=False)})  # detector requires an initial value
```
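A brief usage sketch of `EdgeDetector` outside the doctest, reacting only to transitions of a sampled boolean signal; the sample values are illustrative.
```python
# Brief usage sketch: react only to transitions of a sampled boolean signal.
from joelsutilities.edgedetector import EdgeDetector

detector = EdgeDetector(initial_value=False)
for reading in [False, True, True, False]:      # illustrative samples
    detector.update(reading)
    if detector.rising:
        print('signal switched on')
    elif detector.falling:
        print('signal switched off')
```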
#### File: joelsutilities/joelsutilities/jsonutils.py
```python
import json
from typing import Any
def is_jsonable(x: Any) -> bool:
"""
determine if data can be serialized safely with `json.dumps`
>>> is_jsonable("some string")
True
>>> is_jsonable(12345)
True
>>> is_jsonable(object())
False
"""
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
```
#### File: joelsutilities/joelsutilities/statemachine.py
```python
import logging
import queue
from enum import Enum
from typing import Dict, List, Union
from .exceptions import StateMachineException
active_logger = logging.getLogger(__name__)
class State:
"""Base state class used with :ref:`State.run <_staterun>`
.. _state:
"""
def enter(self, **inputs):
"""function called when transitioning from another state
function not called on subsequent `run`s once state is entered
"""
pass
def run(self, **inputs) -> Union[Enum, bool, None, List[Enum]]:
"""execute state actions
.. _staterun:
:raises NotImplementedError: function must be defined
:return: `Enum` for the next state, or `list` of `Enum`s for list of next states, `False` or `None` to remain in current state, `True` to continue to next state in list
:rtype: Union[Enum, bool, None, List[Enum]]
"""
raise NotImplementedError
# TODO - this should be broken up into individual building blocks
class StateMachine:
    Handle state execution, management and transitions
.. _statemachine:
"""
def __init__(self, states: Dict[Enum, State], initial_state: Enum):
"""
:param states: map of `Enum` to `State`
:type states: Dict[Enum, State]
:param initial_state: first key to use
:type initial_state: Enum
"""
self.states: Dict[Enum, State] = states
self.current_state_key: Enum = initial_state
self.previous_state_key: Enum = initial_state
self.initial_state_key: Enum = initial_state
self.is_state_change: bool = True
self.state_queue = queue.Queue()
def flush(self):
"""
clear state queue
"""
self.state_queue.queue.clear()
def force_change(self, new_states: List[Enum]):
"""
updating current state to first in queue and forcibly add a list of new states to queue
"""
for state_key in new_states:
self.state_queue.put(state_key)
self.current_state_key = self.state_queue.get()
self.is_state_change = True
def run(self, **kwargs):
"""
run state machine with `kwargs` dictionary repeatedly until no state change is detected
"""
while True:
if self.is_state_change:
self.states[self.current_state_key].enter(**kwargs)
self.previous_state_key = self.current_state_key
ret = self.states[self.current_state_key].run(**kwargs)
if isinstance(ret, list):
# list returned, add all to queue
for s in ret:
self.state_queue.put(s)
self.current_state_key = self.state_queue.get()
elif ret is None or ret is False or ret == self.current_state_key:
# no value returned or same state returned, same state
pass
elif isinstance(ret, Enum):
                # a single state value has been returned - add it to the queue and transition to it
self.state_queue.put(ret)
self.current_state_key = self.state_queue.get()
elif ret is True:
                # True means advance to the next state already waiting in the queue
if not self.state_queue.qsize():
# state has returned true when nothing in queue! (this shouldn't happen)
raise StateMachineException("state machine queue has no size")
# get next state from queue
self.current_state_key = self.state_queue.get()
else:
# unrecognized return type
raise StateMachineException(
f'return value "{ret}" in state machine not recognised'
)
self.is_state_change = self.previous_state_key != self.current_state_key
if self.is_state_change:
# new state is different to current, process change and repeat loop
self.process_state_change(
self.previous_state_key, self.current_state_key, **kwargs
)
else:
# exit loop if no state change
break
def process_state_change(self, old_state, new_state, **kwargs):
pass
```
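A minimal illustrative sketch of driving `StateMachine` with two working states plus a terminal state; the state names and the `finished` keyword are invented for the example.
```python
# Illustrative sketch: a tiny job machine built on the classes above.
# State names and the `finished` keyword are invented for the example.
from enum import Enum
from joelsutilities.statemachine import State, StateMachine

class JobState(Enum):
    START = 1
    WAIT = 2
    DONE = 3

class StartState(State):
    def run(self, **inputs):
        print('starting job')
        return JobState.WAIT            # single Enum -> transition to WAIT

class WaitState(State):
    def run(self, **inputs):
        if inputs.get('finished'):
            return JobState.DONE
        return None                     # None -> stay in this state

class DoneState(State):
    def run(self, **inputs):
        print('job complete')
        return None                     # terminal: no further transition

machine = StateMachine(
    states={JobState.START: StartState(), JobState.WAIT: WaitState(), JobState.DONE: DoneState()},
    initial_state=JobState.START,
)
machine.run(finished=False)   # START -> WAIT, then stops (WAIT returns None)
machine.run(finished=True)    # WAIT -> DONE, prints 'job complete', then stops
```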
#### File: joelsutilities/joelsutilities/timing.py
```python
import functools
import logging
import time
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, List, Optional, TypedDict
import pandas as pd
active_logger = logging.getLogger(__name__)
class TimingResult(TypedDict):
function: str
count: int
mean: timedelta
min: timedelta
max: timedelta
class TimingRegistrar:
def __init__(self, timings: Optional[Dict[str, List[timedelta]]] = None):
self._function_timings: Dict[str, List[timedelta]] = timings or {}
def log_result(self, elapsed_seconds: float, name: str) -> None:
if name not in self._function_timings:
self._function_timings[name] = []
self._function_timings[name].append(timedelta(seconds=elapsed_seconds))
def _call(self, f: Callable, key: str, *args, **kwargs) -> Any:
start_time = (
time.perf_counter()
) # gets timestamp in seconds (with decimal places)
val = f(*args, **kwargs) # execute function and store output
end_time = time.perf_counter()
elapsed_time = end_time - start_time # compute time for function execution
# use object name with method name for key
if key not in self._function_timings:
self._function_timings[key] = list()
self._function_timings[key].append(timedelta(seconds=elapsed_time))
return val
def register_named_method(self, name_attr: str) -> Callable:
"""
register a class method, whose name at runtime is determined by
- first component is attribute specified by `name_attr`
- second component is function name
        e.g. the example below would yield a key in the timing registrar of 'hello.timed_function'
reg = TimingRegistrar()
class A:
c='hello'
@reg.register_named_method(name_attr='c')
def timed_function():
# some stuff
"""
def outer(method: Callable):
@functools.wraps(method)
def inner(_self, *args, **kwargs):
# use object name with method name for key
key = getattr(_self, name_attr) + "." + method.__name__
return self._call(method, key, _self, *args, **kwargs)
return inner
return outer
def register_method(self, func: Callable) -> Callable:
"""
Register a class method for execution times to be logged
Example below would register function calls to key 'A.hello'
reg = TimingRegistrar()
class A:
@reg.register_method
def hello(self):
# do some stuff
"""
@functools.wraps(func)
def inner(_self, *args, **kwargs):
key = _self.__class__.__name__ + "." + func.__name__
            return self._call(func, key, _self, *args, **kwargs)
return inner
def register_function(self, func: Callable) -> Callable:
"""
Register a function for execution times to be logged, using function name as key to register
The example below would register function timings to key 'hello'
reg = TimingRegistrar()
@reg.register_function
def hello():
# do some stuff
"""
@functools.wraps(func)
def inner(*args, **kwargs):
return self._call(func, func.__name__, *args, **kwargs)
return inner
def _series(self, func_name: str) -> pd.Series:
"""
get series of timedeltas for execution time each time function was run
"""
return pd.Series(self._function_timings[func_name])
def timed_functions(self) -> List[str]:
"""
        get list of function names which are being tracked for timing
"""
return list(self._function_timings.keys())
def get_timings_summary(self) -> List[Dict]:
"""
get a list of dictionaries with function timings information:
'Function' is function name
'Count' is number of times function was recorded
'Mean' is mean of timings as timedelta object
'Min' is minimum time as timedelta object
'Max' is maximum time as timedelta object
"""
return [
TimingResult(
function=k,
count=len(v),
mean=sum(v, timedelta()) / len(v),
min=min(v),
max=max(v),
)
for k, v in self._function_timings.items()
if v
]
def clear(self) -> None:
"""
empty lists of timed functions results
"""
self._function_timings = {}
def items(self):
return self._function_timings.items()
def __contains__(self, item):
return self._function_timings.__contains__(item)
def __setitem__(self, key, value):
return self._function_timings.__setitem__(key, value)
def __getitem__(self, item):
return self._function_timings.__getitem__(item)
def __add__(self, other):
result = TimingRegistrar(self._function_timings)
for k, v in other.items():
if k in result:
result[k] += v
else:
result[k] = v
return result
def ms_to_datetime(timestamp_ms):
return datetime.fromtimestamp(float(timestamp_ms) / 1000)
```
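A short, self-contained usage sketch for `TimingRegistrar`; the timed function is a dummy that just sleeps.
```python
# Self-contained usage sketch for TimingRegistrar; the timed function just sleeps.
import time
from joelsutilities.timing import TimingRegistrar

reg = TimingRegistrar()

@reg.register_function
def slow_add(a, b):
    time.sleep(0.01)        # simulate some work
    return a + b

for _ in range(3):
    slow_add(1, 2)

for summary in reg.get_timings_summary():
    print(summary['function'], summary['count'], summary['mean'])
```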
#### File: joelsutilities/tests/test_json.py
```python
from joelsutilities import jsonutils
def test_is_jsonable():
class A:
pass
assert jsonutils.is_jsonable('hello') is True
assert jsonutils.is_jsonable(A) is False
assert jsonutils.is_jsonable(A()) is False
assert jsonutils.is_jsonable(1) is True
assert jsonutils.is_jsonable({'a': 1}) is True
assert jsonutils.is_jsonable({'a': A}) is False
```
#### File: joelsutilities/tests/test_logging.py
```python
from joelsutilities import loggingutils
from queue import Queue
import logging
def test_queue_handler():
q = Queue()
qh = loggingutils.QueueHandler(q)
qh.setFormatter(logging.Formatter())
logger = logging.getLogger('testlogger')
logger.addHandler(qh)
logger.warning('hello')
assert q.qsize()
assert q.get()['txt'] == 'hello'
``` |
{
"source": "joelee2012/claircli",
"score": 2
} |
#### File: claircli/tests/test_claircli.py
```python
import json
import logging
import os
import shutil
import unittest
from argparse import Namespace
from collections import defaultdict
from os.path import isdir, isfile
import responses
from requests import get as req_get
from six.moves.urllib.parse import quote, urlencode
from claircli.clair import Clair
from claircli.cli import ClairCli
from claircli.docker_image import Image
from claircli.docker_registry import LocalRegistry, RemoteRegistry
from claircli.report import Report, WhiteList
try:
from unittest.mock import patch
except:
from mock import patch
logger = logging.getLogger(__name__)
class ClairCmdTestBase(unittest.TestCase):
def setUp(self):
self.name = 'registry.example.com/org/image-name:version'
self.reg = 'registry.example.com'
self.repo = 'org/image-name'
self.tag = 'version'
self.reg_url = 'https://%s/v2/' % self.reg
self.token_url = self.reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (self.token_url, self.reg)
self.headers = {'WWW-Authenticate': auth}
self.clair_url = 'http://mock_clair:6060'
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
self.token_url = self.token_url + '?' + urlencode(params)
self.manifest_url = self.reg_url + 'org/image-name/manifests/version'
responses.add(responses.GET, self.reg_url,
json={'message': 'authentication required'},
status=401, headers=self.headers)
responses.add(responses.GET, self.token_url,
json={'token': 'test-token'}, status=200)
with open('tests/test_data/manifest.v2.json') as f:
self.manifest = json.load(f)
responses.add(responses.GET, self.manifest_url,
json=self.manifest, status=200)
self.v1_analyze_url = '%s/v1/layers' % self.clair_url
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
with open('tests/test_data/origin_vulnerabilities.json') as f:
self.origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
self.html = Report.get_report_path(self.name, '.html')
def tearDown(self):
RemoteRegistry.tokens = defaultdict(dict)
# if isfile(self.html):
# os.remove(self.html)
def assert_called_with_url(self):
self.assertEqual(responses.calls[0].request.url, self.reg_url)
self.assertEqual(
responses.calls[1].request.url, self.token_url)
self.assertEqual(
responses.calls[2].request.url, self.manifest_url)
def mock_docker_client(mock_docker):
mock_client = mock_docker.return_value
mock_image = mock_client.images.get.return_value
mock_image.save.return_value = open('tests/test_data/manifest.tar', 'r+b')
return mock_docker
class TestImage(ClairCmdTestBase):
def test_parse_image(self):
with open('tests/test_data/images.json') as f:
images = json.load(f)
for expected in images:
image = Image(expected['name'])
self.assertEqual(image.name, expected['name'])
self.assertEqual(image.repository, expected['repository'])
self.assertEqual(image.tag, expected['tag'])
self.assertEqual(str(image.registry), expected['registry'])
@responses.activate
def test_manifest(self):
image = Image(self.name)
self.assertEqual(image.manifest, self.manifest)
self.assert_called_with_url()
@responses.activate
def test_list_manifest(self):
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.manifest, list_manifest)
self.assert_called_with_url()
@responses.activate
def test_unsupported_manifest(self):
with open('tests/test_data/manifest.unsupported.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
with self.assertRaises(ValueError):
image = Image(self.name)
image.layers
@patch('docker.from_env')
def test_manifest_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.manifest, manifest)
@patch('docker.from_env')
def test_layers_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.layers, [e.replace(
'/layer.tar', '') for e in manifest[0]['Layers']])
@responses.activate
def test_layers_v1(self):
with open('tests/test_data/manifest.v1.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
image = Image(self.name)
self.assertEqual(image.layers, [e['blobSum']
for e in manifest['fsLayers']][::-1])
self.assert_called_with_url()
@responses.activate
def test_layers_v2(self):
image = Image(self.name)
self.assertEqual(image.layers,
[e['digest'] for e in self.manifest['layers']])
self.assert_called_with_url()
@responses.activate
def test_layers_list_v2(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.images[0].layers, [e['digest']
for e in list_image_manifest['layers']])
self.assertEqual(image.layers, [])
self.assert_called_with_url()
self.assertEqual(
responses.calls[3].request.url, list_image_manifest_url)
class TestClair(ClairCmdTestBase):
@responses.activate
def test_analyze_remote_image(self):
clair = Clair(self.clair_url)
image = Image(self.name)
layers = clair.analyze_image(image)
self.assertEqual(layers, self.layers)
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
@patch('docker.from_env')
@responses.activate
def test_analyze_local_image(self, mock_docker):
mock_docker_client(mock_docker)
clair = Clair(self.clair_url)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, image.layers[0]))
layers = clair.analyze_image(image)
self.assertEqual(layers, image.layers)
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
class TestClairCli(ClairCmdTestBase):
def test_read_white_list(self):
white_list = WhiteList('tests/test_data/example-whitelist.yaml')
self.assertEqual(white_list.get('common'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
self.assertEqual(white_list.get('alpine'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-3261': 'SE'})
self.assertEqual(white_list.get('ubuntu'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-5230': 'XSX'})
self.assertEqual(white_list.get('centos'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
@responses.activate
def test_analyze_images(self):
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_images_in_insecure_registry(self):
reg_url = 'http://%s/v2/' % self.reg
token_url = reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (token_url, self.reg)
headers = {'WWW-Authenticate': auth}
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
token_url = token_url + '?' + urlencode(params)
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, reg_url,
json={'message': 'authentication required'},
status=401, headers=headers)
responses.add(responses.GET, token_url,
json={'token': 'test-token'}, status=200)
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url, '-i', self.reg, self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([reg_url, token_url, manifest_url]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertIn(self.reg, RemoteRegistry.insec_regs)
@responses.activate
def test_analyze_images_in_secure_registry(self):
reg_url = 'https://%s/v2/' % self.reg
token = 'just-<PASSWORD>'
auth = 'Basic %s' % token
headers = {'WWW-Authenticate': auth}
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200, headers=headers)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url,
'-k', self.reg + ':' + token,
# Include a check for ignored arguments
'-k', '1234', '-k', 'ab:', '-k', ':',
self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([manifest_url, ]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=2):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertEqual(0, len(RemoteRegistry.insec_regs))
self.assertIn(self.reg, RemoteRegistry.tokens)
self.assertIn('', RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][''])
self.assertIn(self.repo, RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][self.repo])
@patch('docker.from_env')
@responses.activate
def test_analyze_local_images(self, mock_docker):
mock_docker_client(mock_docker)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
layers = [e.replace('/layer.tar', '') for e in manifest[0]['Layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, layers[-1]), json=self.origin_data)
with patch('sys.argv', ['claircli', '-l', 'localhost',
'-c', self.clair_url, self.name]):
cli = ClairCli()
cli.run()
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_manifest_list(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
with open('tests/test_data/origin_vulnerabilities_list.json') as f:
list_origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, list_origin_data['Layer']['Name']),
json=list_origin_data)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
layers = [e['digest'] for e in list_image_manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
for layer in layers:
responses.add(responses.GET, '%s/%s' %
(self.v1_analyze_url, layer))
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
image = Image(self.name)
self.assert_called_with_url()
for index, layer in enumerate(image.images[0].layers, start=5):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.html = Report.get_report_path('{}/{}@{}'.format(self.reg, self.repo, image.manifest['manifests'][0]['digest']), '.html')
self.assertTrue(isfile(self.html))
``` |
{
"source": "joelee2012/travis-test",
"score": 2
} |
#### File: joelee2012/travis-test/utlis.py
```python
def get_input(msg):
return msg
def main():
print(get_input('this is test'))
``` |
{
"source": "joelee2012/webbot",
"score": 3
} |
#### File: webbot/webbot/workflow.py
```python
from .action import new_action
import copy
class Task:
def __init__(self, raw):
if 'name' not in raw:
raise ValueError(f'name for task is required: {raw}')
self.raw = copy.deepcopy(raw)
self.name = self.raw.pop('name')
self.actions = []
self.parse()
def parse(self):
for action, args in self.raw.items():
if not args: # for action does not require args
args = {}
if not isinstance(args, (str, dict, list)):
args = str(args)
getattr(self, f'_parse_{type(args).__name__}')(action, args)
def _parse_str(self, action, args):
self.actions.append(new_action(action, args))
def _parse_dict(self, action, args):
self.actions.append(new_action(action, args))
def _parse_list(self, action, args):
for arg in args:
self._parse_str(action, arg)
def __getattr__(self, name):
return self.raw[name]
def __iter__(self):
yield from self.actions
def __str__(self):
return f'<{type(self).__name__}: {self.name}>'
class WorkFlow:
def __init__(self, raw):
self.raw = raw
self.tasks = []
def parse(self):
for task in self.raw['tasks']:
self.tasks.append(Task(task))
def __iter__(self):
yield from self.tasks
def __getattr__(self, name):
return self.raw[name]
def __str__(self):
return f'<{type(self).__name__}: {self.name}>'
``` |
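An illustrative sketch of building a `WorkFlow` from a raw dictionary; the action names (`open`, `click`, `screenshot`) are assumptions about what `new_action` accepts, only the raw structure mirrors the code above.
```python
# Illustrative only: the action names ('open', 'click', 'screenshot') are assumptions
# about what `.action.new_action` accepts; only the raw structure mirrors the code above.
from webbot.workflow import WorkFlow

raw = {
    'name': 'demo-flow',
    'tasks': [
        {'name': 'login', 'open': 'https://example.com/login', 'click': ['#user', '#submit']},
        {'name': 'check', 'screenshot': None},   # an action with no args becomes {}
    ],
}

flow = WorkFlow(raw)
flow.parse()
for task in flow:                            # WorkFlow iterates its Task objects
    print(task, [str(a) for a in task])      # each Task iterates its parsed actions
```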
{
"source": "joelee/camwatch",
"score": 3
} |
#### File: camwatch/src/detect_face_dlib.py
```python
import time
import cv2
import dlib
from frame import Frame
from utils import resize_img
detector = dlib.get_frontal_face_detector()
def detect_face(frame: Frame) -> dict:
ts = time.time()
cfg = frame.cfg.face_detect
img, scale = resize_img(
frame.img_gray if cfg.convert_gray else frame.image,
cfg.resize_img_pixel
)
dets = detector(img, cfg.upsample)
count = len(dets)
max_area = 0
zones = []
for d in dets:
l, t, r, b = d.left(), d.top(), d.right(), d.bottom()
left = int(l / scale)
top = int(t / scale)
width = int((r - l) / scale)
height = int((b - t) / scale)
area = width * height
if max_area < area:
max_area = area
zones.append(
(left, top, width, height)
)
if cfg.draw_rect:
print(
'Rect:', (l, t, r, b), scale, ':',
(left, top, width, height),
area, max_area
)
cv2.rectangle(
frame.image,
(left, top), (left + width, top + height),
cfg.draw_rect_color,
cfg.draw_rect_thickness
)
ret = {
'face_detected': count,
'face_area': max_area,
'face_zones': zones,
'face_ts': time.time() - ts,
'face_img': frame.image
}
img_path = cfg.save_image_path
if count > 0 and img_path:
file_name = (
f'face-{frame.index}-{count}-{max_area}.jpg'
)
cv2.imwrite(img_path + '/' + file_name, frame.image)
ret['face_file'] = file_name
return ret
```
#### File: camwatch/src/detect_face.py
```python
import time
import cv2
from frame import Frame
from utils import fix_rect
FACE_CASCADE = None
def detect_face(frame: Frame) -> dict:
global FACE_CASCADE
ts = time.time()
cfg = frame.cfg.face_detect
if FACE_CASCADE is None:
FACE_CASCADE = cv2.CascadeClassifier(
cfg.cascade_file
)
cas = FACE_CASCADE.detectMultiScale(
frame.img_gray, cfg.scale_factor, cfg.min_neighbours
)
count = len(cas)
max_area = 0
zones = []
if count > 0:
for zone in cas:
zone = fix_rect(zone, frame.cfg)
x, y, w, h = zone
area = w * h
if max_area < area:
max_area = area
if cfg.draw_rect:
cv2.rectangle(
frame.image,
(x, y), (x + w, y + h),
cfg.draw_rect_color,
cfg.draw_rect_thickness
)
zones.append(zone)
ret = {
'face_detected': count,
'face_area': max_area,
'face_zones': zones,
'face_ts': time.time() - ts,
'face_img': frame.image
}
img_path = cfg.save_image_path
if count > 0 and img_path:
file_name = (
f'face-{frame.frame}-{count}-{max_area}.jpg'
)
cv2.imwrite(img_path + '/' + file_name, frame.image)
ret['face_file'] = file_name
return ret
```
#### File: camwatch/src/tasks.py
```python
import time
from collections import deque
import numpy as np
import cv2
import threading
# import multiprocessing
from frame import Frame
from detect_motion import detect_motion
from detect_face_dlib import detect_face
from detect_car import detect_car
def prep_image(frame: Frame) -> dict:
cfg = frame.cfg.detector
contours = frame.cfg.capture.area_contours
if not contours:
img = frame.image
else:
# Mask detection area
stencil = np.zeros(frame.image.shape).astype(frame.image.dtype)
cv2.fillPoly(stencil, contours, [255, 255, 255])
img = cv2.bitwise_and(frame.image, stencil)
if frame.cfg.capture.area_rect:
x, y, w, h = frame.cfg.capture.area_rect
img = img[y:h, x:w]
if frame.cfg.capture.draw_areas:
colour = frame.cfg.capture.draw_area_color
for pts in frame.cfg.capture.area_pts:
cv2.polylines(frame.image, [pts], True, colour)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_blur = cv2.GaussianBlur(
img_gray,
cfg.gaussian_blur.kernel_size,
cfg.gaussian_blur.sigmax
)
return {
'img_gray': img_gray,
'img_blur': img_blur
}
TASK = {
'prep': prep_image,
'motion': detect_motion,
'face': detect_face,
'car': detect_car
}
def execute_task(task: str, frame: Frame) -> Frame:
if task not in TASK:
raise Exception(f'Task "{task}" not found.')
frame.new_task(task)
ret = TASK[task](frame)
return frame.set_dict(ret)
def execute_sync(tasks: list, frame: Frame) -> Frame:
if len(tasks) == 1:
return execute_task(tasks.pop(), frame)
ts = time.time()
for task in tasks:
execute_task(task, frame)
print('SingleProcess', tasks, ':', time.time() - ts)
return frame
def execute_threading(tasks: list, frame: Frame) -> Frame:
if len(tasks) == 1:
return execute_task(tasks.pop(), frame)
ts = time.time()
ret_q = deque()
def store_ret_q(func):
def wrapper(*args):
ret_q.append(func(*args))
return wrapper
@store_ret_q
def exec_task(l_task: str, l_frame: Frame):
if l_task not in TASK:
            raise Exception(f'Task "{l_task}" not found.')
        l_frame.new_task(l_task)
ret = TASK[l_task](l_frame)
return ret
threads = list()
for task in tasks:
thread = threading.Thread(
target=exec_task,
args=(task, frame)
)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
while len(ret_q) > 0:
frame.set_dict(ret_q.popleft())
print('MultiThreading', tasks, ':', time.time() - ts)
return frame
# def exec_mp_task(task: str, frame: Frame):
# ret = {}
# print('exec_mp_task', task)
# if task not in TASK:
# raise Exception(f'Task "{task}" not found.')
# frame.new_task(task)
# ret[task] = TASK[task](frame)
#
#
# def execute_multiprocessing(tasks: list, frame: Frame) -> Frame:
# if len(tasks) == 1:
# return execute_task(tasks.pop(), frame)
# ts = time.time()
#
# manager = multiprocessing.Manager()
# return_dict = manager.dict()
#
# processes = []
# for task in tasks:
# print('func:', exec_mp_task)
# print('task:', task, frame, return_dict)
# f = frame.clone(task)
# print('clone:', f)
# p = multiprocessing.Process(
# target=exec_mp_task,
# args=(task, f)
# )
# print('p', p)
# processes.append(p)
# print('processes', processes)
# p.start()
# print('p started', p)
#
# for p in processes:
# p.join()
#
# # for task in return_dict:
# # frame.set_dict(return_dict[task])
#
# print('MultiProcessing', tasks, ':', time.time() - ts)
# return frame
``` |
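`execute_threading` works around the fact that `threading.Thread` discards its target's return value: the `store_ret_q` decorator appends each task's result to a shared deque, which the main thread drains after `join()`. A stripped-down illustration of just that pattern (the task names are placeholders):
```python
import threading
from collections import deque

results = deque()  # deque.append/popleft are safe to call from multiple threads

def collect(func):
    """Store each call's return value instead of losing it in the Thread."""
    def wrapper(*args):
        results.append(func(*args))
    return wrapper

@collect
def work(name: str) -> str:
    return f"done: {name}"

threads = [threading.Thread(target=work, args=(n,)) for n in ("motion", "face")]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(list(results))  # e.g. ['done: motion', 'done: face']
```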
{
"source": "joeleeofficial/Python-Console",
"score": 3
} |
#### File: joeleeofficial/Python-Console/config.py
```python
import os
def echo(msg='',end='\n'):
print(msg,end=end)
def python():
os.system("python")
def bash():
os.system("bash")
def configHelp():
print(
"""
Hello There, I am Joe. A Programmer, Developer.
"""
)
```
#### File: joeleeofficial/Python-Console/main.py
```python
user_color = "white"
console_color = "white"
pointer = "$JOE >"
pointer_color = "green"
# {} is the command given by the user
class error:
syntax_error = "Error: '{}' is not a valid command."
name_error = "Error: '{}' is not defined."
type_error = "Error: wrong type for '{}'"
    invalid_parameter_error = "Error: {required_params} required and {optional_params} optional parameters expected, but {params_given} given."
error_color = "red"
do_help_command = True
help_command = "help"
version = "1.5.2"
language_name = "Blitz"
author = "JoeLee"
clear_command = ["clear","clr"]
from inspect import signature as s, isfunction as f
from json import loads as parse, dumps as stringify
import config
colors = {
"white": "\033[0m",
"red": "\033[31m",
"green": "\033[32m",
"blue": "\033[34m",
"purple": "\033[35",
"cyan": "\033[36m",
"orange": "\033[33m"
}
def e(c):
exec('global i; i = %s' % c)
global i
return i
try:
user_color = colors[user_color]
console_color = colors[console_color]
pointer_color = colors[pointer_color]
error_color = colors[error_color]
except:
print("\033[31mInvalid colors in configuration.\033[0m")
if do_help_command:
print("{} {} 2021 © Copyright \nAll Right Reserved By Joe Lee\n > https://github.com/joeleeofficial".format(language_name,version,author))
else:
print("{} {} 2021 © Copyright \nAll Right Reserved By Joe Lee\n> https://github.com/joeleeofficial".format(language_name,version,author))
help = '== Help ==\nHello There, I am Joe. A Programmer, Developer.'
while True:
x = input(pointer_color + pointer + console_color + " ")
if x.startswith(help_command + " ") and do_help_command:
x = x.split(help_command + " ")[1]
try:
if f(e("config." + x)):
print("== Help | " + x + " ==")
h = []
prm = [0,0]
co = 0
sig = s(e("config." + x.split(" ")[0]))
for key in list(dict(sig.parameters).keys()):
if str(dict(sig.parameters)[key]).startswith("{}=".format(key)):
prm[1] += 1
else:
prm[0] += 1
for i in str(s(e("config." + x)))[1:-1].split(", "):
if co <= prm[0]:
h.append("[" + i.split("=")[0] + "]")
else:
h.append("(" + i.split("=")[0] + ")")
co += 1
print("Usage: " + x + " " + ' '.join(h) + "\nParams: " + " | ".join(str(s(e("config." + x)))[1:-1].split(",")))
except:
print(error_color + error.syntax_error.format(x))
elif x in clear_command:
print("\033c",end="",flush=True)
if do_help_command:
print("{} {} 2021 © Copyright \nAll Right Reserved By Joe Lee\n > https://github.com/joeleeofficial Type Help For More Information".format(language_name,version,author))
else:
print("{} {} 2021 © Copyright \nAll Right Reserved By Joe Lee\n > https://github.com/joeleeofficial Type Help For Information".format(language_name,version,author))
elif x.strip() != "":
y = x.split(" ")
c = x.split(" ")[0]
del(y[0])
y = ','.join(y)
sig = ''
prm = [0,0]
try:
if f(e("config." + c)):
sig = s(e("config." + x.split(" ")[0]))
for key in list(dict(sig.parameters).keys()):
if str(dict(sig.parameters)[key]).startswith("{}=".format(key)):
prm[1] += 1
else:
prm[0] += 1
if (len(y.split(",")) == prm[0] or y.split(",") == ['']) or len(y.split(",")) <= (prm[0] + prm[1]):
try:
if not y == "":
e("config." + c + "(" + y + ")")
else:
try:
e("config." + c + "()")
except:
print("<[function] {}>".format(c))
except TypeError:
print(error_color + error.type_error.format(x))
except NameError:
print(error_color + error.name_error.format(x))
else:
print(error_color + error.invalid_parameter_error.format(required_params=prm[0],optional_params=prm[1],params_given=len(y.split(","))))
else:
raise AttributeError
except (AttributeError, SyntaxError):
print(error_color + error.syntax_error.format(x))
``` |
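The help and dispatch branches above count required vs. optional parameters by string-matching each parameter's repr, which is fragile. `inspect.Parameter` exposes the same information directly; a small sketch of the cleaner check (`greet` is just an illustrative function, not part of the console):
```python
from inspect import signature

def greet(name, punctuation="!"):
    print(f"Hello {name}{punctuation}")

required = 0
optional = 0
for param in signature(greet).parameters.values():
    # Parameters without a default are the console's "required" arguments.
    if param.default is param.empty:
        required += 1
    else:
        optional += 1

print(required, optional)  # 1 1
```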
{
"source": "joelegner/leglib",
"score": 3
} |
#### File: leglib/leglib-old/filesys.py
```python
import os
def del_file(path):
"Deletes a file if it exists."
if os.path.isfile(path):
os.popen("rm %s" % path)
```
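Note that `del_file` shells out to `rm`, which silently does nothing on Windows and trips over paths containing shell metacharacters. A portable sketch using the standard library directly (same behaviour, no subshell):
```python
import os

def del_file(path):
    """Delete a file if it exists, without shelling out to rm."""
    if os.path.isfile(path):
        os.remove(path)
```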
#### File: leglib/leglib-old/floatdict.py
```python
class FloatDict(dict):
"A dictionary containing float values suitable for mathematical operators."
def __init__(self, valdict={}):
if valdict:
dict.__init__(self, valdict)
def __add__(self, other):
"""Add two FloatDicts or a FloatDict and a float. In the former
case, add each shared term. Unshared terms result in the value found
in the dictionary containing the term. For floats, each term increased
by the amount of the float."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k] + other[k]
else:
retval[k] = self[k]
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = other[k]
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] + other
return retval
def __sub__(self, other):
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k] - other[k]
else:
retval[k] = self[k]
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = -other[k]
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] - other
return retval
def __mul__(self, other):
"""Multiply two FloatDicts or a FloatDict and a float. In the former
case, multiply each shared term. Unshared terms result in zero in the
result FloatDict. For floats, each term is multiplied in turn by the
float value."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k] * other[k]
else:
retval[k] = 0.0
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = 0.0
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] * other
return retval
def __rmul__(self, other):
retval = FloatDict()
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] * other
return retval
def __truediv__(self, other):
"""Divide two FloatDicts or a FloatDict and a float. In the former
case, multiply each shared term. Unshared terms result in zero in the
result FloatDict. For floats, each term is multiplied in turn by the
float value."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k]/other[k]
else:
retval[k] = None
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = 0.0
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k]/other
return retval
def __rtruediv__(self, other):
"""Divide two FloatDicts or a FloatDict and a float. In the former
case, multiply each shared term. Unshared terms result in zero in the
result FloatDict. For floats, each term is multiplied in turn by the
float value."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = other[k]/self[k]
else:
retval[k] = 0.0
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = None
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = other/self[k]
return retval
```
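A quick usage sketch of `FloatDict`'s element-wise arithmetic (the import path and key names are assumptions for illustration; the `__main__` block in `prop.py` further down shows the same idea with load combinations):
```python
from floatdict import FloatDict  # assumed module name for the file above

D = FloatDict({"dead": 4.0, "live": 3.0})
L = FloatDict({"live": 9.0, "wind": 11.0})

print(D + L)    # {'dead': 4.0, 'live': 12.0, 'wind': 11.0}
print(2.0 * D)  # {'dead': 8.0, 'live': 6.0}
print(D * L)    # shared key multiplied, unshared keys become 0.0
```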
#### File: leglib/leglib-old/prop.py
```python
class Prop(dict):
def __init__(self, desc, name, val, units = ""):
self["desc"] = desc
self["name"] = name
self["val"] = val
self["units"] = units
class FloatDict(dict):
"A dictionary containing float values suitable for mathematical operators."
def __init__(self, valdict={}):
if valdict:
dict.__init__(self, valdict)
def __add__(self, other):
"""Add two FloatDicts or a FloatDict and a float. In the former
case, add each shared term. Unshared terms result in the value found
in the dictionary containing the term. For floats, each term increased
by the amount of the float."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k] + other[k]
else:
retval[k] = self[k]
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = other[k]
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] + other
return retval
def __mul__(self, other):
"""Multiply two FloatDicts or a FloatDict and a float. In the former
case, multiply each shared term. Unshared terms result in zero in the
result FloatDict. For floats, each term is multiplied in turn by the
float value."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k] * other[k]
else:
retval[k] = 0.0
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = 0.0
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] * other
return retval
def __rmul__(self, other):
retval = FloatDict()
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k] * other
return retval
def __div__(self, other):
"""Divide two FloatDicts or a FloatDict and a float. In the former
case, multiply each shared term. Unshared terms result in zero in the
result FloatDict. For floats, each term is multiplied in turn by the
float value."""
retval = FloatDict()
if isinstance(other, dict):
for k in list(self.keys()):
if k in list(other.keys()):
retval[k] = self[k]/other[k]
else:
retval[k] = 0.0
for k in list(other.keys()):
if k not in list(self.keys()):
retval[k] = 0.0
if isinstance(other, float):
for k in list(self.keys()):
retval[k] = self[k]/other
return retval
if __name__ == "__main__":
D = FloatDict({1 : 4.55, 2 : 3.78, 3: 8.22})
L = FloatDict({1 : 5.4, 2 : 9.4, 4: 11.22})
print(D)
print(L)
# print D*1.4 + L*1.7
print(1.4*D + 1.7*L)
print(1.4*D)
```
#### File: leglib-old/structural/piles.py
```python
from .footing import Footing
from leglib import fmt
from matplotlib import pyplot as plt
from portal import PortalFrame
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
interaction_polygon = (
(0.0, 0.0),
(596, 0.0),
(600, 150.0),
(608, 350),
(627, 500),
(632, 600),
(632, 650),
(600, 940),
(574, 1080),
(324, 1325),
(0, 1325),
(0, 0)
)
pg = Polygon(interaction_polygon)
MIN_ASPECT = 1.0
MAX_ASPECT = 4.0
ASPECT_INC = 0.125
PLOT_DPI = 300
def plot_interaction_24(filename="interaction24.png"):
global interaction_polygon
xs = [p[0] for p in interaction_polygon]
ys = [p[1] for p in interaction_polygon]
plt.close()
plt.plot(xs, ys)
plt.savefig(filename)
plt.close()
def check(M, P):
global pg
pt = Point(M, P)
return pg.contains(pt)
class PileGroup:
"""
Rectangular group of piles
s = Spacing in feet
H = Exposed "stick height" in feet
"""
def __init__(self, rows=2, cols=2, s=6.0, H=15.0):
self.rows = rows
self.cols = cols
self.s = s
self.H = H
self.edge_dist = 2.0
def n(self):
return self.rows*self.cols
def Wftg(self):
"Returns weight of footing in kips"
self.ftg = Footing(B=2.0*max(self.yrng()) + 2*self.edge_dist, L = 2.0*max(self.xrng()) + 2*self.edge_dist, T = 6.0)
return self.ftg.W()
def xrng(self):
return [i*self.s - self.xbar() for i in range(0, self.cols)]
def yrng(self):
return [i*self.s - self.ybar() for i in range(0, self.rows)]
def analyze_group_action(self, P=0.0, M=0.0):
"""Calculates worst-case force in the pile"""
return P/self.n() + M*max(self.xrng())/self.Ix()
def analyze(self, P, M, V):
"""
Analyze a rectangular pile group for forces:
P = axial downward in kips
V = lateral in kips
M = moment about X axis in kip-ft
"""
# Each row becomes a portal frame
# Each column becomes a column in the portal frame
f = PortalFrame(H=self.H*1.5, L=self.s, cols=self.cols)
f.analyze(V/self.rows)
self.Mcol = f.M
self.Pcol = f.R
# f.plot()
def xbar(self):
return (self.cols - 1)*self.s/2.0
def ybar(self):
return (self.rows - 1)*self.s/2.0
def Ix(self):
return self.rows*sum([x**2 for x in self.xrng()])
def Iy(self):
return self.cols*sum([y**2 for y in self.yrng()])
def plot_plan(self, filename):
plt.close()
xrng = self.xrng()
yrng = self.yrng()
xs = []
ys = []
# Calculate pile coordinates
for i in range(0, len(xrng)):
for j in range(0, len(yrng)):
xs.append(xrng[i])
ys.append(yrng[j])
# Plot axes
plt.plot((0, 0), (min(xs)*1.2, max(xs)*1.2), linestyle="-.", color="darkgrey")
plt.plot((min(xs)*1.2, max(xs)*1.2), (0, 0), linestyle="-.", color="darkgrey")
# Plot piles
plt.scatter(xs, ys, marker='s')
plt.scatter(0, 0, marker='+')
# Plot footing
x1 = min(xrng) - self.edge_dist
x2 = max(xrng) + self.edge_dist
y1 = min(yrng) - self.edge_dist
y2 = max(yrng) + self.edge_dist
xs = [x1, x2, x2, x1, x1]
ys = [y1, y1, y2, y2, y1]
plt.plot(xs, ys)
# Make it square
ymin, ymax = plt.xlim()
plt.ylim( (ymin, ymax) )
# Finalize and write
plt.title("Pile Layout: %.0f feet of Water\n%d x %d = %d Piles" % (self.H, self.rows, self.cols, self.n()))
plt.savefig(filename, dpi=PLOT_DPI)
def plot_elev(self, filename):
plt.close()
xrng = self.xrng()
xs = []
ys_top = []
ys_bot = []
# Plot the piles
for i in range(0, len(xrng)):
xs.append(xrng[i])
# Plot axes
plt.plot((0, 0), (min(xs)*1.2, max(xs)*1.2), linestyle="-.", color="darkgrey")
# Plot the piles
for i in range(0, len(xrng)):
plt.plot((xrng[i], xrng[i]), (-2.0, -self.H), color="black", linewidth=2.0)
# Plot footing
x1 = min(xrng) - self.edge_dist
x2 = max(xrng) + self.edge_dist
y1 = -3.0
y2 = 3.0
xs = [x1, x2, x2, x1, x1]
ys = [y1, y1, y2, y2, y1]
plt.plot(xs, ys)
# Make it square
ymin, ymax = plt.xlim()
plt.ylim( (ymin, ymax) )
# Finalize and write
plt.title("Pile Cap Elevation: %.0f feet of Water\n%d x %d = %d Piles" % (self.H, self.rows, self.cols, self.n()))
plt.savefig(filename, dpi=PLOT_DPI)
def rotate(self):
c = self.cols
self.cols = self.rows
self.rows = c
def design(self, P, M, V):
"""
Design for axial force, P (kips), moment M (kip-ft), and horizontal
force V (kips)
"""
aspect = MIN_ASPECT
min_num = 10**10
min_rows_cols = None
while aspect <= MAX_ASPECT:
self.rows = 2
passes = False
while not passes:
self.cols = int(self.rows*aspect)
# print("%d rows, %d cols" % (self.rows, self.cols))
self.analyze(P=P+1.25*self.Wftg(), M=M, V=V)
# print("Force in column due to portal frame action: Pu = %.1f kips" % self.Pcol)
self.Pcol = self.Pcol + self.analyze_group_action(P=P, M=M)
# print("Force in column: Pu = %.1f kips" % self.Pcol)
# print("Moment in column: Mu = %.0f kip-ft" % self.Mcol)
passes = check(P=self.Pcol, M=self.Mcol)
if passes:
# Check longitudinal direction
self.rotate()
self.analyze(P=P, M=0.0, V=V/2.0)
self.Pcol = self.Pcol + self.analyze_group_action(P=P, M=M)
passes = passes and check(P=self.Pcol, M=self.Mcol)
if passes:
# Passed both ways, let's see if it's the best
if self.n() < min_num:
min_num = self.n()
min_rows_cols = (self.rows, self.cols)
self.rotate() # restore it
else:
self.rows = self.rows + 1
aspect = aspect + ASPECT_INC
self.cols, self.rows = min_rows_cols
self.analyze(P=P+1.25*self.Wftg(), M=M, V=V)
print(("Winning combination: %d rows x %d columns" % (self.rows,
self.cols)))
print(("Footing size = %s x %s x %s" % (fmt.ft_in(self.ftg.L*12),
fmt.ft_in(self.ftg.B*12), fmt.ft_in(self.ftg.T*12))))
print(("Footing weight = %.0f kips" % self.Wftg()))
self.plot_plan(filename="depth%.0f_plan.png" % (self.H))
self.plot_elev(filename="depth%.0f_elev.png" % (self.H))
if __name__ == '__main__':
# Forces from Homework #1: PD = 1340 kips, PL = 408 kips (2 lanes), MLL =
# 3672 kip-ft.
for H in [15.0, 30.0, 50.0]:
print("===============================================================")
print(("Stick height, H = %.0f ft" % H))
print(("Design stick height, H = 1.5(H - 3) = %.1f ft" % (1.5*(H - 3.0))))
grp = PileGroup(H=H)
# Combine loads per Extreme Event II
grp.design(P=1.25*1340 + 0.5*408, M=3672*0.5, V=2000.0)
```
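The `check(M, P)` helper above treats the 24-inch pile's interaction diagram as a 2-D region and simply asks shapely whether the demand point falls inside it. The same idea with a made-up, much simpler boundary (the coordinates below are illustrative only, not a real capacity curve):
```python
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon

# Toy interaction diagram: capacity boundary in (M, P) space.
diagram = Polygon([(0, 0), (500, 0), (400, 900), (0, 1000)])

def is_adequate(M: float, P: float) -> bool:
    """A demand point inside the boundary polygon means the section passes."""
    return diagram.contains(Point(M, P))

print(is_adequate(200, 400))   # True
print(is_adequate(600, 1200))  # False
```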
#### File: structural/tests/test_conccol.py
```python
from structural.conccol import RectTiedColumn
import unittest
class TestRectTiedColumn(unittest.TestCase):
def setUp(self):
self.col = RectTiedColumn(b=12.0, h=24.0, nx=2, ny=5,
barsize=14, tiebarsize=4, fc=6000, cover=1.5)
def test_props(self):
self.assertEqual(self.col.n(), 10)
self.assertAlmostEqual(self.col.Ag(), 288.0, places=2)
self.assertAlmostEqual(self.col.Ast(), 22.50, places=2)
self.assertAlmostEqual(self.col.sx(), 6.307, places=3)
self.assertAlmostEqual(self.col.sy(), 4.5768, places=3)
self.assertAlmostEqual(self.col.concrete().beta1(), 0.75, places=2)
def test_Pn(self):
self.assertAlmostEqual(self.col.Pn_max(), 2163.24, places=2)
self.assertAlmostEqual(self.col.phiPn_max(), 1406.0, places=0)
def test_ds(self):
rows = self.col.rows_y()
self.assertAlmostEqual(rows[0].ds, 2.8465, places=3)
self.assertAlmostEqual(rows[1].ds, 7.423, places=3)
self.assertAlmostEqual(rows[2].ds, 12.0, places=3)
self.assertAlmostEqual(rows[3].ds, 16.577, places=3)
self.assertAlmostEqual(rows[4].ds, 21.153, places=3)
self.assertAlmostEqual(sum([r.Ast() for r in rows]), self.col.Ast())
# def test_calcs(self):
# self.assertAlmostEqual(self.col.case_1(), 27.3088, places=2)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: structural/tests/test_sign.py
```python
from structural.sign import Sign
import unittest
B = 31.5
s = 5.5
h = 40.0
class TestSign(unittest.TestCase):
def setUp(self):
self.sign = Sign(h=h, B=B, s=s)
def test_wind_calcs(self):
self.assertAlmostEqual(self.sign.As(), B*s, places=2)
self.assertAlmostEqual(self.sign.qh(), 18.3, places=1)
self.assertAlmostEqual(self.sign.F(), 5398.6, places=0)
self.assertAlmostEqual(self.sign.y(), h-s/2, places=0)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: structural/tests/test_snowcalc.py
```python
from structural.snowcalc import SnowCalc
import unittest
class TestSnowCalc(unittest.TestCase):
def setUp(self):
self.calc = SnowCalc(pg=70.0, W=50.0, Ce=1.1, Ct=1.0, I=1.1,
is_leeward=False)
def test_calc(self):
self.assertAlmostEqual(self.calc.pf, 59.0, places=0)
self.assertAlmostEqual(self.calc.ps, 59.0, places=0)
self.assertAlmostEqual(self.calc.hb, 59.0/23.1, places=0)
def test_report(self):
r = self.calc.render("txt")
self.assertTrue(len(r) > 0)
self.assertTrue(type(r) == type("Joe"))
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: structural/tests/test_snow.py
```python
from structural import IBC2009
from structural import set_code
from structural.roof import Roof
import unittest
# ======================
# ASCE 7-05
# ======================
class TestSnowASCE705(unittest.TestCase):
def setUp(self):
code = set_code(IBC2009)
self.asce = code.asce7
def test_flat_roof_snow(self):
# Default when pg <= 20, I(pg) = (1.0)(15) = 15 psf
self.assertAlmostEqual(self.asce.pf(pg=15.0, I=1.0, Ce=1.0, Ct=1.0),
15.0, places=2)
self.assertAlmostEqual(self.asce.pf(pg=20.0, I=1.0, Ce=1.0, Ct=1.0),
20.0, places=2)
self.assertAlmostEqual(self.asce.pf(pg=25.0, I=1.0, Ce=1.0, Ct=1.0),
20.0, places=2)
self.assertAlmostEqual(self.asce.pf(pg=30.0, I=1.0, Ce=1.0, Ct=1.0),
21.0, places=2)
self.assertAlmostEqual(self.asce.pf(pg=30.0, I=1.15, Ce=1.0, Ct=1.0),
24.15, places=2)
def test_Cs(self):
roof = Roof(W=30.0, rise=8, slippery=False)
self.assertAlmostEqual(self.asce.Cs(roof, Ct=1.0), 0.91, places=2)
def test_snow_density(self):
self.assertAlmostEqual(self.asce.snow_density(pg=30.0), 17.9, places=1)
# Test upper limit of 30 pcf
self.assertAlmostEqual(self.asce.snow_density(pg=1000.0), 30.0, places=1)
def test_asce705_example_1(self):
"Replicate Example 1 on p. 329 of ASCE 7-05 in Commentary Chapter 6"
roof = Roof(W=30.0, rise=8, slippery=False)
self.assertTrue(self.asce.is_unbalanced(roof))
self.assertAlmostEqual(self.asce.ps(roof, 30.0, 1.0, 1.0, 1.0),
19.0, places=0)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: structural/tests/test_wind.py
```python
from structural import FBC2010
from structural import IBC2009
from structural import set_code
import unittest
# =============================================================================
# ASCE 7-05
# =============================================================================
class TestWindASCE705(unittest.TestCase):
def setUp(self):
code = set_code(IBC2009)
self.asce = code.asce7
def test_illegal_exposure(self):
self.assertRaises(ValueError, self.asce.Kz, z=15, exposure="E")
def test_Kz(self):
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="B", case=2), 0.57,
places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="C"), 0.85, places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="D"), 1.03, places=2)
self.assertAlmostEqual(self.asce.Kz(z=15, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=15, exposure="B", case=2), 0.57,
places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="C"), 0.98, places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="D"), 1.16, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="B"), 0.85, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="C"), 1.14, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="D"), 1.31, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="B"), 0.99, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="C"), 1.27, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="D"), 1.43, places=2)
def test_qz(self):
self.assertAlmostEqual(self.asce.qz(V=90.0, z=40.0, exposure="C", I=1.0,
Kd=0.85, Kzt=1.0), 18.33, places=2)
self.assertAlmostEqual(self.asce.qz(V=90.0, z=40.0, exposure="B", I=1.0,
Kd=0.85, Kzt=1.0), 13.4, places=2)
self.assertAlmostEqual(self.asce.qz(V=90.0, z=35.0, exposure="B", I=1.0,
Kd=0.85, Kzt=1.0), 12.87, places=2)
# =============================================================================
# ASCE 7-10
# =============================================================================
class TestWindASCE710(unittest.TestCase):
def setUp(self):
self.asce = set_code(FBC2010).asce7
def test_Kz(self):
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="B", case=2), 0.57,
places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="C"), 0.85, places=2)
self.assertAlmostEqual(self.asce.Kz(z=12, exposure="D"), 1.03, places=2)
self.assertAlmostEqual(self.asce.Kz(z=15, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=15, exposure="B", case=2), 0.57,
places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="B"), 0.70, places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="C"), 0.98, places=2)
self.assertAlmostEqual(self.asce.Kz(z=30, exposure="D"), 1.16, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="B"), 0.85, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="C"), 1.14, places=2)
self.assertAlmostEqual(self.asce.Kz(z=60, exposure="D"), 1.31, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="B"), 0.99, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="C"), 1.27, places=2)
self.assertAlmostEqual(self.asce.Kz(z=100, exposure="D"), 1.43, places=2)
def test_qz(self):
self.assertAlmostEqual(self.asce.qz(V=90.0, z=40.0, exposure="C",
Kd=0.85, Kzt=1.0), 18.33, places=2)
self.assertAlmostEqual(self.asce.qz(V=90.0, z=40.0, exposure="B",
Kd=0.85, Kzt=1.0), 13.4, places=2)
self.assertAlmostEqual(self.asce.qz(V=90.0, z=35.0, exposure="B",
Kd=0.85, Kzt=1.0), 12.87, places=2)
# Hand calc 2014-03-25 for Project No. 215810015:
self.assertAlmostEqual(self.asce.qz(V=142.0, z=15.0, exposure="C",
case=1, Kd=0.85, Kzt=1.0), 37.2953344, places=2)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: leglib-old/structural/windcalc_other.py
```python
from structural import FBC2010
from structural.calc import BaseCalc
class WindOtherStructureCalc(BaseCalc):
def __init__(self, V, exposure="C", Kd=0.85, Kzt=1.0, code=FBC2010,
title="", project=None, project_number="", by=""):
super(WindOtherStructureCalc, self).__init__(title, project,
project_number, by)
self.exposure = exposure
self.V = V
self.Kd = Kd
self.Kzt = Kzt
self.code = code
self.name = "WindOtherStructureCalc"
self.areas=[] # constituent areas
self.G = 0.85
self.recalc()
def add_area(self, z, Af, Cf=2.0, name=None):
if name is None:
i = 1
next_name = "Area%d" % i
while next_name in [a["name"] for a in self.areas]:
i = i + 1
next_name = "Area%d" % i
self.areas.append({ "z" : z, "Af" : Af, "Cf" : Cf, "name" : name })
def recalc(self):
# Calculate z based upon constituent areas
self.z = 15.0 # minimum height in feet
self.Af = 0.0 # total area in sq feet
self.zbar = 0.0 # average height of constituent areas
if len(self.areas):
self.z = max(max([a["z"] for a in self.areas]), 15)
self.Af = sum([a["Af"] for a in self.areas])
self.CfAf = sum([a["Cf"]*a["Af"] for a in self.areas])
self.zbar = sum([a["Af"]*a["z"] for a in self.areas])/self.Af
self.Kz = self.code.asce7.Kz(z=self.z, exposure=self.exposure,
case=1)
self.qz = self.code.asce7.qz(self.V, z=self.z, exposure=self.exposure,
Kd=self.Kd, Kzt=self.Kzt)
# Calculate resultant force, P and moment M
self.P = 0.0
self.M = 0.0
self.P = sum([a["Af"]*a["Cf"]*self.G*self.qz for a in self.areas])/1000.0
self.M = sum([a["Af"]*a["Cf"]*a["z"]*self.G*self.qz for a in self.areas])/1000.0
# Calculate height of resultant, h
if self.P > 0.0:
self.h = self.M/self.P
else:
self.h = None
```
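`recalc()` aggregates the constituent areas before applying the velocity pressure: the governing height is the tallest area (but never less than 15 ft) and `zbar` is the area-weighted average height. A numeric sketch with made-up areas:
```python
# Hypothetical constituent areas: (height z in ft, area Af in sq ft, force coefficient Cf)
areas = [(10.0, 50.0, 2.0), (20.0, 30.0, 1.8)]

Af_total = sum(a for _, a, _ in areas)             # 80.0 sq ft
zbar = sum(z * a for z, a, _ in areas) / Af_total  # (500 + 600) / 80 = 13.75 ft
z_design = max(max(z for z, _, _ in areas), 15.0)  # tallest area, never below the 15 ft minimum

print(Af_total, zbar, z_design)
```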
#### File: leglib-old/structural/windcalc.py
```python
from structural import FBC2010
from structural.calc import BaseCalc
class WindCalc(BaseCalc):
def __init__(self, V, z, exposure="C", Kd=0.85, Kzt=1.0, code=FBC2010,
title="", project=None, project_number="", by=""):
super(WindCalc, self).__init__(title, project, project_number, by)
self.exposure = exposure
self.z = z
self.V = V
self.Kd = Kd
self.Kzt = Kzt
self.code = code
self.name = "WindCalc"
self.recalc()
self.G = 0.85
def recalc(self):
if not hasattr(self, "G"):
self.G = 0.85
self.Kz = self.code.asce7.Kz(z=self.z, exposure=self.exposure,
case=1)
self.qz = self.code.asce7.qz(self.V, z=self.z, exposure=self.exposure,
Kd=self.Kd, Kzt=self.Kzt)
```
#### File: leglib-old/tests/test_circle.py
```python
import unittest
import math
from leglib.shapes import Circle
DIAMETER = 16.0
class TestCircle(unittest.TestCase):
def setUp(self):
self.circ = Circle(d=DIAMETER)
def test_area(self):
self.assertAlmostEqual(self.circ.A(), math.pi *
DIAMETER*DIAMETER/4.0, places=3)
def test_half_circle_area(self):
"Test to see if we get half the circle area when we plug in y=0.0"
self.assertAlmostEqual(self.circ.segment_area(y=0.0), math.pi *
DIAMETER*DIAMETER/4.0/2.0, places=3)
def test_circular_segment_area(self):
# Magic number value was calculated using an online calculator
# https://rechneronline.de/pi/circular-segment.php
self.assertAlmostEqual(self.circ.segment_area(y=6.0), 14.506, places=3)
def test_circular_segment_moment_area(self):
# Use a half-circle to test this function
# https://en.wikipedia.org/wiki/List_of_centroids
r = self.circ.R()
A = math.pi*r**2/2.0
y_bar = 4.0*r/(3.0*math.pi)
self.assertAlmostEqual(
self.circ.first_moment_segment_area(y=0.0), A*y_bar, 4)
def test_stress_block_area(self):
r = self.circ.R()
self.assertAlmostEqual(
self.circ.stress_block_area(c=8, beta1=1.0), math.pi*r**2/2.0, 4)
self.assertAlmostEqual(
# Test result from calculator at https://planetcalc.com/1421/
self.circ.stress_block_area(c=4, beta1=1.0), 39.31, 2)
if __name__ == '__main__':
unittest.main()
```
#### File: leglib-old/tests/test_geom.py
```python
from geom import Line
from geom import Point
from geom import Polygon
from geom import Segment
from geom import Vector
from geom import dist
import unittest
class TestGeom(unittest.TestCase):
def setUp(self):
self.p1 = Point(0.0, 0.0)
self.p2 = Point(5.0, 5.0)
self.p3 = Point(6.0, 4.0)
self.p4 = Point(6.0, -1.0)
self.seg1 = Segment(self.p1, self.p2)
self.seg2 = Segment(self.p3, self.p4)
self.line1 = Line(self.p1, self.p2)
self.line2 = Line(self.p3, self.p4)
self.pointA = Point(3,3)
self.pointB = Point(8,8)
self.poly = Polygon((Point(-1, -1), Point(6, -1), Point(5, 6),
Point(0, 5)))
self.square = Polygon((Point(0.0, 0.0), Point(6.0, 0.0),
Point(6.0, 5.0), Point(0.0, 5.0)))
def test_repr(self):
self.assertEqual(self.p2.__repr__(), "Point(5.0, 5.0)")
self.assertEqual(self.line1.__repr__(), "[(0.0, 0.0)..(5.0, 5.0)]")
def test_get_tuple(self):
self.assertTrue(isinstance(self.p1.get_tuple(), tuple))
def test_point_introspection(self):
self.assertAlmostEqual(self.p1.x, self.p1[0], places=6)
self.assertAlmostEqual(self.p4.y, self.p4[1], places=6)
self.assertRaises(IndexError, self.p1.__getitem__, 2)
def test_point_copy(self):
p = self.p1.copy((2.4, -1.22))
self.assertAlmostEqual(p.x, 2.4, places=6)
self.assertAlmostEqual(p.y, -1.22, places=6)
def test_line_introspection(self):
self.assertAlmostEqual(self.line2.x1, 6.0, places=6)
self.assertAlmostEqual(self.line2.y1, 4.0, places=6)
self.assertAlmostEqual(self.line2.x2, 6.0, places=6)
self.assertAlmostEqual(self.line2.y2, -1.0, places=6)
midpt = self.line2.midpoint()
self.assertAlmostEqual(midpt.x, 6.0, places=6)
self.assertAlmostEqual(midpt.y, 1.5, places=6)
self.assertFalse(self.line1.is_vertical())
self.assertTrue(self.line2.is_vertical())
self.assertTrue(self.line2.slope() is None)
self.assertAlmostEqual(self.line1.slope(), 1.0, places=6)
self.assertAlmostEqual(self.line1.yintercept(), 0.0, places=6)
self.assertTrue(self.line2.yintercept() is None)
def test_point_and_dist(self):
self.assertEqual("%s" % self.p1, "(0.0, 0.0)")
self.assertEqual("%s" % self.p2, "(5.0, 5.0)")
self.assertFalse(self.p1 == self.p2)
self.assertTrue(self.p1 == self.p1)
# Length should be sqrt(5^2 + 5^2) = 7.071
self.assertAlmostEqual(dist(self.p1, self.p2), 7.071, places=3)
self.assertAlmostEqual(self.p1.dist(self.p2), 7.071, places=3)
# Test distance between a point and a line (perpendicular dist)
# Correct answer determined using AutoCAD software
self.assertAlmostEqual(self.p3.dist(self.line1), 1.4142, places=4)
self.assertTrue(self.p3.dist("Joe") is None)
def test_point_move(self):
p = Point(1.0, 3.0)
p.move(0.75, -2.3)
self.assertAlmostEqual(p.x, 1.75, places=3)
self.assertAlmostEqual(p.y, 0.70, places=3)
def test_point_rotate(self):
# Rotate point about origin
p = Point(1.0, 3.0)
p.rotate(angle=0.5)
# Correct answer determined using AutoCAD software
self.assertAlmostEqual(p.x, -0.56069405, places=3)
self.assertAlmostEqual(p.y, 3.11217322, places=3)
def test_segment(self):
self.assertEqual("%s" % self.seg1, "(0.0, 0.0)-(5.0, 5.0)")
self.assertEqual(self.seg1.intersection(self.seg2), None)
self.assertEqual(self.seg1.intersection(self.seg1), None)
def test_line(self):
# Length should be sqrt(5^2 + 5^2) = 7.071
self.assertAlmostEqual(self.line1.length(), 7.071, places=3)
self.assertEqual(self.line1.intersection(self.line2), Point(6.0, 6.0))
self.assertEqual(self.line1.intersection(self.line1), None)
def test_polygon(self):
self.assertTrue(self.poly.point_within(self.pointA))
self.assertFalse(self.poly.point_within(self.pointB))
# Area of square = 6*5 = 30
self.assertAlmostEqual(self.square.area(), 30.0, places=2)
def test_point_constructors(self):
pt1 = (4.5, -5.4)
pt2 = Point.from_tuple(pt1)
pt3 = Point(x =4.5, y=-5.4)
pt4 = Point.from_point(pt3)
self.assertAlmostEqual(pt1[0], pt2.x, places=6)
self.assertAlmostEqual(pt1[1], pt2.y, places=6)
self.assertAlmostEqual(pt1[0], pt3.x, places=6)
self.assertAlmostEqual(pt1[1], pt3.y, places=6)
self.assertAlmostEqual(pt1[0], pt4.x, places=6)
self.assertAlmostEqual(pt1[1], pt4.y, places=6)
# # Test point rotation about another point
# p5 = Point(8.0, 7.0) # Original point
# p6 = Point(8.0, 7.0) # Original point to be rotated
# p6.rotate(math.radians(30), p2) # Rotate about point p2
# # Test polygon offset
# poly2 = Polygon((Point(0, 6), Point(4, 2), Point(10,0), Point(9, 9), Point(5, 11)))
# poly2.offset(1, True)
# for p in poly2.points:
# print p
# poly2.get_segments()
## poly2.plot()
# print line1.dir_vector(), line2.dir_vector()
# print "Test distance from pt1 to pt2: %s" % (p1.dist(p2))
# print "Test distance from %s to %s: %s" % (p1, seg2, seg2.dist_to_pt(p1))
# seg3 = Segment(Point(1.0, 2.0), Point(3.0, 3.0))
# print "Test distance from %s to %s: %s" % (p1, seg3, seg3.dist_to_pt(p1))
def test_vector(self):
v1 = Vector(7, 4)
v2 = Vector(-6, 3)
self.assertEqual("%s" % v1, "Vector(7.0, 4.0)")
self.assertEqual("%s" % v2, "Vector(-6.0, 3.0)")
# length should be sqrt(7**2 + 4**2) = 8.062
self.assertAlmostEqual(v1.norm(), 8.062, places=3)
# Create a unit vector and test its length
v1u = v1.unit_vector()
self.assertAlmostEqual(v1u.norm(), 1.00, places=3)
v3 = v1*6.4
self.assertTrue(isinstance(v3, Vector))
# v3 length = 8.062(6.4) = 51.597
self.assertAlmostEqual(v3.norm(), 51.597, places=2)
# v1 + v2 = (1, 7); length = 7.071
v4 = v1 + v2
self.assertAlmostEqual(v4.norm(), 7.071, places=3)
# Dot product of v1, v2 = (7)(-6) + (4)(3) = -30
self.assertAlmostEqual(v1.dot(v2), -30.0, places=3)
self.assertAlmostEqual(v2.dot(v1), -30.0, places=3)
# Cross product of v1 x v2 = (7)(3) - (-6)(4) = 45
self.assertAlmostEqual(v1.cross(v2), 45.0, places=3)
# Cross product of v2 x v1 = (-6)(4) - (7)(3) = -45
self.assertAlmostEqual(v2.cross(v1), -45.0, places=3)
v5=v1.perp()
self.assertAlmostEqual(v5[0], -4.0, places=3)
self.assertAlmostEqual(v5[1], 7.0, places=3)
if __name__ == "__main__": # pragma: no cover
unittest.main()
```
#### File: leglib-old/tests/test_shapes.py
```python
from shapes import Circle
from shapes import Cylinder
from shapes import HollowCircle
from shapes import Rectangle
from shapes import RectangularPrism
import math
import unittest
class CircleTest(unittest.TestCase):
def test_properties(self):
"""Test circle properties"""
c = Circle(d=12.0)
self.assertAlmostEqual(c.A(), 113.097, places=2)
self.assertAlmostEqual(c.I(), 1017.875, places=2)
self.assertAlmostEqual(c.S(), 169.646, places=2)
self.assertAlmostEqual(c.Z(), 288.000, places=2)
self.assertAlmostEqual(c.r(), 3.0, places=2)
class CylinderTest(unittest.TestCase):
def testProperties(self):
"""Test cylinder properties"""
c = Cylinder(D=2.0, L=4.0)
self.assertAlmostEqual(c.Atop(), 3.142, places=2)
self.assertAlmostEqual(c.Abottom(), 3.142, places=2)
self.assertAlmostEqual(c.A(), 31.416, places=2)
self.assertAlmostEqual(c.Aside(), 25.133, places=2)
self.assertAlmostEqual(c.V(), 12.566, places=2)
class HollowCircleTest(unittest.TestCase):
def testProperties(self):
"""Test hollow circle properties"""
hc = HollowCircle(d=10.0, d1=9.00)
self.assertAlmostEqual(hc.A(), 14.923, places=2)
self.assertAlmostEqual(hc.I(), 168.811, places=2)
self.assertAlmostEqual(hc.S(), 33.762, places=2)
self.assertAlmostEqual(hc.Z(), 45.167, places=2)
self.assertAlmostEqual(hc.r(), 3.363, places=2)
class RectangleTest(unittest.TestCase):
def testProperties(self):
"""Test section properties"""
r = Rectangle(b = 12.0, h = 24.0)
self.assertAlmostEqual(r.A(), 288.0)
self.assertAlmostEqual(r.Ix(), 13824.0, places=0)
self.assertAlmostEqual(r.Ix_base(), 55296.0, places=0)
self.assertAlmostEqual(r.Iy(), 3456.0, places=0)
self.assertAlmostEqual(r.Iy_base(), 13824.0, places=0)
self.assertAlmostEqual(r.Sx(), 1152.0, places=0)
self.assertAlmostEqual(r.Sy(), 576.00, places=0)
self.assertAlmostEqual(r.rx(), math.sqrt(13824.0/288.0), places=0)
self.assertAlmostEqual(r.ry(), math.sqrt(3456.0/288.0), places=0)
self.assertAlmostEqual(r.Zx(), 1728.0, places=0)
self.assertAlmostEqual(r.Zy(), 864.0, places=0)
class RectangularPrismTest(unittest.TestCase):
def testProperties(self):
"""Test rectangular prism (box) properties"""
p = RectangularPrism(L=3.0, B=2.0, T=1.0)
self.assertAlmostEqual(p.V(), 6.000, places=2)
self.assertAlmostEqual(p.Atop(), 6.0, places=2)
self.assertAlmostEqual(p.Abottom(), 6.0, places=2)
self.assertAlmostEqual(p.Afront(), 3.0, places=2)
self.assertAlmostEqual(p.Aback(), 3.0, places=2)
self.assertAlmostEqual(p.Aend(), 2.0, places=2)
A = 2*(3.0*2.0 + 3.0*1.0 + 2.0*1.0)
self.assertAlmostEqual(p.A(), A, places=2)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: leglib/leglib-old/validate.py
```python
import re
# =============================================================================
# Floating-point values
# =============================================================================
def unsigned_float(txt):
"Returns true if string is a signed or unsigned decimal."
print("Validating string %s" % txt)
return (re.compile(r"^\d*\.?\d*$").search(txt) is not None)
def signed_float(txt):
"Returns true if string is a signed or unsigned decimal."
return (re.compile(r"^(\+|-)?\d*\.?\d*$").search(txt) is not None)
# =============================================================================
# Integers
# =============================================================================
def signed_int(txt):
"Returns true if string is a signed or unsigned integer or empty string."
if type(txt) == int: return True
if type(txt) == str:
return (re.compile(r"^(\+|-)?\d*$").search(txt) is not None)
else:
return False
def unsigned_int(txt):
"Returns true if string is an unsigned integer."
if type(txt) == int:
return (txt >= 0)
if type(txt) == str:
return (re.compile(r"^\d+$").search(txt) is not None)
else:
return False
def even_int(val):
"Returns true if string is an even integer."
if isinstance(val, str):
if not unsigned_int(val):
# It is not even an integer, so return false
return False
val = int(val)
elif not isinstance(val, int):
# Not a string representation of an int or an int itself
return False
# We know it is an integer, so check if it is even or not
return not(val%2)
if __name__ == "__main__":
print("Testing validation functions.")
assert not signed_int("-4.11") # False
assert signed_int("-4") # True
assert not unsigned_int("-4") # False
assert unsigned_int("4") # True
assert not unsigned_int("Joe") # False
assert not unsigned_float("-4.22") # True
assert signed_float("4.22") # True
assert not unsigned_float("Joe") # False
assert not signed_float("Joe") # False
assert even_int("24") # True
assert not even_int("3") # True
assert not even_int(31) # False
assert not even_int(1.7) # False
print("Done. No failures.")
```
#### File: leglib/leglib/util.py
```python
import datetime # for timestamp function
import math
import re
def almost_equal(first, second, places=7):
fmtstr = "%%.%df" % places
return (fmtstr % first) == (fmtstr % second)
def float_eq(float1, float2, prec=1.0E-6):
"Returns true if float1 and float2 differ by less than 1E-6."
return (math.fabs(float1 - float2) <= prec)
def float_zero(value, prec=1.0E-6):
"Returns True if value is very small (default < 1.0E-06)"
return value <= prec
def str_to_feet(value="0'-0"):
"""
Returns string converted into decimal feet.
    Acceptable formats include:
1. 5'-7"
5'-7 1/2"
5'-7 1/2''
5'-7
5'-7 1/2
2. 7 3/4
-8
The trailing quotation mark can be omitted.
"""
# Remove optional inches mark
value = value.replace('"', '')
value = value.replace("''", "")
if value.find("'") != -1:
split_str = value.split("'")
whole_feet = float(split_str[0])
in_str = split_str[1]
if in_str[0] == '-':
a = len(in_str)
in_str = in_str[1:a]
else:
whole_feet = 0.0
in_str = value
split_in_str = in_str.split(" ")
whole_inches = float(split_in_str[0])
if len(split_in_str) > 1:
frac_split = split_in_str[1].split("/")
numer = float(frac_split[0])
denom = float(frac_split[1])
sign = int(whole_inches/math.fabs(whole_inches))
inches = sign*(math.fabs(whole_inches) + numer/denom)
else:
inches = whole_inches
# Convert the inches portion
    # See if it is decimal form or fraction form
if whole_feet < 0:
sign = -1
else:
sign = 1
return sign*(math.fabs(whole_feet) + inches/12.0)
def hr(width=79, char='='):
"Returns a horizontal line of characters."
return line(width, char)
def datestamp():
"Returns ISO 8601 date stamp."
t = datetime.datetime.today()
return t.strftime("%Y-%m-%d")
def timestamp():
"Returns ISO 8601 date time stamp."
t = datetime.datetime.today()
return t.strftime("%Y-%m-%d %H:%M")
def line(width, char='='):
"Returns a string composed of a number of characters repeated."
char = char[0]
retval = ""
for i in range(0, width):
retval += char
return retval
# def adir(obj):
# "Returns alphabetical dir() results."
# items = dir(obj)
# items.sort()
# return items
# def utc_to_datetime(utc_str):
# "Parse UTC string into datetime."
# utc_str = utc_str.lower()
# utc_re = re.compile("^(?P<dayname>[a-z]{3}), (?P<day>[0-9]{2}) (?P<monthname>[a-z]{3}) (?P<year>[0-9]{4}) (?P<hour>[0-9]{2}):(?P<minute>[0-9]{2}):(?P<second>[0-9]{2}) (?P<tzoffset>[\+|\-][0-9]{4})")
# m = utc_re.match(utc_str)
# month = datetime.datetime.strptime(m.groupdict()["monthname"], "%b").month
# year = int(m.groupdict()["year"])
# day = int(m.groupdict()["day"])
# hour = int(m.groupdict()["hour"])
# minute = int(m.groupdict()["minute"])
# second = int(m.groupdict()["second"])
# return datetime.datetime(year, month, day, hour, minute, second)
def interpolate(x1, y1, x2, y2, x):
"Returns y for point x given line (x1, y1) - (x2, y2)."
x = float(x)
x1 = float(x1)
x2 = float(x2)
y1 = float(y1)
y2 = float(y2)
return (y1 + (y2 - y1)/(x2 - x1)*(x - x1))
# def bilinear_interpolate(x, y, x1, y1, x2, y2, Q11, Q12, Q21, Q22):
# """Returns R which is interpolated from Q11, Q12, etc. for x, y in a
# square grid. See http://en.wikipedia.org/wiki/Bilinear_interpolation"""
# denom = (x2 - x1)*(y2 - y1)
# return Q11/denom*(x2 - x)*(y2 - y) + \
# Q21/denom*(x - x1)*(y2 - y) + \
# Q12/denom*(x2 - x)*(y - y1) + \
# Q22/denom*(x - x1)*(y - y1)
# def geocode(address):
# "Use geopy to return (placename, lat, lon) or None if geocoding fails."
# from geopy import geocoders
# try:
# gn = geocoders.GeoNames()
# g = geocoders.Google('ABQIAAAAsh_oKO4GhIzRmrsXh68uIxQ8K5bBOqwDHQamL\
# rpVX5GcdT719xT8C1zgQPQs6LNt2AAksu9_BDy5ZA')
# place, (lat, lng) = g.geocode(address)
# return (place, lat, lng)
# except:
# # Could not geocode for some reason
# return None
``` |
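A few quick calls showing what these helpers accept and return (the `leglib.util` import path is an assumption based on the file location; values are chosen so they can be checked by hand):
```python
from leglib.util import str_to_feet, interpolate, float_eq  # assumed import path

print(str_to_feet("5'-7 1/2\""))  # 5 ft + 7.5 in = 5.625
print(str_to_feet("-8"))          # bare inches: -0.666...
print(interpolate(0.0, 10.0, 10.0, 30.0, 2.5))  # 15.0
print(float_eq(0.1 + 0.2, 0.3))   # True, within the default 1e-6 tolerance
```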
{
"source": "joelelmercarlson/stack",
"score": 2
} |
#### File: stack/python/backup.py
```python
import os
import sys
import json
import time
import boto3
def load_credentials():
"""
load_credentials
:return: dict
"""
with open("credentials.json", "r", encoding="UTF-8") as stream:
content = json.loads(stream.read())
return content
def bucket_add(data, path):
"""
bucket_add
:param data: dict
:param path: str
"""
os.chdir(path)
for name in os.listdir("."):
if os.path.isdir(name):
print(f"skipping dir={name}...")
else:
label = name.upper()
print(f"Adding file={name} with key={label}...")
upload(data, name, label)
def bucket_create(data):
"""
bucket_create
:param data: dict
"""
_s3_ = boto3.resource("s3",
endpoint_url=data["ep"],
aws_access_key_id=data["access_key"],
aws_secret_access_key=data["secret_key"])
bucket = _s3_.Bucket(data["bucket"])
bucket.create()
def bucket_get(data):
"""
bucket_get
:param data: dict
"""
_s3_ = boto3.client("s3",
endpoint_url=data["ep"],
aws_access_key_id=data["access_key"],
aws_secret_access_key=data["secret_key"])
response = _s3_.list_objects_v2(Bucket=data["bucket"])
for item in response["Contents"]:
print(item["Key"])
def bucket_list(data):
"""
    bucket_list
:param data: dict
"""
_s3_ = boto3.client("s3",
endpoint_url=data["ep"],
aws_access_key_id=data["access_key"],
aws_secret_access_key=data["secret_key"])
response = _s3_.list_buckets()
for item in response["Buckets"]:
print(item["CreationDate"], item["Name"])
def upload(data, filename, key):
"""
    upload
:param data: dict
:param filename: str
:param key: str
"""
_s3_ = boto3.resource("s3",
endpoint_url=data["ep"],
aws_access_key_id=data["access_key"],
aws_secret_access_key=data["secret_key"])
bucket = _s3_.Bucket(data["bucket"])
bucket.upload_file(Filename=filename, Key=key)
def run():
"""
run
"""
print(f"Started at {time.strftime('%X')}...")
start = time.time()
data = load_credentials()
try:
endpoint = data["ep"]
bucket = data["bucket"]
except KeyError:
print("expected ep, bucket, workdir, access_key, secret_key...")
sys.exit(1)
print(f"S3 to ep={endpoint} bucket={bucket}...")
# TODO boto3 complains about XML
#bucket_create(data)
for work in data["workdir"]:
bucket_add(data, work)
bucket_list(data)
duration = time.time() - start
print(f"Stopped at {time.strftime('%X')}... duration={duration:.2f}")
if __name__ == "__main__":
run()
```
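`run()` expects `credentials.json` to carry the S3 endpoint, the bucket name, a list of directories to upload, and the key pair. A sketch of a matching file, with placeholder values only (inferred from `load_credentials()` and `run()`, not from project docs):
```python
import json

# Every value below is a placeholder.
example = {
    "ep": "https://s3.example.com",
    "bucket": "my-backups",
    "workdir": ["/home/user/docs", "/home/user/photos"],
    "access_key": "AKIA-PLACEHOLDER",
    "secret_key": "PLACEHOLDER",
}

with open("credentials.json", "w", encoding="UTF-8") as stream:
    json.dump(example, stream, indent=2)
```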
#### File: stack/python/game.py
```python
import time
import sys
def make_a_guy(name, level, power, pet):
"""
make_a_guy
:param name: str
:param level: int
:param power: str
:param pet: str
"""
print("==================================================")
print(f"{name} is level {level} w/ amazing power {power}")
print(f"my pet is {pet}")
print("==================================================")
def action(name, ops):
"""
action does ops
:param name: str
:param ops: str
"""
time.sleep(1)
print(f"{name} is doing {ops}!")
def blood():
"""blood does blood"""
i = 0
while i < 3:
i = i + 1
print("* BLOOD *")
def dylan():
"""say hello to the creator"""
say("Dylan")
def say(something):
"""
say something
:param something: str
"""
print(f"Hello {something}!")
if __name__ == "__main__":
NAME = "fiend"
PET = "fang"
WEAPON = "blood blaster"
TABLE = {"a": "attacks",
"d": "defends",
"s": "pet bites",
"f": "bleeds",
"n": "jumps",
"o": "king attack",
"p": "potty time",
"k": "kill move",
"c": "change to gorilla lava monster",
"9": "turn into Drew Brees",
"q": "dies"}
COUNTER = 0
dylan()
make_a_guy(NAME, "10", WEAPON, PET)
while True:
ACTOR = input("Action> ")
if len(ACTOR) > 1:
say(ACTOR)
else:
try:
DOING = TABLE[ACTOR]
except KeyError:
DOING = f"ducks, dodge, grab your {WEAPON}!"
action(NAME, DOING)
COUNTER = COUNTER + 1
if COUNTER > 5:
print(f"Battle over {NAME} and {PET}")
blood()
sys.exit()
```
#### File: stack/python/mtg.py
```python
import argparse
import os
import time
import json
import requests
import urllib3
urllib3.disable_warnings()
URL = "https://api.scryfall.com/cards/random?q=is%3Acommander"
WORKDIR = "/var/tmp"
CACHEDIR = f"{WORKDIR}/cache"
def cache(filename, contents):
"""
cache
:param filename: str
:param contents: str
"""
try:
os.mkdir(WORKDIR)
except FileExistsError:
pass
try:
os.mkdir(CACHEDIR)
except FileExistsError:
pass
with open(filename, "w", encoding="UTF-8") as stream:
stream.write(str(contents))
os.chmod(filename, 0o644)
def get_arguments():
"""
get_arguments for cli
:return: parser
"""
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cache", action="count", default=0, help="cached cards")
parser.add_argument("-r", "--random", action="count", default=1, help="random cards")
return parser.parse_args()
def get_card(path=CACHEDIR):
"""
get_card
:param path: str
"""
header = {"Content-Type": "application/json"}
time.sleep(0.5)
card = requests.get(URL, auth=None, headers=header, verify=False)
filename = f"{path}/{card.json()['multiverse_ids'][0]}.json"
cache(filename, card.text)
display_card(card.json())
def display_card(card):
"""
display_card
:param card: dict
"""
print("---")
print(f"## {card['name']}")
print(f"- Mana: {card['mana_cost']}")
print(f"- Converted Mana Cost: {card['cmc']}")
print(f"- {card['type_line']}")
print("```")
try:
print(f"{card['oracle_text']}")
except KeyError:
pass
try:
print(f"{card['flavor_text']}")
except KeyError:
pass
print("```")
try:
print(f"- P/T: {card['power']}/{card['toughness']}")
except KeyError:
pass
try:
print(f"- Loyalty: {card['loyalty']}")
except KeyError:
pass
print(f"- {card['collector_number']} {card['set']} (c) {card['frame']} WoTC")
print(f"- {card['related_uris']['gatherer']}")
def display_cards(path=CACHEDIR):
"""
display_cards
"""
os.chdir(path)
for name in os.listdir("."):
if not os.path.isdir(name):
with open(name, "r", encoding="UTF-8") as stream:
card = json.loads(stream.read())
display_card(card)
def run():
"""
run
"""
args = get_arguments()
if not args.cache:
count = args.random + 1
for _ in range(1, count):
get_card()
else:
display_cards()
if __name__ == '__main__':
run()
``` |
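`display_card` only reads a handful of Scryfall fields, so it can be exercised offline with a hand-built dict; missing optional keys (flavor text, loyalty) are simply skipped by the try/except blocks. All values below are invented:
```python
from mtg import display_card  # assuming the module above is importable as mtg

card = {
    "name": "Example Commander",
    "mana_cost": "{2}{G}{G}",
    "cmc": 4.0,
    "type_line": "Legendary Creature - Elf Druid",
    "oracle_text": "When this creature enters the battlefield, draw a card.",
    "power": "3",
    "toughness": "3",
    "collector_number": "123",
    "set": "xyz",
    "frame": "2015",
    "related_uris": {"gatherer": "https://gatherer.wizards.com/"},
}
display_card(card)
```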
{
"source": "joeleong/idapython",
"score": 3
} |
#### File: idapython/examples/ex_expr.py
```python
import idaapi
from idaapi import add_idc_func
def py_power(n, e):
return n ** e
# Describe the new IDC function: instantiate the descriptor and avoid the
# trailing commas that would turn these fields into tuples.
desc = idaapi.ext_idcfunc_t()
desc.name = "pow"
desc.func = py_power
desc.args = (idaapi.VT_LONG, idaapi.VT_LONG)
desc.defvals = ()
desc.flags = 0
ok = add_idc_func(desc)
if ok:
print("Now the pow() will be present IDC!")
else:
print("Failed to register pow() IDC function")
```
#### File: idapython/examples/ex_pyqt.py
```python
from idaapi import PluginForm
from PyQt5 import QtCore, QtGui, QtWidgets
import sip
class MyPluginFormClass(PluginForm):
def OnCreate(self, form):
"""
Called when the plugin form is created
"""
# Get parent widget
self.parent = self.FormToPyQtWidget(form)
self.PopulateForm()
def PopulateForm(self):
# Create layout
layout = QtWidgets.QVBoxLayout()
layout.addWidget(
QtWidgets.QLabel("Hello from <font color=red>PyQt</font>"))
layout.addWidget(
QtWidgets.QLabel("Hello from <font color=blue>IDAPython</font>"))
self.parent.setLayout(layout)
def OnClose(self, form):
"""
Called when the plugin form is closed
"""
pass
plg = MyPluginFormClass()
plg.Show("PyQt hello world")
```
#### File: idapython/examples/vds4.py
```python
import idautils
import idaapi
import idc
import traceback
def run():
cfunc = idaapi.decompile(idaapi.get_screen_ea())
if not cfunc:
print 'Please move the cursor into a function.'
return
entry_ea = cfunc.entry_ea
print "Dump of user-defined information for function at %x" % (entry_ea, )
# Display user defined labels.
labels = idaapi.restore_user_labels(entry_ea);
if labels is not None:
print "------- %u user defined labels" % (len(labels), )
for org_label, name in labels.iteritems():
print "Label %d: %s" % (org_label, str(name))
idaapi.user_labels_free(labels)
# Display user defined comments
cmts = idaapi.restore_user_cmts(entry_ea);
if cmts is not None:
print "------- %u user defined comments" % (len(cmts), )
for tl, cmt in cmts.iteritems():
print "Comment at %x, preciser %x:\n%s\n" % (tl.ea, tl.itp, str(cmt))
idaapi.user_cmts_free(cmts)
# Display user defined citem iflags
iflags = idaapi.restore_user_iflags(entry_ea)
if iflags is not None:
print "------- %u user defined citem iflags" % (len(iflags), )
        for cl, f in iflags.iteritems():
            print "%x(%d): %08X%s" % (cl.ea, cl.op, f, " CIT_COLLAPSED" if f & idaapi.CIT_COLLAPSED else "")
idaapi.user_iflags_free(iflags)
# Display user defined number formats
numforms = idaapi.restore_user_numforms(entry_ea)
if numforms is not None:
print "------- %u user defined number formats" % (len(numforms), )
for ol, nf in numforms.iteritems():
print "Number format at %a, operand %d: %s" % (ol.ea, ol.opnum, "negated " if (nf.props & NF_NEGATE) != 0 else "")
if nf.is_enum():
print "enum %s (serial %d)" % (str(nf.type_name), nf.serial)
elif nf.is_char():
print "char"
elif nf.is_stroff():
print "struct offset %s" % (str(nf.type_name), )
else:
print "number base=%d" % (idaapi.get_radix(nf.flags, ol.opnum), )
idaapi.user_numforms_free(numforms)
# Display user-defined local variable information
lvinf = idaapi.lvar_uservec_t()
if idaapi.restore_user_lvar_settings(lvinf, entry_ea):
print "------- User defined local variable information\n"
for lv in lvinf.lvvec:
print "Lvar defined at %x" % (lv.ll.defea, )
if len(str(lv.name)):
print " Name: %s" % (str(lv.name), )
if len(str(lv.type)):
#~ print_type_to_one_line(buf, sizeof(buf), idati, .c_str());
print " Type: %s" % (str(lv.type), )
if len(str(lv.cmt)):
print " Comment: %s" % (str(lv.cmt), )
return
if idaapi.init_hexrays_plugin():
run()
else:
print 'dump user info: hexrays is not available.'
```
#### File: idapython/pywraps/py_idp.py
```python
CUSTOM_INSN_ITYPE = 0x8000
REG_SPOIL = 0x80000000
REAL_ERROR_FORMAT = -1 # not supported format for current .idp
REAL_ERROR_RANGE = -2 # number too big (small) for store (mem NOT modified)
REAL_ERROR_BADDATA = -3 # illegal real data for load (IEEE data not filled)
#
# Check whether the operand is relative to stack pointer or frame pointer.
# This function is used to determine how to output a stack variable
# This function may be absent. If it is absent, then all operands
# are sp based by default.
# Define this function only if some stack references use frame pointer
# instead of stack pointer.
# returns flags:
OP_FP_BASED = 0x00000000 # operand is FP based
OP_SP_BASED = 0x00000001 # operand is SP based
OP_SP_ADD = 0x00000000 # operand value is added to the pointer
OP_SP_SUB = 0x00000002 # operand value is subtracted from the pointer
# processor_t.id
PLFM_386 = 0 # Intel 80x86
PLFM_Z80 = 1 # 8085, Z80
PLFM_I860 = 2 # Intel 860
PLFM_8051 = 3 # 8051
PLFM_TMS = 4 # Texas Instruments TMS320C5x
PLFM_6502 = 5 # 6502
PLFM_PDP = 6 # PDP11
PLFM_68K = 7 # Motorola 680x0
PLFM_JAVA = 8 # Java
PLFM_6800 = 9 # Motorola 68xx
PLFM_ST7 = 10 # SGS-Thomson ST7
PLFM_MC6812 = 11 # Motorola 68HC12
PLFM_MIPS = 12 # MIPS
PLFM_ARM = 13 # Advanced RISC Machines
PLFM_TMSC6 = 14 # Texas Instruments TMS320C6x
PLFM_PPC = 15 # PowerPC
PLFM_80196 = 16 # Intel 80196
PLFM_Z8 = 17 # Z8
PLFM_SH = 18 # Renesas (formerly Hitachi) SuperH
PLFM_NET = 19 # Microsoft Visual Studio.Net
PLFM_AVR = 20 # Atmel 8-bit RISC processor(s)
PLFM_H8 = 21 # Hitachi H8/300, H8/2000
PLFM_PIC = 22 # Microchip's PIC
PLFM_SPARC = 23 # SPARC
PLFM_ALPHA = 24 # DEC Alpha
PLFM_HPPA = 25 # Hewlett-Packard PA-RISC
PLFM_H8500 = 26 # Hitachi H8/500
PLFM_TRICORE = 27 # Tasking Tricore
PLFM_DSP56K = 28 # Motorola DSP5600x
PLFM_C166 = 29 # Siemens C166 family
PLFM_ST20 = 30 # SGS-Thomson ST20
PLFM_IA64 = 31 # Intel Itanium IA64
PLFM_I960 = 32 # Intel 960
PLFM_F2MC = 33 # Fujitsu F2MC-16
PLFM_TMS320C54 = 34 # Texas Instruments TMS320C54xx
PLFM_TMS320C55 = 35 # Texas Instruments TMS320C55xx
PLFM_TRIMEDIA = 36 # Trimedia
PLFM_M32R = 37 # Mitsubishi 32bit RISC
PLFM_NEC_78K0 = 38 # NEC 78K0
PLFM_NEC_78K0S = 39 # NEC 78K0S
PLFM_M740 = 40 # Mitsubishi 8bit
PLFM_M7700 = 41 # Mitsubishi 16bit
PLFM_ST9 = 42 # ST9+
PLFM_FR = 43 # Fujitsu FR Family
PLFM_MC6816 = 44 # Motorola 68HC16
PLFM_M7900 = 45 # Mitsubishi 7900
PLFM_TMS320C3 = 46 # Texas Instruments TMS320C3
PLFM_KR1878 = 47 # Angstrem KR1878
PLFM_AD218X = 48 # Analog Devices ADSP 218X
PLFM_OAKDSP = 49 # Atmel OAK DSP
PLFM_TLCS900 = 50 # Toshiba TLCS-900
PLFM_C39 = 51 # Rockwell C39
PLFM_CR16 = 52 # NSC CR16
PLFM_MN102L00 = 53 # Panasonic MN10200
PLFM_TMS320C1X = 54 # Texas Instruments TMS320C1x
PLFM_NEC_V850X = 55 # NEC V850 and V850ES/E1/E2
PLFM_SCR_ADPT = 56 # Processor module adapter for processor modules written in scripting languages
PLFM_EBC = 57 # EFI Bytecode
PLFM_MSP430 = 58 # Texas Instruments MSP430
PLFM_SPU = 59 # Cell Broadband Engine Synergistic Processor Unit
#
# processor_t.flag
#
PR_SEGS = 0x000001 # has segment registers?
PR_USE32 = 0x000002 # supports 32-bit addressing?
PR_DEFSEG32 = 0x000004 # segments are 32-bit by default
PR_RNAMESOK = 0x000008 # allow the user to use register names for location names
PR_ADJSEGS = 0x000020 # IDA may adjust segments moving their starting/ending addresses.
PR_DEFNUM = 0x0000C0 # default number representation:
PRN_HEX = 0x000000 # hex
PRN_OCT = 0x000040 # octal
PRN_DEC = 0x000080 # decimal
PRN_BIN = 0x0000C0 # binary
PR_WORD_INS = 0x000100 # instruction codes are grouped 2 bytes in binary line prefix
PR_NOCHANGE = 0x000200 # The user can't change segments and code/data attributes (display only)
PR_ASSEMBLE = 0x000400 # Module has a built-in assembler and understands IDP_ASSEMBLE
PR_ALIGN = 0x000800 # All data items should be aligned properly
PR_TYPEINFO = 0x001000 # the processor module supports
# type information callbacks
# ALL OF THEM SHOULD BE IMPLEMENTED!
PR_USE64 = 0x002000 # supports 64-bit addressing?
PR_SGROTHER = 0x004000 # the segment registers don't contain
# the segment selectors, something else
PR_STACK_UP = 0x008000 # the stack grows up
PR_BINMEM = 0x010000 # the processor module provides correct
# segmentation for binary files
# (i.e. it creates additional segments)
# The kernel will not ask the user
# to specify the RAM/ROM sizes
PR_SEGTRANS = 0x020000 # the processor module supports
# the segment translation feature
# (it means it calculates the code
# addresses using the map_code_ea() function)
PR_CHK_XREF = 0x040000 # don't allow near xrefs between segments
# with different bases
PR_NO_SEGMOVE = 0x080000 # the processor module doesn't support move_segm()
# (i.e. the user can't move segments)
PR_USE_ARG_TYPES = 0x200000 # use ph.use_arg_types callback
PR_SCALE_STKVARS = 0x400000 # use ph.get_stkvar_scale callback
PR_DELAYED = 0x800000 # has delayed jumps and calls
PR_ALIGN_INSN = 0x1000000 # allow ida to create alignment instructions
                                # arbitrarily. Since these instructions
# might lead to other wrong instructions
# and spoil the listing, IDA does not create
# them by default anymore
PR_PURGING = 0x2000000 # there are calling conventions which may
# purge bytes from the stack
PR_CNDINSNS = 0x4000000 # has conditional instructions
PR_USE_TBYTE = 0x8000000 # BTMT_SPECFLT means _TBYTE type
PR_DEFSEG64 = 0x10000000 # segments are 64-bit by default
# ----------------------------------------------------------------------
# instruc_t related constants
#
# instruc_t.feature
#
CF_STOP = 0x00001 # Instruction doesn't pass execution to the next instruction
CF_CALL = 0x00002 # CALL instruction (should make a procedure here)
CF_CHG1 = 0x00004 # The instruction modifies the first operand
CF_CHG2 = 0x00008 # The instruction modifies the second operand
CF_CHG3 = 0x00010 # The instruction modifies the third operand
CF_CHG4 = 0x00020 # The instruction modifies the fourth operand
CF_CHG5 = 0x00040 # The instruction modifies the fifth operand
CF_CHG6 = 0x00080 # The instruction modifies the sixth operand
CF_USE1 = 0x00100 # The instruction uses value of the first operand
CF_USE2 = 0x00200 # The instruction uses value of the second operand
CF_USE3 = 0x00400 # The instruction uses value of the third operand
CF_USE4 = 0x00800 # The instruction uses value of the fourth operand
CF_USE5 = 0x01000 # The instruction uses value of the fifth operand
CF_USE6 = 0x02000 # The instruction uses value of the sixth operand
CF_JUMP = 0x04000 # The instruction passes execution using indirect jump or call (thus needs additional analysis)
CF_SHFT = 0x08000 # Bit-shift instruction (shl,shr...)
CF_HLL = 0x10000 # Instruction may be present in a high level language function.
#
# Set IDP options constants
#
IDPOPT_STR = 1 # string constant
IDPOPT_NUM = 2 # number
IDPOPT_BIT = 3 # bit, yes/no
IDPOPT_FLT = 4 # float
IDPOPT_I64 = 5 # 64bit number
IDPOPT_OK = 0 # ok
IDPOPT_BADKEY = 1 # illegal keyword
IDPOPT_BADTYPE = 2 # illegal type of value
IDPOPT_BADVALUE = 3 # illegal value (bad range, for example)
# ----------------------------------------------------------------------
import ida_ua
class processor_t(ida_idaapi.pyidc_opaque_object_t):
"""Base class for all processor module scripts"""
def __init__(self):
pass
def get_idpdesc(self):
"""
This function must be present and should return the list of
short processor names similar to the one in ph.psnames.
This method can be overridden to return to the kernel a different IDP description.
"""
return '\x01'.join(map(lambda t: '\x01'.join(t), zip(self.plnames, self.psnames)))
def get_uFlag(self):
"""Use this utility function to retrieve the 'uFlag' global variable"""
return ida_ua.cvar.uFlag
def get_auxpref(self, insn):
"""This function returns insn.auxpref value"""
return insn.auxpref
# ----------------------------------------------------------------------
class __ph(object):
id = property(lambda self: ph_get_id())
cnbits = property(lambda self: ph_get_cnbits())
dnbits = property(lambda self: ph_get_dnbits())
flag = property(lambda self: ph_get_flag())
icode_return = property(lambda self: ph_get_icode_return())
instruc = property(lambda self: ph_get_instruc())
instruc_end = property(lambda self: ph_get_instruc_end())
instruc_start = property(lambda self: ph_get_instruc_start())
reg_code_sreg = property(lambda self: ph_get_reg_code_sreg())
reg_data_sreg = property(lambda self: ph_get_reg_data_sreg())
reg_first_sreg = property(lambda self: ph_get_reg_first_sreg())
reg_last_sreg = property(lambda self: ph_get_reg_last_sreg())
regnames = property(lambda self: ph_get_regnames())
segreg_size = property(lambda self: ph_get_segreg_size())
tbyte_size = property(lambda self: ph_get_tbyte_size())
version = property(lambda self: ph_get_version())
ph = __ph()
#</pycode(py_idp)>
#<pycode_BC695(py_idp)>
AS_NOTAB=0
CUSTOM_CMD_ITYPE=CUSTOM_INSN_ITYPE
InstrIsSet=has_insn_feature
NEXTEAS_ANSWER_SIZE=0
PR_FULL_HIFXP=0
SETPROC_ALL=SETPROC_LOADER_NON_FATAL
SETPROC_COMPAT=SETPROC_IDB
SETPROC_FATAL=SETPROC_LOADER
area_cmt_changed=range_cmt_changed
changed_stkpnts=stkpnts_changed
changed_struc=struc_align_changed
changing_area_cmt=changing_range_cmt
changing_struc=changing_struc_align
func_tail_removed=func_tail_deleted
get_reg_info2=get_reg_info
ph_get_regCodeSreg=ph_get_reg_code_sreg
ph_get_regDataSreg=ph_get_reg_data_sreg
ph_get_regFirstSreg=ph_get_reg_first_sreg
ph_get_regLastSreg=ph_get_reg_last_sreg
removing_func_tail=deleting_func_tail
segm_attrs_changed=segm_attrs_updated
str2regf=str2reg
def __wrap_insn_func(name):
def __wrapper(*args):
arg0 = args[0]
import ida_ua
if not isinstance(arg0, ida_ua.insn_t):
tmp = ida_ua.insn_t()
if not ida_ua.decode_insn(tmp, arg0):
return False
arg0 = tmp
return getattr(_ida_idp, name)(arg0, *args[1:])
globals()[name] = __wrapper
__wrap_insn_func("is_call_insn")
__wrap_insn_func("is_ret_insn")
__wrap_insn_func("is_indirect_jump_insn")
__wrap_insn_func("is_basic_block_end")
def parse_reg_name(*args):
if isinstance(args[1], reg_info_t): # 6.95: regname, reg_info_t
regname, ri = args
else: # 7.00: reg_info_t, regname
ri, regname = args
return _ida_idp.parse_reg_name(ri, regname)
def __call_IDP_Hooks_auto_queue_empty(cb, qtype):
old_rc = cb(qtype)
if old_rc == 0: # 6.95's queue not empty anymore
rc = -1 # 7.0's queue not empty anymore
else:
rc = old_rc
return rc
import ida_ida
ida_ida.__wrap_hooks_callback(
IDP_Hooks,
"ev_auto_queue_empty",
"auto_queue_empty",
__call_IDP_Hooks_auto_queue_empty)
#</pycode_BC695(py_idp)>
```
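The BC695 compatibility shims above accept both the 6.95 and 7.0 calling conventions. As a minimal, hypothetical sketch (it assumes the code runs inside IDA where `ida_idp` and `ida_ua` are importable, and `0x401000` is just an illustrative address), the wrapped predicates can be given either a decoded `insn_t` or a plain address:
```python
import ida_idp
import ida_ua

ea = 0x401000  # hypothetical instruction address

# 7.0 style: decode explicitly and pass the insn_t
insn = ida_ua.insn_t()
if ida_ua.decode_insn(insn, ea):
    print(ida_idp.is_call_insn(insn))

# 6.95 style, still accepted thanks to __wrap_insn_func above: pass the address;
# the wrapper decodes it into a temporary insn_t before calling the real function
print(ida_idp.is_call_insn(ea))
```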
#### File: idapython/pywraps/py_nalt.py
```python
import _ida_idaapi
SWI_SPARSE = 0x1
"""sparse switch ( value table present ) otherwise lowcase present"""
SWI_V32 = 0x2
"""32-bit values in table"""
SWI_J32 = 0x4
"""32-bit jump offsets"""
SWI_VSPLIT = 0x8
"""value table is split (only for 32-bit values)"""
SWI_DEFAULT = 0x10
"""default case is present"""
SWI_DEF_IN_TBL = 0x20
"""default case is an entry in the jump table.
This flag is only applicable in the case of a sparse
nonindirect switch (i.e. a switch with a values table).
<jump table size> == <value table size> + 1.
The default case entry is the last one in the table
(or the first one in the case of an inversed jump table)."""
SWI_JMP_INV = 0x40
"""jumptable is inversed (last entry is for first entry in values table)"""
SWI_SHIFT_MASK = 0x180
"""use formula (element*shift + elbase) to find jump targets"""
SWI_ELBASE = 0x200
"""elbase is present (if not and shift!=0, endof(jumpea) is used)"""
SWI_JSIZE = 0x400
"""jump offset expansion bit"""
SWI_VSIZE = 0x800
"""value table element size expansion bit"""
SWI_SEPARATE = 0x1000
"""do not create an array of individual dwords"""
SWI_SIGNED = 0x2000
"""jump table entries are signed"""
SWI_CUSTOM = 0x4000
"""custom jump table.
\ph{create_switch_xrefs} will be called to create code xrefs for the
table. Custom jump table must be created by the module
(see also #SWI_STDTBL)"""
SWI_INDIRECT = 0x00010000
"""value table elements are used as indexes into the jump table"""
SWI_SUBTRACT = 0x00020000
"""table values are subtracted from the elbase instead of being addded"""
SWI_HXNOLOWCASE = 0x00040000
"""lowcase value should not be used by the decompiler (internal flag)"""
SWI_STDTBL = 0x00080000
"""custom jump table with standard table formatting.
ATM IDA doesn't use SWI_CUSTOM for switches with standard
table formatting. So this flag can be considered as obsolete."""
SWI_DEFRET = 0x00100000
"""return in the default case (defjump==BADADDR)"""
# --------------------------------------------------------------------------
class switch_info_t(ida_idaapi.py_clinked_object_t):
def __init__(self, lnk = None):
ida_idaapi.py_clinked_object_t.__init__(self, lnk)
self.bc695_api = False
def _create_clink(self):
return _ida_nalt.switch_info_t_create()
def _del_clink(self, lnk):
return _ida_nalt.switch_info_t_destroy(lnk)
def assign(self, other):
return _ida_nalt.switch_info_t_assign(self, other)
def is_indirect(self):
return (self.flags & SWI_INDIRECT) != 0
def is_subtract(self):
return (self.flags & SWI_SUBTRACT) != 0
def get_jtable_size(self):
return self.jcases if self.is_indirect() else self.ncases
def get_lowcase(self):
return self.ind_lowcase if self.is_indirect() else self.lowcase
def set_expr(self, r, dt):
self.regnum = r
self.regdtype = dt
def get_shift(self):
return (self.flags & SWI_SHIFT_MASK) >> 7
def set_shift(self, shift):
self.flags &= ~SWI_SHIFT_MASK
self.flags |= ((shift & 3) << 7)
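    # Illustrative round-trip of the shift helpers (hypothetical usage, assuming
    # `si` is a switch_info_t instance; see the SWI_SHIFT_MASK note above):
    #   si.set_shift(2)   # stores 2 in bits 7..8 of si.flags
    #   si.get_shift()    # -> 2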
def get_jtable_element_size(self):
code = self.flags & (SWI_J32|SWI_JSIZE)
if code == 0: return 2
elif code == SWI_J32: return 4
elif code == SWI_JSIZE: return 1
else: return 8
def set_jtable_element_size(self, size):
self.flags &= ~(SWI_J32|SWI_JSIZE)
if size == 4: self.flags |= SWI_J32
elif size == 1: self.flags |= SWI_JSIZE
elif size == 8: self.flags |= SWI_J32|SWI_JSIZE
elif size != 2: return False
return True
def get_vtable_element_size(self):
code = self.flags & (SWI_V32|SWI_VSIZE)
if code == 0: return 2
elif code == SWI_V32: return 4
elif code == SWI_VSIZE: return 1
return 8
def set_vtable_element_size(self, size):
        self.flags &= ~(SWI_V32|SWI_VSIZE)
if size == 4: self.flags |= SWI_V32
elif size == 1: self.flags |= SWI_VSIZE
elif size == 8: self.flags |= SWI_V32|SWI_VSIZE
elif size != 2: return False
return True
#
# Autogenerated
#
def __get_regdtype__(self):
return _ida_nalt.switch_info_t_get_regdtype(self)
def __set_regdtype__(self, v):
_ida_nalt.switch_info_t_set_regdtype(self, v)
def __get_jcases__(self):
return _ida_nalt.switch_info_t_get_jcases(self)
def __set_jcases__(self, v):
_ida_nalt.switch_info_t_set_jcases(self, v)
def __get_regnum__(self):
return _ida_nalt.switch_info_t_get_regnum(self)
def __set_regnum__(self, v):
_ida_nalt.switch_info_t_set_regnum(self, v)
def __get_flags__(self):
return _ida_nalt.switch_info_t_get_flags(self)
def __set_flags__(self, v):
_ida_nalt.switch_info_t_set_flags(self, v)
def __get_ncases__(self):
return _ida_nalt.switch_info_t_get_ncases(self)
def __set_ncases__(self, v):
_ida_nalt.switch_info_t_set_ncases(self, v)
def __get_defjump__(self):
return _ida_nalt.switch_info_t_get_defjump(self)
def __set_defjump__(self, v):
_ida_nalt.switch_info_t_set_defjump(self, v)
def __get_jumps__(self):
return _ida_nalt.switch_info_t_get_jumps(self)
def __set_jumps__(self, v):
_ida_nalt.switch_info_t_set_jumps(self, v)
def __get_elbase__(self):
return _ida_nalt.switch_info_t_get_elbase(self)
def __set_elbase__(self, v):
_ida_nalt.switch_info_t_set_elbase(self, v)
def __get_startea__(self):
return _ida_nalt.switch_info_t_get_startea(self)
def __set_startea__(self, v):
_ida_nalt.switch_info_t_set_startea(self, v)
def __get_custom__(self):
return _ida_nalt.switch_info_t_get_custom(self)
def __set_custom__(self, v):
_ida_nalt.switch_info_t_set_custom(self, v)
def __get_ind_lowcase__(self):
return _ida_nalt.switch_info_t_get_ind_lowcase(self)
def __set_ind_lowcase__(self, v):
_ida_nalt.switch_info_t_set_ind_lowcase(self, v)
def __get_values_lowcase__(self):
return _ida_nalt.switch_info_t_get_values_lowcase(self)
def __set_values_lowcase__(self, v):
_ida_nalt.switch_info_t_set_values_lowcase(self, v)
regdtype = property(__get_regdtype__, __set_regdtype__)
"""size of the switch expression register as dtype"""
jcases = property(__get_jcases__, __set_jcases__)
"""number of entries in the jump table (SWI_INDIRECT)"""
regnum = property(__get_regnum__, __set_regnum__)
"""the switch expression as a register number"""
flags = property(__get_flags__, __set_flags__)
"""switch info flags"""
ncases = property(__get_ncases__, __set_ncases__)
"""number of cases (excluding default)"""
defjump = property(__get_defjump__, __set_defjump__)
"""default jump address"""
jumps = property(__get_jumps__, __set_jumps__)
"""jump table address"""
elbase = property(__get_elbase__, __set_elbase__)
"""element base"""
startea = property(__get_startea__, __set_startea__)
"""start of switch idiom"""
custom = property(__get_custom__, __set_custom__)
"""information for custom tables (filled and used by modules)"""
ind_lowcase = property(__get_ind_lowcase__, __set_ind_lowcase__)
values = property(__get_values_lowcase__, __set_values_lowcase__)
lowcase = property(__get_values_lowcase__, __set_values_lowcase__)
#</pycode(py_nalt)>
#<pycode_BC695(py_nalt)>
ASCSTR_LAST=7
ASCSTR_LEN2=STRTYPE_LEN2
ASCSTR_LEN4=STRTYPE_LEN4
ASCSTR_PASCAL=STRTYPE_PASCAL
ASCSTR_TERMCHR=STRTYPE_TERMCHR
ASCSTR_ULEN2=STRTYPE_LEN2_16
ASCSTR_ULEN4=STRTYPE_LEN4_16
ASCSTR_UNICODE=STRTYPE_C_16
ASCSTR_UTF16=STRTYPE_C_16
ASCSTR_UTF32=STRTYPE_C_32
REF_VHIGH=V695_REF_VHIGH
REF_VLOW=V695_REF_VLOW
SWI_END_IN_TBL=SWI_DEF_IN_TBL
SWI_EXTENDED=0x8000
SWI2_INDIRECT=SWI_INDIRECT >> 16
SWI2_SUBTRACT=SWI_SUBTRACT >> 16
import ida_netnode
RIDX_AUTO_PLUGINS=ida_netnode.BADNODE
change_encoding_name=rename_encoding
@bc695redef
def del_tinfo2(ea, n=None):
if n is not None:
return del_op_tinfo(ea, n)
else:
return del_tinfo(ea)
get_encodings_count=get_encoding_qty
def get_op_tinfo(*args):
import ida_typeinf
if isinstance(args[2], ida_typeinf.tinfo_t): # 6.95: ea, n, tinfo_t
ea, n, tif = args
else: # 7.00: tinfo_t, ea, n
tif, ea, n = args
return _ida_nalt.get_op_tinfo(tif, ea, n)
get_op_tinfo2=get_op_tinfo
@bc695redef
def is_unicode(strtype):
return (strtype & STRWIDTH_MASK) > 0
set_op_tinfo2=set_op_tinfo
set_tinfo2=set_tinfo
switch_info_t.regdtyp = switch_info_t.regdtype
def get_tinfo(*args):
import ida_typeinf
if isinstance(args[1], ida_typeinf.tinfo_t): # 6.95: ea, tinfo_t
ea, tif = args
else: # 7.00: tinfo_t, ea
tif, ea = args
return _ida_nalt.get_tinfo(tif, ea)
get_tinfo2=get_tinfo
def get_refinfo(*args):
if isinstance(args[2], refinfo_t): # 6.95: ea, n, refinfo_t
ea, n, ri = args
else: # 7.00: refinfo_t, ea, n
ri, ea, n = args
return _ida_nalt.get_refinfo(ri, ea, n)
get_switch_info_ex=get_switch_info
set_switch_info_ex=set_switch_info
del_switch_info_ex=del_switch_info
switch_info_ex_t_assign=switch_info_t_assign
switch_info_ex_t_create=switch_info_t_create
switch_info_ex_t_destroy=switch_info_t_destroy
switch_info_ex_t_get_custom=switch_info_t_get_custom
switch_info_ex_t_get_defjump=switch_info_t_get_defjump
switch_info_ex_t_get_elbase=switch_info_t_get_elbase
switch_info_ex_t_get_flags=switch_info_t_get_flags
switch_info_ex_t_get_ind_lowcase=switch_info_t_get_ind_lowcase
switch_info_ex_t_get_jcases=switch_info_t_get_jcases
switch_info_ex_t_get_jumps=switch_info_t_get_jumps
switch_info_ex_t_get_ncases=switch_info_t_get_ncases
switch_info_ex_t_get_regdtyp=switch_info_t_get_regdtype
switch_info_ex_t_get_regnum=switch_info_t_get_regnum
switch_info_ex_t_get_startea=switch_info_t_get_startea
switch_info_ex_t_get_values_lowcase=switch_info_t_get_values_lowcase
switch_info_ex_t_set_custom=switch_info_t_set_custom
switch_info_ex_t_set_defjump=switch_info_t_set_defjump
switch_info_ex_t_set_elbase=switch_info_t_set_elbase
switch_info_ex_t_set_flags=switch_info_t_set_flags
switch_info_ex_t_set_ind_lowcase=switch_info_t_set_ind_lowcase
switch_info_ex_t_set_jcases=switch_info_t_set_jcases
switch_info_ex_t_set_jumps=switch_info_t_set_jumps
switch_info_ex_t_set_ncases=switch_info_t_set_ncases
switch_info_ex_t_set_regdtyp=switch_info_t_set_regdtype
switch_info_ex_t_set_regnum=switch_info_t_set_regnum
switch_info_ex_t_set_startea=switch_info_t_set_startea
switch_info_ex_t_set_values_lowcase=switch_info_t_set_values_lowcase
def __switch_info_t_get_flags__(instance):
return switch_info_t.__get_flags__(instance) | SWI_EXTENDED
def __switch_info_t_set_flags__(instance, v):
if instance.bc695_api:
v |= (switch_info_t.__get_flags__(instance) & 0xFFFF0000)
switch_info_t.__set_flags__(instance, v)
switch_info_t.flags = property(__switch_info_t_get_flags__, __switch_info_t_set_flags__)
def __switch_info_t_get_flags2__(instance):
instance.bc695_api = True
return switch_info_t.__get_flags__(instance) >> 16
def __switch_info_t_set_flags2__(instance, v):
instance.bc695_api = True
flags = switch_info_t.__get_flags__(instance)
instance.flags = (flags & 0xFFFF) | (v << 16)
switch_info_t.flags2 = property(__switch_info_t_get_flags2__, __switch_info_t_set_flags2__)
switch_info_ex_t=switch_info_t
#</pycode_BC695(py_nalt)>
```
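As a quick illustration of the element-size and shift helpers defined on `switch_info_t` above, the following sketch (hypothetical usage, assuming it runs inside IDA where `ida_nalt` is importable) shows the expected flag round-trips:
```python
import ida_nalt

si = ida_nalt.switch_info_t()
si.set_jtable_element_size(4)    # sets SWI_J32 in si.flags
assert si.get_jtable_element_size() == 4
si.set_vtable_element_size(1)    # sets SWI_VSIZE
assert si.get_vtable_element_size() == 1
si.set_shift(2)                  # stored in the SWI_SHIFT_MASK bits
assert si.get_shift() == 2
```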
#### File: idapython/pywraps/sidc.py
```python
UA_MAXOP = 6
# ----------------------------------------------------------------------
# instruc_t related constants
#
# instruc_t.feature
#
CF_STOP = 0x00001 # Instruction doesn't pass execution to the next instruction
CF_CALL = 0x00002 # CALL instruction (should make a procedure here)
CF_CHG1 = 0x00004 # The instruction modifies the first operand
CF_CHG2 = 0x00008 # The instruction modifies the second operand
CF_CHG3 = 0x00010 # The instruction modifies the third operand
CF_CHG4 = 0x00020 # The instruction modifies the fourth operand
CF_CHG5 = 0x00040 # The instruction modifies the fifth operand
CF_CHG6 = 0x00080 # The instruction modifies the sixth operand
CF_USE1 = 0x00100 # The instruction uses value of the first operand
CF_USE2 = 0x00200 # The instruction uses value of the second operand
CF_USE3 = 0x00400 # The instruction uses value of the third operand
CF_USE4 = 0x00800 # The instruction uses value of the fourth operand
CF_USE5 = 0x01000 # The instruction uses value of the fifth operand
CF_USE6 = 0x02000 # The instruction uses value of the sixth operand
CF_JUMP = 0x04000 # The instruction passes execution using indirect jump or call (thus needs additional analysis)
CF_SHFT = 0x08000 # Bit-shift instruction (shl,shr...)
CF_HLL = 0x10000 # Instruction may be present in a high level language function.
# ----------------------------------------------------------------------
# op_t related constants
#
# op_t.type
# Description Data field
o_void = 0 # No Operand ----------
o_reg = 1 # General Register (al,ax,es,ds...) reg
o_mem = 2 # Direct Memory Reference (DATA) addr
o_phrase = 3 # Memory Ref [Base Reg + Index Reg] phrase
o_displ = 4 # Memory Reg [Base Reg + Index Reg + Displacement] phrase+addr
o_imm = 5 # Immediate Value value
o_far = 6 # Immediate Far Address (CODE) addr
o_near = 7 # Immediate Near Address (CODE) addr
o_idpspec0 = 8 # Processor specific type
o_idpspec1 = 9 # Processor specific type
o_idpspec2 = 10 # Processor specific type
o_idpspec3 = 11 # Processor specific type
o_idpspec4 = 12 # Processor specific type
o_idpspec5 = 13 # Processor specific type
# There can be more processor specific types
#
# op_t.dtype
#
dt_byte = 0 # 8 bit
dt_word = 1 # 16 bit
dt_dword = 2 # 32 bit
dt_float = 3 # 4 byte
dt_double = 4 # 8 byte
dt_tbyte = 5 # variable size (ph.tbyte_size)
dt_packreal = 6 # packed real format for mc68040
dt_qword = 7 # 64 bit
dt_byte16 = 8 # 128 bit
dt_code = 9 # ptr to code (not used?)
dt_void = 10 # none
dt_fword = 11 # 48 bit
dt_bitfild = 12 # bit field (mc680x0)
dt_string = 13 # pointer to asciiz string
dt_unicode = 14 # pointer to unicode string
dt_ldbl = 15 # long double (which may be different from tbyte)
dt_byte32 = 16 # 256 bit
dt_byte64 = 17 # 512 bit
#
# op_t.flags
#
OF_NO_BASE_DISP = 0x80 # o_displ: base displacement doesn't exist; meaningful only for o_displ type: if set, base displacement (x.addr) doesn't exist.
OF_OUTER_DISP = 0x40 # o_displ: outer displacement exists; meaningful only for o_displ type: if set, outer displacement (x.value) exists.
PACK_FORM_DEF = 0x20 # !o_reg + dt_packreal: packed factor defined
OF_NUMBER = 0x10 # can be output as number only; if set, the operand can be converted to a number only
OF_SHOW = 0x08 # should the operand be displayed? if clear, the operand is hidden and should not be displayed
#
# insn_t.flags
#
INSN_MACRO = 0x01 # macro instruction
INSN_MODMAC = 0x02 # macros: may modify the database to make room for the macro insn
# ----------------------------------------------------------------------
# asm_t related constants
#
# asm_t.flag
#
AS_OFFST = 0x00000001 # offsets are 'offset xxx' ?
AS_COLON = 0x00000002 # create colons after data names ?
AS_UDATA = 0x00000004 # can use '?' in data directives
AS_2CHRE = 0x00000008 # double char constants are: "xy
AS_NCHRE = 0x00000010 # char constants are: 'x
AS_N2CHR = 0x00000020 # can't have 2 byte char consts
# ASCII directives:
AS_1TEXT = 0x00000040 # 1 text per line, no bytes
AS_NHIAS = 0x00000080 # no characters with high bit
AS_NCMAS = 0x00000100 # no commas in ascii directives
AS_HEXFM = 0x00000E00 # format of hex numbers:
ASH_HEXF0 = 0x00000000 # 34h
ASH_HEXF1 = 0x00000200 # h'34
ASH_HEXF2 = 0x00000400 # 34
ASH_HEXF3 = 0x00000600 # 0x34
ASH_HEXF4 = 0x00000800 # $34
ASH_HEXF5 = 0x00000A00 # <^R > (radix)
AS_DECFM = 0x00003000 # format of dec numbers:
ASD_DECF0 = 0x00000000 # 34
ASD_DECF1 = 0x00001000 # #34
ASD_DECF2 = 0x00002000 # 34.
ASD_DECF3 = 0x00003000 # .34
AS_OCTFM = 0x0001C000 # format of octal numbers:
ASO_OCTF0 = 0x00000000 # 123o
ASO_OCTF1 = 0x00004000 # 0123
ASO_OCTF2 = 0x00008000 # 123
ASO_OCTF3 = 0x0000C000 # @123
ASO_OCTF4 = 0x00010000 # o'123
ASO_OCTF5 = 0x00014000 # 123q
ASO_OCTF6 = 0x00018000 # ~123
AS_BINFM = 0x000E0000 # format of binary numbers:
ASB_BINF0 = 0x00000000 # 010101b
ASB_BINF1 = 0x00020000 # ^B010101
ASB_BINF2 = 0x00040000 # %010101
ASB_BINF3 = 0x00060000 # 0b1010101
ASB_BINF4 = 0x00080000 # b'1010101
ASB_BINF5 = 0x000A0000 # b'1010101'
AS_UNEQU = 0x00100000 # replace undefined data items
# with EQU (for ANTA's A80)
AS_ONEDUP = 0x00200000 # One array definition per line
AS_NOXRF = 0x00400000 # Disable xrefs during the output file generation
AS_XTRNTYPE = 0x00800000 # Assembler understands type of extrn
# symbols as ":type" suffix
AS_RELSUP = 0x01000000 # Checkarg: 'and','or','xor' operations
# with addresses are possible
AS_LALIGN = 0x02000000 # Labels at "align" keyword
# are supported.
AS_NOCODECLN = 0x04000000 # don't create colons after code names
AS_NOTAB = 0x08000000 # Disable tabulation symbols during the output file generation
AS_NOSPACE = 0x10000000 # No spaces in expressions
AS_ALIGN2 = 0x20000000 # .align directive expects an exponent rather than a power of 2
# (.align 5 means to align at 32byte boundary)
AS_ASCIIC = 0x40000000 # ascii directive accepts C-like
# escape sequences (\n,\x01 and similar)
AS_ASCIIZ = 0x80000000 # ascii directive inserts implicit
# zero byte at the end
# ----------------------------------------------------------------------
# processor_t related constants
IDP_INTERFACE_VERSION = 76
CUSTOM_INSN_ITYPE = 0x8000
REG_SPOIL = 0x80000000
REAL_ERROR_FORMAT = -1 # not supported format for current .idp
REAL_ERROR_RANGE = -2 # number too big (small) to store (mem NOT modified)
REAL_ERROR_BADDATA = -3 # illegal real data for load (IEEE data not filled)
#
# Check whether the operand is relative to stack pointer or frame pointer.
# This function is used to determine how to output a stack variable
# This function may be absent. If it is absent, then all operands
# are sp based by default.
# Define this function only if some stack references use frame pointer
# instead of stack pointer.
# returns flags:
OP_FP_BASED = 0x00000000 # operand is FP based
OP_SP_BASED = 0x00000001 # operand is SP based
OP_SP_ADD = 0x00000000 # operand value is added to the pointer
OP_SP_SUB = 0x00000002 # operand value is subtracted from the pointer
#
# processor_t.flag
#
PR_SEGS = 0x000001 # has segment registers?
PR_USE32 = 0x000002 # supports 32-bit addressing?
PR_DEFSEG32 = 0x000004 # segments are 32-bit by default
PR_RNAMESOK = 0x000008 # allow register names to be used for location names
PR_ADJSEGS = 0x000020 # IDA may adjust segments moving their starting/ending addresses.
PR_DEFNUM = 0x0000C0 # default number representation:
PRN_HEX = 0x000000 # hex
PRN_OCT = 0x000040 # octal
PRN_DEC = 0x000080 # decimal
PRN_BIN = 0x0000C0 # binary
PR_WORD_INS = 0x000100 # instruction codes are grouped as 2 bytes in the binary line prefix
PR_NOCHANGE = 0x000200 # The user can't change segments and code/data attributes (display only)
PR_ASSEMBLE = 0x000400 # Module has a built-in assembler and understands IDP_ASSEMBLE
PR_ALIGN = 0x000800 # All data items should be aligned properly
PR_TYPEINFO = 0x001000 # the processor module supports
# type information callbacks
# ALL OF THEM SHOULD BE IMPLEMENTED!
PR_USE64 = 0x002000 # supports 64-bit addressing?
PR_SGROTHER = 0x004000 # the segment registers don't contain
# the segment selectors, something else
PR_STACK_UP = 0x008000 # the stack grows up
PR_BINMEM = 0x010000 # the processor module provides correct
# segmentation for binary files
# (i.e. it creates additional segments)
# The kernel will not ask the user
# to specify the RAM/ROM sizes
PR_SEGTRANS = 0x020000 # the processor module supports
# the segment translation feature
# (it means it calculates the code
# addresses using the map_code_ea() function)
PR_CHK_XREF = 0x040000 # don't allow near xrefs between segments
# with different bases
PR_NO_SEGMOVE = 0x080000 # the processor module doesn't support move_segm()
# (i.e. the user can't move segments)
PR_USE_ARG_TYPES = 0x200000 # use ph.use_arg_types callback
PR_SCALE_STKVARS = 0x400000 # use ph.get_stkvar_scale callback
PR_DELAYED = 0x800000 # has delayed jumps and calls
PR_ALIGN_INSN = 0x1000000 # allow ida to create alignment instructions
                                # arbitrarily. Since these instructions
# might lead to other wrong instructions
# and spoil the listing, IDA does not create
# them by default anymore
PR_PURGING = 0x2000000 # there are calling conventions which may
# purge bytes from the stack
PR_CNDINSNS = 0x4000000 # has conditional instructions
PR_USE_TBYTE = 0x8000000 # BTMT_SPECFLT means _TBYTE type
PR_DEFSEG64 = 0x10000000 # segments are 64-bit by default
# ----------------------------------------------------------------------
OOF_SIGNMASK = 0x0003 # sign symbol (+/-) output:
OOFS_IFSIGN = 0x0000 # output sign if needed
OOFS_NOSIGN = 0x0001 # don't output sign, forbid the user to change the sign
OOFS_NEEDSIGN = 0x0002 # always out sign (+-)
OOF_SIGNED = 0x0004 # output as signed if < 0
OOF_NUMBER = 0x0008 # always as a number
OOF_WIDTHMASK = 0x0070 # width of value in bits:
OOFW_IMM = 0x0000 # take from x.dtype
OOFW_8 = 0x0010 # 8 bit width
OOFW_16 = 0x0020 # 16 bit width
OOFW_24 = 0x0030 # 24 bit width
OOFW_32 = 0x0040 # 32 bit width
OOFW_64 = 0x0050 # 64 bit width
OOF_ADDR = 0x0080 # output x.addr, otherwise x.value
OOF_OUTER = 0x0100 # output outer operand
OOF_ZSTROFF = 0x0200 # meaningful only if is_stroff(uFlag)
# append a struct field name if
# the field offset is zero?
# if AFL_ZSTROFF is set, then this flag
# is ignored.
OOF_NOBNOT = 0x0400 # prohibit use of binary not
OOF_SPACES = 0x0800 # do not suppress leading spaces
# currently works only for floating point numbers
# ----------------------------------------------------------------------
class insn_t(object):
def __init__(self, noperands = UA_MAXOP):
self.auxpref = 0
self.cs = 0
self.ea = 0
self.flags = 0
self.insnpref = 0
self.ip = 0
self.itype = 0
self.n = 0
self.segpref = 0
self.size = 0
self.ops = []
# store the number of operands
self.n = noperands
# create operands
        for i in range(0, noperands):
op = op_t()
op.n = i
self.ops.append(op)
setattr(self, 'Op%d' % (i+1), op)
def __getitem__(self, i):
return self.ops[i]
# ----------------------------------------------------------------------
class op_t(object):
def __init__(self):
self.addr = 0
self.dtype = 0
self.flags = 0
self.n = 0
self.offb = 0
self.offo = 0
self.reg = 0
self.specval = 0
self.specflag1 = 0
self.specflag2 = 0
self.specflag3 = 0
self.specflag4 = 0
self.type = 0
self.value = 0
# make sure reg and phrase have the same value
def __setattr__(self, name, value):
if name == 'reg' or name == 'phrase':
object.__setattr__(self, 'reg', value)
object.__setattr__(self, 'phrase', value)
else:
object.__setattr__(self, name, value)
```
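To show how the lightweight `insn_t`/`op_t` containers and the `CF_*` feature bits above fit together, here is a small sketch with made-up values (it assumes the definitions from `sidc.py` are in scope):
```python
insn = insn_t()              # six empty operands, exposed as Op1..Op6 and insn[0..5]
insn.Op1.type = o_reg
insn.Op1.reg = 0             # assigning reg also mirrors the value into phrase
insn.Op2.type = o_imm
insn.Op2.value = 42

feature = CF_CHG1 | CF_USE2  # "modifies operand 1, uses the value of operand 2"
print(bool(feature & CF_CHG1), insn[0].type == o_reg, insn[1].value)
```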
#### File: idapython/Scripts/ExchainDump.py
```python
import idc
import re
import ida_kernwin
from ida_kernwin import Choose
# class to store parsed results
class exchain:
def __init__(self, m):
self.name = m.group(1)
self.addr = int(m.group(2), 16)
# Chooser class
class MyChoose(Choose):
def __init__(self, title, items):
Choose.__init__(self, title, [ ["Address", 16], ["Name", 250] ])
self.items = items
def OnGetLine(self, n):
o = self.items[n]
line = []
line.append("%08X" % o.addr)
line.append("%s" % o.name)
return line
def OnGetSize(self):
return len(self.items)
def OnSelectLine(self, n):
o = self.items[n]
Jump(o.addr)
return (Choose.NOTHING_CHANGED, )
# main
def main():
s = idc.eval('send_dbg_command("!exchain")')
if "IDC_FAILURE" in s:
return (False, "Cannot execute the command")
matches = re.finditer(r'[^:]+: ([^\(]+) \(([^\)]+)\)\n', s)
L = []
for x in matches:
L.append(exchain(x))
if not L:
return (False, "Nothing to display: Could parse the result!")
# Get a Choose instance
chooser = MyChoose("Exchain choose", L)
# Run the chooser
chooser.Show()
return (True, "Success!")
ok, r = main()
if not ok:
    print(r)
``` |
{
"source": "joeleung00/webportal",
"score": 2
} |
#### File: webportal/portal/google_calendar.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from .models import Category, Message, GrepRequest
from django.shortcuts import redirect
from users.models import Profile
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
def google_calendar_connection(user):
SCOPES = ['https://www.googleapis.com/auth/calendar']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
    # Comment out the three rows of code below to require the user to authorize every time
try:
prefix = 'Token/'
token_name = 'token{}.pickle'.format(user.id)
if os.path.exists(prefix + token_name):
with open(prefix + token_name, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'client_secret.json', SCOPES)
creds = flow.run_local_server()
# Save the credentials for the next run
with open(prefix + token_name, 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
except:
return False
return service
def createEvent(Gcal, title, startDate, startTime, endDate, endTime):
# HK timezone
GMT_OFF = '+08:00'
EVENT = {
'summary': title,
'start': {'dateTime': '%sT%s%s' % (startDate, startTime, GMT_OFF)},
'end': {'dateTime': '%sT%s%s' % (endDate, endTime, GMT_OFF)},
}
e = Gcal.events().insert(calendarId='primary',sendNotifications=True, body=EVENT).execute()
def calendar(request):
Gcal = google_calendar_connection(request.user)
if (Gcal != False):
profile = Profile.objects.get(user=request.user)
profile.google_auth = True
profile.save()
# example of calling createEvent function
#createEvent(Gcal, '3100project', '2019-04-08', '18:00:00', '2019-04-08', '20:00:00')
return redirect('portal-home')
```
#### File: webportal/portal/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# These are the models that saved in our database
class Category(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User, on_delete=models.CASCADE)
position = models.PositiveIntegerField(default=0)
def __str__(self):
return self.title
# the default sorting method for Category is by position attribute
class Meta:
ordering = ['position']
class Message(models.Model):
# This would be used for storing GrepReply
title = models.CharField(max_length=100)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
content = models.TextField()
full_url = models.CharField(max_length=500, default='')
#date_posted = models.DateTimeField(default=timezone.now)
modified_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
# the default sorting method for Message is by their modified_date in decending order
class Meta:
ordering = ['-modified_date']
class GrepRequest(models.Model):
content_title = models.CharField(max_length=100)
crawltag = models.TextField(default='test')
#selector_type = models.CharField(max_length=5)
url = models.CharField(max_length=500)
#date_posted = models.DateTimeField(default=timezone.now)
add_to_calendar = models.BooleanField(default=False)
message = models.ForeignKey(Message, on_delete=models.CASCADE)
def __str__(self):
return self.content_title
# This function will be triggered when a user is created.
# Then it will automatically create a new category which is called News
@receiver(post_save, sender=User)
def create_user_category(sender, instance, created, **kwargs):
if created:
Category.objects.create(author=instance, title="News")
``` |
{
"source": "joelewiss/coinbase-portfolio",
"score": 3
} |
#### File: joelewiss/coinbase-portfolio/portfolio.py
```python
import time
import sys
import json
import colorama as cr
from requests import get
from terminaltables import SingleTable
"""
positions dictionary
{
symbol: "ETH",
price: 608.77,
qty: 0.0788641,
fee: 1.99,
}
"""
def read_positions():
try:
f = open("positions.json", "r")
except OSError:
print("Could not open positions.json file, exiting now")
sys.exit(1)
pos = json.load(f)
f.close()
return pos
"""
Retrieves price data for symbols, limiting duplicate requests
pos (list) List of position dictionaries
returns (dict) Dictionary with prices in the following format
{ "symbol": priceNow, ... }
"""
def get_prices(pos):
prices = dict()
for p in pos:
symb = p["symbol"]
if symb not in prices:
url = "https://api.coinbase.com/v2/prices/{}-USD/spot".format(symb)
res = get(url)
if (res.status_code == 200):
json = res.json()
prices[symb] = float(json["data"]["amount"])
else:
print(f"Could not retrieve price data for {symb}")
print(f"Got status code {res.status_code} from API")
print("Stoping program now")
sys.exit(1)
return prices
"""
Prints out entire portfolio to stdout in a fancy table with colors
pos (dict) Positions dictionary
clear (bool) Determines if terminal is cleared and table is printed at
1,1
"""
def print_portfolio(pos, clear=False):
table = [["Symbol", "Last Price ($)", "Change ($)", "Change (%)", "Qty",
"Price Paid ($)", "Market Value ($)", "Total Gain ($)",
"Total Gain (%)"]]
prices = get_prices(pos)
# Keep track of useful totals
tpricep = 0
tvalue = 0
tgain = 0
for p in pos:
# Construct table row by row
row = []
        # Convenience variable
symb = p["symbol"]
# Symbol
row.append(symb)
# Last price
if (prices[symb] < 10):
row.append("{:,.4f}".format(prices[symb]))
else:
row.append("{:,.2f}".format(prices[symb]))
# Change $
chgd = prices[symb] - p["price"]
if (chgd < 0):
color = cr.Fore.RED
else:
color = cr.Fore.GREEN
if (abs(chgd) < 10):
chgd = "{}{:,.4f}".format(color, chgd)
else:
chgd = "{}{:,.2f}".format(color, chgd)
chgd += cr.Fore.RESET
row.append(chgd)
# Change %
chgp = (prices[symb] - p["price"]) / p["price"]
row.append("{:+.2%}".format(chgp))
# Qty
row.append(p["qty"])
# Price Paid
ppaid = (p["price"] * p["qty"]) + p["fee"]
row.append("{:,.2f}".format(ppaid))
tpricep += ppaid
# Market value
mvalue = prices[symb] * p["qty"]
row.append("{:,.2f}".format(mvalue))
tvalue += mvalue
# Total Gain $
tgd = (prices[symb] - p["price"]) * p["qty"]
tgd -= p["fee"]
if (tgd < 0):
color = cr.Fore.RED
else:
color = cr.Fore.GREEN
if (abs(tgd) < 10):
row.append("{}{:,.4f}{}".format(color, tgd, cr.Fore.RESET))
else:
row.append("{}{:,.2f}{}".format(color, tgd, cr.Fore.RESET))
tgain += tgd
# Total Gain %
tgp = tgd / (p["price"] * p["qty"])
row.append("{:+.2%}".format(tgp))
table.append(row)
# Add in useful totals
totals = [""] * 9
totals[0] = "TOTAL"
# Total price paid is col 5
totals[5] = "{:,.2f}".format(tpricep)
# Total Market value is col 6
totals[6] = "{:,.2f}".format(tvalue)
# Total gain is col 7
totals[7] = "{:,.2f}".format(tgain)
# Calculate gain percentage
gain = tgain / tpricep
totals[8] = "{:+.2%}".format(gain)
table.append(totals)
if clear:
# Clear screen using colorama helper functions
print(cr.ansi.clear_screen())
# Position at top of screen
print(cr.Cursor.POS(1, 1))
# Print table
stbl = SingleTable(table, title="Portfolio")
stbl.inner_footing_row_border = True
print(stbl.table)
print(time.strftime("%I:%M %p"))
if __name__ == "__main__":
pos = read_positions()
cr.init()
print_portfolio(pos)
``` |
{
"source": "joelewiss/pi-smarthome",
"score": 3
} |
#### File: pi-smarthome/modules/projector.py
```python
from track import register
from http.client import HTTPConnection
from urllib.parse import urlencode
import telnetlib as tn
MODULE = "projector"
HOST = "JOE-PROJECTOR.lewisnet"
PORT = 4352
HEADER = "%1"
conn = tn.Telnet()
web = HTTPConnection(HOST)
def send_command(body, param):
# Perform validation of arguments
if len(body) == 4 and len(param) <= 128:
command = "{}{} {}\r".format(HEADER, body, param)
# first try and use existing connection
# write call will throw AtributeError if the connection was never
# created
# read_until call will throw an EOFError if the connection was closed
try:
conn.write(command.encode("ascii"))
res = conn.read_until(b"\r")
except (EOFError, AttributeError):
conn.open(HOST, PORT)
header = conn.read_until(b"\r")
if (header == b"PJLINK 0\r"):
conn.write(command.encode("ascii"))
res = conn.read_until(b"\r")
else:
return "ERR: COMMUNICATION"
# Trim up the response before we return it
res = res.decode()
res = res[2:len(res) - 1]
return res
else:
return "ERR: INVALID COMMAND"
def send_web_command(key):
# The web commands will have no effect without these headers
headers = {
"Host": HOST.lower(),
"Referer": "http://joe-projector.lewisnet/cgi-bin/webconf.exe?page=13",
}
query = urlencode({"KEY": key})
url = "/cgi-bin/sender.exe?{}".format(query)
web.request("GET", url, headers=headers)
res = web.getresponse()
print(res.status, res.reason)
print(res.getheaders())
if (res.status == 302):
return "sent"
else:
return "ERR: {}".format(res.status)
@register(MODULE)
def get_power():
return send_command("POWR", "?")
@register(MODULE)
def power_on():
return send_command("POWR", "1")
@register(MODULE)
def power_off():
return send_command("POWR", "0")
@register(MODULE)
def av_mute():
return send_command("AVMT", "31")
@register(MODULE)
def av_unmute():
return send_command("AVMT", "30")
@register(MODULE)
def vol_up():
return send_web_command("56")
@register(MODULE)
def vol_down():
return send_web_command("57")
@register(MODULE)
def freeze():
return send_web_command("47")
``` |
{
"source": "joelfak/advent_of_code_2019",
"score": 4
} |
#### File: advent_of_code_2019/clonest1ck-python3/3dec.py
```python
def parse(step):
direction = step[0]
distance = int(step[1:])
delta = (0,0)
if direction == "D":
delta = (0,-distance)
elif direction == "U":
delta = (0, distance)
elif direction == "L":
delta = (-distance, 0)
elif direction == "R":
delta = (distance, 0)
return delta
def add(a, b):
(ax, ay) = a
(bx, by) = b
c = (ax + bx, ay + by)
return c
def move_positive(start, end):
(s_x, s_y) = start
(e_x, e_y) = end
if s_x == e_x and s_y > e_y:
return (end, start)
if s_y == e_y and s_x > e_x:
return (end, start)
return (start, end)
def intersection(a_start, a_end, b_start, b_end):
(a_start, a_end) = move_positive(a_start, a_end)
(b_start, b_end) = move_positive(b_start, b_end)
(as_x, as_y) = a_start
(ae_x, ae_y) = a_end
(bs_x, bs_y) = b_start
(be_x, be_y) = b_end
# a moves in X and b moves in Y
if as_x < bs_x and ae_x > bs_x and bs_y < as_y and be_y > as_y:
return (bs_x, as_y)
# a moves in Y and b moves in X
if bs_x < as_x and be_x > as_x and as_y < bs_y and ae_y > bs_y:
return (as_x, bs_y)
return False
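# Worked example (hypothetical coordinates): a horizontal segment (0,0)-(8,0)
# and a vertical segment (6,-3)-(6,5) cross at (6,0):
#   intersection((0, 0), (8, 0), (6, -3), (6, 5))  # -> (6, 0)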
def manhattan_to_origin(point):
(x, y) = point
if x < 0:
x = 0 - x
if y < 0:
y = 0 - y
manhattan = x + y
return manhattan
def trace(path):
pathway = []
current_position = (0,0)
for step in path:
delta = parse(step)
current_position = add(current_position, delta)
pathway.append(current_position)
return pathway
def crossings(path_a, path_b):
cross = []
a = 0
b = 0
while(a < len(path_a) - 1):
while(b < len(path_b) - 1):
crossing = intersection(path_a[a], path_a[a + 1], path_b[b], path_b[b + 1])
if crossing:
delta = manhattan_to_origin(crossing)
cross.append((delta, crossing))
b += 1
a += 1
b = 0
return cross
def shortest_crossing(crossings):
def compare_distance(a):
(delta_a, point_a) = a
return delta_a
crossings.sort(key = compare_distance)
return crossings[0]
file_content = open("3dec-input", 'r')
#file_content = ["R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"]
#file_content = ["R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51", "U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"]
#file_content = ["R8,U5,L5,D3", "U7,R6,D4,L4"]
paths = []
for line in file_content:
paths.append(trace(line.split(",")))
cross = crossings(paths[0], paths[1])
print(shortest_crossing(cross))
file_content.close()
```
#### File: advent_of_code_2019/erikagnvall-python3/day12.py
```python
import math
import os.path
from dataclasses import dataclass
from itertools import combinations
@dataclass
class Moon:
x: int
y: int
z: int
dx: int = 0
dy: int = 0
dz: int = 0
def _parse_moons(lines):
moons = []
for line in lines:
parts = line.replace('<', '').replace('>', '').split(',')
x = int(parts[0].replace('x=', ''))
y = int(parts[1].replace('y=', ''))
z = int(parts[2].replace('z=', ''))
moons.append(Moon(x, y, z))
return moons
def _read_input():
with open(os.path.basename(__file__).replace('.py', '.txt')) as f:
return _parse_moons(f.readlines())
def _apply_gravity(m1: Moon, m2: Moon):
if m1.x > m2.x:
m1.dx -= 1
m2.dx += 1
elif m1.x < m2.x:
m1.dx += 1
m2.dx -= 1
if m1.y > m2.y:
m1.dy -= 1
m2.dy += 1
elif m1.y < m2.y:
m1.dy += 1
m2.dy -= 1
if m1.z > m2.z:
m1.dz -= 1
m2.dz += 1
elif m1.z < m2.z:
m1.dz += 1
m2.dz -= 1
def _potential_energy(moons):
return [abs(m.x) + abs(m.y) + abs(m.z) for m in moons]
def _kinetic_energy(moons):
return [abs(m.dx) + abs(m.dy) + abs(m.dz) for m in moons]
def _time_step(moons, pairs):
for m1, m2 in pairs:
_apply_gravity(m1, m2)
for m in moons:
m.x += m.dx
m.y += m.dy
m.z += m.dz
def part1(moons, n=1000):
pairs = list(combinations(moons, 2))
for _ in range(n):
_time_step(moons, pairs)
return sum([
p * k for p, k in zip(_potential_energy(moons), _kinetic_energy(moons))
])
def _lcm(a, b):
return abs(a * b) // math.gcd(a, b)
def part2(moons):
pairs = list(combinations(moons, 2))
xs = set()
ys = set()
zs = set()
found_x = False
found_y = False
found_z = False
while True:
x_state = tuple((m.x, m.dx) for m in moons)
y_state = tuple((m.y, m.dy) for m in moons)
z_state = tuple((m.z, m.dz) for m in moons)
if x_state in xs:
found_x = True
else:
xs.add(x_state)
if y_state in ys:
found_y = True
else:
ys.add(y_state)
if z_state in zs:
found_z = True
else:
zs.add(z_state)
if found_x and found_y and found_z:
break
_time_step(moons, pairs)
return _lcm(len(xs), _lcm(len(ys), len(zs)))
if __name__ == '__main__':
moons = _read_input()
print(part1(moons))
moons = _read_input()
print(part2(moons))
############
# Tests
example_1 = '''\
<x=-1, y=0, z=2>
<x=2, y=-10, z=-7>
<x=4, y=-8, z=8>
<x=3, y=5, z=-1>
'''.splitlines()
example_2 = '''\
<x=-8, y=-10, z=0>
<x=5, y=5, z=10>
<x=2, y=-7, z=3>
<x=9, y=-8, z=-3>
'''.splitlines()
def test_apply_gravity():
m1 = Moon(3, 0, 4)
m2 = Moon(5, 0, 3)
_apply_gravity(m1, m2)
assert m1.dx == 1
assert m2.dx == -1
assert m1.dy == 0
assert m2.dy == 0
assert m1.dz == -1
assert m2.dz == 1
def test_example_1():
assert part1(_parse_moons(example_1), n=10) == 179
assert part2(_parse_moons(example_1)) == 2772
def test_example_2():
assert part1(_parse_moons(example_2), n=100) == 1940
assert part2(_parse_moons(example_2)) == 4686774924
def test_solutions():
moons = _read_input()
assert part1(moons) == 10635
moons = _read_input()
assert part2(moons) == 583523031727256
```
#### File: advent_of_code_2019/erikagnvall-python3/graph.py
```python
from collections import defaultdict
def _min_dist(q, dist):
i = float('inf')
u = None
for v in q:
if dist[v] < i:
i = dist[v]
u = v
assert u
return u
def djikstra(graph, target, source):
q = set(graph.keys())
dist = defaultdict(lambda: float('inf'))
prev = defaultdict(lambda: None)
dist[source] = 0
while q:
u = _min_dist(q, dist)
q.remove(u)
if u == target:
break
for v in graph[u]:
alt = dist[u] + 1
if alt < dist[v]:
dist[v] = alt
prev[v] = u
return dist, prev
```
#### File: advent_of_code_2019/estomagordo-python3/day_10b.py
```python
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def reduce(x, y):
for fac in range(2, min(abs(x), abs(y)) + 1):
if fac > max(abs(x), abs(y)):
return x, y
while x % fac == 0 and y % fac == 0:
x //= fac
y //= fac
return x, y
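# Worked example: reduce(4, 6) -> (2, 3), i.e. the direction offset with its
# common factors divided out.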
def solve(d):
asteroids = []
for y, row in enumerate(d):
for x, c in enumerate(row):
if c == '#':
asteroids.append((x, y))
best = (0, 0, 0)
bestangles = None
for i, asteroid in enumerate(asteroids):
ax, ay = asteroid
angles = defaultdict(list)
for j, other in enumerate(asteroids):
if i == j:
continue
ox, oy = other
xdiff = ox - ax
ydiff = oy - ay
if ydiff == 0:
ydiff = 1
xdiff = 10**12 if ox > ax else -10**12
xdiff, ydiff = reduce(xdiff, ydiff)
if xdiff == 0:
ydiff = 1 if ydiff > 0 else -1
angles[(xdiff, ydiff)].append((ox, oy))
score = len(angles)
if (score, ax, ay) > best:
best = (score, ax, ay)
bestangles = angles
ax, ay = best[1:]
new_order = []
types = [[] for _ in range(8)]
for angle, asts in bestangles.items():
typescore = 0
dx, dy = angle
if dx > 0 and dy < 0:
            if dx < 10**12:
typescore = 1
else:
typescore = 2
elif dx > 0:
typescore = 3
elif dx == 0 and dy > 0:
typescore = 4
elif dx < 0 and dy > 0:
if dx > -10**12:
typescore = 5
else:
typescore = 6
elif dx < 0 and dy < 0:
typescore = 7
types[typescore].append((angle, asts))
if types[0]:
new_order.append((0, types[0][0][0]))
if types[1]:
types[1].sort(key=lambda pair: pair[0][0] / pair[0][1])
for angle, asts in types[1]:
asts.sort(key=lambda ast: -ast[1])
new_order.append((1, asts[0]))
if types[2]:
new_order.append((2, types[2][0][0]))
if types[3]:
types[3].sort(key=lambda pair: -(pair[0][0] / pair[0][1]))
for angle, asts in types[3]:
asts.sort(key=lambda ast: ast[1])
new_order.append((3, asts[0]))
if types[4]:
new_order.append((4, types[4][0][0]))
if types[5]:
types[5].sort(key=lambda pair: -(pair[0][0] / pair[0][1]))
for angle, asts in types[5]:
asts.sort(key=lambda ast: ast[1])
new_order.append((5, asts[0]))
if types[6]:
        new_order.append((6, types[6][0][0]))
if types[7]:
types[7].sort(key=lambda pair: -(pair[0][0] / pair[0][1]))
for angle, asts in types[7]:
asts.sort(key=lambda ast: -ast[1])
new_order.append((7, asts[0]))
return new_order[199][1][0] * 100 + new_order[199][1][1]
def read_and_solve():
with open('input_10.txt') as f:
data = [line.rstrip() for line in f]
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/estomagordo-python3/day_13b.py
```python
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
from intcode import Computer
def draw(cells, lowest, highest):
for y in range(lowest, highest + 1):
row = []
for x in range(lowest, highest + 1):
c = ' '
if (y, x) in cells and cells[(y, x)] != 0:
tile = cells[(y, x)]
if tile == 1:
c = 'X'
elif tile == 2:
c = 'S'
elif tile == 3:
c = '_'
elif tile == 4:
c = 'o'
row.append(c)
print(''.join(row))
def solve(d):
computer = Computer(d, 0, 2)
count = 0
retcode = 0
loops = 0
lowest = 100
highest = -100
out = [-1, -1, -1]
score = 0
ballx = 0
padx = 0
cells = {}
while retcode != -1:
retcode, retval = computer.step()
if retcode != 1:
continue
if out[:2] == [-1, 0] and loops % 3 == 2:
score = retval
else:
out[loops % 3] = retval
if loops % 3 == 2:
cells[(out[1], out[0])] = out[2]
if retval == 3:
padx = out[0]
if retval == 4:
ballx = out[0]
else:
lowest = min(lowest, retval)
highest = max(highest, retval)
computer.set_input(0 if ballx == padx else -1 if ballx < padx else 1)
loops += 1
return score
def read_and_solve():
with open('input_13.txt') as f:
data = list(map(int, f.readline().split(',')))
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/estomagordo-python3/day_16a.py
```python
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def patternize(i, j):
if i == 0 and j == 0:
return 1
j += 1
width = i + 1
band = j // width
return [0, 1, 0, -1][band % 4]
def solve(nums):
for _ in range(100):
new_nums = []
for i, _ in enumerate(nums):
tot = 0
for j, num in enumerate(nums):
tot += num * patternize(i, j)
new_nums.append(abs(tot) % 10)
nums = new_nums
return int(''.join(str(num) for num in nums[:8]))
def read_and_solve():
with open('input_16.txt') as f:
data = list(map(int, list(f.readline().rstrip())))
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/estomagordo-python3/day_2b.py
```python
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def solve_part(d, noun, verb):
d[1] = noun
d[2] = verb
p = 0
while p < len(d):
if d[p] == 99:
break
if d[p] == 1:
d[d[p + 3]] = d[d[p + 1]] + d[d[p + 2]]
elif d[p] == 2:
d[d[p + 3]] = d[d[p + 1]] * d[d[p + 2]]
p += 4
return d[0]
def solve(d):
for noun in range(100):
for verb in range(100):
if solve_part(list(d), noun, verb) == 19690720:
return 100 * noun + verb
def read_and_solve():
with open('input_2.txt') as f:
data = list(map(int, f.readline().split(',')))
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/estomagordo-python3/day_6a.py
```python
import helpers
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def solve(d):
graph = {}
for line in d:
a, b = line.split(')')
graph[b] = a
if a not in graph:
graph[a] = 'STOP'
count = 0
for node in graph.keys():
while node in graph and graph[node] in graph:
count += 1
node = graph[node]
return count
def read_and_solve():
with open('input_6.txt') as f:
data = [line.rstrip() for line in f]
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/estomagordo-python3/day_6b.py
```python
import helpers
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def pathize(graph, node):
path = {}
runlen = 0
while node in graph:
runlen += 1
node = graph[node]
path[node] = runlen
return path
def solve(d):
graph = {}
for line in d:
a, b = line.split(')')
graph[b] = a
if a not in graph:
graph[a] = 'STOP'
you_orbits = pathize(graph, graph['YOU'])
san_orbits = pathize(graph, graph['SAN'])
best = 10**10
for k, v in you_orbits.items():
if k not in san_orbits:
continue
best = min(best, v + san_orbits[k])
return best
def read_and_solve():
with open('input_6.txt') as f:
data = [line.rstrip() for line in f]
return solve(data)
if __name__ == '__main__':
print(read_and_solve())
```
#### File: advent_of_code_2019/jakobruhe-python3/day3.py
```python
import unittest
from collections import namedtuple
from collections import defaultdict
Loc = namedtuple("Loc", ("x", "y"))
DirDist = namedtuple("DirDist", ("direction", "distance"))
DIRECTIONS = {
'U': Loc(0, 1),
'D': Loc(0, -1),
'L': Loc(-1, 0),
'R': Loc(1, 0),
}
def parse_wire_path(input: str):
return list(map(lambda s: DirDist(s[0], int(s[1:])), input.split(",")))
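# Worked example: parse_wire_path("R8,U5") -> [DirDist('R', 8), DirDist('U', 5)]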
def dir_x(direction):
return DIRECTIONS[direction].x
def dir_y(direction):
return DIRECTIONS[direction].y
def put_location_on_board(board, loc, name, dist):
if name in board[loc]:
return
board[loc][name] = dist
def put_segment_on_board(board, start_loc, start_dist, segment, name):
loc = start_loc
dist = start_dist
for d in range(0, segment.distance):
loc = Loc(loc.x + dir_x(segment.direction), loc.y + dir_y(segment.direction))
dist += 1
put_location_on_board(board, loc, name, dist)
return loc, dist
def put_wire_path_on_board(board, wire_path, name):
loc = Loc(0, 0)
dist = 0
for segment in wire_path:
loc, dist = put_segment_on_board(board, loc, dist, segment, name)
def manhattan_distance(p1, p2):
return abs(p1.x - p2.x) + abs(p1.y - p2.y)
def closest_crossing(board):
closest = None
for loc, wire_paths in board.items():
if len(wire_paths) >= 2 and loc != Loc(0, 0):
dist = manhattan_distance(Loc(0, 0), loc)
closest = dist if closest is None or dist < closest else closest
return closest
def draw(board):
for y in range(10, -10, -1):
for x in range(-10, 12):
if x == 0 and y == 0:
print("o", end="")
elif Loc(x, y) in board:
if len(board[Loc(x,y)]) >= 2:
print("X", end="")
else:
print("+", end="")
else:
print(".", end="")
print("")
def solve1(input):
lines = input.split("\n")
board = defaultdict(dict)
for name, line in enumerate(lines):
put_wire_path_on_board(board, parse_wire_path(line), name)
#draw(board)
return closest_crossing(board)
def closest_crossing_distance(board):
closest = None
for loc, wire_paths in board.items():
if len(wire_paths) >= 2 and loc != Loc(0, 0):
dist = sum(wire_paths.values())
closest = dist if closest is None or dist < closest else closest
return closest
def solve2(input):
lines = input.split("\n")
board = defaultdict(dict)
for name, line in enumerate(lines):
put_wire_path_on_board(board, parse_wire_path(line), name)
return closest_crossing_distance(board)
class TestThis(unittest.TestCase):
def test1(self):
self.assertEqual(solve1("R8,U5,L5,D3\nU7,R6,D4,L4"), 6)
self.assertEqual(solve1("R75,D30,R83,U83,L12,D49,R71,U7,L72\nU62,R66,U55,R34,D71,R55,D58,R83"), 159)
self.assertEqual(solve1("R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\nU98,R91,D20,R16,D67,R40,U7,R15,U6,R7"), 135)
def test2(self):
self.assertEqual(solve2("R8,U5,L5,D3\nU7,R6,D4,L4"), 30)
self.assertEqual(solve2("R75,D30,R83,U83,L12,D49,R71,U7,L72\nU62,R66,U55,R34,D71,R55,D58,R83"), 610)
self.assertEqual(solve2("R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\nU98,R91,D20,R16,D67,R40,U7,R15,U6,R7"), 410)
if __name__ == "__main__":
#unittest.main()
with open("input/day3.txt", "r") as f:
input = f.read()
a1 = solve1(input.strip())
print("{}".format(a1))
a2 = solve2(input.strip())
print("{}".format(a2))
```
#### File: advent_of_code_2019/jakobruhe-python3/day6.py
```python
import unittest
from collections import namedtuple
Relation = namedtuple("Relation", ["parent", "child"])
class Tree:
def __init__(self, name, parent = None):
self.name = name
self.parent = parent
self.children = []
def add_child(self, child):
self.children.append(child)
def __str__(self):
return self.name
def create_relation_from_string(s):
parent_and_child = s.split(")")
return Relation(parent_and_child[0], parent_and_child[1])
def parse_input(input: str):
return list(map(create_relation_from_string, input.strip().split()))
def build_tree(relations):
nodes = {}
for r in relations:
if r.parent in nodes:
parent = nodes[r.parent]
else:
parent = Tree(r.parent)
nodes[r.parent] = parent
if r.child not in nodes:
child = Tree(r.child, parent)
nodes[r.child] = child
else:
child = nodes[r.child]
if child.parent is None:
child.parent = parent
            elif child.parent.name != r.parent:
                raise ValueError("Child {} already has a parent: {}, new parent: {}"
                                 .format(r.child, child.parent.name, r.parent))
parent.add_child(child)
return nodes["COM"]
def depth_sum(tree, depth):
children_depth = 0
for child in tree.children:
children_depth += depth_sum(child, depth + 1)
return depth + children_depth
def solve1(root):
return depth_sum(root, 0)
def find_node(tree, name):
if tree.name == name:
return tree
for c in tree.children:
node = find_node(c, name)
if node is not None:
return node
return None
def find_parents(node):
parents = []
while node.parent is not None:
parents.append(node.parent)
node = node.parent
return list(reversed(parents))
def solve2(root, from_name, to_name):
from_node = find_node(root, from_name)
from_node_parents = find_parents(from_node)
to_node = find_node(root, to_name)
to_node_parents = find_parents(to_node)
num_common = 0
for i, n in enumerate(to_node_parents):
if n == from_node_parents[i]:
num_common += 1
else:
break
return len(from_node_parents) + len(to_node_parents) - num_common * 2
class TestThis(unittest.TestCase):
def test1(self):
self.assertEqual(solve1(build_tree(parse_input("COM)A"))), 1)
self.assertEqual(solve1(build_tree(parse_input("A)B COM)A"))), 3)
self.assertEqual(solve1(build_tree(parse_input("COM)B B)C C)D D)E E)F B)G G)H D)I E)J J)K K)L"))), 42)
if __name__ == "__main__":
with open("input/day6.txt", "r") as f:
relations = parse_input(f.read())
root = build_tree(relations)
print("P1: {}".format(solve1(root)))
print("P2: {}".format(solve2(root, "YOU", "SAN")))
```
#### File: advent_of_code_2019/joelfak-python/day04.py
```python
from helpfunctions import *
import unittest, sys

def isPasswordValid(password, countPairsInLargerGroups):
    passwordValid = True
    # The numeric limits were redacted in the source; a plain six-digit check is
    # assumed here so that the documented examples (111111, 223450, 123789) still apply.
    if password < 100000 or password > 999999:
        passwordValid = False
    passwordstr = str(password)
    if countPairsInLargerGroups:
        if sum(1 for x in zip(passwordstr, passwordstr[1:]) if x[0] == x[1]) < 1:
            passwordValid = False
    else:
        lastNumber = None
        numConsecutiveNumbers = 0
        numPairs = 0
        for c in passwordstr:
            if int(c) == lastNumber:
                numConsecutiveNumbers = numConsecutiveNumbers + 1
            else:
                numConsecutiveNumbers = 1
                lastNumber = int(c)
            if numConsecutiveNumbers == 2:
                numPairs = numPairs + 1
            if numConsecutiveNumbers == 3:
                numPairs = numPairs - 1
        if numPairs < 1:
            passwordValid = False
    if sum(1 for x in zip(passwordstr, passwordstr[1:]) if x[0] > x[1]) > 0:
        passwordValid = False
    return passwordValid

def part1(minVal, maxVal):
    return sum(1 for x in range(minVal, maxVal+1) if isPasswordValid(x, True))

def part2(minVal, maxVal):
    return sum(1 for x in range(minVal, maxVal+1) if isPasswordValid(x, False))

## Unit tests ########################################################
class TestDay04(unittest.TestCase):
    def test_isPasswordValid_part_1_example_1(self):
        self.assertEqual(isPasswordValid(111111, True), True)
    def test_isPasswordValid_part_1_example_2(self):
        self.assertEqual(isPasswordValid(223450, True), False)
    def test_isPasswordValid_part_1_example_3(self):
        self.assertEqual(isPasswordValid(123789, True), False)
    def test_isPasswordValid_part_2_example_1(self):
        self.assertEqual(isPasswordValid(112233, False), True)
    def test_isPasswordValid_part_2_example_2(self):
        # Value redacted in the source; 123444 is the standard part-2 example that should fail.
        self.assertEqual(isPasswordValid(123444, False), False)
    def test_isPasswordValid_part_2_example_3(self):
        # Value redacted in the source; 111122 is the standard part-2 example that should pass.
        self.assertEqual(isPasswordValid(111122, False), True)
## Main ########################################################
if __name__ == '__main__':
    print("Advent of code day 4")
    print("Part1 result: {}".format(part1(272091,815432)))
    print("Part2 result: {}".format(part2(272091,815432)))
```
#### File: advent_of_code_2019/joelfak-python/day05.py
```python
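"""Intcode interpreter with parameter modes (advent_of_code_2019, day 5).

Summary inferred from the code below: it supports add, multiply, input,
output, jump-if-true, jump-if-false, less-than and equals opcodes; part 1
runs the program with input 1 and part 2 with input 5.
"""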
from helpfunctions import *
import unittest, sys
import operator
from collections import namedtuple, deque

IntcodeResult = namedtuple('IntcodeResult', ['program', 'output'])
OpCode = namedtuple('OpCode', ['opcode', 'parameterModes'])

def parseOpcode(opcodeIn):
    parameterModes = deque()
    opcode = opcodeIn % 100
    opcodeIn //= 100
    while opcodeIn:
        parameterModes.append(opcodeIn % 10)
        opcodeIn //= 10
    return OpCode(opcode, parameterModes)

def getParameter(program, p, parameterModes):
    if len(parameterModes) > 0 and parameterModes.popleft():
        return program[p]
    else:
        return program[program[p]]

def intcode(program, input=None):
    p = 0
    oc = parseOpcode(program[p])
    output = []
    while oc.opcode != 99:
        if oc.opcode == 1:  # Addition
            program[program[p+3]] = getParameter(program, p+1, oc.parameterModes) + getParameter(program, p+2, oc.parameterModes)
            p = p + 4
        elif oc.opcode == 2:  # Multiplication
            program[program[p+3]] = getParameter(program, p+1, oc.parameterModes) * getParameter(program, p+2, oc.parameterModes)
            p = p + 4
        elif oc.opcode == 3:  # Read input
            program[program[p+1]] = input
            p = p + 2
        elif oc.opcode == 4:  # Write output
            output.append(getParameter(program, p+1, oc.parameterModes))
            p = p + 2
        elif oc.opcode == 5:  # Jump if true
            if getParameter(program, p+1, oc.parameterModes) != 0:
                p = getParameter(program, p+2, oc.parameterModes)
            else:
                p = p + 3
        elif oc.opcode == 6:  # Jump if false
            if getParameter(program, p+1, oc.parameterModes) == 0:
                p = getParameter(program, p+2, oc.parameterModes)
            else:
                p = p + 3
        elif oc.opcode == 7:  # Less than
            if getParameter(program, p+1, oc.parameterModes) < getParameter(program, p+2, oc.parameterModes):
                program[program[p+3]] = 1
            else:
                program[program[p+3]] = 0
            p = p + 4
        elif oc.opcode == 8:  # Equals
            if getParameter(program, p+1, oc.parameterModes) == getParameter(program, p+2, oc.parameterModes):
                program[program[p+3]] = 1
            else:
                program[program[p+3]] = 0
            p = p + 4
        else:
            raise Exception('Invalid operator', oc.opcode)
        oc = parseOpcode(program[p])
    return IntcodeResult(program, output)

def part1(program):
    return intcode(program, 1).output

def part2(program):
    return intcode(program, 5).output
## Unit tests ########################################################
class TestDay02(unittest.TestCase):
    def test_intcode_example_program_1(self):
        self.assertEqual(intcode([1,0,0,0,99]).program, [2,0,0,0,99])
    def test_intcode_example_program_2(self):
        self.assertEqual(intcode([2,3,0,3,99]).program, [2,3,0,6,99])
    def test_intcode_example_program_3(self):
        self.assertEqual(intcode([2,4,4,5,99,0]).program, [2,4,4,5,99,9801])
    def test_intcode_example_program_4(self):
        self.assertEqual(intcode([1,1,1,4,99,5,6,0,99]).program, [30,1,1,4,2,5,6,0,99])
    def test_intcode_example_program(self):
        self.assertEqual(intcode([1,9,10,3,2,3,11,0,99,30,40,50]).program, [3500,9,10,70,2,3,11,0,99,30,40,50])

class TestParseOpcode(unittest.TestCase):
    def test_1(self):
        self.assertEqual(parseOpcode(1002), (2, deque([0, 1])))
    def test_2(self):
        self.assertEqual(parseOpcode(101003), (3, deque([0, 1, 0, 1])))

class TestDay05_part1(unittest.TestCase):
    def test_intcode_output_is_input_1(self):
        self.assertEqual(intcode([3,0,4,0,99], 3).output[0], 3)
    def test_intcode_output_is_input_2(self):
        self.assertEqual(intcode([3,0,4,0,99], 7).output[0], 7)
    def test_intcode_parameter_mode(self):
        self.assertEqual(intcode([1002,4,3,4,33]).program, [1002,4,3,4,99])
    def test_intcode_negative_numbers(self):
        self.assertEqual(intcode([1101,100,-1,4,0]).program, [1101,100,-1,4,99])

class TestDay05_part2(unittest.TestCase):
    def test_intcode_if_input_is_8_return_1(self):
        self.assertEqual(intcode([3,9,8,9,10,9,4,9,99,-1,8], 8).output[0], 1)
    def test_intcode_if_input_is_not_8_return_0(self):
        self.assertEqual(intcode([3,9,8,9,10,9,4,9,99,-1,8], 3).output[0], 0)
    def test_intcode_if_input_is_less_than_8_return_1(self):
        self.assertEqual(intcode([3,9,7,9,10,9,4,9,99,-1,8], 7).output[0], 1)
    def test_intcode_if_input_is_not_less_than_8_return_0(self):
        self.assertEqual(intcode([3,9,7,9,10,9,4,9,99,-1,8], 8).output[0], 0)
    def test_intcode_immediate_mode_if_input_is_8_return_1(self):
        self.assertEqual(intcode([3,3,1108,-1,8,3,4,3,99], 8).output[0], 1)
    def test_intcode_immediate_mode_if_input_is_not_8_return_0(self):
        self.assertEqual(intcode([3,3,1108,-1,8,3,4,3,99], 3).output[0], 0)
    def test_intcode_immediate_mode_if_input_is_less_than_8_return_1(self):
        self.assertEqual(intcode([3,3,1107,-1,8,3,4,3,99], 7).output[0], 1)
    def test_intcode_immediate_mode_if_input_is_not_less_than_8_return_0(self):
        self.assertEqual(intcode([3,3,1107,-1,8,3,4,3,99], 8).output[0], 0)
    def test_intcode_position_mode_check_if_input_is_not_zero_return_1(self):
        self.assertEqual(intcode([3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9], 2).output[0], 1)
    def test_intcode_position_mode_check_if_input_is_zero_return_0(self):
        self.assertEqual(intcode([3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9], 0).output[0], 0)
    def test_intcode_immediate_mode_check_if_input_is_not_zero_return_1(self):
        self.assertEqual(intcode([3,3,1105,-1,9,1101,0,0,12,4,12,99,1], 2).output[0], 1)
    def test_intcode_immediate_mode_check_if_input_is_zero_return_0(self):
        self.assertEqual(intcode([3,3,1105,-1,9,1101,0,0,12,4,12,99,1], 0).output[0], 0)
    def test_intcode_if_input_is_below_8(self):
        self.assertEqual(intcode([3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99], 7).output[0], 999)
    def test_intcode_if_input_is__8(self):
        self.assertEqual(intcode([3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99], 8).output[0], 1000)
    def test_intcode_if_input_is_above_8(self):
        self.assertEqual(intcode([3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99], 9).output[0], 1001)

## Main ########################################################
if __name__ == '__main__':
    print("Advent of code day 5")
    print("Part1 result: {}".format(part1(getCommaSeparatedIntsFromFile(sys.argv[1]))))
    print("Part2 result: {}".format(part2(getCommaSeparatedIntsFromFile(sys.argv[1]))))
```
#### File: day1/part2/main.py
```python
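# This module appears to solve the cumulative fuel-requirement puzzle
# (Advent of Code 2019, day 1, part 2): for each module mass it repeatedly
# applies fuel = mass // 3 - 2 and accumulates the positive contributions.
# read_input, write_output and check_result come from a local utils module
# that is not included in this dump.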
from utils import read_input, write_output, check_result

def calc(lines):
    result = 0
    for word in lines.split():
        fuel = int(int(word) / 3 - 2)
        while fuel > 0:
            result += fuel
            fuel = int(fuel / 3 - 2)
    return result

if __name__ == '__main__':
    lines = read_input()
    result = str(calc(lines))
    write_output(result)
    check_result(result)
```
#### File: day2/part2/main.py
```python
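# This module appears to solve the Intcode noun/verb search (Advent of Code 2019,
# day 2, part 2): it brute-forces noun and verb in 0..99, runs the add/multiply
# Intcode program, and returns 100 * noun + verb once position 0 equals 19690720.
# read_input, write_output and check_result come from a local utils module
# that is not included in this dump.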
from utils import read_input, write_output, check_result

def calc(lines):
    for noun in range(0, 100):
        for verb in range(0, 100):
            words = lines.split(',')
            pointer = 0
            words[1] = noun
            words[2] = verb
            while pointer < len(words):
                opcode = int(words[pointer])
                if opcode == 1:
                    pointer += 1
                    parameter = int(words[pointer])
                    value1 = int(words[parameter])
                    pointer += 1
                    parameter = int(words[pointer])
                    value2 = int(words[parameter])
                    sum = value1 + value2
                    pointer += 1
                    parameter = int(words[pointer])
                    words[parameter] = sum
                elif opcode == 2:
                    pointer += 1
                    parameter = int(words[pointer])
                    value1 = int(words[parameter])
                    pointer += 1
                    parameter = int(words[pointer])
                    value2 = int(words[parameter])
                    sum = value1 * value2
                    pointer += 1
                    parameter = int(words[pointer])
                    words[parameter] = sum
                elif opcode == 99:
                    break
                pointer += 1
            if int(words[0]) == 19690720:
                return 100 * noun + verb
    return None

if __name__ == '__main__':
    lines = read_input()
    result = str(calc(lines))
    write_output(result)
    check_result(result)
```