/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/api/webhook_receivers_api.py | import re # noqa: F401
import sys # noqa: F401
from MergePythonSDK.shared.api_client import ApiClient, Endpoint as _Endpoint
from MergePythonSDK.shared.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from MergePythonSDK.ats.model.webhook_receiver import WebhookReceiver
from MergePythonSDK.ats.model.webhook_receiver_request import WebhookReceiverRequest
class WebhookReceiversApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.webhook_receivers_create_endpoint = _Endpoint(
settings={
'response_type': (WebhookReceiver,),
'auth': [
'accountTokenAuth',
'bearerAuth'
],
'endpoint_path': '/ats/v1/webhook-receivers',
'operation_id': 'webhook_receivers_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'webhook_receiver_request',
],
'required': [
'webhook_receiver_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'webhook_receiver_request':
(WebhookReceiverRequest,),
},
'attribute_map': {
},
'location_map': {
'webhook_receiver_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
]
},
api_client=api_client
)
self.webhook_receivers_list_endpoint = _Endpoint(
settings={
'response_type': ([WebhookReceiver],),
'auth': [
'accountTokenAuth',
'bearerAuth'
],
'endpoint_path': '/ats/v1/webhook-receivers',
'operation_id': 'webhook_receivers_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def webhook_receivers_create(
self,
webhook_receiver_request,
**kwargs
) -> "WebhookReceiver":
"""webhook_receivers_create # noqa: E501
Creates a `WebhookReceiver` object with the given values. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.webhook_receivers_create(webhook_receiver_request, async_req=True)
>>> result = thread.get()
Args:
webhook_receiver_request (WebhookReceiverRequest):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
WebhookReceiver
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['webhook_receiver_request'] = \
webhook_receiver_request
return self.webhook_receivers_create_endpoint.call_with_http_info(**kwargs)
def webhook_receivers_list(
self,
**kwargs
) -> "[WebhookReceiver]":
"""webhook_receivers_list # noqa: E501
Returns a list of `WebhookReceiver` objects. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.webhook_receivers_list(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
[WebhookReceiver]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.webhook_receivers_list_endpoint.call_with_http_info(**kwargs) | PypiClean |
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/fitbenchmarking/controllers/matlab_opt_controller.py | import matlab
import numpy as np
from fitbenchmarking.controllers.base_controller import Controller
from fitbenchmarking.controllers.matlab_mixin import MatlabMixin
class MatlabOptController(MatlabMixin, Controller):
"""
Controller for MATLAB Optimization Toolbox, implementing lsqcurvefit
"""
algorithm_check = {
'all': ['levenberg-marquardt', 'trust-region-reflective'],
'ls': ['levenberg-marquardt', 'trust-region-reflective'],
'deriv_free': [],
'general': [],
'simplex': [],
'trust_region': ['levenberg-marquardt', 'trust-region-reflective'],
'levenberg-marquardt': ['levenberg-marquardt'],
'gauss_newton': [],
'bfgs': [],
'conjugate_gradient': [],
'steepest_descent': [],
'global_optimization': []}
jacobian_enabled_solvers = ['levenberg-marquardt',
'trust-region-reflective']
controller_name = 'matlab_opt'
incompatible_problems = ['mantid']
def __init__(self, cost_func):
"""
Initialises variables used for temporary storage.
:param cost_func: Cost function object selected from options.
:type cost_func: subclass of
:class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`
"""
super().__init__(cost_func)
self.support_for_bounds = True
self.param_ranges = None
self.x_data_mat = None
self.y_data_mat = None
self._status = None
self.result = None
def setup(self):
"""
Setup for Matlab Optimization Toolbox fitting
"""
# Convert initial params into matlab array
self.y_data_mat = matlab.double(np.zeros(self.data_y.shape).tolist())
self.initial_params_mat = matlab.double([self.initial_params])
self.x_data_mat = matlab.double(self.data_x.tolist())
# set matlab workspace variable for selected minimizer
self.eng.workspace['minimizer'] = self.minimizer
# set bounds if they have been set in problem definition file
if self.value_ranges is not None:
lb, ub = zip(*self.value_ranges)
self.param_ranges = (matlab.double(lb), matlab.double(ub))
else:
# if no bounds are set, then pass empty arrays to
# lsqcurvefit function
self.param_ranges = (matlab.double([]), matlab.double([]))
# serialize cost_func.eval_r and jacobian.eval (if not
# using the default jacobian) and open them within the matlab
# engine so the matlab fitting function can be called
self.eng.workspace['eval_f'] = self.py_to_mat('eval_r')
self.eng.evalc('f_wrapper = @(p, x)double(eval_f(p));')
self.eng.workspace['init'] = self.initial_params_mat
self.eng.workspace['x'] = self.x_data_mat
# if default jacobian is not selected then pass _jeval
# function to matlab
if not self.cost_func.jacobian.use_default_jac:
self.eng.workspace['eval_j'] = self.py_to_mat('jac_res')
self.eng.evalc('j_wrapper = @(p, x)double(eval_j(p));')
self.eng.workspace['eval_func'] = [self.eng.workspace['f_wrapper'],
self.eng.workspace['j_wrapper']]
self.eng.evalc('options = optimoptions("lsqcurvefit", '
'"Algorithm", minimizer, '
'"SpecifyObjectiveGradient", true);')
else:
self.eng.workspace['eval_func'] = self.eng.workspace['f_wrapper']
self.eng.evalc('options = optimoptions("lsqcurvefit", '
'"Algorithm", minimizer);')
def fit(self):
"""
Run problem with Matlab Optimization Toolbox
"""
self.result, _, _, exitflag, _ = self.eng.lsqcurvefit(
self.eng.workspace['eval_func'], self.initial_params_mat,
self.x_data_mat, self.y_data_mat, self.param_ranges[0],
self.param_ranges[1], self.eng.workspace['options'], nargout=5)
self._status = int(exitflag)
def cleanup(self):
"""
Convert the result to a numpy array and populate the variables that the
results will be read from.
"""
if self._status == 1:
self.flag = 0
elif self._status == 0:
self.flag = 1
else:
self.flag = 2
self.final_params = np.array(self.result[0],
dtype=np.float64).flatten() | PypiClean |
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/README.txt | ARGONAUT
========
Version 0.3.4 (30th March 2011)
Author: Jason Robinson
jaywink (at) basshero.org
http://www.basshero.org
1. Description
==============
Argonaut is a blogging engine built with Pylons. It is lightweight
and can be deployed on many types of web servers running Python.
The application is still very much in the alpha stage, and as such
there are bugs in the system and many features that have not been
implemented yet.
For more information please see the following links:
Authors webpage
http://www.basshero.org
Pylons HQ
http://www.pylonshq.com
2. Licence
==========
Argonaut is distributed under the FreeBSD license. This means you can use,
copy and distribute the code and application for free and with no obligations.
It is, however, required that the included copyright notice is distributed with the
application when using its source or binary components. Please see the file LICENSE.txt for the full
license.
The licenses of the JavaScript components included with Argonaut do not
enforce any additional requirements for reuse or distribution. Please see the
licenses of these components and any included icons in the folder 'third_party_licenses'.
3. Installation
===============
3.1 Prerequisites for install
-----------------------------
- Python 2.4 - 2.7 [http://www.python.org]
- Pylons 1.0 [http://pylonshq.com/]
- Python setuptools (easy_install) [http://pypi.python.org/pypi/setuptools]
Please see Pylons documentation to get started with Pylons [http://pylonshq.com/docs/en/0.9.7/gettingstarted/].
3.2 Other components
--------------------
In addition to Pylons, Argonaut uses the following components:
- Mako (templates, the View) [http://www.makotemplates.org/]
- SQLAlchemy (the Model) [http://www.sqlalchemy.org/]
- Sqlalchemy-migrate (database migration) [http://pypi.python.org/pypi/sqlalchemy-migrate]
- repoze.what (authentication and access rights) [http://what.repoze.org/docs/1.0/]
- CKEditor (for writing posts) [http://ckeditor.com/]
- AddToAny (for sharing) [http://www.addtoany.com/]
- jQuery (for additional magic) [http://jquery.com/]
- Simple3color (default theme) [http://www.oswd.org/design/preview/id/3533]
- TurboMail (for notifications) [http://www.python-turbomail.org/]
- pwi (Picasa Webalbum Integrator javascript library, for gallery) [http://code.google.com/p/pwi/]
Of these the JavaScript components CKEditor, jQuery, pwi and AddToAny are
distributed along with this package. The Python components are downloaded
and installed by easy_install.
3.3 Installation and Setup
--------------------------
Prerequisites for install must be fulfilled. Install Argonaut using easy_install:
easy_install argonaut
OR with local .egg file
easy_install <path_to_egg_file>
Make a config file as follows:
paster make-config argonaut config.ini
Tweak the config file as appropriate. Please see Pylons application
setup pages for hints on editing the config file [http://pythonpaste.org/deploy/].
After this run the following to setup the application:
paster setup-app config.ini#arg
Then you are ready to go.
You can test the installation by running:
paster serve config.ini
After this visit the link http://127.0.0.1:5000
Optionally you can extract the source and run Argonaut by launching development
mode via setup.py. Just extract the source, install Pylons and in the Argonaut
base directory run:
python setup.py develop
paster serve development.ini
4. Updating from a previous version
===================================
An automatic database schema update routine has been implemented since version 0.3.
Unfortunately this does not handle all database-related changes. Please see the changelog below
for notes on what needs to be done with version upgrades.
5. Usage
========
5.1 Modifying the site
----------------------
Argonaut features templates that can be used to control the site
structure, layout and texts. Unfortunately in this early version
there is no admin panel, so all editing must be done directly in
the files.
Template files are situated in argonaut/templates. Please see
Mako documentation on how to change the templates.
5.2 Configuration
-----------------
During application setup a database will be created in the form
that is configured in config.ini. In addition to blog data, Argonaut
also stores some configuration values in the database. These are
stored in the table 'config'.
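For example, if config.ini points at an SQLite database, the stored values can
be listed with the sqlite3 command line client (substitute the database file
configured in your config.ini):
sqlite3 <database file> "SELECT * FROM config;"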
5.3 Users
---------
The default user for writing posts is 'admin', password 'admin'.
Currently users can only be added directly to the database. An
admin panel will be added later.
5.4 Other usage notes
---------------------
Proper documentation and usage will be added slowly over
future releases :)
6. Support
==========
Please contact the author at jaywink (at) basshero.org for support,
ideas and bug reports.
7. Changelog
============
0.3.4 30th March 2011
- Added custom javascript template that is loaded from base template for site wide usage.
- Removed usage of sqlalchemy-migrate since it was causing problems. A new type of DB migration
will be thought out later.
0.3.3 29th March 2011
- Fixed routing for landing page with lowest page_order
- Post commenting can now be disabled with the config setting comments_enabled, true/false
- Fixed character encoding problem when getting page name from database
- Base page now only displays links to active pages
- Added a new page type, pwi_gallery, which is a gallery page using the jquery plugin 'pwi'
(Picasa Webalbum Integrator javascript library, http://code.google.com/p/pwi/).
0.3.2 20th February 2011
- Added 'media' and 'social' models to the database. Media contains links to
images and other media items. Social contains links to contact information or
other sites. Links are given a media ID which is displayed in the Social box with
a URL to the site or contact information.
0.3 13th February 2011
- sqlalchemy-migrate is now used to do automatic database schema
upgrades according to model changes. Added as dependency, installed
via easy_install automatically. Implementation thanks to:
http://shane.caraveo.com/2010/09/13/database-migrations-for-sqlalchemy-part-duex/
- Default landing page is now the one with the lowest page_order setting.
- Pages are now mapped to a page type. Default page types are 'blog', 'archives' and
'tags'.
- Page urls are now determined from page type, but can also be customised.
- Custom page support has been added. Custom pages are mapped to page type 'custom'
which redirects traffic to a mako template file as indicated in the pages table.
- Version number will now be displayed in the Powered by -section in the main template.
Removed these text strings and urls from the configuration table and placed them in
the base template.
Updating from version 0.2:
- Before upgrading, the database table 'pages' needs to be dropped for an easy upgrade. After
this the script 'paster setup-app [config_file]#arg' needs to be run to create the
table with the new schema. The rest of the database changes should be handled by
the automatic schema update mechanism.
0.2 6th December 2010
- Initial Pylons version release
0.1.x The 0.1 versions are the old PHP-powered versions, which were never released.
| PypiClean |
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/structures/boxlist_ops.py | import torch
from .bounding_box import BoxList
from PTMD.maskrcnn_benchmark.layers import nms as _box_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
score = boxlist.get_field(score_field)
keep = _box_nms(boxes, score, nms_thresh)
if max_proposals > 0:
keep = keep[: max_proposals]
boxlist = boxlist[keep]
return boxlist.convert(mode)
def remove_small_boxes(boxlist, min_size):
"""
Only keep boxes with both sides >= min_size
Arguments:
boxlist (Boxlist)
min_size (int)
"""
# TODO maybe add an API for querying the ws / hs
xywh_boxes = boxlist.convert("xywh").bbox
_, _, ws, hs = xywh_boxes.unbind(dim=1)
keep = (
(ws >= min_size) & (hs >= min_size)
).nonzero().squeeze(1)
return boxlist[keep]
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
"""Compute the intersection over union of two set of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
box1: (BoxList) bounding boxes, sized [N,4].
box2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2))
N = len(boxlist1)
M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
# TODO redundant, remove
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def cat_boxlist(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes | PypiClean |
/KratosParticleMechanicsApplication-9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/ParticleMechanicsApplication/particle_gid_output_process.py | import KratosMultiphysics
# Import applications and dependencies
import KratosMultiphysics.ParticleMechanicsApplication as KratosParticle
from KratosMultiphysics.deprecation_management import DeprecationManager
# Import time library
from time import time
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("Expected input shall be a Parameters object, encapsulating a json string")
model_part = Model[settings["Parameters"]["model_part_name"].GetString()]
output_name = settings["Parameters"]["output_name"].GetString()
postprocess_parameters = settings["Parameters"]["postprocess_parameters"]
return ParticleGiDOutputProcess(model_part, output_name, postprocess_parameters)
class ParticleGiDOutputProcess(KratosMultiphysics.Process):
defaults = KratosMultiphysics.Parameters("""{
"result_file_configuration": {
"gidpost_flags": {
"GiDPostMode": "GiD_PostBinary",
"WriteDeformedMeshFlag": "WriteUndeformed",
"WriteConditionsFlag": "WriteElementsOnly",
"MultiFileFlag": "SingleFile"
},
"file_label": "time",
"output_control_type": "step",
"output_interval": 1.0,
"body_output": true,
"node_output": false,
"skin_output": false,
"plane_output": [],
"nodal_results": [],
"nodal_nonhistorical_results": [],
"nodal_flags_results": [],
"gauss_point_results": [],
"additional_list_files": []
},
"point_data_configuration": []
}""")
def __init__(self, model_part, file_name, param):
KratosMultiphysics.Process.__init__(self)
if param is None:
param = self.defaults
else:
self.TranslateLegacyVariablesAccordingToCurrentStandard(param)
param.ValidateAndAssignDefaults(self.defaults)
# Default
self.param = param
self.base_file_name = file_name
self.model_part = model_part
self.next_output = 0.0
# This function can be extended with new deprecated variables as they are generated
def TranslateLegacyVariablesAccordingToCurrentStandard(self, settings):
# Defining a string to help the user understand where the warnings come from (in case any is thrown)
context_string = type(self).__name__
if settings.Has('result_file_configuration'):
sub_settings_where_var_is = settings['result_file_configuration']
old_name = 'output_frequency'
new_name = 'output_interval'
if DeprecationManager.HasDeprecatedVariable(context_string, sub_settings_where_var_is, old_name, new_name):
DeprecationManager.ReplaceDeprecatedVariableName(sub_settings_where_var_is, old_name, new_name)
# Public Functions
def ExecuteInitialize(self):
result_file_configuration = self.param["result_file_configuration"]
result_file_configuration.ValidateAndAssignDefaults(self.defaults["result_file_configuration"])
# Set up output frequency and format
output_file_label = result_file_configuration["file_label"].GetString()
if output_file_label == "time":
self.output_label_is_time = True
elif output_file_label == "step":
self.output_label_is_time = False
else:
msg = "{0} Error: Unknown value \"{1}\" read for parameter \"{2}\"".format(self.__class__.__name__,output_file_label,"file_label")
raise Exception(msg)
output_control_type = result_file_configuration["output_control_type"].GetString()
if output_control_type == "time":
self.output_control_is_time = True
elif output_control_type == "step":
self.output_control_is_time = False
else:
msg = "{0} Error: Unknown value \"{1}\" read for parameter \"{2}\"".format(self.__class__.__name__,output_file_label,"file_label")
raise Exception(msg)
self.output_frequency = result_file_configuration["output_interval"].GetDouble()
# Set Variable list to print
self.variable_name_list = result_file_configuration["gauss_point_results"]
self.variable_list = []
for i in range(self.variable_name_list.size()):
var_name = self.variable_name_list[i].GetString()
variable = self._get_variable(var_name)
self.variable_list.append(variable)
def ExecuteBeforeSolutionLoop(self):
# Initiate Output Mesh
self.mesh_file = open(self.base_file_name + ".post.msh",'w')
self.mesh_file.write("MESH \"")
self.mesh_file.write("outmesh")
self.mesh_file.write("\" dimension 3 ElemType Point Nnode 1\n")
self.mesh_file.write("Coordinates\n")
for mpm in self.model_part.Elements:
coord = mpm.CalculateOnIntegrationPoints(KratosParticle.MP_COORD,self.model_part.ProcessInfo)[0]
self.mesh_file.write("{} {} {} {}\n".format( mpm.Id, coord[0], coord[1], coord[2]))
self.mesh_file.write("End Coordinates\n")
self.mesh_file.write("Elements\n")
for mpm in self.model_part.Elements:
self.mesh_file.write("{} {}\n".format(mpm.Id, mpm.Id))
self.mesh_file.write("End Elements\n")
self.mesh_file.flush()
# Initiate Output File
self.result_file = open(self.base_file_name + ".post.res",'w')
self.result_file.write("GiD Post Results File 1.0\n")
def ExecuteInitializeSolutionStep(self): pass
def ExecuteFinalizeSolutionStep(self): pass
def ExecuteFinalize(self): pass
def PrintOutput(self):
# Print the output
time = self._get_pretty_time(self.model_part.ProcessInfo[KratosMultiphysics.TIME])
# Write results to the initiated result file
self._write_mp_results(time)
# Schedule next output
if self.output_frequency > 0.0: # Note: if == 0, we'll just always print
if self.output_control_is_time:
while self._get_pretty_time(self.next_output) <= time:
self.next_output += self.output_frequency
else:
while self.next_output <= self.model_part.ProcessInfo[KratosMultiphysics.STEP]:
self.next_output += self.output_frequency
def IsOutputStep(self):
if self.output_control_is_time:
time = self._get_pretty_time(self.model_part.ProcessInfo[KratosMultiphysics.TIME])
return (time >= self._get_pretty_time(self.next_output))
else:
return ( self.model_part.ProcessInfo[KratosMultiphysics.STEP] >= self.next_output )
# Private Functions
def _get_pretty_time(self,time):
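# Format with 12 significant digits so floating-point noise in TIME does not
# leak into output labels or the output-step comparisons below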
pretty_time = "{0:.12g}".format(time)
pretty_time = float(pretty_time)
return pretty_time
def _get_attribute(self, my_string, function_pointer, attribute_type):
"""Return the python object named by the string argument.
To be used with functions from KratosGlobals
Examples:
variable = self._get_attribute("DISPLACEMENT",
KratosMultiphysics.ParticleMechanicsApplication.GetVariable,
"Variable")
"""
splitted = my_string.split(".")
if len(splitted) == 0:
raise Exception("Something wrong. Trying to split the string " + my_string)
if len(splitted) > 3:
raise Exception("Something wrong. String " + my_string + " has too many arguments")
attribute_name = splitted[-1]
if len(splitted) == 2 or len(splitted) == 3:
warning_msg = "Ignoring \"" + my_string.rsplit(".",1)[0]
warning_msg += "\" for " + attribute_type +" \"" + attribute_name + "\""
KratosMultiphysics.Logger.PrintInfo("Warning in mpm gid output", warning_msg)
return function_pointer(attribute_name) # This also checks if the application has been imported
def _get_variable(self, my_string):
"""Return the python object of a Variable named by the string argument.
Examples:
recommended usage:
variable = self._get_variable("MP_VELOCITY")
deprecated:
variable = self._get_variables("KratosMultiphysics.ParticleMechanicsApplication.MP_VELOCITY")
"""
return self._get_attribute(my_string, KratosMultiphysics.KratosGlobals.GetVariable, "Variable")
def _write_mp_results(self, step_label=None):
clock_time = self._start_time_measure()
for i in range(self.variable_name_list.size()):
var_name = self.variable_name_list[i].GetString()
variable = self.variable_list[i]
is_scalar = self._is_scalar(variable)
# Write in result file
self.result_file.write("Result \"")
self.result_file.write(var_name)
if is_scalar:
self.result_file.write('" "Kratos" {} Scalar OnNodes\n'.format(step_label))
else:
self.result_file.write('" "Kratos" {} Vector OnNodes\n'.format(step_label))
self.result_file.write("Values\n")
for mpm in self.model_part.Elements:
print_variable = mpm.CalculateOnIntegrationPoints(variable,self.model_part.ProcessInfo)[0]
# Check whether variable is a scalar or vector
if isinstance(print_variable, float) or isinstance(print_variable, int):
print_size = 1
else:
print_size = print_variable.Size()
# Write variable as formated
if print_size == 1:
self.result_file.write("{} {}\n".format(mpm.Id, print_variable))
elif print_size == 3:
self.result_file.write("{} {} {} {}\n".format(mpm.Id, print_variable[0], print_variable[1], print_variable[2]))
elif print_size == 6:
self.result_file.write("{} {} {} {} {} {} {}\n".format(mpm.Id, print_variable[0], print_variable[1], print_variable[2], print_variable[3], print_variable[4], print_variable[5]))
else:
KratosMultiphysics.Logger.PrintInfo("Warning in mpm gid output", "Printing format is not defined for variable: ", var_name, "with size: ", print_size)
self.result_file.write("End Values\n")
self._stop_time_measure(clock_time)
def _start_time_measure(self):
return time()
def _stop_time_measure(self, time_ip):
time_fp = time()
KratosMultiphysics.Logger.PrintInfo("::[Particle GiD Output Process]:: ", "[Spent time for output = ", time_fp - time_ip, "sec]")
def _is_scalar(self,variable):
is_scalar = False
if (isinstance(variable,KratosMultiphysics.IntegerVariable) or isinstance(variable,KratosMultiphysics.DoubleVariable) or isinstance(variable,KratosMultiphysics.BoolVariable)):
is_scalar = True
elif (isinstance(variable,KratosMultiphysics.StringVariable)):
raise Exception("String variable cant be printed.")
return is_scalar | PypiClean |
/Cahier-0.1.1.tar.gz/Cahier-0.1.1/cahier/config.py |
# Copyright 2014-2015 Louis Paternault
#
# Cahier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cahier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with Cahier. If not, see <http://www.gnu.org/licenses/>.
"""Management of configuration files."""
import configparser
import os
import shlex
from cahier import errors
class ConfigurationError(errors.CahierError):
"""Error in configuration files."""
def __init__(self, filename, message):
super(ConfigurationError, self).__init__()
self.filename = filename
self.message = message
def __str__(self):
return "Configuration error ({filename}): {message}".format(
filename=self.filename,
message=self.message,
)
def config_assert_has(config, name, section, option):
"""Assert that 'config' as 'section' defined, with 'option' key in it.
Raise a ConfigurationError() if not.
"""
if not config.has_section(section):
raise ConfigurationError(
filename=name,
message=(
"Missing section {section} in file {filename}."
).format(section=section, filename=name),
)
if option and (not config.has_option(section, option)):
raise ConfigurationError(
filename=name,
message=(
"Missing key {key} in section {section} in file {filename}."
).format(key=option, section=section, filename=name)
)
def load_cahierrc(filename):
"""Load ~/.cahier/cahier.cfg, and return it as a configparser object."""
config = configparser.ConfigParser()
config.read_dict({
'options': {
'casesensitive': "True",
},
'bin': {
'editor': "$EDITOR",
},
'wiki': {
'extension': "mdwn",
},
})
config.read([filename])
if config.has_section('wiki'):
if 'fileformat' not in config['wiki'].keys():
raise ConfigurationError(
filename=filename,
message="missing key 'fileformat' in section 'wiki'.",
)
else:
raise ConfigurationError(
filename=filename,
message="missing section 'wiki'.",
)
return config
def load_ftplugin(filename):
"""Load ftplugin.
Return plugin 'filename' as a configparser object.
"""
# Reading file
config = configparser.ConfigParser()
config.read_dict({
'preprocess': {},
})
config.read([filename])
# Checking arguments
for key in config['preprocess']:
if key == 'name' or key.startswith('cmd'):
continue
raise ConfigurationError(
filename=filename,
message=(
""""{key}" key (in section {section}) must """
"""be 'name' or start with 'cmd'."""
).format(key=key, section="preprocess")
)
return config
def load_profiles(dirname):
"""Load profiles of directory 'dirname'.
Return a dictionary of profiles, as configparser objects indexed by
basenames.
"""
profiles = {}
for root, __ignored, files in os.walk(dirname):
for filename in files:
if filename.endswith('.cfg'):
# Preprocessing
basename = filename[:-len('.cfg')]
fullname = os.path.join(root, filename)
# Reading configuration
config = configparser.ConfigParser()
config.read_dict({
'options': {
'workdays': "",
},
})
config.read([fullname])
# Checking for compulsory arguments
config_assert_has(config, fullname, 'directories', 'calendar')
config_assert_has(config, fullname, 'config', 'setup')
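# 'workdays' is a whitespace-separated list of items of the form "<day>:<...>";
# the check below rejects profiles that list the same day more than once.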
workdays = shlex.split(config['options']['workdays'])
if len(workdays) != len(set([
day.split(':')[0]
for day
in workdays
])):
raise ConfigurationError(
fullname,
(
"Only one item by day is allowed in "
"key 'workdays' of section 'options'."
),
)
profiles[basename] = config
return profiles | PypiClean |
/CmtBasicModelingInterface-0.1.2.tar.gz/CmtBasicModelingInterface-0.1.2/cmt/bmi/interfaces.py | class Error(Exception):
"""Base class for BMI exceptions"""
pass
class FatalError(Exception):
"""
Raise this exception if an unrecoverable error was found
"""
pass
class BadVarNameError(Error):
"""Exception to indicate a bad input/output variable name"""
def __init__(self, name):
super(BadVarNameError, self).__init__()
self.name = name
def __str__(self):
return self.name
class MissingModelAttributeError(Error):
"""
Raise this exception if a component is missing a required attribute.
"""
def __init__(self, attrib):
super(MissingModelAttributeError, self).__init__()
self.attrib = attrib
def __str__(self):
return self.attrib
class TimeBoundsError(Error):
"""
Raise this exception if a component updates beyond its time horizon
"""
pass
class BmiGridType(int):
"""
Base type to indicate the type of a BMI model's grid.
:code: Grid type code as an int
:name: Name of the grid type as a string
"""
def __new__(cls, code, name):
obj = super(BmiGridType, cls).__new__(cls, code)
obj.name = name
return obj
def __str__(self):
return self.name
def __repr__(self):
return 'BmiGridType(%d, %s)' % (self, self.name)
GRID_TYPE_UNKNOWN = BmiGridType(-1, 'Unknown')
GRID_TYPE_NONE = BmiGridType(0, 'No grid')
GRID_TYPE_UNIFORM = BmiGridType(1, 'Uniform rectilinear')
GRID_TYPE_RECTILINEAR = BmiGridType(2, 'Rectilinear')
GRID_TYPE_STRUCTURED = BmiGridType(3, 'Structured')
GRID_TYPE_UNSTRUCTURED = BmiGridType(4, 'Unstructured')
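# Because BmiGridType subclasses int, the constants above compare and print
# naturally, e.g. int(GRID_TYPE_UNIFORM) == 1 and
# str(GRID_TYPE_UNIFORM) == 'Uniform rectilinear'.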
class BmiBase(object):
"""
Definition of the Basic Modeling Interface
"""
def initialize(self, file_name):
"""
Initialize model.
:file_name: String of configuration file
"""
pass
def update(self, **kwds):
"""
Update model by one time step.
"""
pass
def finalize(self):
"""
Clean-up model
"""
pass
def get_input_var_names(self):
"""
Get names of input variables to the model as standard names.
:returns: A list of input standard names as strings
"""
pass
def get_output_var_names(self):
"""
Get names of output variables to the model as standard names.
:returns: A list of output standard names as strings
"""
pass
def get_var_type(self, var_name):
"""
Get type of an exchange item.
"""
pass
def get_var_units(self, var_name):
"""
Get units of an exchange item.
"""
pass
def get_var_rank(self, var_name):
"""
Rank of exchange item.
"""
pass
def get_time_step(self):
"""
Model time step.
"""
pass
def get_start_time(self):
"""
Model start time.
"""
pass
def get_current_time(self):
"""
Current time of model.
"""
pass
def get_end_time(self):
"""
Model stop time.
"""
pass
class BmiExtendedBase(object):
"""
An extension interface for a BMI.
"""
def update_until(self, time):
"""
Update model until some time.
:time: Update duration
"""
pass
def run_model (self):
"""
Initialize, run, and finalize a model.
"""
pass
class BmiUnstructured(object):
"""
BMI for a model that uses an unstructured grid.
"""
def get_x(self, name):
"""
Get x-coordinates of grid nodes.
"""
pass
def get_y(self, name):
"""
Get y-coordinates of grid nodes.
"""
pass
def get_connectivity(self, name):
"""
Get cell connectivity.
"""
pass
def get_offset(self, name):
"""
Get cell offset.
"""
pass
class BmiStructured(object):
"""
BMI for a model that uses a structured grid.
"""
def get_grid_shape(self, name):
"""
Get shape of grid for variable, name.
:name: Standard name
"""
pass
def get_x(self, name):
"""
Get x-coordinates of grid nodes.
"""
pass
def get_y(self, name):
"""
Get y-coordinates of grid nodes.
"""
pass
class BmiRectilinear(object):
"""
BMI for a model that uses a rectilinear grid.
"""
def get_grid_shape(self, name):
"""
Get shape of grid for variable, name.
:name: Standard name
"""
pass
def get_columns(self, name):
"""
Get coordinates of grid columns.
"""
pass
def get_rows(self, name):
"""
Get coordinates of grid rows.
"""
pass
class BmiUniformRectilinear(object):
"""
BMI for a model that exposes a uniform rectilinear grid.
"""
def get_grid_shape(self, name):
"""
Get shape of grid for variable, name.
:name: Standard name
"""
pass
def get_grid_spacing(self, name):
"""
Get spacing of grid for variable, name.
:name: Standard name
"""
pass
def get_grid_origin(self, name):
"""
Get origin of grid for variable, name.
:name: Standard name
"""
pass
class BmiNoGrid(object):
"""
BMI for a model that does not have a grid.
""" | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/a11yhelp/dialogs/lang/nb.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","nb",{title:"Instruksjoner for tilgjengelighet",contents:"Innhold for hjelp. Trykk ESC for å lukke denne dialogen.",legend:[{name:"Generelt",items:[{name:"Verktøylinje for editor",legend:"Trykk ${toolbarFocus} for å navigere til verktøylinjen. Flytt til neste og forrige verktøylinjegruppe med TAB og SHIFT-TAB. Flytt til neste og forrige verktøylinjeknapp med HØYRE PILTAST og VENSTRE PILTAST. Trykk MELLOMROM eller ENTER for å aktivere verktøylinjeknappen."},{name:"Dialog for editor",
legend:"Mens du er i en dialog, trykk TAB for å navigere til neste dialogfelt, press SHIFT + TAB for å flytte til forrige felt, trykk ENTER for å akseptere dialogen, trykk ESC for å avbryte dialogen. For dialoger med flere faner, trykk ALT + F10 for å navigere til listen over faner. Gå til neste fane med TAB eller HØYRE PILTAST. Gå til forrige fane med SHIFT + TAB eller VENSTRE PILTAST. Trykk MELLOMROM eller ENTER for å velge fanen."},{name:"Kontekstmeny for editor",legend:"Trykk ${contextMenu} eller MENYKNAPP for å åpne kontekstmeny. Gå til neste alternativ i menyen med TAB eller PILTAST NED. Gå til forrige alternativ med SHIFT+TAB eller PILTAST OPP. Trykk MELLOMROM eller ENTER for å velge menyalternativet. Åpne undermenyen på valgt alternativ med MELLOMROM eller ENTER eller HØYRE PILTAST. Gå tilbake til overordnet menyelement med ESC eller VENSTRE PILTAST. Lukk kontekstmenyen med ESC."},
{name:"Listeboks for editor",legend:"I en listeboks, gå til neste alternativ i listen med TAB eller PILTAST NED. Gå til forrige alternativ i listen med SHIFT + TAB eller PILTAST OPP. Trykk MELLOMROM eller ENTER for å velge alternativet i listen. Trykk ESC for å lukke listeboksen."},{name:"Verktøylinje for elementsti",legend:"Trykk ${elementsPathFocus} for å navigere til verktøylinjen som viser elementsti. Gå til neste elementknapp med TAB eller HØYRE PILTAST. Gå til forrige elementknapp med SHIFT+TAB eller VENSTRE PILTAST. Trykk MELLOMROM eller ENTER for å velge elementet i editoren."}]},
{name:"Hurtigtaster",items:[{name:"Angre",legend:"Trykk ${undo}"},{name:"Gjør om",legend:"Trykk ${redo}"},{name:"Fet tekst",legend:"Trykk ${bold}"},{name:"Kursiv tekst",legend:"Trykk ${italic}"},{name:"Understreking",legend:"Trykk ${underline}"},{name:"Lenke",legend:"Trykk ${link}"},{name:"Skjul verktøylinje",legend:"Trykk ${toolbarCollapse}"},{name:"Gå til forrige fokusområde",legend:"Trykk ${accessPreviousSpace} for å komme til nærmeste fokusområde før skrivemarkøren som ikke kan nås på vanlig måte, for eksempel to tilstøtende HR-elementer. Gjenta tastekombinasjonen for å komme til fokusområder lenger unna i dokumentet."},
{name:"Gå til neste fokusområde",legend:"Trykk ${accessNextSpace} for å komme til nærmeste fokusområde etter skrivemarkøren som ikke kan nås på vanlig måte, for eksempel to tilstøtende HR-elementer. Gjenta tastekombinasjonen for å komme til fokusområder lenger unna i dokumentet."},{name:"Hjelp for tilgjengelighet",legend:"Trykk ${a11yHelp}"}]}]}); | PypiClean |
/KD_Lib-0.0.32.tar.gz/KD_Lib-0.0.32/KD_Lib/KD/text/BERT2LSTM/bert2lstm.py | import random
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AdamW, BertForSequenceClassification, BertTokenizer
from KD_Lib.KD.common import BaseClass
from KD_Lib.KD.text.utils import get_bert_dataloader
class BERT2LSTM(BaseClass):
"""
Implementation of Knowledge distillation from the paper "Distilling Task-Specific
Knowledge from BERT into Simple Neural Networks" https://arxiv.org/pdf/1903.12136.pdf
:param student_model (torch.nn.Module): Student model
:param distill_train_loader (torch.utils.data.DataLoader): Student Training Dataloader for distillation
:param distill_val_loader (torch.utils.data.DataLoader): Student Testing/validation Dataloader
:param train_df (pandas.DataFrame): Dataframe for training the teacher model
:param val_df (pandas.DataFrame): Dataframe for validating the teacher model
:param optimizer_student (torch.optim.Optimizer): Optimizer used for training the student
:param num_classes (int): Number of output classes
:param seed (int): Random seed for reproducibility
:param distil_weight (float): Weight parameter for distillation loss
:param device (str): Device used for training; 'cpu' for cpu and 'cuda' for gpu
:param log (bool): True if logging required
:param logdir (str): Directory for storing logs
:param max_seq_length (int): Maximum sequence length used by the BERT tokenizer
"""
def __init__(
self,
student_model,
distill_train_loader,
distill_val_loader,
optimizer_student,
train_df,
val_df,
num_classes=2,
seed=42,
distil_weight=0.5,
device="cpu",
log=False,
logdir="./Experiments",
max_seq_length=128,
):
teacher_model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels=num_classes,
output_attentions=False,
output_hidden_states=False,
)
optimizer_teacher = AdamW(teacher_model.parameters(), lr=2e-5, eps=1e-8)
super(BERT2LSTM, self).__init__(
teacher_model,
student_model,
distill_train_loader,
distill_val_loader,
optimizer_teacher,
optimizer_student,
None,
None,
distil_weight,
device,
log,
logdir,
)
self.set_seed(42)
self.train_df, self.val_df = train_df, val_df
self.bert_tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.max_seq_length = max_seq_length
def set_seed(self, seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def _get_teacher_dataloaders(self, batch_size=16, mode="train"):
"""
Helper function for generating dataloaders for the teacher
"""
df = self.val_df if (mode == "validate") else self.train_df
return get_bert_dataloader(
df, self.bert_tokenizer, self.max_seq_length, batch_size, mode
)
def calculate_kd_loss(self, y_pred_student, y_pred_teacher, y_true):
"""
Function used for calculating the KD loss during distillation
:param y_pred_student (torch.FloatTensor): Prediction made by the student model
:param y_pred_teacher (torch.FloatTensor): Prediction made by the teacher model
:param y_true (torch.FloatTensor): Original label
"""
teacher_out = y_pred_teacher
student_out = y_pred_student
self.criterion_ce = torch.nn.CrossEntropyLoss()
self.criterion_mse = torch.nn.MSELoss()
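# Distillation objective as implemented below:
#   loss = (1 - distil_weight) * CE(student_out, y_true)
#        + distil_weight * MSE(teacher_out, student_out)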
loss = (1 - self.distil_weight) * self.criterion_ce(student_out, y_true)
loss += (self.distil_weight) * self.criterion_mse(teacher_out, student_out)
return loss
def train_teacher(
self,
epochs=1,
plot_losses=True,
save_model=True,
save_model_pth="./models/teacher.pt",
train_batch_size=16,
batch_print_freq=40,
val_batch_size=16,
):
"""
Function that will be training the teacher
:param epochs (int): Number of epochs you want to train the teacher
:param plot_losses (bool): True if you want to plot the losses
:param save_model (bool): True if you want to save the teacher model
:param save_model_pth (str): Path where you want to store the teacher model
:param train_batch_size (int): Batch size parameter for generating dataloaders
:param batch_print_freq (int): Frequency at which batch number needs to be printed per epoch
"""
self.teacher_train_loader = self._get_teacher_dataloaders(
train_batch_size, mode="train"
)
self.teacher_model.to(self.device)
self.teacher_model.train()
# training_stats = []
loss_arr = []
length_of_dataset = len(self.teacher_train_loader.dataset)
best_acc = 0.0
self.best_teacher_model_weights = deepcopy(self.teacher_model.state_dict())
print("Training Teacher... ")
for ep in range(0, epochs):
print("")
print("======== Epoch {:} / {:} ========".format(ep + 1, epochs))
epoch_loss = 0.0
correct = 0
for step, batch in enumerate(self.teacher_train_loader):
if step % (batch_print_freq) == 0 and not step == 0:
print(
" Batch {:>5,} of {:>5,}.".format(
step, len(self.teacher_train_loader)
)
)
b_input_ids = batch[0].to(self.device)
b_input_mask = batch[1].to(self.device)
b_labels = batch[2].to(self.device)
self.optimizer_teacher.zero_grad()
loss, logits = self.teacher_model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
epoch_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to("cpu").numpy()
preds = np.argmax(logits, axis=1).flatten()
labels = label_ids.flatten()
correct += np.sum(preds == labels)
loss.backward()
# For preventing exploding gradients
torch.nn.utils.clip_grad_norm_(self.teacher_model.parameters(), 1.0)
self.optimizer_teacher.step()
epoch_acc = correct / length_of_dataset
print(f"Loss: {epoch_loss} | Accuracy: {epoch_acc}")
_, epoch_val_acc = self.evaluate_teacher(val_batch_size)
if epoch_val_acc > best_acc:
best_acc = epoch_val_acc
self.best_teacher_model_weights = deepcopy(
self.teacher_model.state_dict()
)
if self.log:
self.writer.add_scalar("Training loss/Teacher", epoch_loss, epochs)
self.writer.add_scalar("Training accuracy/Teacher", epoch_acc, epochs)
self.writer.add_scalar(
"Validation accuracy/Teacher", epoch_val_acc, epochs
)
loss_arr.append(epoch_loss)
self.teacher_model.load_state_dict(self.best_teacher_model_weights)
if save_model:
torch.save(self.teacher_model.state_dict(), save_model_pth)
if plot_losses:
plt.plot(loss_arr)
def train_student(
self,
epochs=10,
plot_losses=True,
save_model=True,
save_model_pth="./models/student.pth",
):
"""
Function that will be training the student
:param epochs (int): Number of epochs you want to train the student
:param plot_losses (bool): True if you want to plot the losses
:param save_model (bool): True if you want to save the student model
:param save_model_pth (str): Path where you want to save the student model
"""
self.teacher_distill_loader = self._get_teacher_dataloaders(
batch_size=self.train_loader.batch_size, mode="distill"
)
y_pred_teacher = []
print("Obtaining teacher predictions...")
self.teacher_model.eval()
self.teacher_model.to(self.device)
for batch in self.teacher_distill_loader:
b_input_ids = batch[0].to(self.device)
b_input_mask = batch[1].to(self.device)
b_labels = batch[2].to(self.device)
with torch.no_grad():
(loss, logits) = self.teacher_model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
logits = logits.detach().cpu().numpy()
y_pred_teacher.append(logits)
self.student_model.train()
loss_arr = []
length_of_dataset = len(self.train_loader.dataset)
best_acc = 0.0
self.best_student_model_weights = deepcopy(self.student_model.state_dict())
self.student_model.to(self.device)
print("\nTraining student...")
for ep in range(epochs):
print("")
print("======== Epoch {:} / {:} ========".format(ep + 1, epochs))
epoch_loss = 0.0
correct = 0
for (data, data_len, label), bert_prob in zip(
self.train_loader, y_pred_teacher
):
data = data.to(self.device)
data_len = data_len.to(self.device)
label = label.to(self.device)
bert_prob = torch.tensor(bert_prob, dtype=torch.float)
teacher_out = bert_prob.to(self.device)
self.optimizer_student.zero_grad()
student_out = self.student_model(data, data_len).squeeze(1)
loss = self.calculate_kd_loss(student_out, teacher_out, label)
pred = student_out.argmax(dim=1, keepdim=True)
correct += pred.eq(label.view_as(pred)).sum().item()
loss.backward()
# ##For preventing exploding gradients
# torch.nn.utils.clip_grad_norm_(self.student_model.parameters(), 1.0)
self.optimizer_student.step()
epoch_loss += loss
epoch_acc = correct / length_of_dataset
print(f"Loss: {epoch_loss} | Accuracy: {epoch_acc}")
_, epoch_val_acc = self.evaluate_student()
if epoch_val_acc > best_acc:
best_acc = epoch_val_acc
self.best_student_model_weights = deepcopy(
self.student_model.state_dict()
)
if self.log:
self.writer.add_scalar("Training loss/Student", epoch_loss, epochs)
self.writer.add_scalar("Training accuracy/Student", epoch_acc, epochs)
self.writer.add_scalar(
"Validation accuracy/Student", epoch_val_acc, epochs
)
loss_arr.append(epoch_loss)
print(f"Epoch: {ep+1}, Loss: {epoch_loss}, Accuracy: {epoch_acc}")
self.student_model.load_state_dict(self.best_student_model_weights)
if save_model:
torch.save(self.student_model.state_dict(), save_model_pth)
if plot_losses:
plt.plot(loss_arr)
def evaluate_student(self, verbose=True):
"""
Function used for evaluating student
:param verbose (bool): True if the accuracy needs to be printed else False
"""
self.student_model.eval()
self.student_model.to(self.device)
length_of_dataset = len(self.val_loader.dataset)
correct = 0
outputs = []
with torch.no_grad():
for data, data_len, target in self.val_loader:
data = data.to(self.device)
data_len = data_len.to(self.device)
target = target.to(self.device)
output = self.student_model(data, data_len).squeeze(1)
outputs.append(output)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
accuracy = correct / length_of_dataset
if verbose:
print("-" * 80)
print(f"Accuracy: {accuracy}")
return outputs, accuracy
def evaluate_teacher(self, val_batch_size=16, verbose=True):
"""
Function used for evaluating the teacher
:param val_batch_size (int): Batch size parameter for generating dataloaders
:param verbose (bool): True if the accuracy needs to be printed else False
"""
self.teacher_val_loader = self._get_teacher_dataloaders(
val_batch_size, mode="validate"
)
self.teacher_model.to(self.device)
self.teacher_model.eval()
correct = 0
length_of_dataset = len(self.teacher_val_loader.dataset)
print("Evaluating teacher...")
outputs = []
for batch in self.teacher_val_loader:
b_input_ids = batch[0].to(self.device)
b_input_mask = batch[1].to(self.device)
b_labels = batch[2].to(self.device)
with torch.no_grad():
(loss, logits) = self.teacher_model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to("cpu").numpy()
# out = F.softmax(logits, dim=1)
preds = np.argmax(logits, axis=1).flatten()
labels = label_ids.flatten()
correct += np.sum(preds == labels)
outputs.append(preds)
accuracy = correct / length_of_dataset
if verbose:
print("-" * 80)
print(f"Accuracy: {accuracy}")
return outputs, accuracy | PypiClean |
/ApiDoc-1.4.0.tar.gz/ApiDoc-1.4.0/apidoc/object/source_raw.py | from apidoc.lib.util.enum import Enum
from apidoc.object import Comparable
class Root():
"""Root object of sources elements
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.configuration = Configuration()
self.versions = {}
self.categories = {}
self.methods = {}
self.types = {}
self.references = {}
class Element():
"""Generic element
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.name = None
self.description = None
class Sampleable():
"""Element who can provide samples
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.sample = None
def get_sample(self):
"""Return the a sample for the element
"""
if self.sample is None:
return self.get_default_sample()
return self.sample
def get_default_sample(self):
"""Return default value for the element
"""
return "my_%s" % self.name
class Constraintable():
"""Element who can provide constraints
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.constraints = {}
class Displayable():
"""Element who can be displayed
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.display = True
self.label = ""
class Configuration(Element):
"""Element Configuration
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.uri = None
self.title = None
class Version(Element, Displayable, Comparable):
"""Element Version
"""
class Status(Enum):
"""List of availables Status for this element
"""
current = 1
beta = 2
deprecated = 3
draft = 4
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.uri = None
self.full_uri = None
self.major = 1
self.minor = 0
self.status = Version.Status("current")
self.methods = {}
self.types = {}
self.references = {}
def get_comparable_values(self):
"""Return a tupple of values representing the unicity of the object
"""
return (int(self.major), int(self.minor), str(self.name))
class Category(Element, Displayable):
"""Element Category
"""
def __init__(self, name):
"""Class instantiation
"""
super().__init__()
self.name = name
self.label = name
self.order = 99
class Method(Element, Displayable, Comparable):
"""Element Method
"""
class Methods(Enum):
"""List of availables Methods for this element
"""
get = 1
post = 2
put = 3
delete = 4
head = 5
option = 6
patch = 7
@property
def message(self):
"""Return default message for this element
"""
if self.code != 200:
for code in self.response_codes:
if code.code == self.code:
return code.message
raise ValueError("Unknown response code \"%s\" in \"%s\"." % (self.code, self.name))
return "OK"
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.code = 200
self.uri = None
self.absolute_uri = None
self.full_uri = None
self.category = None
self.method = Method.Methods("get")
self.request_headers = {}
self.request_parameters = {}
self.request_body = None
self.response_codes = []
self.response_body = None
def get_comparable_values(self):
"""Return a tupple of values representing the unicity of the object
"""
return (str(self.name))
class Parameter(Element, Sampleable):
"""Element Parameter
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = None
self.optional = False
self.generic = False
self.type_object = None
self.position = 0
def get_object(self):
object = Object.factory(self.type, None)
object.name = self.name
return object
def get_default_sample(self):
"""Return default value for the element
"""
if self.type not in Object.Types or self.type is Object.Types.type:
return self.type_object.get_sample()
else:
return self.get_object().get_sample()
class ResponseCode(Element):
"""Element ResponseCode
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.code = 200
self.message = None
self.generic = False
class Type(Element, Comparable, Sampleable):
"""Element Type
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.format = TypeFormat()
self.category = None
self.item = None
def get_sample(self):
"""Return the a sample for the element
"""
if self.item is not None:
return self.item.get_sample()
else:
return super().get_sample()
def get_comparable_values(self):
"""Return a tupple of values representing the unicity of the object
"""
return (str(self.name))
class TypeFormat():
"""Element TypeFormat
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.pretty = None
self.advanced = None
class Constraint(Comparable):
"""An oobject's constraint
"""
def __init__(self, name, constraint):
"""Class instantiation
"""
super().__init__()
self.name = name
self.constraint = constraint
def __str__(self):
return '%s: %s' % (self.name, str(self.constraint))
def __repr__(self):
return "%s(%r)" % (self.__class__, self.__dict__)
def get_comparable_values(self):
"""Return a tupple of values representing the unicity of the object
"""
return (str(self.name))
class Object(Element, Sampleable):
"""Element Object
"""
class Types(Enum):
"""List of availables Types for this element
"""
object = 1
array = 2
number = 3
string = 4
boolean = 5
none = 6
reference = 7
type = 8
dynamic = 9
const = 10
enum = 11
integer = 12
any = 13
@classmethod
def factory(cls, str_type, version):
"""Return a proper object
"""
type = Object.Types(str_type)
if type is Object.Types.object:
object = ObjectObject()
elif type is Object.Types.array:
object = ObjectArray()
elif type is Object.Types.number:
object = ObjectNumber()
elif type is Object.Types.integer:
object = ObjectInteger()
elif type is Object.Types.string:
object = ObjectString()
elif type is Object.Types.boolean:
object = ObjectBoolean()
elif type is Object.Types.reference:
object = ObjectReference()
elif type is Object.Types.type:
object = ObjectType()
elif type is Object.Types.none:
object = ObjectNone()
elif type is Object.Types.dynamic:
object = ObjectDynamic()
elif type is Object.Types.const:
object = ObjectConst()
elif type is Object.Types.enum:
object = ObjectEnum()
else:
object = Object()
object.type = type
object.version = version
return object
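    # A small usage sketch (illustrative only): the factory maps a type name to
    # the matching Object subclass and tags it with the requested version.
    #
    #   >>> item = Object.factory("string", None)
    #   >>> isinstance(item, ObjectString)
    #   True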
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = None
self.optional = False
class ObjectObject(Object, Constraintable):
"""Element ObjectObject
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("object")
self.properties = {}
self.pattern_properties = {}
self.additional_properties = None
class ObjectArray(Object, Constraintable):
"""Element ObjectArray
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("array")
self.items = None
self.sample_count = 2
class ObjectNumber(Object, Constraintable):
"""Element ObjectNumber
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("number")
def get_default_sample(self):
"""Return default value for the element
"""
return '13.37'
class ObjectInteger(Object, Constraintable):
"""Element ObjectInteger
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("integer")
def get_default_sample(self):
"""Return default value for the element
"""
return '42'
class ObjectString(Object, Constraintable):
"""Element ObjectString
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("string")
class ObjectBoolean(Object, Constraintable):
"""Element ObjectBoolean
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("boolean")
def get_default_sample(self):
"""Return default value for the element
"""
return True
class ObjectNone(Object, Constraintable):
"""Element ObjectNone
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("none")
class ObjectDynamic(Object, Constraintable):
"""Element ObjectDynamic
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("dynamic")
self.items = None
def get_default_sample(self):
"""Return default value for the element
"""
return {
"key1": "my_%s" % self.name,
"key2": "sample"
}
class ObjectConst(Object, Constraintable):
"""Element ObjectConst
"""
class Types(Enum):
"""List of availables Primaries for this element
"""
string = 1
boolean = 2
number = 3
integer = 4
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("const")
self.const_type = ObjectConst.Types.string
self.value = None
def get_default_sample(self):
"""Return default value for the element
"""
return self.value
class ObjectEnum(Object, Constraintable):
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("enum")
self.values = []
self.descriptions = []
def get_default_sample(self):
"""Return default value for the element
"""
if not self.values:
return super().get_default_sample()
return self.values[0]
class EnumValue(Object, Comparable):
def get_comparable_values(self):
"""Return a tupple of values representing the unicity of the object
"""
return (str(self.name), str(self.description))
class ObjectReference(Object):
"""Element ObjectReference
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("reference")
self.reference_name = None
class ObjectType(Object, Constraintable):
"""Element ObjectType
"""
def __init__(self):
"""Class instantiation
"""
super().__init__()
self.type = Object.Types("type")
self.type_name = None
self.type_object = None
def get_default_sample(self):
"""Return default value for the element
"""
if self.type_object is None:
return super().get_default_sample()
return self.type_object.get_sample() | PypiClean |
/HydPy-5.0.1-cp38-cp38-win_amd64.whl/hydpy/docs/rst/HydPy-C.rst | .. _HydPy-C:
HydPy-C (Conv)
==============
`HydPy-C` models are not real hydrological models. Instead, they serve as
converters that allow connecting different kinds of models whose outputs and
required inputs do not fit together immediately. The most typical use case is
interpolating data, which is implemented by application model |conv_v001|
using the nearest-neighbour approach, by application model |conv_v002| using
the inverse distance weighting approach, and by application model |conv_v003|
combining inverse distance weighting with linear regression.
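
The following sketch only illustrates the inverse distance weighting idea in
plain Python (one-dimensional positions for brevity); it is a conceptual
example and does not use the HydPy API:

.. code-block:: python

    def idw(target, neighbours, power=2.0):
        """Interpolate the value at `target` from (position, value) pairs."""
        weights, weighted_values = [], []
        for position, value in neighbours:
            distance = abs(target - position)
            if distance == 0.0:
                # The target coincides with a neighbour; return its value.
                return value
            weight = 1.0 / distance ** power
            weights.append(weight)
            weighted_values.append(weight * value)
        return sum(weighted_values) / sum(weights)

    # idw(2.0, [(0.0, 1.0), (1.0, 2.0), (4.0, 5.0)]) returns roughly 2.33
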
Base model:
.. toctree::
:maxdepth: 1
conv
Application models:
.. toctree::
conv_v001
conv_v002
conv_v003 | PypiClean |
/ORE_strhub-0.0.1-py3-none-any.whl/strhub/models/trba/model.py | import torch.nn as nn
from strhub.models.modules import BidirectionalLSTM
from .feature_extraction import ResNet_FeatureExtractor
from .prediction import Attention
from .transformation import TPS_SpatialTransformerNetwork
class TRBA(nn.Module):
def __init__(self, img_h, img_w, num_class, num_fiducial=20, input_channel=3, output_channel=512, hidden_size=256,
use_ctc=False):
super().__init__()
""" Transformation """
self.Transformation = TPS_SpatialTransformerNetwork(
F=num_fiducial, I_size=(img_h, img_w), I_r_size=(img_h, img_w),
I_channel_num=input_channel)
""" FeatureExtraction """
self.FeatureExtraction = ResNet_FeatureExtractor(input_channel, output_channel)
self.FeatureExtraction_output = output_channel
self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) # Transform final (imgH/16-1) -> 1
""" Sequence modeling"""
self.SequenceModeling = nn.Sequential(
BidirectionalLSTM(self.FeatureExtraction_output, hidden_size, hidden_size),
BidirectionalLSTM(hidden_size, hidden_size, hidden_size))
self.SequenceModeling_output = hidden_size
""" Prediction """
if use_ctc:
self.Prediction = nn.Linear(self.SequenceModeling_output, num_class)
else:
self.Prediction = Attention(self.SequenceModeling_output, hidden_size, num_class)
def forward(self, image, max_label_length, text=None):
""" Transformation stage """
image = self.Transformation(image)
""" Feature extraction stage """
visual_feature = self.FeatureExtraction(image)
visual_feature = visual_feature.permute(0, 3, 1, 2) # [b, c, h, w] -> [b, w, c, h]
visual_feature = self.AdaptiveAvgPool(visual_feature) # [b, w, c, h] -> [b, w, c, 1]
visual_feature = visual_feature.squeeze(3) # [b, w, c, 1] -> [b, w, c]
""" Sequence modeling stage """
contextual_feature = self.SequenceModeling(visual_feature) # [b, num_steps, hidden_size]
""" Prediction stage """
if isinstance(self.Prediction, Attention):
prediction = self.Prediction(contextual_feature.contiguous(), text, max_label_length)
else:
prediction = self.Prediction(contextual_feature.contiguous()) # CTC
return prediction # [b, num_steps, num_class] | PypiClean |
/LitleSdkPython3-9.3.1b0.tar.gz/LitleSdkPython3-9.3.1b0/litleSdkPythonTestv2/unit/TestConfigOverride.py |
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
from mock import *
class TestConfigOverride(unittest.TestCase):
def setUp(self):
self.seq = list(range(10))
def testUserOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, user='Dan')
match_re = RegexMatcher(".*?<user>Dan</user>.*?")
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testPasswordOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, password = 'customPassword')
match_re = RegexMatcher(".*?<password>customPassword</password>.*?")
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testVersionOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, version="3.14")
match_re = RegexMatcher('.*?version="9.3".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testMerchantIdOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, merchantId="98765")
match_re = RegexMatcher('.*?merchantId="98765".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testReportGroupOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, reportGroup="testReports")
match_re = RegexMatcher('.*?reportGroup="testReports".*?')
comm.http_post.assert_called_once_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def testTimeoutOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, timeout=42)
comm.http_post.assert_called_once_with(ANY, url=ANY, proxy=ANY, timeout=42)
def testUrlOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, url="www.customurl.com")
comm.http_post.assert_called_once_with(ANY, url="www.customurl.com", proxy=ANY, timeout=ANY)
def testProxyOverride(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config)
comm.http_post = MagicMock()
litle = litleOnlineRequest(config)
litle.setCommunications(comm)
litle._processResponse = MagicMock(return_value=None)
litle.sendRequest(authorization, proxy="bumpyproxy:1776")
comm.http_post.assert_called_once_with(ANY, url=ANY, proxy="bumpyproxy:1776", timeout=ANY)
def testMissingUser(self):
config2 = Configuration()
config2.password = 'Pass'
config2.merchantId = '12345'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config2)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config2)
def testMissingPassword(self):
config3 = Configuration()
config3.username = 'User'
config3.merchantId = '12345'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config3)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config3)
def testMissingId(self):
config4 = Configuration()
config4.username = 'User'
config4.password = 'Pass'
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
comm = Communications(config4)
comm.http_post = MagicMock()
with self.assertRaises(AttributeError):
litleOnlineRequest(config4)
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(TestConfigOverride)
return suite
if __name__ =='__main__':
unittest.main() | PypiClean |
/KonFoo-3.0.0-py3-none-any.whl/konfoo/core.py | from __future__ import annotations
import abc
import calendar
import copy
import csv
import datetime
import ipaddress
import json
import math
import struct
import time
from configparser import ConfigParser
from operator import attrgetter
from typing import (
Any, Callable,
Iterable, Iterator,
Literal,
ItemsView, KeysView, ValuesView,
Mapping, MutableSequence, NamedTuple, Type)
from .categories import Category
from .enums import Enumeration
from .exceptions import (
ByteOrderTypeError, ByteOrderValueError,
EnumTypeError, FactoryTypeError, MemberTypeError,
ProviderTypeError, ContainerLengthError,
FieldAddressError, FieldAlignmentError, FieldByteOrderError,
FieldIndexError, FieldSizeError, FieldTypeError, FieldValueError,
FieldValueEncodingError,
FieldGroupByteOrderError, FieldGroupOffsetError, FieldGroupSizeError
)
from .globals import (
ItemClass, Byteorder, BYTEORDER, clamp)
from .options import (
Option,
byte_order_option, get_byte_order, nested_option, get_nested,
verbose_option, verbose
)
from .providers import Provider
def is_any(instance: Any) -> bool:
return isinstance(instance, (Structure, Sequence, Field))
def is_provider(instance: Any) -> bool:
return isinstance(instance, Provider)
def is_field(instance: Any) -> bool:
return isinstance(instance, Field)
def is_container(instance: Any) -> bool:
return isinstance(instance, (Sequence, Structure))
def is_sequence(instance: Any) -> bool:
return isinstance(instance, Sequence)
def is_array(instance: Any) -> bool:
return isinstance(instance, Array)
def is_structure(instance: Any) -> bool:
return isinstance(instance, Structure)
def is_mapping(instance: Any) -> bool:
return isinstance(instance, Mapping)
def is_pointer(instance: Any):
return isinstance(instance, Pointer)
def is_mixin(instance: Any) -> bool:
return is_container(instance) or is_pointer(instance)
class Patch(NamedTuple):
""" The :class:`Patch` class contains the relevant information to patch a
memory area of a `data source` accessed via a data :class:`Provider` by a
:class:`Pointer` field.
:param bytes buffer: byte stream for the memory area to patch within the
data source. The byte stream contains the data of the patch item.
:param int address: address of the memory area to patch in the data source.
:param Byteorder byteorder: byte order of the memory area to patch in the
data source.
:param int bit_size: bit size of the patch item.
:param int bit_offset: bit offset of the patch item within the memory area.
:param bool inject: if :data:`True` the patch item must be injected into the
memory area.
"""
#: Byte stream for the memory area in the data source to patch.
buffer: bytes
#: Start address of the memory area in the data source to patch.
address: int
#: Byte order of the memory area in the data source to patch.
byteorder: Byteorder
#: Bit size of the patch item.
bit_size: int
#: Bit offset of the patch item within the memory area.
bit_offset: int
#: Indicate the patch item must be injected into the memory area or not.
inject: bool = False
class Index(NamedTuple):
""" The :class:`Index` class contains the relevant information of the
location of a :class:`Field` in a `byte stream` and in a `data source`.
The `byte stream` is normally provided by a :class:`Pointer` field. The
`data source` is normally accessed via a data :class:`Provider` by a
:class:`Pointer` field.
:param int byte: byte offset of the :class:`Field` in the byte stream.
:param int bit: bit offset of the :class:`Field` relative to its byte offset.
:param int address: address of the :class:`Field` in the data source.
:param int base_address: start address of the byte stream in the data source.
:param bool update: if :data:`True` the byte stream needs to be updated.
"""
#: Byte offset of the :class:`Field` in the byte stream.
byte: int = 0
#: Bit offset of the :class:`Field` relative to its byte offset.
bit: int = 0
#: Address of the :class:`Field` in the data source.
address: int = 0
#: Start address of the byte stream in the data source.
base_address: int = 0
#: Indicates the byte stream needs to be updated or not.
update: bool = False
class Alignment(NamedTuple):
""" The :class:`Alignment` class contains the location of the :class:`Field`
within an aligned group of consecutive fields.
:param int byte_size: size of the *field group* in bytes
which the :class:`Field` is aligned to.
:param int bit_offset: bit offset of the :class:`Field`
within its aligned *field group*.
"""
#: Size of the *field group* in bytes which the :class:`Field` is aligned to.
byte_size: int = 0
#: Bit offset of the :class:`Field` within its aligned *field group*.
bit_offset: int = 0
class CustomizedJsonEncoder(json.JSONEncoder):
""" Customized JSON encoder.
"""
def default(self, instance):
if isinstance(instance, Category):
return instance.name
return super().default(instance)
class Container:
""" The :class:`Container` class is an *abstract interface* for all classes
which can contain :class:`Field` items. Container classes are
:class:`Structures <Structure>`,
:class:`Sequences <Sequence>`,
:class:`Arrays <Array>` and
:class:`Pointers <Pointer>`.
The :class:`Container` class provides core features to **view**, **save**
and **load** the *attributes* of the :class:`Field` items in the `Container`.
"""
@abc.abstractmethod
def view_fields(self,
*attributes: str,
**options: Any) -> dict[str, Any] | list[Any]:
""" Returns a container with the selected field *attribute* or with the
dictionary of the selected field *attributes* for each :class:`Field`
*nested* in the `Container`.
The *attributes* of each :class:`Field` for containers *nested* in the
`Container` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
selected field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` views their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
.. note::
This abstract method must be implemented by a derived class.
"""
pass
@abc.abstractmethod
def field_items(self,
path: str = str(),
**options: Any) -> list[tuple[str, Field]]:
""" Returns a **flatten** list of ``('field path', field item)`` tuples
for each :class:`Field` *nested* in the `Container`.
:param str path: item path.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Container` list their referenced :attr:`~Pointer.data` object field
items as well (chained method call).
.. note::
This abstract method must be implemented by a derived class.
"""
return list()
@nested_option()
def to_list(self,
*attributes: str,
**options: Any) -> list[tuple[str, Any] |
tuple[str, tuple[Any, ...]]]:
""" Returns a **flatten** list of ``('field path', attribute)`` or
``('field path', tuple(attributes))`` tuples for each :class:`Field`
*nested* in the `Container`.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword bool chain: if :data:`True` the field *attributes* are chained
to its field path. Defaults to ``False``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
# Name of the Container
name = options.pop('name', self.__class__.__name__)
fields = list()
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
for item in self.field_items(**options):
field_path, field = item
if field_path.startswith('['):
# Sequence
field_path = f"{name}{field_path}"
else:
field_path = f"{name}.{field_path}"
if options.get('chain', False) and len(attributes) > 1:
fields.append((field_path, *field_getter(field)))
else:
fields.append((field_path, field_getter(field)))
return fields
@nested_option()
def to_dict(self,
*attributes: str,
**options: Any) -> dict[str, Any | tuple[Any, ...]]:
""" Returns a **flatten** :class:`dict` of ``{'field path': attribute}``
or ``{'field path': tuple(attributes)}`` pairs for each :class:`Field`
*nested* in the `Container`.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
# Name of the Container
name = options.pop('name', self.__class__.__name__)
# Save to file
save = options.pop('save', False)
fields = dict()
fields[name] = dict()
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
for item in self.field_items(**options):
field_path, field = item
if save and field_path.startswith('['):
# Sequence element
field_path = '_' + field_path
fields[name][field_path] = field_getter(field)
return fields
@nested_option()
def save(self,
file: str,
*attributes: str,
**options: Any) -> None:
""" Saves the selected field *attributes* for each :class:`Field`
*nested* in the `Container` to an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str section: section in the ``.ini`` file to look for the
:class:`Field` values of the `Container`. If no *section* is
specified the class name of the instance is used.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` saves their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}'
>>> foo.save('foo.ini')
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
"""
options['save'] = True
parser = ConfigParser()
parser.read_dict(self.to_dict(*attributes, **options))
with open(file, 'w') as file_:
parser.write(file_)
file_.close()
@nested_option()
@verbose_option(True)
def load(self,
file: str,
**options: Any) -> None:
""" Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` file.
:keyword str section: section in the ``.ini`` file to look-up the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object field
values as well (chained method call).
:keyword bool verbose: if :data:`True` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}'
"""
section = options.pop('section', self.__class__.__name__)
parser = ConfigParser()
parser.read(file)
if parser.has_section(section):
verbose(options, f"[{section}]")
for field_path, field in self.field_items(**options):
if field_path.startswith('['):
# Sequence element
option = '_' + field_path
else:
option = field_path
if parser.has_option(section, option):
# Bool fields
if field.is_bool():
field.value = parser.getboolean(section, option)
# Float fields
elif field.is_float():
field.value = parser.getfloat(section, option)
# String fields
elif field.is_string():
field.value = parser.get(section, option)
# Stream fields
elif field.is_stream():
value = parser.get(section, option)
stream = bytes.fromhex(value.replace("'", ""))
# Auto size a zero sized stream field to the current length
if not field:
field.resize(len(stream))
field.value = stream
# Decimal fields
else:
field.value = parser.get(section, option)
if field_path.startswith('['):
verbose(options,
f"{section}{field_path} = {field.value}")
else:
verbose(options,
f"{section}.{field_path} = {field.value}")
else:
verbose(options, f"No section [{section}] found.")
@nested_option()
def to_json(self,
*attributes: str,
**options: Any) -> str:
""" Returns the selected field *attributes* for each :class:`Field`
*nested* in the `Container` as a JSON formatted string.
The *attributes* of each :class:`Field` for containers *nested* in the
`Container` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
selected field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` views their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
nested = options.pop('nested', False)
fieldnames = options.pop('fieldnames', attributes)
if 'cls' in options.keys():
return json.dumps(self.view_fields(*attributes,
nested=nested,
fieldnames=fieldnames),
**options)
else:
return json.dumps(self.view_fields(*attributes,
nested=nested,
fieldnames=fieldnames),
cls=CustomizedJsonEncoder,
**options)
@nested_option()
def write_json(self,
file: str,
*attributes: str,
**options: Any) -> None:
""" Writes the selected field *attributes* for each :class:`Field`
*nested* in the `Container` to a ``.json`` *file*.
:param str file: name and location of the ``.json`` *file*.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
field *path* and the selected field *attributes*.
Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
content = self.to_json(*attributes, **options)
with open(file, 'w', newline='') as file_:
file_.write(content)
@staticmethod
def _get_fieldnames(*attributes: str,
**options: Any) -> list[str]:
# Default dictionary keys
keys = ['id']
if attributes:
keys.extend(attributes)
else:
keys.append('value')
# Customized dictionary keys
return options.get('fieldnames', keys)
@nested_option()
def to_csv(self,
*attributes: str,
**options: Any) -> list[dict[str, Any]]:
""" Returns a **flatten** list of dictionaries containing the field
*path* and the selected field *attributes* for each :class:`Field`
*nested* in the `Container`.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for
the field *path* and the selected field *attributes*.
Defaults to ``('id', *attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
keys = self._get_fieldnames(*attributes, **options)
options['chain'] = True
return [dict(zip(keys, field)) for field in
self.to_list(*attributes, **options)]
@nested_option()
def write_csv(self,
file: str,
*attributes: str,
**options: Any) -> None:
""" Writes the field *path* and the selected field *attributes* for each
:class:`Field` *nested* in the `Container` to a ``.csv`` *file*.
:param str file: name and location of the ``.csv`` *file*.
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword str name: name of the `Container`.
Default is the class name of the instance.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
field *path* and the selected field *attributes*.
Defaults to ``('id', *attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Container` lists their referenced :attr:`~Pointer.data` object field
attributes as well (chained method call).
"""
with open(file, 'w', newline='') as file_:
fieldnames = self._get_fieldnames(*attributes, **options)
writer = csv.DictWriter(file_, fieldnames)
writer.writeheader()
content = self.to_csv(*attributes, **options)
for row in content:
writer.writerow(row)
class Structure(dict, Container):
""" The :class:`Structure` is a :class:`dict` whereby the dictionary `key`
describes the *name* of a *member* of the `Structure` and the `value` of the
dictionary `key` describes the *type* of the *member*. Allowed *members* are
:class:`Structure`, :class:`Sequence`, :class:`Array` or :class:`Field`
instances.
The :class:`Structure` class extends the :class:`dict` with the
:class:`Container` interface and attribute getter and setter for the
``{'key': value}`` tuples to access and to assign the *members* of the
`Structure` easier, but this comes with the trade-off that the dictionary
`keys` must be valid Python attribute names.
A `Structure` has additional methods to **read**, **deserialize**,
**serialize** and **view** binary data:
- **Read** from a :class:`Provider` the necessary bytes for each
:attr:`~Pointer.data` object referenced by the :class:`Pointer` fields
in the `Structure` via :meth:`read_from()`.
- **Deserialize** the :attr:`~Field.value` for each :class:`Field`
in the `Structure` from a byte stream via :meth:`deserialize()`.
- **Serialize** the :attr:`~Field.value` for each :class:`Field`
in the `Structure` to a byte stream via :meth:`serialize()`.
- Indexes all fields in the `Structure`
via :meth:`index_fields()`.
- Get the **first** :class:`Field` in the `Structure`
via :meth:`first_field()`.
- Get the accumulated **size** of all fields in the `Structure`
via :meth:`container_size()`.
- View the selected *attributes* for each :class:`Field` in the `Structure`
via :meth:`view_fields()`.
- List the **path** to the field and the field **item** itself for each
      :class:`Field` in the `Structure` as a flattened list via :meth:`field_items()`.
- Get the **metadata** of the `Structure` via :meth:`describe()`.
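    Example (a minimal sketch; the member name ``decimal`` is illustrative):
    >>> structure = Structure()
    >>> structure.decimal = Decimal(8)
    >>> structure.to_list()
    [('Structure.decimal', 0)]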
"""
# Item type.
item_type: ItemClass = ItemClass.Structure
def __init__(self,
*args: Mapping | None,
**kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def __bytes__(self) -> bytes:
buffer = bytearray()
self.serialize(buffer)
return bytes(buffer)
def __getitem__(self, key: str) -> Structure | Sequence | Field:
return super().__getitem__(key)
def __setitem__(self,
name: str,
item: Structure | Sequence | Field | Mapping):
# Structure
if is_structure(item):
super().__setitem__(name, item)
# Dictionaries
elif is_mapping(item):
super().__setitem__(name, Structure(item))
# Sequence
elif is_sequence(item):
super().__setitem__(name, item)
# Field
elif is_field(item):
super().__setitem__(name, item)
else:
raise MemberTypeError(self, item, name)
def __getattr__(self, name: str) -> Any:
""" Returns the :class:`Field` of the `Structure` member whose
dictionary key is equal to the *name*.
If the attribute *name* is in the namespace of the `Structure` class
then the base class is called instead.
The `__getattr__` method is only called when the method
`__getattribute__` raises an `AttributeError` exception.
"""
# Namespace check for dict attribute
if hasattr(Structure, name):
return super().__getattribute__(name)
try:
return self[name]
except KeyError:
raise AttributeError(
f"'{self.__class__.__name__,}' object has not attribute '{name}'")
def __setattr__(self,
name: str,
item: Structure | Sequence | Field) -> None:
""" Assigns the *item* to the member of the `Structure` whose dictionary
key is equal to the *name*.
If the attribute *name* is in the namespace of the `Structure` base class
then the base class is called instead.
"""
# Attribute check
if hasattr(Structure, name):
return super().__setattr__(name, item)
elif is_any(item):
self[name] = item
elif callable(item):
# Field Factory
setitem = item()
if is_any(setitem):
super().__setitem__(name, setitem)
else:
raise FactoryTypeError(self, item, setitem, name)
else:
raise MemberTypeError(self, item, name)
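    # Attribute and item access are interchangeable for members (a sketch; the
    # member name 'length' is illustrative):
    #
    #   >>> structure = Structure()
    #   >>> structure.length = Decimal(8)   # same as structure['length'] = Decimal(8)
    #   >>> structure.length.value
    #   0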
@nested_option()
def read_from(self,
provider: Provider,
**options: Any) -> None:
""" All :class:`Pointer` fields in the `Structure` read the necessary
number of bytes from the data :class:`Provider` for their referenced
        :attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Structure` reads their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
"""
for item in self.values():
# Container or Pointer
if is_mixin(item):
item.read_from(provider, **options)
@byte_order_option()
@nested_option()
def deserialize(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> Index:
""" De-serializes the `Structure` from the byte *buffer* starting at
the beginning of the *buffer* or with the given *index* by mapping the
bytes to the :attr:`~Field.value` for each :class:`Field` in the
`Structure` in accordance with the decoding *byte order* for the
de-serialization and the decoding :attr:`byte_order` of each
:class:`Field` in the `Structure`.
A specific decoding :attr:`~Field.byte_order` of a :class:`Field`
overrules the decoding *byte order* for the de-serialization.
Returns the :class:`Index` of the *buffer* after the last de-serialized
:class:`Field` in the `Structure`.
Optional the de-serialization of the referenced :attr:`~Pointer.data`
objects of all :class:`Pointer` fields in the `Structure` can be
enabled.
:param bytes buffer: byte stream to de-serialize from.
:param Index index: current read :class:`Index` within the *buffer* to
de-serialize.
:keyword byte_order: decoding byte order for the de-serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of a
`Structure` de-serialize their referenced :attr:`~Pointer.data`
object as well (chained method call).
Each :class:`Pointer` field uses for the de-serialization of its
referenced :attr:`~Pointer.data` object its own
:attr:`~Pointer.bytestream`.
"""
for item in self.values():
index = item.deserialize(buffer, index, **options)
return index
@byte_order_option()
@nested_option()
def serialize(self,
buffer: bytearray = bytearray(),
index: Index = Index(),
**options: Any) -> Index:
""" Serializes the `Structure` to the byte *buffer* starting at the
beginning of the *buffer* or with the given *index* by mapping the
:attr:`~Field.value` for each :class:`Field` in the `Structure` to the
byte *buffer* in accordance with the encoding *byte order* for the
serialization and the encoding :attr:`byte_order` of each :class:`Field`
in the `Structure`.
A specific encoding :attr:`~Field.byte_order` of a :class:`Field`
overrules the encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the last serialized
:class:`Field` in the `Structure`.
Optional the serialization of the referenced :attr:`~Pointer.data`
objects of all :class:`Pointer` fields in the `Structure` can be
enabled.
:param bytearray buffer: byte stream to serialize to.
:param Index index: current write :class:`Index` within the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of a
`Structure` serialize their referenced :attr:`~Pointer.data` object
as well (chained method call).
Each :class:`Pointer` field uses for the serialization of its
referenced :attr:`~Pointer.data` object its own
:attr:`~Pointer.bytestream`.
"""
for item in self.values():
index = item.serialize(buffer, index, **options)
return index
@nested_option()
def index_fields(self,
index: Index = Index(),
**options: Any) -> Index:
""" Indexes all fields in the `Structure` starting with the given
*index* and returns the :class:`Index` after the last :class:`Field`
in the `Structure`.
:param Index index: start :class:`Index` for the first :class:`Field`
in the `Structure`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of the
`Structure` indexes their referenced :attr:`~Pointer.data` object
fields as well (chained method call).
"""
for name, item in self.items():
# Container
if is_container(item):
index = item.index_fields(index, **options)
# Pointer
elif is_pointer(item) and get_nested(options):
index = item.index_field(index)
item.index_data()
# Field
elif is_field(item):
index = item.index_field(index)
else:
raise MemberTypeError(self, item, name, index)
return index
def container_size(self) -> tuple[int, int]:
""" Returns the accumulated bit size of all fields in the `Structure` as
a tuple in the form of ``(number of bytes, remaining number of bits)``.
"""
length = 0
for name, item in self.items():
# Container
if is_container(item):
byte_length, bit_length = item.container_size()
length += bit_length + byte_length * 8
# Field
elif is_field(item):
length += item.bit_size
else:
raise MemberTypeError(self, item, name)
return divmod(length, 8)
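    # A sizing sketch (member names are illustrative): an 8-bit Decimal and a
    # Byte field accumulate to 16 bits, i.e. 2 bytes and 0 remaining bits.
    #
    #   >>> structure = Structure()
    #   >>> structure.decimal = Decimal(8)
    #   >>> structure.byte = Byte()
    #   >>> structure.container_size()
    #   (2, 0)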
def first_field(self) -> Field | None:
""" Returns the first :class:`Field` in the `Structure`, or :data:`None`
for an empty `Structure`.
"""
for name, item in self.items():
# Container
if is_container(item):
field = item.first_field()
# Container is not empty
if field is not None:
return field
# Field
elif is_field(item):
return item
else:
raise MemberTypeError(self, item, name)
return None
def initialize_fields(self,
content: dict[str, Any]) -> None:
""" Initializes the :class:`Field` members in the `Structure` with
the *values* in the *content* dictionary.
:param dict[str, Any] content: a dictionary contains the :class:`Field`
values for each member in the `Structure`.
"""
for name, value in content.items():
item = self[name]
# Container or Pointer
if is_mixin(item):
item.initialize_fields(value)
# Fields
elif is_field(item):
item.value = value
else:
raise MemberTypeError(self, item, name)
@nested_option()
def view_fields(self,
*attributes: str,
**options: Any) -> dict[str, Any]:
""" Returns an :class:`dict` which contains the ``{'member name':
field attribute}``, or the ``{'member name': dict(field attributes)}``
tuples for each :class:`Field` *nested* in the `Structure`.
The *attributes* of each :class:`Field` for containers *nested* in the
`Structure` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
selected field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields nested
in the `Structure` views their referenced :attr:`~Pointer.data`
object field attributes as well (chained method call).
"""
members = dict()
for name, item in self.items():
# Container
if is_container(item):
members[name] = item.view_fields(*attributes, **options)
# Pointer
elif is_pointer(item) and get_nested(options):
members[name] = item.view_fields(*attributes, **options)
# Field
elif is_field(item):
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
fieldnames = options.get('fieldnames', attributes)
members[name] = dict(zip(fieldnames, field_getter(item)))
else:
members[name] = field_getter(item)
return members
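    # A viewing sketch (member names are illustrative): view_fields() preserves
    # the nesting of the structure, whereas to_list()/to_dict() flatten it.
    #
    #   >>> structure = Structure()
    #   >>> structure.decimal = Decimal(8)
    #   >>> structure.view_fields()
    #   {'decimal': 0}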
@nested_option()
def field_items(self,
path: str = str(),
**options: Any) -> list[tuple[str, Field]]:
""" Returns a **flatten** list of ``('field path', field item)`` tuples
for each :class:`Field` *nested* in the `Structure`.
:param str path: field path of the `Structure`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Structure` list their referenced :attr:`~Pointer.data` object field
items as well (chained method call).
"""
parent = path if path else str()
items = list()
for name, item in self.items():
item_path = f"{parent}.{name}" if parent else name
# Container
if is_container(item):
for field in item.field_items(item_path, **options):
items.append(field)
# Pointer
elif is_pointer(item) and get_nested(options):
for field in item.field_items(item_path, **options):
items.append(field)
# Field
elif is_field(item):
items.append((item_path, item))
else:
raise MemberTypeError(self, item, item_path)
return items
@nested_option(True)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
""" Returns the **metadata** of the `Structure` as a :class:`dict`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object
fields as well (chained method call). Default is :data:`True`.
"""
members = list()
metadata = dict()
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['size'] = len(self)
metadata['type'] = self.item_type.name
metadata['member'] = members
for member_name, item in self.items():
# Container
if is_container(item):
members.append(item.describe(member_name, **options))
# Pointer
elif is_pointer(item) and get_nested(options):
members.append(item.describe(member_name, **options))
# Field
elif is_field(item):
members.append(item.describe(member_name, nested=False))
else:
raise MemberTypeError(self, item, member_name)
return metadata
class Sequence(MutableSequence, Container):
""" The :class:`Sequence` is a mutable sequence containing heterogeneous
*items*, and is extended with the :class:`Container` interface.
Allowed *items* are :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instances.
A `Sequence` is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is in
the `Sequence`.
- *sized*: ``len(self)`` returns the number of items in the `Sequence`.
    - *indexable*: ``self[index]`` returns the *item* at the *index*
      of the `Sequence`.
    - *iterable*: ``iter(self)`` iterates over the *items* in the `Sequence`.
A `Sequence` supports the usual methods for sequences:
- **Append** an item to the `Sequence` via :meth:`append()`.
- **Insert** an item before the *index* into the `Sequence`
via :meth:`insert()`.
- **Extend** the `Sequence` with items via :meth:`extend()`.
- **Clear** the `Sequence` via :meth:`clear()`.
- **Pop** an item with the *index* from the `Sequence` via :meth:`pop()`.
- **Remove** the first occurrence of an *item* from the `Sequence`
via :meth:`remove()`.
- **Reverse** all items in the `Sequence` via :meth:`reverse()`.
A `Sequence` has additional methods to **read**, **deserialize**,
**serialize** and **view** binary data:
- **Read** from a :class:`Provider` the necessary bytes for each
:attr:`~Pointer.data` object referenced by the :class:`Pointer` fields
in the `Sequence` via :meth:`read_from()`.
- **Deserialize** the :attr:`~Field.value` for each :class:`Field`
in the `Sequence` from a byte stream via :meth:`deserialize()`.
- **Serialize** the :attr:`~Field.value` for each :class:`Field`
in the `Sequence` to a byte stream via :meth:`serialize()`.
- Indexes all fields in the `Sequence` via :meth:`index_fields()`.
- Get the **first** :class:`Field`
in the `Sequence` via :meth:`first_field()`.
- Get the accumulated **size** of all fields in the `Sequence`
via :meth:`container_size()`.
- View the selected *attributes* for each :class:`Field`
in the `Sequence` via :meth:`view_fields()`.
- List the **path** to the field and the field **item** itself for each
      :class:`Field` in the `Sequence` as a flattened list via :meth:`field_items()`.
- Get the **metadata** of the `Sequence` via :meth:`describe()`.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the `Sequence`.
:type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field|None
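    Example (a minimal sketch; the field types and values are illustrative):
    >>> sequence = Sequence([Decimal(8), Byte()])
    >>> sequence.to_list()
    [('Sequence[0]', 0), ('Sequence[1]', '0x0')]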
"""
# Item type.
item_type: ItemClass = ItemClass.Sequence
def __init__(self,
iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field | None) = None) -> None:
# Data object
self._data = []
if iterable is None:
pass
elif is_any(iterable):
self.append(iterable)
else:
for member, item in enumerate(iterable):
if not is_any(item):
raise MemberTypeError(self, item, member=member)
self.append(item)
def __bytes__(self) -> bytes:
buffer = bytearray()
self.serialize(buffer)
return bytes(buffer)
def __str__(self) -> str:
return str(self._data)
def __repr__(self) -> str:
return repr(self._data)
def __contains__(self, key: Structure | Sequence | Field) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self,
index: int | slice) -> Structure | Sequence | Field | list:
return self._data[index]
def __setitem__(self,
index: int,
item: Structure | Sequence | Field) -> None:
if not is_any(item):
raise MemberTypeError(self, item, member=index)
self._data[index] = item
def __delitem__(self, index: int) -> None:
del self._data[index]
def __iter__(self) -> Iterator[Structure | Sequence | Field]:
return iter(self._data)
def append(self,
item: Structure | Sequence | Field) -> None:
""" Appends the *item* to the end of the `Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
if not is_any(item):
raise MemberTypeError(self, item, member=len(self))
self._data.append(item)
def insert(self,
index: int,
item: Structure | Sequence | Field) -> None:
""" Inserts the *item* before the *index* into the `Sequence`.
:param int index: `Sequence` index.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
if not is_any(item):
raise MemberTypeError(self, item, member=len(self))
self._data.insert(index, item)
def pop(self, index: int = -1) -> Structure | Sequence | Field:
""" Removes and returns the item at the *index* from the `Sequence`.
:param int index: `Sequence` index.
"""
return self._data.pop(index)
def clear(self) -> None:
""" Remove all items from the `Sequence`."""
self._data.clear()
def remove(self, item: Structure | Sequence | Field) -> None:
""" Removes the first occurrence of an *item* from the `Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.remove(item)
def reverse(self) -> None:
""" In place reversing of the `Sequence` items."""
self._data.reverse()
def extend(self,
iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field)) -> None:
""" Extends the `Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the `Sequence`.
:type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field
"""
# Sequence
if is_sequence(iterable):
self._data.extend(iterable)
# Structure
elif is_structure(iterable):
members = [item for item in iterable.values()]
self._data.extend(members)
# Field
elif is_field(iterable):
self._data.extend([iterable])
# Iterable
elif isinstance(iterable, (set, tuple, list)):
self._data.extend(Sequence(iterable))
else:
raise MemberTypeError(self, iterable, member=len(self))
@nested_option()
def read_from(self,
provider: Provider,
**options: Any) -> None:
""" All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
        :attr:`~Pointer.data` object. Null pointers are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` reads their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
"""
for item in iter(self):
# Container or Pointer
if is_mixin(item):
item.read_from(provider, **options)
@byte_order_option()
@nested_option()
def deserialize(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> Index:
""" De-serializes the `Sequence` from the byte *buffer* starting at
the beginning of the *buffer* or with the given *index* by mapping the
bytes to the :attr:`~Field.value` for each :class:`Field` in the
`Sequence` in accordance with the decoding *byte order* for the
de-serialization and the decoding :attr:`byte_order` of each
:class:`Field` in the `Sequence`.
A specific decoding :attr:`~Field.byte_order` of a :class:`Field`
overrules the decoding *byte order* for the de-serialization.
Returns the :class:`Index` of the *buffer* after the last de-serialized
:class:`Field` in the `Sequence`.
Optional the de-serialization of the referenced :attr:`~Pointer.data`
objects of all :class:`Pointer` fields in the `Sequence` can be
enabled.
:param bytes buffer: byte stream to de-serialize from.
:param Index index: current read :class:`Index` within the *buffer* to
de-serialize.
:keyword byte_order: decoding byte order for the de-serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of a
`Sequence` de-serialize their referenced :attr:`~Pointer.data`
object as well (chained method call).
Each :class:`Pointer` field uses for the de-serialization of its
referenced :attr:`~Pointer.data` object its own
:attr:`~Pointer.bytestream`.
"""
for item in iter(self):
index = item.deserialize(buffer, index, **options)
return index
@byte_order_option()
@nested_option()
def serialize(self,
buffer: bytearray = bytearray(),
index: Index = Index(),
**options: Any) -> Index:
""" Serializes the `Sequence` to the byte *buffer* starting at the
beginning of the *buffer* or with the given *index* by mapping the
:attr:`~Field.value` for each :class:`Field` in the `Sequence` to the
byte *buffer* in accordance with the encoding *byte order* for the
serialization and the encoding :attr:`byte_order` of each :class:`Field`
in the `Sequence`.
A specific encoding :attr:`~Field.byte_order` of a :class:`Field`
overrules the encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the last serialized
:class:`Field` in the `Sequence`.
Optionally, the serialization of the referenced :attr:`~Pointer.data`
objects of all :class:`Pointer` fields in the `Sequence` can be
enabled.
:param bytearray buffer: byte stream to serialize to.
:param Index index: current write :class:`Index` within the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` all :class:`Pointer` fields of a
`Sequence` serialize their referenced :attr:`~Pointer.data` object
as well (chained method call).
Each :class:`Pointer` field uses for the serialization of its
referenced :attr:`~Pointer.data` object its own
:attr:`~Pointer.bytestream`.
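Example (an illustrative sketch with two one-byte fields defined below):
>>> sequence = Sequence([Byte(), Char()])
>>> sequence[0].value = 0x41
>>> sequence[1].value = 'B'
>>> buffer = bytearray()
>>> sequence.serialize(buffer)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> buffer.hex()
'4142'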
"""
for item in iter(self):
index = item.serialize(buffer, index, **options)
return index
@nested_option()
def index_fields(self,
index: Index = Index(),
**options: Any) -> Index:
""" Indexes all fields in the `Sequence` starting with the given
*index* and returns the :class:`Index` after the last :class:`Field`
in the `Sequence`.
:param Index index: start :class:`Index` for the first :class:`Field`
in the `Sequence`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Sequence` indexes their referenced :attr:`~Pointer.data` object
fields as well (chained method call).
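Example (an illustrative sketch using the :class:`Byte` and :class:`Char`
fields defined below):
>>> sequence = Sequence([Byte(), Char()])
>>> sequence.index_fields()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> sequence[1].index
Index(byte=1, bit=0, address=1, base_address=0, update=False)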
"""
for name, item in enumerate(self):
# Container
if is_container(item):
index = item.index_fields(index, **options)
# Pointer
elif is_pointer(item) and get_nested(options):
index = item.index_field(index)
item.index_data()
# Field
elif is_field(item):
index = item.index_field(index)
else:
raise MemberTypeError(self, item, name, index)
return index
def container_size(self) -> tuple[int, int]:
""" Returns the accumulated bit size of all fields in the `Sequence` as
a tuple in the form of ``(number of bytes, remaining number of bits)``.
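Example (an illustrative sketch using fields defined below):
>>> Sequence([Byte(), Char()]).container_size()
(2, 0)
>>> Sequence([Bit(0), Byte()]).container_size()
(1, 1)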
"""
length = 0
for name, item in enumerate(self):
# Container
if is_container(item):
byte_length, bit_length = item.container_size()
length += bit_length + byte_length * 8
# Field
elif is_field(item):
length += item.bit_size
else:
raise MemberTypeError(self, item, name)
return divmod(length, 8)
def first_field(self) -> Field | None:
""" Returns the first :class:`Field` in the `Sequence`, or :data:`None`
for an empty `Sequence`.
"""
for name, item in enumerate(self):
# Container
if is_container(item):
field = item.first_field()
# Container is not empty
if field is not None:
return field
# Field
elif is_field(item):
return item
else:
raise MemberTypeError(self, item, name)
return None
def initialize_fields(self, content: list[Any]) -> None:
""" Initializes the :class:`Field` items in the `Sequence` with
the *values* in the *content* list.
:param list[Any] content: a list containing the :class:`Field` values for
each item in the `Sequence`.
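Example (an illustrative sketch using the :class:`Byte` and :class:`Char`
fields defined below):
>>> sequence = Sequence([Byte(), Char()])
>>> sequence.initialize_fields([0x0f, 'K'])
>>> sequence.view_fields()
['0xf', 'K']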
"""
for name, pair in enumerate(zip(self, content)):
item, value = pair
# Container or Pointer
if is_mixin(item):
item.initialize_fields(value)
# Fields
elif is_field(item):
item.value = value
else:
raise MemberTypeError(self, item, name)
@nested_option()
def view_fields(self,
*attributes: str,
**options: Any) -> list[Any]:
""" Returns a list with the selected field *attribute* or a list with the
dictionaries of the selected field *attributes* for each :class:`Field`
*nested* in the `Sequence`.
The *attributes* of each :class:`Field` for containers *nested* in the
`Sequence` are viewed as well (chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
selected field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields nested
in the `Sequence` views their referenced :attr:`~Pointer.data` object
field attributes as well (chained method call).
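Example (an illustrative sketch using the :class:`Byte` and :class:`Char`
fields defined below):
>>> sequence = Sequence([Byte(), Char()])
>>> sequence.view_fields('name')
['Byte', 'Char']
>>> sequence.view_fields('name', 'bit_size')
[{'name': 'Byte', 'bit_size': 8}, {'name': 'Char', 'bit_size': 8}]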
"""
items = list()
for index, item in enumerate(self):
if is_container(item):
# Container
items.append(item.view_fields(*attributes, **options))
elif is_pointer(item) and get_nested(options):
# Pointer
items.append(item.view_fields(*attributes, **options))
elif is_field(item):
# Field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
fieldnames = options.get('fieldnames', attributes)
items.append(dict(zip(fieldnames, field_getter(item))))
else:
items.append(field_getter(item))
else:
raise MemberTypeError(self, item, index)
return items
@nested_option()
def field_items(self,
path: str = str(),
**options: Any) -> list[tuple[str, Field]]:
""" Returns a **flatten** list of ``('field path', field item)`` tuples
for each :class:`Field` *nested* in the `Sequence`.
:param str path: field path of the `Sequence`.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in
the `Sequence` list their referenced :attr:`~Pointer.data` object
field items as well (chained method call).
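Example (an illustrative sketch using the :class:`Byte` and :class:`Char`
fields defined below):
>>> sequence = Sequence([Byte(), Char()])
>>> [path for path, item in sequence.field_items()]
['[0]', '[1]']
>>> [path for path, item in sequence.field_items(path='list')]
['list[0]', 'list[1]']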
"""
items = list()
for index, item in enumerate(self):
if path:
item_path = f"{path}[{str(index)}]"
else:
item_path = f"[{str(index)}]"
# Container
if is_container(item):
for field_item in item.field_items(item_path, **options):
items.append(field_item)
# Pointer
elif is_pointer(item) and get_nested(options):
for field_item in item.field_items(item_path, **options):
items.append(field_item)
# Field
elif is_field(item):
items.append((item_path, item))
else:
raise MemberTypeError(self, item, item_path)
return items
@nested_option(True)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
""" Returns the **metadata** of the `Sequence` as a :class:`dict`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Sequence.item_type.name,
'member': [
item.describe('name[idx]') for idx, item in enumerate(self)
]
}
:param str name: optional name for the `Sequence`.
Fallback is the class name.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
`Sequence` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is :data:`True`.
"""
members = list()
metadata = dict()
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['size'] = len(self)
metadata['type'] = self.item_type.name
metadata['member'] = members
for member_name, item in enumerate(self):
# Container
if is_container(item):
members.append(item.describe(
f"{metadata['name']}[{member_name}]", **options))
# Pointer
elif is_pointer(item) and get_nested(options):
members.append(item.describe(
f"{metadata['name']}[{member_name}]", **options))
# Field
elif is_field(item):
members.append(item.describe(
f"{metadata['name']}[{member_name}]", nested=False))
else:
raise MemberTypeError(self, item, member_name)
return metadata
class Array(Sequence):
""" The :class:`Array` is a :class:`Sequence` which contains *elements* of
one type. The *template* for the *array element* can be any :class:`Field`
instance or a *callable* (factory) which returns a :class:`Structure`,
:class:`Sequence`, :class:`Array` or any :class:`Field` instance.
A *callable template* (factory) is necessary to ensure that the internal
constructor for the array element produces complete copies for each array
element including the *nested* objects in the *template* for the array
element.
An `Array` of :class:`Pointer` fields should use a *callable* instead of
assigning a :class:`Pointer` field instance directly as the array element
*template* to ensure that the referenced :attr:`~Pointer.data` object of a
:class:`Pointer` field is also completely copied for each array element.
An `Array` adapts and extends a :class:`Sequence` with the following features:
- **Append** a new *array element* to the `Array` via :meth:`append()`.
- **Insert** a new *array element* before the *index* into the `Array`
via :meth:`insert()`.
- **Re-size** the `Array` via :meth:`resize()`.
An `Array` replaces the ``'type'`` key of the :attr:`~Sequence.metadata`
of a :class:`Sequence` with its own `item` type.
:param template: template for the *array element*.
The *template* can be any :class:`Field` instance or any *callable*
that returns a :class:`Structure`, :class:`Sequence`, :class:`Array`
or any :class:`Field` instance.
:param int capacity: capacity of the `Array` in number of *array elements*.
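Example (an illustrative sketch using the :class:`Byte` field defined below
as a *callable* template):
>>> array = Array(Byte, capacity=3)
>>> len(array)
3
>>> array.view_fields()
['0x0', '0x0', '0x0']
>>> array.container_size()
(3, 0)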
"""
# Item type.
item_type: ItemClass = ItemClass.Array
def __init__(self,
template: Callable | Structure | Sequence | Field,
capacity: int = 0) -> None:
super().__init__()
# Template for the array element.
if is_field(template):
# Field: Array element instance
self._template = template
elif callable(template):
# Callable: Array element factory
element = template()
if is_any(element):
self._template = template
else:
raise FactoryTypeError(self, template, element)
else:
raise MemberTypeError(self, template)
# Create array
self.resize(capacity)
def __create__(self):
if is_field(self._template):
# Field: Array element instance
return copy.copy(self._template)
else:
# Callable: Array element factory
return self._template()
def append(self) -> None:
""" Appends a new *array element* to the `Array`."""
super().append(self.__create__())
def insert(self, index: int) -> None:
""" Inserts a new *array element* before the *index* of the `Array`.
:param int index: `Array` index.
"""
super().insert(index, self.__create__())
def resize(self, capacity: int) -> None:
""" Re-sizes the `Array` by appending new *array elements* or
removing *array elements* from the end.
:param int capacity: new capacity of the `Array` in number of
*array elements*.
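Example (an illustrative sketch using the :class:`Byte` field defined below):
>>> array = Array(Byte, capacity=2)
>>> array.resize(4)
>>> len(array)
4
>>> array.resize(0)
>>> len(array)
0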
"""
count = max(int(capacity), 0) - len(self)
if count == 0:
pass
elif -count == len(self):
self.clear()
elif count > 0:
for i in range(count):
self.append()
else:
for i in range(abs(count)):
self.pop()
def initialize_fields(self,
content: list[Any]) -> None:
""" Initializes the :class:`Field` elements in the `Array` with the
*values* in the *content* list.
If the *content* list is shorter than the `Array` then the *content*
list is used as a rotating fill pattern for the :class:`Field` elements
in the `Array`.
:param list[Any] content: a list containing the :class:`Field` values for
each element in the `Array`, or one :class:`Field` value for all
elements in the `Array`.
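Example (an illustrative sketch of the rotating fill pattern, using the
:class:`Byte` field defined below):
>>> array = Array(Byte, capacity=4)
>>> array.initialize_fields([1, 2])
>>> array.view_fields()
['0x1', '0x2', '0x1', '0x2']
>>> array.initialize_fields(3)
>>> array.view_fields()
['0x3', '0x3', '0x3', '0x3']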
"""
if isinstance(content, (list, tuple)):
capacity = len(content)
for i in range(0, len(self), capacity):
for name, pair in enumerate(zip(self[i:i + capacity],
content),
start=i):
item, value = pair
if is_mixin(item):
# Container or Pointer
item.initialize_fields(value)
elif is_field(item):
# Fields
item.value = value
else:
raise MemberTypeError(self, item, name)
else:
for name, item in enumerate(self):
if is_mixin(item):
# Container or Pointer
item.initialize_fields(content)
elif is_field(item):
# Fields
item.value = content
else:
raise MemberTypeError(self, item, name)
class Field:
""" The :class:`Field` class is the *abstract class* for all field classes.
A `Field` has a specific **name**, **bit size**, **byte order**, and can be
**aligned to** other fields.
A `Field` has methods to **unpack**, **pack**, **deserialize** and
**serialize** its field **value** from and to a byte stream, and stores its
location within the byte stream and the providing data source in its field
**index**.
:param int bit_size: is the *size* of a `Field` in bits.
:param int align_to: aligns the `Field` to the number of bytes,
can be between ``1`` and ``8``.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Field`.
Default is :class:`~Byteorder.auto`.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
"""
# Item type.
item_type: ItemClass = ItemClass.Field
def __init__(self,
bit_size: int = 0,
align_to: int = 0,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__()
# Field index
self._index: Index = Index()
# Field alignment
self._align_to_byte_size: int = align_to
self._align_to_bit_offset: int = 0
# Field byte order
self._byte_order: Byteorder = Byteorder.auto
self.byte_order = byte_order
# Field bit size
self._bit_size = bit_size
# Field value
self._value = None
def __str__(self) -> str:
return (f"{self.name}"
f"({self.index!s}, "
f"{self.alignment!s}, "
f"{self.bit_size!s}, "
f"{self.value!s})")
def __repr__(self) -> str:
return (f"{self.__class__.__name__}"
f"(index={self.index!r}, "
f"alignment={self.alignment!r}, "
f"bit_size={self.bit_size!r}, "
f"value={self.value!r})")
@property
def alignment(self) -> Alignment:
""" Returns the :class:`Alignment` of the `Field` (read-only)."""
return Alignment(self._align_to_byte_size, self._align_to_bit_offset)
@property
def bit_size(self) -> int:
""" Returns the size of the `Field` in bits (read-only)."""
return self._bit_size
@property
def byte_order(self) -> Byteorder:
""" :class:`Byteorder` used to decode and encode the :attr:`value`
of the `Field`.
"""
return self._byte_order
@byte_order.setter
def byte_order(self,
value: (Literal['auto', 'big', 'little'] |
Byteorder | str)) -> None:
byte_order = value
if isinstance(byte_order, str):
byte_order = Byteorder.get_member(value)
if not byte_order:
raise ByteOrderValueError(self, self.index, value)
if not isinstance(byte_order, Byteorder):
raise ByteOrderTypeError(self, value)
self._byte_order = byte_order
@property
def index(self) -> Index:
""" :class:`Index` of the `Field`."""
return self._index
@index.setter
def index(self, value: Index) -> None:
# Field index
byte, bit, address, base, update = value
# Invalid field index
if byte < 0 or not (0 <= bit <= 64):
raise FieldIndexError(self, value)
# Field group size
group_size, offset = divmod(self.bit_size + bit, 8)
if offset:
group_size += 1
# Badly aligned field group?
if self.alignment.byte_size < group_size:
raise FieldGroupSizeError(self, value,
Alignment(group_size,
self.alignment.bit_offset))
# No Bit field?
if not self.is_bit():
# Set field alignment bit offset
self._align_to_bit_offset = bit
# Badly aligned field group?
elif self.alignment.bit_offset != bit:
raise FieldGroupOffsetError(self, value,
Alignment(self.alignment.byte_size,
bit))
# Invalid field address
if address < 0:
raise FieldAddressError(self, value, address)
# Set field index
self._index = Index(int(byte), int(bit),
int(address), int(base),
update)
@property
def name(self) -> str:
""" Returns the type name of the `Field` (read-only)."""
return self.item_type.name.capitalize() + str(self.bit_size)
@property
def value(self) -> Any:
""" Field value."""
return self._value
@value.setter
def value(self, x: Any) -> None:
self._value = x
@staticmethod
def is_bit() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_bool() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_decimal() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_float() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_pointer() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_stream() -> bool:
""" Returns ``False``."""
return False
@staticmethod
def is_string() -> bool:
""" Returns ``False``."""
return False
@abc.abstractmethod
@byte_order_option()
def unpack(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> Any:
""" Unpacks the field :attr:`value` from the *buffer* at the given
*index* in accordance with the decoding *byte order* for the
de-serialization and the :attr:`byte_order` and :attr:`alignment`
of the `Field`.
The specific decoding :attr:`byte_order` of the `Field` overrules the
decoding *byte order* for the de-serialization.
Returns the deserialized field :attr:`value`.
:param bytes buffer: byte stream to unpack from.
:param Index index: current read :class:`Index` within the *buffer*.
:keyword byte_order: decoding byte order for the de-serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
.. note:: This abstract method must be implemented by a derived class.
"""
# Returns the deserialized field value.
return None
@abc.abstractmethod
@byte_order_option()
def pack(self,
buffer: bytearray = bytearray(),
**options: Any) -> bytes:
""" Packs the field :attr:`value` to the *buffer* at the given *index*
in accordance with the encoding *byte order* for the serialization and
the :attr:`byte_order` and :attr:`alignment` of the `Field`.
The specific encoding :attr:`byte_order` of the `Field` overrules the
encoding *byte order* for the serialization.
Returns the :class:`bytes` for the serialized field :attr:`value`.
:param bytearray buffer: byte stream to pack to.
:keyword byte_order: encoding byte order for the
serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
.. note:: This abstract method must be implemented by a derived class.
"""
# Returns the byte serialized field value.
return bytes()
@byte_order_option()
@nested_option()
def deserialize(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> Index:
""" De-serializes the `Field` from the byte *buffer* starting at the
beginning of the *buffer* or with the given *index* by unpacking the
bytes to the :attr:`value` of the `Field` in accordance with the
decoding *byte order* for the de-serialization and the decoding
:attr:`byte_order` of the `Field`.
The specific decoding :attr:`byte_order` of the `Field` overrules the
decoding *byte order* for the de-serialization.
Returns the :class:`Index` of the *buffer* after the `Field`.
Optionally, the de-serialization of the referenced :attr:`~Pointer.data`
object of a :class:`Pointer` field can be enabled.
:param bytes buffer: byte stream to de-serialize from.
:param Index index: current read :class:`Index` within the *buffer* to
de-serialize.
:keyword byte_order: decoding byte order for the de-serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` a :class:`Pointer` field
de-serialize its referenced :attr:`~Pointer.data` object as well
(chained method call).
Each :class:`Pointer` field uses for the de-serialization of its
referenced :attr:`~Pointer.data` object its own
:attr:`~Pointer.bytestream`.
"""
self.index = index
self._value = self.unpack(buffer, index, **options)
return self.index_field(index)
@byte_order_option()
@nested_option()
def serialize(self,
buffer: bytearray = bytearray(),
index: Index = Index(),
**options: Any) -> Index:
""" Serializes the `Field` to the byte *buffer* starting at the beginning
of the *buffer* or with the given *index* by packing the :attr:`value`
of the `Field` to the byte *buffer* in accordance with the encoding
*byte order* for the serialization and the encoding :attr:`byte_order`
of the `Field`.
The specific encoding :attr:`byte_order` of the `Field` overrules the
encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the `Field`.
Optionally, the serialization of the referenced :attr:`~Pointer.data` object
of a :class:`Pointer` field can be enabled.
:param bytearray buffer: byte stream to serialize to.
:param Index index: current write :class:`Index` of the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` a :class:`Pointer` field serializes
its referenced :attr:`~Pointer.data` object as well
(chained method call).
Each :class:`Pointer` field uses for the encoding of its referenced
:attr:`~Pointer.data` object its own :attr:`~Pointer.bytestream`.
"""
self.index = index
buffer += self.pack(buffer, **options)
return self.index_field(index)
def index_field(self,
index: Index = Index()) -> Index:
""" Indexes the `Field` with the given *index* und returns the
:class:`Index` after the `Field`.
:param Index index: start :class:`Index` for the `Field`.
"""
# Set field index
# Note: Updates the field alignment offset as well
self.index = index
# Bit offset for the next field
byte, bit, address, base, update = index
bit += self.bit_size
# Field group size
group_size, offset = divmod(bit, 8)
# End of field group?
if self.alignment.byte_size == group_size:
# Badly aligned field group?
if offset != 0:
raise FieldGroupSizeError(self, index,
Alignment(group_size + 1,
self.alignment.bit_offset))
else:
# Move byte index for the next field group
byte += self.alignment.byte_size
# Reset bit offset for the next field group
bit = 0
# Move address for the next field group
address += self.alignment.byte_size
# Index for the next field
return Index(byte, bit, address, base, update)
@nested_option(True)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
""" Returns the **metadata** of a `Field` as a :class:`dict`.
.. code-block:: python
metadata = {
'address': self.index.address,
'alignment': [self.alignment.byte_size, self.alignment.bit_offset],
'class': self.name,
'index': [self.index.byte, self.index.bit],
'name': name if name else self.name,
'order': self.byte_order.value,
'size': self.bit_size,
'type': Field.item_type.name,
'value': self.value
}
:param str name: optional name for the `Field`.
Fallback is the class name.
:keyword bool nested: if :data:`True` a :class:`Pointer` field lists its
referenced :attr:`~Pointer.data` object fields as well
(chained method call). Default is :data:`True`.
"""
metadata = {
'address': self.index.address,
'alignment': list(self.alignment),
'class': self.name,
'order': self.byte_order.value,
'index': [self.index.byte, self.index.bit],
'name': name if name else self.name,
'size': self.bit_size,
'type': Field.item_type.name,
'value': self.value
}
return dict(sorted(metadata.items()))
class Stream(Field):
""" The :class:`Stream` field is a :class:`Field` with a variable *size*,
and returns its field :attr:`value` as a hexadecimal string.
Internally a `Stream` field uses a :class:`bytes` class to store the
data of its field :attr:`value`.
A `Stream` field is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the `Stream` field.
- *sized*: ``len(self)`` returns the length of the `Stream` field.
- *indexable*: ``self[index]`` returns the *byte* at the *index*
of the `Stream` field.
- *iterable*: ``iter(self)`` iterates over the bytes of the `Stream`
field.
:param int capacity: is the *capacity* of the `Stream` field in bytes.
Example:
>>> stream = Stream()
>>> stream.is_stream()
True
>>> stream.name
'Stream'
>>> stream.alignment
Alignment(byte_size=0, bit_offset=0)
>>> stream.byte_order
Byteorder.auto = 'auto'
>>> stream.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> stream.index_field()
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> stream.bit_size
0
>>> len(stream)
0
>>> bool(stream)
False
>>> stream.value
''
>>> bytes(stream)
b''
>>> stream.hex()
''
>>> stream.resize(10)
>>> stream.name
'Stream10'
>>> stream.alignment
Alignment(byte_size=10, bit_offset=0)
>>> stream.bit_size
80
>>> stream.index_field()
Index(byte=10, bit=0, address=10, base_address=0, update=False)
>>> stream.value
'00000000000000000000'
>>> stream.value = '0102030405'
>>> stream.value
'01020304050000000000'
>>> stream.resize(15)
>>> stream.value
'010203040500000000000000000000'
>>> stream.resize(10)
>>> stream.value = '0102030405060708090a0b0c'
>>> stream.value
'0102030405060708090a'
>>> stream.hex()
'0102030405060708090a'
>>> len(stream)
10
>>> [byte for byte in stream] # converts to int
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> [hex(byte) for byte in stream]
['0x1', '0x2', '0x3', '0x4', '0x5', '0x6', '0x7', '0x8', '0x9', '0xa']
>>> stream[5] # converts to int
6
>>> 7 in stream
True
>>> 0x0 in stream
False
>>> stream[5:].hex() # converts to bytes
'060708090a'
>>> stream.describe()
{'address': 0,
'alignment': [10, 0],
'class': 'Stream10',
'index': [0, 0],
'name': 'Stream10',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': '0102030405060708090a'}
"""
# Item type.
item_type: ItemClass = ItemClass.Stream
def __init__(self, capacity: int = 0) -> None:
super().__init__()
# Field value
self._value: bytes = bytes()
# Stream size
self.resize(capacity)
def __bytes__(self) -> bytes:
return bytes(self._value)
def __contains__(self, key: int | bytes) -> bool:
return key in self._value
def __len__(self) -> int:
return len(self._value)
def __getitem__(self, key: int | slice) -> int | bytes:
return self._value[key]
def __iter__(self) -> Iterator[int]:
return iter(self._value)
@property
def name(self) -> str:
""" Returns the type name of the `Stream` field (read-only)."""
capacity = len(self)
if capacity > 0:
return self.item_type.name.capitalize() + str(capacity)
else:
return self.item_type.name.capitalize()
@property
def value(self) -> str:
""" Field value as a lowercase hexadecimal encoded string."""
return self._value.hex()
@value.setter
def value(self, stream: str | bytes | bytearray) -> None:
self._value = self.to_stream(stream, encoding='hex')
def hex(self) -> str:
""" Returns a string containing two hexadecimal digits for each byte
in the :attr:`value` of the `Stream` field.
"""
return self._value.hex()
@staticmethod
def is_stream() -> bool:
""" Returns :data:`True`."""
return True
def to_stream(self,
value: str | bytes | bytearray,
encoding: Literal['ascii', 'hex'] = 'hex') -> bytes:
if isinstance(value, str):
if encoding == 'hex':
bytestream = bytes.fromhex(value)
elif encoding == 'ascii':
bytestream = value.encode('ascii')
else:
raise FieldValueEncodingError(self, self.index, encoding)
elif isinstance(value, (bytearray, bytes)):
bytestream = bytes(value)
else:
raise FieldTypeError(self, self.index, value)
bytestream = bytestream[:len(self)]
bytestream += b'\x00' * max(len(self) - len(bytestream), 0)
return bytestream
@byte_order_option()
def unpack(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> bytes:
# Badly placed field
if index.bit:
raise FieldIndexError(self, index)
# Content of the buffer mapped by the field
offset = index.byte
size = offset + len(self)
bytestream = buffer[offset:size]
bytestream += b'\x00' * max(len(self) - len(bytestream), 0)
return bytestream
@byte_order_option()
def pack(self,
buffer: bytearray = bytearray(),
**options: Any) -> bytes:
# Badly placed field
if self.index.bit:
raise FieldIndexError(self, self.index)
return self._value
def resize(self, capacity: int) -> None:
""" Re-sizes the `Stream` field by appending zero bytes or
removing bytes from the end.
:param int capacity: `Stream` capacity in number of bytes.
"""
count = max(int(capacity), 0) - len(self)
if count == 0:
pass
elif -count == len(self):
self._value = bytes()
elif count > 0:
self._value += b'\x00' * count
else:
self._value = self._value[:count]
capacity = len(self)
self._bit_size = capacity * 8
self._align_to_byte_size = capacity
class String(Stream):
""" The :class:`String` field is a :class:`Stream` field with a variable
*size*, and returns its field :attr:`value` as a zero-terminated ASCII
string.
A `String` field is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the `String` field.
- *sized*: ``len(self)`` returns the length of the `String` field.
- *indexable*: ``self[index]`` returns the *byte* at the *index*
of the `String` field.
- *iterable*: ``iter(self)`` iterates over the bytes of the `String`
field.
:param int capacity: is the *capacity* of the `String` field in bytes.
Example:
>>> string = String()
>>> string.is_stream()
True
>>> string.is_string()
True
>>> string.is_terminated()
False
>>> string.name
'String'
>>> string.alignment
Alignment(byte_size=0, bit_offset=0)
>>> string.byte_order
Byteorder.auto = 'auto'
>>> string.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> string.index_field()
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> string.bit_size
0
>>> len(string)
0
>>> bool(string)
False
>>> string.value
''
>>> bytes(string)
b''
>>> string.hex()
''
>>> string.resize(10)
>>> string.name
'String10'
>>> string.alignment
Alignment(byte_size=10, bit_offset=0)
>>> string.bit_size
80
>>> string.index_field()
Index(byte=10, bit=0, address=10, base_address=0, update=False)
>>> string.value
''
>>> string.value = 'KonFoo'
>>> string.value
'KonFoo'
>>> string.resize(3)
>>> string.value
'Kon'
>>> string.resize(10)
>>> string.value
'Kon'
>>> string.value = 'KonFoo is Fun'
>>> string.value
'KonFoo is '
>>> string.hex()
'4b6f6e466f6f20697320'
>>> len(string)
10
>>> [byte for byte in string] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [chr(byte) for byte in string] # converts to int
['K', 'o', 'n', 'F', 'o', 'o', ' ', 'i', 's', ' ']
>>> chr(string[5]) # converts to int -> chr
'o'
>>> ord(' ') in string
True
>>> 0x0 in string
False
>>> string[:6] # converts to bytes
b'KonFoo'
>>> string[3:6] # converts to bytes
b'Foo'
>>> string.describe()
{'address': 0,
'alignment': [10, 0],
'class': 'String10',
'index': [0, 0],
'name': 'String10',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': 'KonFoo is '}
"""
# Item type.
item_type: ItemClass = ItemClass.String
@property
def value(self) -> str:
""" Field value as an ascii encoded string."""
length = self._value.find(b'\x00')
if length >= 0:
return self._value[:length].decode('ascii')
else:
return self._value.decode('ascii')
@value.setter
def value(self, string: str | bytes | bytearray) -> None:
self._value = self.to_stream(string, encoding='ascii')
@staticmethod
def is_string() -> bool:
""" Returns :data:`True`."""
return True
def is_terminated(self) -> bool:
""" Returns :data:`True` if the `String` field is zero-terminated."""
return self._value.find(b'\x00') >= 0
class Float(Field):
""" The :class:`Float` field is a :class:`Field` with a fix *size* of four
bytes, and returns its field :attr:`value` as a single precision float.
Internally a `Float` field uses a :class:`float` class to store the
data of its field :attr:`~Float.value`.
A `Float` field extends the :attr:`~Field.metadata` of a :class:`Field`
with a ``'max'`` and ``'min'`` key for its maximum and minimum possible
field :attr:`.value`.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Float` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> real = Float()
>>> real.is_float()
True
>>> real.name
'Float32'
>>> real.alignment
Alignment(byte_size=4, bit_offset=0)
>>> real.byte_order
Byteorder.auto = 'auto'
>>> real.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> real.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> real.bit_size
32
>>> real.min()
-3.4028234663852886e+38
>>> real.max()
3.4028234663852886e+38
>>> real.smallest()
1.1754943508222875e-38
>>> real.epsilon()
5.960464477539063e-08
>>> real.value
0.0
>>> bytes(real)
b'\\x00\\x00\\x00\\x00'
>>> int(real)
0
>>> float(real)
0.0
>>> bool(real)
False
>>> real.value = 0x10
>>> real.value
16.0
>>> real.value = -3.4028234663852887e+38
>>> real.value
-3.4028234663852886e+38
>>> real.value = 3.4028234663852887e+38
>>> real.value
3.4028234663852886e+38
>>> real.describe()
{'address': 0,
'alignment': [4, 0],
'class': 'Float32',
'index': [0, 0],
'max': 3.4028234663852886e+38,
'min': -3.4028234663852886e+38,
'name': 'Float32',
'order': 'auto',
'size': 32,
'type': 'Field',
'value': 3.4028234663852886e+38}
"""
# Item type.
item_type: ItemClass = ItemClass.Float
def __init__(self,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size=32, align_to=4, byte_order=byte_order)
# Field value
self._value: float = float()
def __bytes__(self) -> bytes:
if self.byte_order is Byteorder.big:
return struct.pack('>f', self._value)
elif self.byte_order is Byteorder.little:
return struct.pack('<f', self._value)
elif BYTEORDER is Byteorder.big:
return struct.pack('>f', self._value)
else:
return struct.pack('<f', self._value)
def __bool__(self) -> bool:
return bool(self._value)
def __int__(self) -> int:
return int(self._value)
def __float__(self) -> float:
return float(self._value)
@property
def value(self) -> float:
""" Field value as a single precision floating-point number."""
return float(self._value)
@value.setter
def value(self, x: int | float | bool) -> None:
self._value = self.to_float(x)
@staticmethod
def is_float() -> bool:
""" Returns :data:`True`."""
return True
def to_float(self, value: int | float | bool) -> float:
return clamp(float(value), self.min(), self.max())
@staticmethod
def epsilon() -> float:
return 2 ** -24
@staticmethod
def smallest() -> float:
""" Returns the smallest normalized field *value* of the `Float` field."""
return 2 ** -126
@staticmethod
def max() -> float:
""" Returns the maximal possible field *value* of the `Float` field."""
return (2 - 2 ** -23) * 2 ** 127
@staticmethod
def min() -> float:
""" Returns the minimal possible field *value* of the `Float` field."""
return -Float.max()
@byte_order_option()
def unpack(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> float:
# Badly placed field
if index.bit:
raise FieldIndexError(self, index)
# Decoding byte order of the buffer
byte_order = get_byte_order(options)
# Field byte order overrules!
if self.byte_order is not Byteorder.auto:
byte_order = self.byte_order
# Content of the buffer mapped by the field
offset = index.byte
size = offset + self.alignment.byte_size
content = buffer[offset:size]
# Not enough content!
if len(content) != 4:
return float()
# Unpack the content from the buffer
if byte_order is Byteorder.big:
return struct.unpack('>f', content)[0]
else:
return struct.unpack('<f', content)[0]
@byte_order_option()
def pack(self,
buffer: bytearray = bytearray(),
**options: Any) -> bytes:
# Badly placed field
if self.index.bit:
raise FieldIndexError(self, self.index)
# Encoding byte order of the buffer
byte_order = get_byte_order(options)
# Field byte order overrules!
if self.byte_order is not Byteorder.auto:
byte_order = self.byte_order
# Pack the field value to bytes
if byte_order is Byteorder.big:
return struct.pack('>f', self._value)
else:
return struct.pack('<f', self._value)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
metadata = super().describe(name, **options)
metadata['max'] = self.max()
metadata['min'] = self.min()
return dict(sorted(metadata.items()))
class Double(Field):
""" The :class:`Double` field is a :class:`Field` with a fix *size* of eight
bytes, and returns its field :attr:`value` as a double precision float.
Internally a `Double` field uses a :class:`float` class to store the
data of its field :attr:`~Float.value`.
A `Double` field extends the :attr:`~Field.metadata` of a :class:`Field`
with a ``'max'`` and ``'min'`` key for its maximum and minimum possible
field :attr:`.value`.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Double` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> double = Double()
>>> double.is_float()
True
>>> double.name
'Double64'
>>> double.alignment
Alignment(byte_size=8, bit_offset=0)
>>> double.byte_order
Byteorder.auto = 'auto'
>>> double.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> double.index_field()
Index(byte=8, bit=0, address=8, base_address=0, update=False)
>>> double.bit_size
64
>>> double.min()
-1.7976931348623157e+308
>>> double.max()
1.7976931348623157e+308
>>> double.smallest()
2.2250738585072014e-308
>>> double.epsilon()
1.1102230246251565e-16
>>> double.value
0.0
>>> bytes(double)
b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
>>> int(double)
0
>>> float(double)
0.0
>>> bool(double)
False
>>> double.value = 0x10
>>> double.value
16.0
>>> double.value = -1.7976931348623158e+308
>>> double.value
-1.7976931348623157e+308
>>> double.value = 1.7976931348623158e+308
>>> double.value
1.7976931348623157e+308
>>> double.describe()
{'address': 0,
'alignment': [8, 0],
'class': 'Double64',
'index': [0, 0],
'max': 1.7976931348623157e+308,
'min': -1.7976931348623157e+308,
'name': 'Double64',
'order': 'auto',
'size': 64,
'type': 'Field',
'value': 1.7976931348623157e+308}
"""
# Item type.
item_type: ItemClass = ItemClass.Double
def __init__(self,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size=64, align_to=8, byte_order=byte_order)
# Field value
self._value: float = float()
def __bytes__(self) -> bytes:
if self.byte_order is Byteorder.big:
return struct.pack('>d', self._value)
elif self.byte_order is Byteorder.little:
return struct.pack('<d', self._value)
elif BYTEORDER is Byteorder.big:
return struct.pack('>d', self._value)
else:
return struct.pack('<d', self._value)
def __bool__(self) -> bool:
return bool(self._value)
def __int__(self) -> int:
return int(self._value)
def __float__(self) -> float:
return float(self._value)
@property
def value(self) -> float:
""" Field value as a double precision floating-point number."""
return float(self._value)
@value.setter
def value(self, x: int | float | bool) -> None:
self._value = self.to_float(x)
@staticmethod
def is_float() -> bool:
""" Returns :data:`True`."""
return True
def to_float(self, value: int | float | bool) -> float:
return clamp(float(value), self.min(), self.max())
@staticmethod
def epsilon() -> float:
return 2 ** -53
@staticmethod
def smallest() -> float:
""" Returns the smallest normalized field *value* of the `Double` field.
"""
return 2 ** -1022
@staticmethod
def max() -> float:
""" Returns the maximal possible field *value* of the `Double` field."""
return (2 - 2 ** -52) * 2 ** 1023
@staticmethod
def min() -> float:
""" Returns the minimal possible field *value* of the `Double` field."""
return -Double.max()
@byte_order_option()
def unpack(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> float:
# Badly placed field
if index.bit:
raise FieldIndexError(self, index)
# Decoding byte order of the buffer
byte_order = get_byte_order(options)
# Field byte order overrules!
if self.byte_order is not Byteorder.auto:
byte_order = self.byte_order
# Content of the buffer mapped by the field
offset = index.byte
size = offset + self.alignment.byte_size
content = buffer[offset:size]
# Not enough content!
if len(content) != 8:
return float()
# Unpack the content from the buffer
if byte_order is Byteorder.big:
return struct.unpack('>d', content)[0]
else:
return struct.unpack('<d', content)[0]
@byte_order_option()
def pack(self,
buffer: bytearray = bytearray(),
**options: Any) -> bytes:
# Badly placed field
if self.index.bit:
raise FieldIndexError(self, self.index)
# Encoding byte order of the buffer
byte_order = get_byte_order(options)
# Field byte order overrules!
if self.byte_order is not Byteorder.auto:
byte_order = self.byte_order
# Pack the field value to bytes
if byte_order is Byteorder.big:
return struct.pack('>d', self._value)
else:
return struct.pack('<d', self._value)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
metadata = super().describe(name, **options)
metadata['max'] = self.max()
metadata['min'] = self.min()
return dict(sorted(metadata.items()))
class Decimal(Field):
""" The :class:`Decimal` field is a :class:`Field` with a variable *size*
and returns its field :attr:`value` as a decimal number.
Internally a `Decimal` field uses an :class:`int` class to store the
data of its field :attr:`value`.
A `Decimal` field extends the :attr:`~Field.metadata` of a :class:`Field`
with a ``'max'`` and ``'min'`` key for its maximum and minimum possible
field :attr:`value` and a ``'signed'`` key to mark the decimal number as
signed or unsigned.
:param int bit_size: is the *size* of the `Decimal` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Decimal` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
:param bool signed: if :data:`True` the `Decimal` field is signed otherwise
unsigned.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Decimal` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> unsigned = Decimal(16)
>>> unsigned.is_decimal()
True
>>> unsigned.name
'Decimal16'
>>> unsigned.alignment
Alignment(byte_size=2, bit_offset=0)
>>> unsigned.byte_order
Byteorder.auto = 'auto'
>>> unsigned.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> unsigned.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unsigned.bit_size
16
>>> unsigned.signed
False
>>> unsigned.min()
0
>>> unsigned.max()
65535
>>> unsigned.value
0
>>> bytes(unsigned)
b'\\x00\\x00'
>>> int(unsigned)
0
>>> float(unsigned)
0.0
>>> hex(unsigned)
'0x0'
>>> bin(unsigned)
'0b0'
>>> oct(unsigned)
'0o0'
>>> bool(unsigned)
False
>>> unsigned.as_signed()
0
>>> unsigned.as_unsigned()
0
>>> unsigned.deserialize(bytes.fromhex('0080'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unsigned.value
32768
>>> unsigned.value = 0x4000
>>> unsigned.value
16384
>>> unsigned.value = -1
>>> unsigned.value
0
>>> unsigned.value = 65536
>>> unsigned.value
65535
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> unsigned.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> unsigned.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Decimal16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Decimal16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': 65535}
Example:
>>> signed = Decimal(16, signed=True)
>>> signed.is_decimal()
True
>>> signed.name
'Decimal16'
>>> signed.alignment
Alignment(byte_size=2, bit_offset=0)
>>> signed.byte_order
Byteorder.auto = 'auto'
>>> signed.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> signed.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> signed.bit_size
16
>>> signed.signed
True
>>> signed.min()
-32768
>>> signed.max()
32767
>>> signed.value
0
>>> bytes(signed)
b'\\x00\\x00'
>>> int(signed)
0
>>> float(signed)
0.0
>>> hex(signed)
'0x0'
>>> bin(signed)
'0b0'
>>> oct(signed)
'0o0'
>>> bool(signed)
False
>>> signed.deserialize(bytes.fromhex('00c0'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> signed.value
-16384
>>> signed.value = -0x4000
>>> signed.value
-16384
>>> signed.value = -32769
>>> signed.value
-32768
>>> signed.value = 32768
>>> signed.value
32767
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> signed.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ff7f'
>>> signed.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Decimal16',
'index': [0, 0],
'max': 32767,
'min': -32768,
'name': 'Decimal16',
'order': 'auto',
'signed': True,
'size': 16,
'type': 'Field',
'value': 32767}
"""
# Item type.
item_type: ItemClass = ItemClass.Decimal
def __init__(self,
bit_size: int,
align_to: int | None = None,
signed: bool = False,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(byte_order=byte_order)
# Field signed?
self._signed = bool(signed)
# Field alignment, Field bit size
if align_to:
self._set_alignment(group_size=align_to)
self._set_bit_size(bit_size)
else:
self._set_bit_size(bit_size, auto_align=True)
# Field value
self._value: int = int()
def __bytes__(self) -> bytes:
size, offset = self.alignment
value = self.as_unsigned() << offset
if self.byte_order in (Byteorder.big, Byteorder.little):
return value.to_bytes(size, self.byte_order.value)
else:
return value.to_bytes(size, BYTEORDER.value)
def __bool__(self) -> bool:
return bool(self._value)
def __int__(self) -> int:
return int(self._value)
def __index__(self) -> int:
return int(self._value)
def __float__(self) -> float:
return float(self._value)
@property
def value(self) -> int:
""" Field value as a decimal number."""
return int(self._value)
@value.setter
def value(self, x: str | int | float | bool) -> None:
self._value = self.to_decimal(x)
@property
def signed(self) -> bool:
""" Returns :data:`True` if the `Decimal` field is signed."""
return self._signed
@signed.setter
def signed(self, value: bool) -> None:
self._signed = bool(value)
self._value = self._cast(self._value,
self.min(), self.max(),
self._signed)
@staticmethod
def is_decimal() -> bool:
""" Returns :data:`True`."""
return True
def to_decimal(self,
value: str | int | float | bool,
encoding: Literal['ascii'] | None = None) -> int:
if isinstance(value, str):
if encoding is None:
decimal = int(value, 0)
elif encoding == 'ascii':
decimal = ord(value[:1])
else:
raise FieldValueEncodingError(self, self.index, encoding)
else:
decimal = int(value)
return clamp(decimal, self.min(), self.max())
def _set_alignment(self,
group_size: int,
bit_offset: int = 0,
auto_align: bool = False) -> None:
""" Sets the alignment of the ``Decimal`` field.
:param int group_size: size of the aligned `Field` group in bytes,
can be between ``1`` and ``8``.
:param int bit_offset: bit offset of the `Decimal` field within the
aligned `Field` group, can be between ``0`` and ``63``.
:param bool auto_align: if :data:`True` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
"""
# Field alignment offset
field_offset = int(bit_offset)
# Auto alignment
if auto_align:
# Field alignment size
field_size, bit_offset = divmod(field_offset, 8)
if bit_offset != 0:
field_size += 1
field_size = max(field_size, 1)
# No auto alignment
else:
# Field alignment size
field_size = int(group_size)
# Field alignment
alignment = Alignment(field_size, field_offset)
# Invalid field alignment size (can be between 1 and 8 bytes)
if field_size not in range(1, 9):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment offset
if not (0 <= field_offset <= 63):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment
if field_offset >= field_size * 8:
raise FieldAlignmentError(self, self.index, alignment)
# Set field alignment
self._align_to_byte_size = alignment.byte_size
self._align_to_bit_offset = alignment.bit_offset
def _set_bit_size(self,
size: int,
step: int = 1,
auto_align: bool = False) -> None:
""" Sets the *size* of the `Decimal` field.
:param int size: is the *size* of the `Decimal` field in bits,
can be between ``1`` and ``64``.
:param int step: is the minimal required step *size* for the `Decimal`
field in bits.
:param bool auto_align: if :data:`True` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
"""
# Field size
bit_size = int(size)
# Invalid field size
if bit_size % step != 0 or not (1 <= bit_size <= 64):
raise FieldSizeError(self, self.index, bit_size)
# Field group size
group_size, offset = divmod(bit_size, 8)
# Auto alignment
if auto_align:
if offset != 0:
self._align_to_byte_size = group_size + 1
else:
self._align_to_byte_size = group_size
# Invalid field alignment
elif group_size > self.alignment.byte_size:
raise FieldAlignmentError(self, self.index,
Alignment(group_size,
self.alignment.bit_offset))
# Set field size
self._bit_size = bit_size
def _cast(self,
value: int,
minimum: int,
maximum: int,
signed: bool) -> int:
# Sign conversion
if minimum <= value <= maximum:
return value
elif signed:
return value | ~self.bit_mask()
else:
return value & self.bit_mask()
def _max(self, signed: bool) -> int:
# Maximal possible field value
if signed:
return 2 ** (self._bit_size - 1) - 1
else:
return 2 ** self._bit_size - 1
def _min(self, signed: bool) -> int:
# Minimal possible field value
if signed:
return -2 ** (self._bit_size - 1)
else:
return 0
def bit_mask(self) -> int:
return 2 ** self._bit_size - 1
def max(self) -> int:
""" Returns the maximal possible field *value* of the `Decimal` field.
"""
return self._max(self._signed)
def min(self) -> int:
""" Returns the minimal possible field *value* of the `Decimal` field.
"""
return self._min(self._signed)
def as_unsigned(self) -> int:
""" Returns the field *value* of the `Decimal` field
as an unsigned integer.
"""
return self._cast(self._value,
self._min(False), self._max(False),
False)
def as_signed(self) -> int:
""" Returns the field *value* of the `Decimal` field
as a signed integer.
"""
return self._cast(self._value,
self._min(True), self._max(True),
True)
@byte_order_option()
def unpack(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> int:
# Content of the buffer mapped by the field group
offset = index.byte
size = offset + self.alignment.byte_size
content = buffer[offset:size]
# Decoding byte order of the buffer
byte_order = get_byte_order(options)
# Decode field value from the buffer
value = int.from_bytes(content, byte_order.value)
value >>= index.bit
value &= self.bit_mask()
# Field alignment
field_size, field_offset = divmod(self.bit_size, 8)
# Byte order conversion for field value necessary?
if self.byte_order is Byteorder.auto:
# No specific field byte order
pass
elif self.byte_order is byte_order:
# Field byte order matches the
# decoding byte order of the buffer
pass
elif field_size < 1:
# Byte order not relevant for fields smaller than one byte
pass
elif field_offset != 0:
# Badly sized field for independent byte order conversion
raise FieldGroupByteOrderError(self, index, byte_order)
elif field_size == 1:
# Byte order not relevant for one-byte fields
pass
else:
# Convert byte order of the field value
value = int.from_bytes(value.to_bytes(field_size,
byte_order.value),
self.byte_order.value)
# Limit field value
if value > self.max():
value |= ~self.bit_mask()
return value
@byte_order_option()
def pack(self,
buffer: bytearray = bytearray(),
**options: Any) -> bytes:
# Field value
value = clamp(self._value, self.min(), self.max())
value &= self.bit_mask()
# Encoding byte order of the buffer
byte_order = get_byte_order(options)
# Field alignment
field_size, field_offset = divmod(self.bit_size, 8)
# Byte order conversion for field value necessary?
if self.byte_order is Byteorder.auto:
# No specific field byte order
pass
elif self.byte_order is byte_order:
# Field byte order matches the
# encoding byte order of the buffer
pass
elif field_size < 1:
# Byte order not relevant for fields smaller than one byte
pass
elif field_offset != 0:
# Badly sized field for independent byte order conversion
raise FieldGroupByteOrderError(self, self.index, byte_order)
elif field_size == 1:
# Byte order not relevant for one-byte fields
pass
else:
# Convert byte order of the field value
value = int.from_bytes(value.to_bytes(field_size,
self.byte_order.value),
byte_order.value)
# Shift the field value to its field group offset
value <<= self.index.bit
# Content for the buffer mapped by the field group
offset = self.index.byte
size = offset + self.alignment.byte_size
if len(buffer) == size:
# Map the field value into the existing field group content of the buffer
view = memoryview(buffer)
value |= int.from_bytes(buffer[offset:size], byte_order.value)
view[offset:size] = value.to_bytes(self.alignment.byte_size,
byte_order.value)
return bytes()
else:
# Extend the buffer with the field group content and the field value
return value.to_bytes(self.alignment.byte_size, byte_order.value)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
metadata = super().describe(name, **options)
metadata['max'] = self.max()
metadata['min'] = self.min()
metadata['signed'] = self.signed
return dict(sorted(metadata.items()))
class Bit(Decimal):
""" The :class:`Bit` field is an unsigned :class:`Decimal` with a *size* of
one bit, and returns its field :attr:`value` as an unsigned integer number.
:param int number: is the bit offset of the `Bit` field within the
aligned bytes, can be between ``0`` and ``63``.
:param int|None align_to: aligns the `Bit` field to the number of bytes,
can be between ``1`` and ``8``.
Example:
>>> bit = Bit(0)
>>> bit.is_decimal()
True
>>> bit.is_bit()
True
>>> bit.name
'Bit'
>>> bit.alignment
Alignment(byte_size=1, bit_offset=0)
>>> bit.byte_order
Byteorder.auto = 'auto'
>>> bit.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> bit.index_field()
Index(byte=0, bit=1, address=0, base_address=0, update=False)
>>> bit.bit_size
1
>>> bit.signed
False
>>> bit.min()
0
>>> bit.max()
1
>>> bit.value
0
>>> bit.signed
False
>>> bit.value
0
>>> bytes(bit)
b'\\x00'
>>> int(bit)
0
>>> float(bit)
0.0
>>> hex(bit)
'0x0'
>>> bin(bit)
'0b0'
>>> oct(bit)
'0o0'
>>> bool(bit)
False
>>> bit.as_signed()
0
>>> bit.as_unsigned()
0
>>> bit.deserialize(bytes.fromhex('01'))
Index(byte=0, bit=1, address=0, base_address=0, update=False)
>>> bit.value
1
>>> bit.value = 0
>>> bit.value
0
>>> bit.value = False
>>> bit.value
0
>>> bit.value = True
>>> bit.value
1
>>> bit.value = -1
>>> bit.value
0
>>> bit.value = 2
>>> bit.value
1
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> bit.serialize(bytestream)
Index(byte=0, bit=1, address=0, base_address=0, update=False)
>>> bytestream.hex()
'01'
>>> bit.describe()
{'address': 0,
'alignment': [1, 0],
'class': 'Bit',
'index': [0, 0],
'max': 1,
'min': 0,
'name': 'Bit',
'order': 'auto',
'signed': False,
'size': 1,
'type': 'Field',
'value': 1}
"""
# Item type.
item_type: ItemClass = ItemClass.Bit
def __init__(self,
number: int,
align_to: int | None = None) -> None:
super().__init__(bit_size=1, align_to=align_to)
# Field alignment
if align_to:
self._set_alignment(group_size=align_to,
bit_offset=number)
else:
self._set_alignment(group_size=0,
bit_offset=number,
auto_align=True)
@property
def name(self) -> str:
""" Returns the type name of the `Bit` field (read-only)."""
return self.item_type.name.capitalize()
@staticmethod
def is_bit() -> bool:
""" Returns :data:`True`."""
return True
class Byte(Decimal):
""" The :class:`Byte` field is an unsigned :class:`Decimal` field with a
*size* of one byte, and returns its field :attr:`value` as a lowercase
hexadecimal string prefixed with ``0x``.
:param int|None align_to: aligns the `Byte` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Byte` field aligns itself
to the next matching byte size according to the *size* of the
`Byte` field.
Example:
>>> byte = Byte()
>>> byte.is_decimal()
True
>>> byte.name
'Byte'
>>> byte.alignment
Alignment(byte_size=1, bit_offset=0)
>>> byte.byte_order
Byteorder.auto = 'auto'
>>> byte.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> byte.index_field()
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> byte.bit_size
8
>>> byte.signed
False
>>> byte.min()
0
>>> byte.max()
255
>>> byte.value
'0x0'
>>> bytes(byte)
b'\\x00'
>>> int(byte)
0
>>> float(byte)
0.0
>>> hex(byte)
'0x0'
>>> bin(byte)
'0b0'
>>> oct(byte)
'0o0'
>>> bool(byte)
False
>>> byte.as_signed()
0
>>> byte.as_unsigned()
0
>>> byte.deserialize(bytes.fromhex('20'))
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> byte.value
'0x20'
>>> byte.value = 16
>>> byte.value
'0x10'
>>> byte.value = -1
>>> byte.value
'0x0'
>>> byte.value = 256
>>> byte.value
'0xff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> byte.serialize(bytestream)
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> bytestream.hex()
'ff'
>>> byte.describe()
{'address': 0,
'alignment': [1, 0],
'class': 'Byte',
'index': [0, 0],
'max': 255,
'min': 0,
'name': 'Byte',
'order': 'auto',
'signed': False,
'size': 8,
'type': 'Field',
'value': '0xff'}
"""
# Item type.
item_type: ItemClass = ItemClass.Byte
def __init__(self,
align_to: int | None = None) -> None:
super().__init__(bit_size=8, align_to=align_to)
@property
def name(self) -> str:
""" Returns the type name of the `Byte` field (read-only)."""
return self.item_type.name.capitalize()
@property
def value(self) -> str:
""" Field value as a lowercase hexadecimal string prefixed with ``0x``.
"""
return hex(self._value)
@value.setter
def value(self, x: str | int | float | bool) -> None:
self._value = self.to_decimal(x)
class Char(Decimal):
""" The :class:`Char` field is an unsigned :class:`Decimal` field with a
*size* of one byte, and returns its field :attr:`value` as a unicode string
character.
:param int|None align_to: aligns the `Char` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Char` field aligns itself
to the next matching byte size according to the *size* of the
`Char` field.
Example:
>>> char = Char()
>>> char.is_decimal()
True
>>> char.name
'Char'
>>> char.alignment
Alignment(byte_size=1, bit_offset=0)
>>> char.byte_order
Byteorder.auto = 'auto'
>>> char.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> char.index_field()
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> char.bit_size
8
>>> char.signed
False
>>> char.min()
0
>>> char.max()
255
>>> char.value
'\\x00'
>>> bytes(char)
b'\\x00'
>>> ord(char.value)
0
>>> int(char)
0
>>> float(char)
0.0
>>> hex(char)
'0x0'
>>> bin(char)
'0b0'
>>> oct(char)
'0o0'
>>> bool(char)
False
>>> char.as_signed()
0
>>> char.as_unsigned()
0
>>> char.deserialize(bytes.fromhex('41'))
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> char.value
'A'
>>> char.value = 66
>>> char.value
'B'
>>> char.value = 0x41
>>> char.value
'A'
>>> char.value = 'F'
>>> char.value
'F'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> char.serialize(bytestream)
Index(byte=1, bit=0, address=1, base_address=0, update=False)
>>> bytestream.hex()
'46'
>>> char.describe()
{'address': 0,
'alignment': [1, 0],
'class': 'Char',
'index': [0, 0],
'max': 255,
'min': 0,
'name': 'Char',
'order': 'auto',
'signed': False,
'size': 8,
'type': 'Field',
'value': 'F'}
"""
# Item type.
item_type: ItemClass = ItemClass.Char
def __init__(self,
align_to: int | None = None) -> None:
super().__init__(bit_size=8, align_to=align_to)
@property
def name(self) -> str:
""" Returns the type name of the `Char` field (read-only)."""
return self.item_type.name.capitalize()
@property
def value(self) -> str:
""" Field value as a unicode string character."""
return chr(self._value)
@value.setter
def value(self, x: str | int | float) -> None:
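# A character is converted to its decimal value via the 'ascii' encoding;
# numbers are passed through to the decimal conversion.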
self._value = self.to_decimal(x, encoding='ascii')
class Signed(Decimal):
""" The :class:`Signed` field is a signed :class:`Decimal` field with a
variable *size*, and returns its field :attr:`value` as a signed integer
number.
:param int bit_size: is the *size* of the `Signed` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Signed` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Signed` field aligns itself
to the next matching byte size according to the *size* of the
`Signed` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Signed` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> signed = Signed(16)
>>> signed.is_decimal()
True
>>> signed.name
'Signed16'
>>> signed.alignment
Alignment(byte_size=2, bit_offset=0)
>>> signed.byte_order
Byteorder.auto = 'auto'
>>> signed.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> signed.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> signed.bit_size
16
>>> signed.signed
True
>>> signed.min()
-32768
>>> signed.max()
32767
>>> signed.value
0
>>> bytes(signed)
b'\\x00\\x00'
>>> int(signed)
0
>>> float(signed)
0.0
>>> hex(signed)
'0x0'
>>> bin(signed)
'0b0'
>>> oct(signed)
'0o0'
>>> bool(signed)
False
>>> signed.as_signed()
0
>>> signed.as_unsigned()
0
>>> signed.deserialize(bytes.fromhex('00c0'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> signed.value
-16384
>>> signed.value = -0x4000
>>> signed.value
-16384
>>> signed.value = -32769
>>> signed.value
-32768
>>> signed.value = 32768
>>> signed.value
32767
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> signed.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ff7f'
>>> signed.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Signed16',
'index': [0, 0],
'max': 32767,
'min': -32768,
'name': 'Signed16',
'order': 'auto',
'signed': True,
'size': 16,
'type': 'Field',
'value': 32767}
"""
# Item type.
item_type: ItemClass = ItemClass.Signed
def __init__(self,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, True, byte_order)
class Unsigned(Decimal):
""" The :class:`Unsigned` field is an unsigned :class:`Decimal` field with
a variable *size*, and returns its field :attr:`value` as a lowercase
hexadecimal string prefixed with ``0x``.
:param int bit_size: is the *size* of the `Unsigned` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Unsigned` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Unsigned` field aligns itself
to the next matching byte size according to the *size* of the
`Unsigned` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Unsigned` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> unsigned = Unsigned(16)
>>> unsigned.is_decimal()
True
>>> unsigned.name
'Unsigned16'
>>> unsigned.alignment
Alignment(byte_size=2, bit_offset=0)
>>> unsigned.byte_order
Byteorder.auto = 'auto'
>>> unsigned.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> unsigned.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unsigned.bit_size
16
>>> unsigned.signed
False
>>> unsigned.min()
0
>>> unsigned.max()
65535
>>> unsigned.value
'0x0'
>>> bytes(unsigned)
b'\\x00\\x00'
>>> int(unsigned)
0
>>> float(unsigned)
0.0
>>> hex(unsigned)
'0x0'
>>> bin(unsigned)
'0b0'
>>> oct(unsigned)
'0o0'
>>> bool(unsigned)
False
>>> unsigned.as_signed()
0
>>> unsigned.as_unsigned()
0
>>> unsigned.deserialize(bytes.fromhex('00c0'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unsigned.value
'0xc000'
>>> unsigned.value = 0x4000
>>> unsigned.value
'0x4000'
>>> unsigned.value = -0x1
>>> unsigned.value
'0x0'
>>> unsigned.value = 0x10000
>>> unsigned.value
'0xffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> unsigned.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> unsigned.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Unsigned16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Unsigned16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': '0xffff'}
"""
# Item type.
item_type: ItemClass = ItemClass.Unsigned
def __init__(self,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, False, byte_order)
@property
def value(self) -> str:
""" Field value as a lowercase hexadecimal string prefixed with ``0x``.
"""
return hex(self._value)
@value.setter
def value(self, x: str | int | float | bool) -> None:
self._value = self.to_decimal(x)
class Bitset(Decimal):
""" The :class:`Bitset` field is an unsigned :class:`Decimal` field with a
variable *size* and returns its field :attr:`value` as a binary string
prefixed with ``0b``.
:param int bit_size: is the *size* of the `Bitset` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Bitset` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Bitset` field aligns itself
to the next matching byte size according to the *size* of the
`Bitset` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Bitset` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> bitset = Bitset(16)
>>> bitset.is_decimal()
True
>>> bitset.name
'Bitset16'
>>> bitset.alignment
Alignment(byte_size=2, bit_offset=0)
>>> bitset.byte_order
Byteorder.auto = 'auto'
>>> bitset.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> bitset.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bitset.bit_size
16
>>> bitset.signed
False
>>> bitset.min()
0
>>> bitset.max()
65535
>>> bitset.value
'0b0000000000000000'
>>> bytes(bitset)
b'\\x00\\x00'
>>> int(bitset)
0
>>> float(bitset)
0.0
>>> hex(bitset)
'0x0'
>>> bin(bitset)
'0b0'
>>> oct(bitset)
'0o0'
>>> bool(bitset)
False
>>> bitset.as_signed()
0
>>> bitset.as_unsigned()
0
>>> bitset.deserialize(bytes.fromhex('f00f'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bitset.value
'0b0000111111110000'
>>> bitset.value = 0b1111
>>> bitset.value
'0b0000000000001111'
>>> bitset.value = -1
>>> bitset.value
'0b0000000000000000'
>>> bitset.value = 0x10000
>>> bitset.value
'0b1111111111111111'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> bitset.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> bitset.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Bitset16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Bitset16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': '0b1111111111111111'}
"""
# Item type.
item_type: ItemClass = ItemClass.Bitset
def __init__(self,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, False, byte_order)
@property
def value(self) -> str:
""" Field value as a binary string prefixed with ``0b``."""
return f"{self._value:#0{self.bit_size + 2}b}"
@value.setter
def value(self, x: str | int | float | bool) -> None:
self._value = self.to_decimal(x)
class Bool(Decimal):
""" The :class:`Bool` field is an unsigned :class:`Decimal` field with a
variable *size*, and returns its field :attr:`value` as a boolean value.
:param int bit_size: is the *size* of the `Bool` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Bool` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Bool` field aligns itself
to the next matching byte size according to the *size* of the
`Bool` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Bool` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> boolean = Bool(16)
>>> boolean.is_decimal()
True
>>> boolean.is_bool()
True
>>> boolean.name
'Bool16'
>>> boolean.alignment
Alignment(byte_size=2, bit_offset=0)
>>> boolean.byte_order
Byteorder.auto = 'auto'
>>> boolean.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> boolean.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> boolean.bit_size
16
>>> boolean.signed
False
>>> boolean.min()
0
>>> boolean.max()
65535
>>> boolean.value
False
>>> bytes(boolean)
b'\\x00\\x00'
>>> int(boolean)
0
>>> float(boolean)
0.0
>>> hex(boolean)
'0x0'
>>> bin(boolean)
'0b0'
>>> oct(boolean)
'0o0'
>>> bool(boolean)
False
>>> boolean.as_signed()
0
>>> boolean.as_unsigned()
0
>>> boolean.deserialize(bytes.fromhex('0f00'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> boolean.value
True
>>> boolean.value = False
>>> boolean.value
False
>>> boolean.value = -1
>>> boolean.value
False
>>> boolean.value = 0x10000
>>> boolean.value
True
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> boolean.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> boolean.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Bool16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Bool16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': True}
"""
# Item type.
item_type: ItemClass = ItemClass.Bool
def __init__(self,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, False, byte_order)
@property
def value(self) -> bool:
""" Field value as a boolean value, :data:`True` or ``False``."""
return bool(self._value)
@value.setter
def value(self, x: bool | int | float | str) -> None:
self._value = self.to_decimal(x)
@staticmethod
def is_bool() -> bool:
""" Returns :data:`True`."""
return True
class Enum(Decimal):
""" The :class:`Enum` field is an unsigned :class:`Decimal` field with a
variable *size*, and returns its field :attr:`value` as an unsigned integer
number.
If an :class:`Enumeration` is available and a member matches the integer
number, then the member name string is returned; otherwise, the integer
number is returned.
:param int bit_size: is the *size* of the `Enum` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Enum` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Enum` field aligns itself
to the next matching byte size according to the *size* of the
`Enum` field.
:param enumeration: :class:`Enumeration` definition of the `Enum` field.
:type enumeration: Type[Enumeration]|None
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Enum` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> enum = Enum(16, enumeration=ItemClass)
>>> enum.is_decimal()
True
>>> enum.name
'Enum16'
>>> enum.alignment
Alignment(byte_size=2, bit_offset=0)
>>> enum.byte_order
Byteorder.auto = 'auto'
>>> enum.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> enum.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> enum.bit_size
16
>>> enum.signed
False
>>> bytes(enum)
b'\\x00\\x00'
>>> enum.min()
0
>>> enum.max()
65535
>>> enum.value
0
>>> int(enum)
0
>>> float(enum)
0.0
>>> hex(enum)
'0x0'
>>> bin(enum)
'0b0'
>>> oct(enum)
'0o0'
>>> bool(enum)
False
>>> enum.as_signed()
0
>>> enum.as_unsigned()
0
>>> enum.deserialize(bytes.fromhex('2800'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> enum.value
'Decimal'
>>> enum.value = 48
>>> enum.value
'Enum'
>>> enum.value = 'Enum'
>>> enum.value
'Enum'
>>> enum.value = 40
>>> enum.value
'Decimal'
>>> enum.value = -1
>>> enum.value
0
>>> enum.value = 65536
>>> enum.value
65535
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> enum.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> enum.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Enum16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Enum16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': 65535}
"""
# Item type.
item_type: ItemClass = ItemClass.Enum
def __init__(self,
bit_size: int,
align_to: int | None = None,
enumeration: Type[Enumeration] | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, False, byte_order)
# Field enumeration class
if enumeration is None:
self._enum: Type[Enumeration] | None = None
elif issubclass(enumeration, Enumeration):
self._enum: Type[Enumeration] | None = enumeration
else:
raise EnumTypeError(self, enumeration)
@property
def value(self) -> int | str:
""" Field value as an enum name string.
The fallback is the unsigned integer number.
"""
if self._enum and issubclass(self._enum, Enumeration):
name = self._enum.get_name(self._value)
if name:
return name
return self._value
@value.setter
def value(self, x: str | int | Enumeration) -> None:
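# Accept a numeric value, a numeric string, or an enumeration member name.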
if isinstance(x, str):
try:
decimal = int(x, 0)
except ValueError:
if self._enum and issubclass(self._enum, Enumeration):
decimal = int(self._enum.get_value(x))
if decimal < 0:
raise FieldValueError(self, self.index, x)
else:
raise FieldValueError(self, self.index, x)
else:
decimal = x
self._value = self.to_decimal(decimal)
class Scaled(Decimal):
""" The :class:`Scaled` field is a signed :class:`Decimal` field with a
variable *size*, and returns its scaled field :attr:`value` as a
floating-point number.
The scaled field *value* is:
``(unscaled field value / scaling base) * scaling factor``
The unscaled field *value* is:
``(scaled field value / scaling factor) * scaling base``
The scaling base is:
``2 ** (field size - 1) / 2``
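For example, a 16-bit `Scaled` field has a scaling base of ``16384.0``; with a
scaling factor of ``100.0`` the unscaled integer ``0x4000`` maps to the scaled
value ``100.0``.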
A `Scaled` field extends the :attr:`~Field.metadata` of a :class:`Decimal`
with a ``'scale'`` key for its scaling factor.
:param float|int scale: scaling factor of the `Scaled` field.
:param int bit_size: is the *size* of the `Scaled` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Scaled` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Scaled` field aligns itself
to the next matching byte size according to the *size* of the
`Scaled` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Scaled` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> scaled = Scaled(100, 16)
>>> scaled.is_decimal()
True
>>> scaled.name
'Scaled16'
>>> scaled.alignment
Alignment(byte_size=2, bit_offset=0)
>>> scaled.byte_order
Byteorder.auto = 'auto'
>>> scaled.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> scaled.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> scaled.scale
100.0
>>> scaled.scaling_base()
16384.0
>>> scaled.bit_size
16
>>> scaled.signed
True
>>> scaled.min()
-32768
>>> scaled.max()
32767
>>> scaled.value
0.0
>>> bytes(scaled)
b'\\x00\\x00'
>>> int(scaled)
0
>>> float(scaled)
0.0
>>> hex(scaled)
'0x0'
>>> bin(scaled)
'0b0'
>>> oct(scaled)
'0o0'
>>> bool(scaled)
False
>>> scaled.as_signed()
0
>>> scaled.as_unsigned()
0
>>> scaled.deserialize(bytes.fromhex('0040'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> scaled.value
100.0
>>> scaled.value = -100
>>> scaled.value
-100.0
>>> scaled.value = -200.001
>>> scaled.value
-200.0
>>> scaled.value = 200
>>> scaled.value
199.993896484375
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> scaled.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ff7f'
>>> scaled.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Scaled16',
'index': [0, 0],
'max': 32767,
'min': -32768,
'name': 'Scaled16',
'order': 'auto',
'scale': 100.0,
'signed': True,
'size': 16,
'type': 'Field',
'value': 199.993896484375}
"""
# Item type.
item_type: ItemClass = ItemClass.Scaled
def __init__(self,
scale: float | int,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, True, byte_order)
# Field scaling factor
self._scale: float = float(scale)
def __float__(self) -> float:
return self.value
@property
def value(self) -> float:
""" Field value as a floating-point number."""
return self.as_float(self._value)
@value.setter
def value(self, x: float | int) -> None:
self._value = self.to_scaled(x)
def as_float(self, value: int) -> float:
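# Scaled value = (unscaled value / scaling base) * scaling factor.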
return (value / self.scaling_base()) * self.scale
def to_scaled(self, value: float | int) -> int:
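# Unscaled value = (scaled value / scaling factor) * scaling base.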
return self.to_decimal((float(value) / self.scale) *
self.scaling_base())
@property
def scale(self) -> float:
""" Scaling factor of the `Scaled` field."""
return self._scale
@scale.setter
def scale(self, value: float | int) -> None:
self._scale = float(value)
def scaling_base(self) -> float:
""" Returns the scaling base of the `Scaled` field."""
return 2 ** (self.bit_size - 1) / 2
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
metadata = super().describe(name, **options)
metadata['scale'] = self.scale
return dict(sorted(metadata.items()))
class Fraction(Decimal):
""" The :class:`Fraction` field is an unsigned :class:`Decimal` field with
a variable *size*, and returns its fractional field :attr:`value` as a
floating-point number.
A fractional number is bitwise encoded and has up to three bit parts for
this task.
The first part consists of the bits for the fraction part of a fractional number.
The number of bits for the fraction part is derived from the *bit size*
of the field and the required bits for the other two parts.
The fraction part is always smaller than one.
``fraction part = (2**bits - 1) / (2**bits)``
The second part consists of the *bits* for the *integer* part of a fractional
number.
``integer part = (2**bits - 1)``
The third part is the bit for the sign of a *signed* fractional
number. Only a *signed* fractional number possesses this bit.
``sign part = {'0': '+', '1': '-'}``
A fractional number is multiplied by one hundred.
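For example, an unsigned ``Fraction2.16`` field uses 14 bits for the fraction
part and 2 bits for the integer part; the raw value ``0x4000`` therefore
decodes to ``100.0``.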
:param int bits_integer: number of bits for the integer part of the
fraction number, can be between *1* and the *size* of the
`Fraction` field.
:param int bit_size: is the *size* of the `Fraction` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Fraction` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Fraction` field aligns itself
to the next matching byte size according to the *size* of the
`Fraction` field.
:param bool signed: if :data:`True` the `Fraction` field is signed, otherwise
unsigned.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Fraction` field
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> unipolar = Fraction(2, 16)
>>> unipolar.is_decimal()
True
>>> unipolar.name
'Fraction2.16'
>>> unipolar.alignment
Alignment(byte_size=2, bit_offset=0)
>>> unipolar.byte_order
Byteorder.auto = 'auto'
>>> unipolar.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> unipolar.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unipolar.bit_size
16
>>> unipolar.signed
False
>>> unipolar.min()
0
>>> unipolar.max()
65535
>>> unipolar.value
0.0
>>> bytes(unipolar)
b'\\x00\\x00'
>>> int(unipolar)
0
>>> float(unipolar)
0.0
>>> hex(unipolar)
'0x0'
>>> bin(unipolar)
'0b0'
>>> oct(unipolar)
'0o0'
>>> bool(unipolar)
False
>>> unipolar.as_signed()
0
>>> unipolar.as_unsigned()
0
>>> unipolar.deserialize(bytes.fromhex('0080'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unipolar.value
200.0
>>> unipolar.value = 100
>>> unipolar.value
100.0
>>> unipolar.as_float(0x4000)
100.0
>>> unipolar.value = -1
>>> unipolar.value
0.0
>>> unipolar.value = 400
>>> unipolar.value
399.993896484375
>>> unipolar.as_float(0xffff)
399.993896484375
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> unipolar.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> unipolar.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Fraction2.16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Fraction2.16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': 399.993896484375}
Example:
>>> bipolar = Fraction(2, 16, 2, True)
>>> bipolar.is_decimal()
True
>>> bipolar.name
'Fraction2.16'
>>> bipolar.alignment
Alignment(byte_size=2, bit_offset=0)
>>> bipolar.byte_order
Byteorder.auto = 'auto'
>>> bipolar.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> bipolar.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bipolar.bit_size
16
>>> bipolar.signed
False
>>> bipolar.min()
0
>>> bipolar.max()
65535
>>> bipolar.value
0.0
>>> bytes(bipolar)
b'\\x00\\x00'
>>> int(bipolar)
0
>>> float(bipolar)
0.0
>>> hex(bipolar)
'0x0'
>>> bin(bipolar)
'0b0'
>>> oct(bipolar)
'0o0'
>>> bool(bipolar)
False
>>> bipolar.as_signed()
0
>>> bipolar.as_unsigned()
0
>>> bipolar.deserialize(bytes.fromhex('0040'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bipolar.value
100.0
>>> bipolar.value = -100
>>> bipolar.value
-100.0
>>> bipolar.as_float(0xc000)
-100.0
>>> bipolar.as_float(0x8000)
-0.0
>>> bipolar.value = -200
>>> bipolar.value
-199.993896484375
>>> bipolar.as_float(0xffff)
-199.993896484375
>>> bipolar.value = 200
>>> bipolar.value
199.993896484375
>>> bipolar.as_float(0x7fff)
199.993896484375
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> bipolar.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ff7f'
>>> bipolar.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Fraction2.16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Fraction2.16',
'order': 'auto',
'signed': True,
'size': 16,
'type': 'Field',
'value': 199.993896484375}
"""
# Item type.
item_type: ItemClass = ItemClass.Fraction
def __init__(self,
bits_integer: int,
bit_size: int,
align_to: int | None = None,
signed: bool = False,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size, align_to, False, byte_order)
# Number of bits of the integer part of the fraction number
self._bits_integer: int = clamp(int(bits_integer), 1, self._bit_size)
# Fraction number signed?
if self._bit_size <= 1:
self._signed_fraction: bool = False
else:
self._signed_fraction: bool = bool(signed)
def __float__(self) -> float:
return self.value
@property
def name(self) -> str:
""" Returns the type name of the `Fraction` field (read-only)."""
return (f"{self.item_type.name.capitalize()}"
f"{self._bits_integer}.{self.bit_size}")
@property
def value(self) -> float:
""" Field value as a floating-point number."""
return self.as_float(self._value)
@value.setter
def value(self, x: float | int) -> None:
self._value = self.to_fraction(x)
def as_float(self, value: int) -> float:
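# Decode the sign, integer, and fraction bit parts and scale the result
# by a factor of one hundred.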
factor = 100.0
bits_fraction = max(self.bit_size - self._bits_integer, 0)
fraction = (value & (2 ** bits_fraction - 1)) / 2 ** bits_fraction
if self._signed_fraction:
mask = 2 ** (self.bit_size - 1)
if value & mask:
factor = -100.0
integer = (value & (mask - 1)) >> max(bits_fraction, 0)
else:
integer = value >> max(bits_fraction, 0)
return (integer + fraction) * factor
def to_fraction(self, value: float | int) -> int:
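# Encode the scaled floating-point number into its sign, integer, and
# fraction bit parts.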
normalized = float(value) / 100.0
bits_fraction = max(self.bit_size - self._bits_integer, 0)
if self._signed_fraction:
integer = abs(int(normalized)) << max(bits_fraction, 0)
fraction = int(
math.fabs(normalized - int(normalized)) * 2 ** bits_fraction)
if normalized < 0:
mask = 2 ** (self.bit_size - 1)
else:
mask = 0
decimal = clamp(integer | fraction, 0, 2 ** (self.bit_size - 1) - 1)
decimal |= mask
else:
normalized = max(normalized, 0)
integer = int(normalized) << max(bits_fraction, 0)
fraction = int((normalized - int(normalized)) * 2 ** bits_fraction)
decimal = clamp(integer | fraction, 0, 2 ** self.bit_size - 1)
return self.to_decimal(decimal)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
metadata = super().describe(name, **options)
metadata['signed'] = self._signed_fraction
return dict(sorted(metadata.items()))
class Bipolar(Fraction):
""" The :class:`Bipolar` field is a signed :class:`Fraction` field with a
variable *size*, and returns its fractional field :attr:`value` as a
floating-point number.
:param int bits_integer: number of bits for the integer part of the
fraction number, can be between *1* and the *size* of the
`Bipolar` field.
:param int bit_size: is the *size* of the `Bipolar` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Bipolar` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Bipolar` field aligns itself
to the next matching byte size according to the *size* of the
`Bipolar` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Bipolar` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> bipolar = Bipolar(2, 16)
>>> bipolar.is_decimal()
True
>>> bipolar.name
'Bipolar2.16'
>>> bipolar.alignment
Alignment(byte_size=2, bit_offset=0)
>>> bipolar.byte_order
Byteorder.auto = 'auto'
>>> bipolar.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> bipolar.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bipolar.bit_size
16
>>> bipolar.signed
False
>>> bipolar.min()
0
>>> bipolar.max()
65535
>>> bipolar.value
0.0
>>> bytes(bipolar)
b'\\x00\\x00'
>>> int(bipolar)
0
>>> float(bipolar)
0.0
>>> hex(bipolar)
'0x0'
>>> bin(bipolar)
'0b0'
>>> oct(bipolar)
'0o0'
>>> bool(bipolar)
False
>>> bipolar.as_signed()
0
>>> bipolar.as_unsigned()
0
>>> bipolar.value = -100
>>> bipolar.value
-100.0
>>> bipolar.as_float(0xc000)
-100.0
>>> bipolar.as_float(0x8000)
-0.0
>>> bipolar.value = -200
>>> bipolar.value
-199.993896484375
>>> bipolar.as_float(0xffff)
-199.993896484375
>>> bipolar.value = 200
>>> bipolar.value
199.993896484375
>>> bipolar.as_float(0x7fff)
199.993896484375
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> bipolar.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ff7f'
>>> bipolar.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Bipolar2.16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Bipolar2.16',
'order': 'auto',
'signed': True,
'size': 16,
'type': 'Field',
'value': 199.993896484375}
"""
# Item type.
item_type: ItemClass = ItemClass.Bipolar
def __init__(self,
bits_integer: int,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bits_integer, bit_size, align_to, True, byte_order)
class Unipolar(Fraction):
""" The :class:`Unipolar` field is an unsigned :class:`Fraction` field with
a variable *size*, and returns its fractional field :attr:`value` as a
floating-point number.
:param int bits_integer: number of bits for the integer part of the
fraction number, can be between *1* and the *size* of the
`Unipolar` field.
:param int bit_size: is the *size* of the `Unipolar` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Unipolar` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Unipolar` field aligns itself
to the next matching byte size according to the *size* of the
`Unipolar` field.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Unipolar` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> unipolar = Unipolar(2, 16)
>>> unipolar.is_decimal()
True
>>> unipolar.name
'Unipolar2.16'
>>> unipolar.alignment
Alignment(byte_size=2, bit_offset=0)
>>> unipolar.byte_order
Byteorder.auto = 'auto'
>>> unipolar.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> unipolar.index_field()
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unipolar.bit_size
16
>>> unipolar.signed
False
>>> unipolar.min()
0
>>> unipolar.max()
65535
>>> unipolar.value
0.0
>>> bytes(unipolar)
b'\\x00\\x00'
>>> int(unipolar)
0
>>> float(unipolar)
0.0
>>> hex(unipolar)
'0x0'
>>> bin(unipolar)
'0b0'
>>> oct(unipolar)
'0o0'
>>> bool(unipolar)
False
>>> unipolar.as_signed()
0
>>> unipolar.as_unsigned()
0
>>> unipolar.deserialize(bytes.fromhex('0080'))
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> unipolar.value
200.0
>>> unipolar.value = 100
>>> unipolar.value
100.0
>>> unipolar.as_float(0x4000)
100.0
>>> unipolar.value = -1
>>> unipolar.value
0.0
>>> unipolar.value = 400
>>> unipolar.value
399.993896484375
>>> unipolar.as_float(0xffff)
399.993896484375
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> unipolar.serialize(bytestream)
Index(byte=2, bit=0, address=2, base_address=0, update=False)
>>> bytestream.hex()
'ffff'
>>> unipolar.describe()
{'address': 0,
'alignment': [2, 0],
'class': 'Unipolar2.16',
'index': [0, 0],
'max': 65535,
'min': 0,
'name': 'Unipolar2.16',
'order': 'auto',
'signed': False,
'size': 16,
'type': 'Field',
'value': 399.993896484375}
"""
# Item type.
item_type: ItemClass = ItemClass.Unipolar
def __init__(self,
bits_integer: int,
bit_size: int,
align_to: int | None = None,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bits_integer, bit_size, align_to, False, byte_order)
class Datetime(Decimal):
""" The :class:`Datetime` field is an unsigned :class:`Decimal` field with
a fixed *size* of four bytes, and returns its field :attr:`value` as a UTC
datetime string in the ISO format ``YYYY-mm-dd HH:MM:SS``.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `Datetime` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> datetime = Datetime()
>>> datetime.is_decimal()
True
>>> datetime.name
'Datetime32'
>>> datetime.alignment
Alignment(byte_size=4, bit_offset=0)
>>> datetime.byte_order
Byteorder.auto = 'auto'
>>> datetime.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> datetime.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> datetime.bit_size
32
>>> datetime.signed
False
>>> datetime.min()
0
>>> datetime.max()
4294967295
>>> datetime.value
'1970-01-01 00:00:00'
>>> bytes(datetime)
b'\\x00\\x00\\x00\\x00'
>>> int(datetime)
0
>>> float(datetime)
0.0
>>> hex(datetime)
'0x0'
>>> bin(datetime)
'0b0'
>>> oct(datetime)
'0o0'
>>> bool(datetime)
False
>>> datetime.as_signed()
0
>>> datetime.as_unsigned()
0
>>> datetime.deserialize(bytes.fromhex('ffffffff'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> datetime.value
'2106-02-07 06:28:15'
>>> datetime.value = '1969-12-31 23:59:59'
>>> datetime.value
'1970-01-01 00:00:00'
>>> datetime.value = '2106-02-07 06:28:16'
>>> datetime.value
'2106-02-07 06:28:15'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> datetime.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> datetime.describe()
{'address': 0,
'alignment': [4, 0],
'class': 'Datetime32',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'Datetime32',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Field',
'value': '2106-02-07 06:28:15'}
"""
# Item type.
item_type: ItemClass = ItemClass.Datetime
def __init__(self,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size=32, byte_order=byte_order)
@property
def value(self) -> str:
""" Field value as an UTC datetime string in the ISO format
``YYYY-mm-dd HH:MM:SS``"""
return str(datetime.datetime.utcfromtimestamp(self._value))
@value.setter
def value(self, x: int | str) -> None:
try:
self._value = self.to_decimal(x)
except (TypeError, ValueError):
self._value = self.to_timestamp(x)
def to_timestamp(self, value: str) -> int:
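# Parse the UTC datetime string and convert it into a POSIX timestamp.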
decimal = calendar.timegm(time.strptime(value, "%Y-%m-%d %H:%M:%S"))
return self.to_decimal(decimal)
class IPv4Address(Decimal):
""" The :class:`IPv4Address` field is an unsigned :class:`Decimal` field with a fix
*size* of four bytes and returns its field :attr:`value` as an IPv4 address
formatted string.
:param byte_order: byte order used to unpack and pack the :attr:`value`
of the `IPv4Address` field.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> ipv4 = IPv4Address()
>>> ipv4.is_decimal()
True
>>> ipv4.name
'Ipaddress32'
>>> ipv4.alignment
Alignment(byte_size=4, bit_offset=0)
>>> ipv4.byte_order
Byteorder.auto = 'auto'
>>> ipv4.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> ipv4.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> ipv4.bit_size
32
>>> ipv4.signed
False
>>> ipv4.min()
0
>>> ipv4.max()
4294967295
>>> ipv4.value
'0.0.0.0'
>>> bytes(ipv4)
b'\\x00\\x00\\x00\\x00'
>>> int(ipv4)
0
>>> float(ipv4)
0.0
>>> hex(ipv4)
'0x0'
>>> bin(ipv4)
'0b0'
>>> oct(ipv4)
'0o0'
>>> bool(ipv4)
False
>>> ipv4.as_signed()
0
>>> ipv4.as_unsigned()
0
>>> ipv4.deserialize(bytes.fromhex('ffffffff'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> ipv4.value
'255.255.255.255'
>>> ipv4.value = '192.168.0.0'
>>> ipv4.value
'192.168.0.0'
>>> ipv4.value = '255.255.255.255'
>>> ipv4.value
'255.255.255.255'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> ipv4.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> ipv4.describe()
{'address': 0,
'alignment': [4, 0],
'class': 'Ipaddress32',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'Ipaddress32',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Field',
'value': '255.255.255.255'}
"""
# Item type.
item_type: ItemClass = ItemClass.IPAddress
def __init__(self,
byte_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size=32, byte_order=byte_order)
@property
def value(self) -> str:
""" Field value as an IPv4 address formatted string."""
return str(ipaddress.IPv4Address(self._value))
@value.setter
def value(self, x: str | int) -> None:
self._value = int(ipaddress.IPv4Address(x))
class Pointer(Decimal, Container):
""" The :class:`Pointer` field is an unsigned :class:`Decimal` field with a
*size* of four bytes, and returns its field :attr:`value` as a lowercase
hexadecimal string prefixed with ``0x``.
A `Pointer` field refers absolutely to a :attr:`data` object of a data
:class:`Provider`.
The `Pointer` class extends the :class:`Decimal` field with the
:class:`Container` interface for its referenced :attr:`data` object.
A `Pointer` field has additional features to **read**, **write**,
**deserialize**, **serialize** and **view** binary data:
- **Deserialize** the :attr:`~Field.value` for each :class:`Field`
in the :attr:`data` object referenced by the `Pointer` field from
a byte stream via :meth:`deserialize_data`.
- **Serialize** the :attr:`~Field.value` for each :class:`Field`
in the :attr:`data` object referenced by the `Pointer` field to a
byte stream via :meth:`serialize_data`.
- **Indexes** each :class:`Field` in the :attr:`data` object
referenced by the `Pointer` field via :meth:`index_data`.
- **Read** from a :class:`Provider` the necessary bytes for the :attr:`data`
object referenced by the `Pointer` field via :meth:`read_from`.
- **Write** to a :class:`Provider` the necessary bytes for the
:attr:`data` object referenced by the `Pointer` field
via :meth:`write_to`.
- Get the accumulated **size** of all fields in the :attr:`data` object
referenced by the `Pointer` field via :attr:`data_size`.
- Indexes the `Pointer` field and each :class:`Field` in the :attr:`data`
object referenced by the `Pointer` field via :meth:`index_fields`.
- View the selected *attributes* of the `Pointer` field and for each
:class:`Field` in the :attr:`data` object referenced by the `Pointer`
field via :meth:`view_fields`.
- List the **path** to the field and the field **item** for the `Pointer`
field and for each :class:`Field` in the :attr:`data` object referenced by
the `Pointer` field as a flatted list via :meth:`field_items`.
- Get the **metadata** of the `Pointer` field via :meth:`describe`.
:param template: template for the :attr:`data` object
referenced by the `Pointer` field.
:type template: Structure|Sequence|Field|None
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `Pointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = Pointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.initialize_fields({'value': 0x8000})
>>> pointer.value
'0x8000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data()
b''
>>> pointer.deserialize_data()
Index(byte=0, bit=0, address=4294967295, base_address=4294967295, update=False)
>>> pointer.serialize_data()
b''
>>> pointer.describe()
{'address': 0,
'alignment': [4, 0],
'class': 'Pointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'Pointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff'}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': None}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": null}'
>>> pointer.field_items()
[('field',
Pointer(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list()
[('Pointer.field', '0xffffffff')]
>>> pointer.to_dict()
{'Pointer': {'field': '0xffffffff'}}
"""
# Item type of a Pointer field.
item_type: ItemClass = ItemClass.Pointer
def __init__(self,
template: Structure | Sequence | Field | None = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(bit_size=bit_size,
align_to=align_to,
byte_order=field_order)
# Field value
if address:
self.value = address
# Data object
self._data = self.data = template
# Data object's bytestream
self._data_stream: bytes = bytes()
# Data object's byte order
self._data_byte_order = self.data_byte_order = data_order
@property
def address(self) -> int:
""" Returns the *data source* address of the :attr:`data` object
referenced by the `Pointer` field (read-only).
"""
return self._value
@property
def base_address(self) -> int:
""" Returns the *data source* base address of the :attr:`data` object
referenced by the `Pointer` field (read-only).
"""
return self._value
@property
def bytestream(self) -> str:
""" Byte stream of the `Pointer` field for the referenced :attr:`data`
object. Returned as a lowercase hexadecimal encoded string.
"""
return self._data_stream.hex()
@bytestream.setter
def bytestream(self,
value: bytes | bytearray | str) -> None:
if isinstance(value, str):
self._data_stream = bytes.fromhex(value)
elif isinstance(value, (bytearray, bytes)):
self._data_stream = bytes(value)
else:
raise FieldTypeError(self, self.index, value)
@property
def data(self) -> Structure | Sequence | Field | None:
""" `Data` object referenced by the `Pointer` field."""
return self._data
@data.setter
def data(self, value: Structure | Sequence | Field | None) -> None:
if value is None:
self._data = None
elif is_any(value):
self._data = value
else:
raise MemberTypeError(self, value, 'data')
@property
def data_byte_order(self) -> Byteorder:
""" :class:`Byteorder` used to deserialize and serialize the :attr:`data`
object referenced by the `Pointer` field.
"""
return self._data_byte_order
@data_byte_order.setter
def data_byte_order(self,
value: (Literal['little', 'big'] |
Byteorder | str)) -> None:
byte_order = value
if isinstance(value, str):
byte_order = Byteorder.get_member(value)
if not byte_order:
raise ByteOrderValueError(self, self.index, value)
if not isinstance(byte_order, Byteorder):
raise ByteOrderTypeError(self, value)
if byte_order not in (Byteorder.big, Byteorder.little):
raise FieldByteOrderError(self, self.index, byte_order.value)
self._data_byte_order = byte_order
@property
def data_size(self) -> int:
""" Returns the size of the :attr:`data` object in bytes (read-only)."""
# Container
if is_container(self._data):
byte_length, bit_length = self._data.container_size()
return byte_length + math.ceil(bit_length / 8)
# Field
elif is_field(self._data):
return math.ceil(self._data.bit_size / 8)
else:
return 0
@property
def value(self) -> str:
""" Field value as a lowercase hexadecimal string prefixed with ``0x``."""
return hex(self._value)
@value.setter
def value(self, x: int | str) -> None:
self._value = self.to_decimal(x)
@staticmethod
def is_pointer() -> bool:
""" Returns :data:`True`."""
return True
def is_null(self) -> bool:
""" Returns :data:`True` if the `Pointer` field points to zero."""
return self._value == 0
def deserialize_data(self,
buffer: bytes = bytes(),
byte_order: (Literal['big', 'little'] |
Byteorder | None) = None) -> Index:
""" De-serializes the :attr:`data` object referenced by the `Pointer`
field from the byte *buffer* by mapping the bytes to the
:attr:`~Field.value` for each :class:`Field` in the :attr:`data` object
in accordance with the decoding *byte order* for the de-serialization
and the decoding :attr:`byte_order` of each :class:`Field` in the
:attr:`data` object.
A specific decoding :attr:`byte_order` of a :class:`Field` in
the :attr:`data` object overrules the decoding *byte order* for the
de-serialization.
Returns the :class:`Index` of the *buffer* after the last de-serialized
:class:`Field` in the :attr:`data` object.
:param bytes buffer: byte stream. Default is the internal
:attr:`bytestream` of the `Pointer` field.
:keyword byte_order: decoding byte order for the de-serialization.
Default is the :attr:`data_byte_order` of the `Pointer` field.
:type byte_order: Byteorder|Literal['big', 'little']
"""
index = Index(0, 0, self.address, self.base_address, False)
if self._data:
if byte_order not in ('big', 'little',
Byteorder.big, Byteorder.little):
byte_order = self.data_byte_order
index = self._data.deserialize(buffer or self._data_stream,
index,
nested=False,
byte_order=byte_order)
return index
def serialize_data(self,
byte_order: (Literal['big', 'little'] |
Byteorder | None) = None) -> bytes:
""" Serializes the :attr:`data` object referenced by the `Pointer` field
to bytes by mapping the :attr:`~Field.value` for each :class:`Field`
in the :attr:`data` object to a number of bytes in accordance with the
encoding *byte order* for the serialization and the encoding
:attr:`byte_order` of each :class:`Field` in the :attr:`data` object.
A specific encoding :attr:`~Field.byte_order` of a :class:`Field` in
the :attr:`data` object overrules the encoding *byte order* for the
serialization.
Returns a number of bytes for the serialized :attr:`data` object
referenced by the `Pointer` field.
:keyword byte_order: encoding byte order for the serialization.
Default is the :attr:`data_byte_order` of the `Pointer` field.
:type byte_order: Byteorder|Literal['big', 'little']
"""
if self._data is None:
return bytes()
if byte_order not in ('big', 'little',
Byteorder.big, Byteorder.little):
byte_order = self.data_byte_order
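# Serialize the data object into a new buffer starting at its data
# source address.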
buffer = bytearray()
self._data.serialize(buffer,
Index(0, 0,
self.address, self.base_address,
False),
byte_order=byte_order)
return bytes(buffer)
def index_data(self) -> None:
""" Indexes each :class:`Field` in the :attr:`data` object referenced
by the `Pointer` field.
"""
# Start index for the Data Object
index = Index(0, 0, self.address, self.base_address, False)
# Container
if is_container(self._data):
self._data.index_fields(index, nested=True)
# Pointer
elif is_pointer(self._data):
self._data.index_field(index)
self._data.index_data()
# Field
elif is_field(self._data):
self._data.index_field(index)
@nested_option(True)
def read_from(self,
provider: Provider,
null_allowed: bool = False,
**options: Any) -> None:
""" Reads from the data :class:`Provider` the necessary number of bytes
for the :attr:`data` object referenced by the `Pointer` field.
A `Pointer` field stores the binary data read from the data
:class:`Provider` in its :attr:`bytestream`.
:param Provider provider: data :class:`Provider`.
:param bool null_allowed: if :data:`True` read access of address zero
(Null) is allowed.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`data` object of the `Pointer` field reads their referenced
:attr:`~Pointer.data` object fields as well (chained method call).
Each `Pointer` field stores the bytes for its referenced
:attr:`data` object in its :attr:`bytestream`.
"""
if self._data is None:
pass
elif is_provider(provider):
if self._value < 0:
pass
elif null_allowed or self._value > 0:
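# Re-read from the provider until the de-serialized data object no
# longer requests an update of its byte stream.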
while True:
self.bytestream = provider.read(self.address,
self.data_size)
index = self.deserialize_data()
# Incomplete data object
if index.bit != 0:
length = index.byte, index.bit
raise ContainerLengthError(self, length)
if not index.update:
break
if is_mixin(self._data) and get_nested(options):
self._data.read_from(provider, **options)
else:
self.bytestream = bytes()
self.deserialize_data()
else:
raise ProviderTypeError(self, provider)
def patch(self,
item: Structure | Sequence | Field,
byte_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER) -> Patch | None:
""" Returns a memory :class:`Patch` for the given *item* that shall be
patched in the `data source`.
:param item: item to patch.
:type item: Structure|Sequence|Field
:param byte_order: encoding :class:`Byteorder` for the item.
:type byte_order: Byteorder|Literal['big', 'little']
"""
# Re-index the data object
self.index_data()
if is_container(item):
length = item.container_size()
if length[1] != 0:
# Incomplete container
raise ContainerLengthError(item, length)
field = item.first_field()
if field is None:
# Empty container?
return None
index = field.index
if index.bit != 0:
# Bad placed container
raise FieldIndexError(field, index)
# Create a dummy byte array filled with zero bytes.
# The dummy byte array is necessary because the length of
# the buffer must correlate to the field indexes of the
# appending fields.
buffer = bytearray(b'\x00' * index.byte)
# Append to the buffer the content mapped by the container fields
item.serialize(buffer, index, byte_order=byte_order)
# Content of the buffer mapped by the container fields
content = buffer[index.byte:]
if len(content) != length[0]:
# Incorrectly filled buffer!
raise BufferError(len(content), length[0])
return Patch(content,
index.address,
byte_order,
length[0] * 8,
0,
False)
elif is_field(item):
# Field index
index = item.index
# Field alignment
alignment = item.alignment
if index.bit != alignment.bit_offset:
# Bad aligned field?
raise FieldGroupOffsetError(
item, index, Alignment(alignment.byte_size, index.bit))
# Create a dummy byte array filled with zero bytes.
# The dummy byte array is necessary because the length of
# the buffer must correlate to the field index of the
# appending field group.
buffer = bytearray(b'\x00' * index.byte)
# Append to the buffer the content mapped by the field
item.serialize(buffer, index, byte_order=byte_order)
# Content of the buffer mapped by the field group
content = buffer[index.byte:]
if len(content) != alignment.byte_size:
# Incorrectly filled buffer!
raise BufferError(len(content), alignment.byte_size)
# Patch size in bytes for the field in the content buffer
patch_size, bit_offset = divmod(item.bit_size, 8)
if bit_offset != 0:
inject = True
patch_size += 1
else:
inject = False
# Patch offset in bytes for the field in the content buffer
patch_offset, bit_offset = divmod(alignment.bit_offset, 8)
if bit_offset != 0:
inject = True
if byte_order is Byteorder.big:
start = alignment.byte_size - (patch_offset + patch_size)
stop = alignment.byte_size - patch_offset
else:
start = patch_offset
stop = patch_offset + patch_size
return Patch(content[start:stop],
index.address + start,
byte_order,
item.bit_size,
bit_offset,
inject)
else:
raise MemberTypeError(self, item)
def write_to(self,
provider: Provider,
item: Structure | Sequence | Field,
byte_order: Byteorder = BYTEORDER) -> None:
""" Writes via a data :class:`Provider` the :class:`Field` values of
the given *item* to the `data source`.
:param Provider provider: data :class:`Provider`.
:param item: item to write.
:type item: Structure|Sequence|Field
:param Byteorder byte_order: encoding byte order of the *item*
to write.
"""
# Create memory patch for the item to write
patch = self.patch(item, byte_order)
if patch is None:
pass
elif is_provider(provider):
if patch.inject:
# Unpatched content of the memory area in the data source to modify
content = provider.read(patch.address, len(patch.buffer))
# Decimal value of the memory area to patch
value = int.from_bytes(content, byte_order.value)
# Inject memory patch content
bit_mask = ~((2 ** patch.bit_size - 1) << patch.bit_offset)
bit_mask &= (2 ** (len(patch.buffer) * 8) - 1)
value &= bit_mask
value |= int.from_bytes(patch.buffer, byte_order.value)
# Patched content for the memory area in the data source
buffer = value.to_bytes(len(patch.buffer), byte_order.value)
provider.write(buffer, patch.address, len(buffer))
else:
provider.write(patch.buffer, patch.address, len(patch.buffer))
else:
raise ProviderTypeError(self, provider)
@byte_order_option()
@nested_option()
def deserialize(self,
buffer: bytes = bytes(),
index: Index = Index(),
**options: Any) -> Index:
""" De-serializes the `Pointer` field from the byte *buffer* starting
at the beginning of the *buffer* or with the given *index* by mapping
the bytes to the :attr:`value` of the `Pointer` field in accordance with
the decoding *byte order* for the de-serialization and the decoding
:attr:`byte_order` of the `Pointer` field.
The specific decoding :attr:`byte_order` of the `Pointer` field
overrules the decoding *byte order* for the de-serialization.
Returns the :class:`Index` of the *buffer* after the `Pointer` field.
Optionally, the de-serialization of the referenced :attr:`data` object of
the `Pointer` field can be enabled.
:param bytes buffer: byte stream to de-serialize from.
:param Index index: current read :class:`Index` within the *buffer* to
de-serialize.
:keyword byte_order: decoding byte order for the de-serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` a `Pointer` field de-serialize its
referenced :attr:`data` object as well (chained method call).
Each :class:`Pointer` field uses for the de-serialization of its
referenced :attr:`data` object its own :attr:`bytestream`.
"""
# Field
index = super().deserialize(buffer, index, **options)
# Data Object
if self._data and get_nested(options):
options[str(Option.byte_order.value)] = self.data_byte_order
self._data.deserialize(self._data_stream,
Index(0, 0,
self.address, self.base_address,
False),
**options)
return index
@byte_order_option()
@nested_option()
def serialize(self,
buffer: bytearray = bytearray(),
index: Index = Index(),
**options: Any) -> Index:
""" Serializes the `Pointer` field to the byte *buffer* starting at the
beginning of the *buffer* or with the given *index* by mapping the
:attr:`value` of the `Pointer` field to the byte *buffer* in accordance
with the encoding *byte order* for the serialization and the encoding
:attr:`byte_order` of the `Pointer` field.
The specific encoding :attr:`byte_order` of the `Pointer` field
overrules the encoding *byte order* for the serialization.
Returns the :class:`Index` of the *buffer* after the `Pointer` field.
Optionally, the serialization of the referenced :attr:`data` object of the
`Pointer` field can be enabled.
:param bytearray buffer: byte stream to serialize to.
:param Index index: current write :class:`Index` within the *buffer*.
:keyword byte_order: encoding byte order for the serialization.
:type byte_order: Byteorder|Literal['auto', 'big', 'little']
:keyword bool nested: if :data:`True` a `Pointer` field serializes its
referenced :attr:`data` object as well (chained method call).
Each :class:`Pointer` field uses for the serialization of its
referenced :attr:`data` object its own :attr:`bytestream`.
"""
# Field
index = super().serialize(buffer, index, **options)
# Data Object
if self._data and get_nested(options):
options[str(Option.byte_order.value)] = self.data_byte_order
self._data_stream = bytearray()
self._data.serialize(self._data_stream,
Index(0, 0,
self.address, self.base_address,
False),
**options)
self._data_stream = bytes(self._data_stream)
return index
def initialize_fields(self,
content: dict[str, Any]) -> None:
""" Initializes the `Pointer` field itself and the :class:`Field` items
in the :attr:`data` object referenced by the `Pointer` field with the
*values* in the *content* dictionary.
The ``['value']`` key in the *content* dictionary refers to the `Pointer`
field itself and the ``['data']`` key refers to the :attr:`data` object
referenced by the `Pointer` field.
        :param dict[str, Any] content: a dictionary which contains the
:class:`~Field.value` for the `Pointer` field and the
:class:`~Field.value` for each :class:`Field` in the
:attr:`data` object referenced by the `Pointer` field.
"""
for name, value in content.items():
if name == 'value':
self.value = value
elif name == 'data':
# Container or Pointer
if is_mixin(self._data):
self._data.initialize_fields(value)
# Field
elif is_field(self._data):
self._data.value = value
@nested_option()
def index_fields(self,
index: Index = Index(),
**options: Any) -> Index:
""" Indexes the `Pointer` field and the :attr:`data` object referenced
by the `Pointer` field starting with the given *index* and returns the
:class:`Index` after the `Pointer` field.
:param Index index: :class:`Index` for the `Pointer` field.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`data` object referenced by the `Pointer` field indexes their
referenced :attr:`~Pointer.data` object fields as well
(chained method call).
"""
index = self.index_field(index)
# Container
if is_container(self._data):
self._data.index_fields(Index(0, 0,
self.address, self.base_address,
False),
**options)
# Pointer
elif is_pointer(self._data) and get_nested(options):
self._data.index_fields(Index(0, 0,
self.address, self.base_address,
False),
**options)
# Field
elif is_field(self._data):
self._data.index_field(Index(0, 0,
self.address, self.base_address,
False))
return index
@nested_option()
def view_fields(self,
*attributes: str,
**options: Any) -> dict[str, Any]:
""" Returns a :class:`dict` which contains the selected field
*attributes* of the `Pointer` field itself extended with a ``['data']``
key which contains the selected field *attribute* or the dictionaries of
the selected field *attributes* for each :class:`Field` *nested* in the
:attr:`data` object referenced by the `Pointer` field.
The *attributes* of each :class:`Field` for containers *nested* in the
:attr:`data` object referenced by the `Pointer` field are viewed as well
(chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple[str, ...] fieldnames: sequence of dictionary keys for the
selected field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`data` object referenced by the `Pointer` field views their
referenced :attr:`~Pointer.data` object field attributes as well
(chained method call).
"""
items = dict()
# Pointer field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
for key, value in zip(attributes, field_getter(self)):
items[key] = value
else:
items['value'] = field_getter(self)
# Data object
if is_container(self._data):
# Container
items['data'] = self._data.view_fields(*attributes, **options)
elif is_pointer(self._data) and get_nested(options):
# Pointer
items['data'] = self._data.view_fields(*attributes, **options)
elif is_field(self._data):
# Field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
fieldnames = options.get('fieldnames', attributes)
items['data'] = dict(zip(fieldnames, field_getter(self._data)))
else:
items['data'] = field_getter(self._data)
else:
# None
items['data'] = self._data
return items
@nested_option()
def field_items(self,
path: str = str(),
**options: Any) -> list[tuple[str, Field]]:
""" Returns a **flatten** list of ``('field path', field item)`` tuples
for the `Pointer` field itself and for each :class:`Field` *nested* in the
:attr:`data` object referenced by the `Pointer` field.
:param str path: path of the `Pointer` field.
:keyword bool nested: if :data:`True` all :class:`Pointer` fields in the
:attr:`data` object referenced by the `Pointer` field lists their
referenced :attr:`~Pointer.data` object field items as well
(chained method call).
"""
items = list()
# Field
items.append((path if path else 'field', self))
# Data Object
data_path = f"{path}.data" if path else 'data'
# Container
if is_container(self._data):
for field_item in self._data.field_items(data_path, **options):
items.append(field_item)
# Pointer
elif is_pointer(self._data) and get_nested(options):
for field_item in self._data.field_items(data_path, **options):
items.append(field_item)
# Field
elif is_field(self._data):
items.append((data_path, self._data))
return items
@nested_option(True)
def describe(self,
name: str = str(),
**options: Any) -> dict[str, Any]:
""" Returns the **metadata** of a `Pointer` as a :class:`dict`.
.. code-block:: python
metadata = {
'address': self.index.address,
'alignment': [self.alignment.byte_size, self.alignment.bit_offset],
'class': self.__class__.__name__,
'index': [self.index.byte, self.index.bit],
'max': self.max(),
'min': self.min(),
'name': name if name else self.__class__.__name__,
'order': self.byte_order.value,
'size': self.bit_size,
'type': Pointer.item_type.name,
'value': self.value,
'member': [self.data.describe()]
}
:param str name: optional name for the `Pointer` field.
Fallback is the class name.
:keyword bool nested: if :data:`True` a :class:`Pointer` field lists its
referenced :attr:`data` object fields as well (chained method call).
Default is :data:`True`.
"""
metadata = super().describe(name, **options)
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['type'] = Pointer.item_type.name
if is_any(self._data) and get_nested(options):
metadata['member'] = list()
metadata['member'].append(self._data.describe('data', **options))
return metadata
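# Minimal usage sketch for the chained (de)serialization implemented above.
# It is illustrative only: the ``Byte`` field and ``Structure`` container are
# assumed to be the ones defined earlier in this module, and the byte values
# are made up for the example.
def _pointer_usage_sketch() -> None:  # pragma: no cover
    record = Structure()
    record.id = Byte()
    record.flag = Byte()
    pointer = Pointer(record, address=0x10)
    # The pointer field reads its own value from the buffer; with the nested
    # option enabled the referenced data object is read from the pointer's
    # own bytestream.
    pointer.bytestream = bytes.fromhex('0102')
    pointer.deserialize(bytes.fromhex('10000000'), nested=True)
    # View the pointer value together with the referenced data object.
    pointer.view_fields()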
class StructurePointer(Pointer):
""" The :class:`StructurePointer` field is a :class:`Pointer` which refers
to a :class:`Structure`.
:param template: template for the :attr:`data` object
referenced by the `Pointer` field.
The *template* must be a :class:`Structure` instance.
:type template: Structure|None
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `Pointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StructurePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
{}
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [name for name in pointer.keys()]
[]
>>> [member.value for member in pointer.values()]
[]
>>> [(name, member.value) for name, member in pointer.items()]
[]
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StructurePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StructurePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Structure',
'name': 'data',
'size': 0,
'type': 'Structure',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': {}}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": {}}'
>>> pointer.field_items()
[('field',
StructurePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('StructurePointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'StructurePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
template: Structure | None = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
if template is None:
template = Structure()
elif not is_structure(template):
raise MemberTypeError(self, template)
super().__init__(template=template,
address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
def __contains__(self, key: str) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key: str) -> Structure | Sequence | Field:
return self._data[key]
def __iter__(self) -> Iterator[Structure | Sequence | Field]:
return iter(self._data)
def __getattr__(self, attr: str) -> Any:
return self._data[attr]
def items(self) -> ItemsView[str, Structure | Sequence | Field]:
return self._data.items()
def keys(self) -> KeysView[str]:
return self._data.keys()
def values(self) -> ValuesView[Structure | Sequence | Field]:
return self._data.values()
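# Illustrative sketch (assumes the ``Byte`` field defined in this module): a
# StructurePointer forwards the mapping protocol to its referenced Structure,
# so members are reachable by key and by attribute.
def _structure_pointer_sketch() -> None:  # pragma: no cover
    header = Structure()
    header.kind = Byte()
    pointer = StructurePointer(header, address=0x200)
    pointer['kind'].value = 0x7f   # item access forwards to the Structure
    pointer.kind.value             # attribute access does as well -> '0x7f'
    list(pointer.keys())           # -> ['kind']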
class SequencePointer(Pointer):
""" The :class:`SequencePointer` field is a :class:`Pointer` field which
refers to a :class:`Sequence`.
A `SequencePointer` field is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the referenced :class:`Sequence`.
- *sized*: ``len(self)`` returns the number of items in the referenced
:class:`Sequence`.
    - *indexable*: ``self[index]`` returns the *item* at the *index* of the
      referenced :class:`Sequence`.
    - *iterable*: ``iter(self)`` iterates over the *items* of the referenced
      :class:`Sequence`.
A `SequencePointer` field supports the usual methods for sequences:
- **Append** an item to the referenced :class:`Sequence`
via :meth:`append()`.
- **Insert** an item before the *index* into the referenced :class:`Sequence`
via :meth:`insert()`.
- **Extend** the referenced :class:`Sequence` with items
via :meth:`extend()`.
- **Clear** the referenced :class:`Sequence` via :meth:`clear()`.
- **Pop** an item with the *index* from the referenced :class:`Sequence`
via :meth:`pop()`.
- **Remove** the first occurrence of an *item* from the referenced
:class:`Sequence` via :meth:`remove()`.
- **Reverse** all items in the referenced :class:`Sequence`
via :meth:`reverse()`.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the :class:`Sequence`.
:type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field|None
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `Pointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = SequencePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
[]
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer).hex()
'00000000'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [item for item in pointer]
[]
>>> pointer[:]
[]
>>> pointer.append(Field())
>>> pointer[0]
Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)
>>> len(pointer)
1
>>> pointer.pop()
Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)
>>> pointer.insert(0, Field())
>>> pointer.data
[Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)]
>>> pointer.remove(pointer[0])
>>> pointer.data
[]
>>> pointer.clear()
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'SequencePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'SequencePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Sequence',
'name': 'data',
'size': 0,
'type': 'Sequence',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': []}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": []}'
>>> pointer.field_items()
[('field',
SequencePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('SequencePointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'SequencePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field | None) = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(template=Sequence(iterable),
address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
def __contains__(self,
key: Structure | Sequence | Field) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self,
index: int | slice) -> Structure | Sequence | Field | list:
return self._data[index]
def __setitem__(self,
index: int,
item: Structure | Sequence | Field) -> None:
self._data[index] = item
def __delitem__(self,
index: int) -> None:
del self._data[index]
def append(self, item: Structure | Sequence | Field) -> None:
""" Appends the *item* to the end of the :class:`Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.append(item)
def insert(self,
index: int,
item: Structure | Sequence | Field) -> None:
""" Inserts the *item* before the *index* into the :class:`Sequence`.
:param int index: :class:`Sequence` index.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.insert(index, item)
def pop(self,
index: int = -1) -> Structure | Sequence | Field:
""" Removes and returns the item at the *index* from the
:class:`Sequence`.
:param int index: :class:`Sequence` index.
"""
return self._data.pop(index)
def clear(self) -> None:
""" Remove all items from the :class:`Sequence`."""
self._data.clear()
def remove(self,
item: Structure | Sequence | Field) -> None:
""" Removes the first occurrence of an *item* from the :class:`Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.remove(item)
def reverse(self) -> None:
""" In place reversing of the :class:`Sequence` items."""
self._data.reverse()
def extend(self, iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field)) -> None:
""" Extends the :class:`Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the :class:`Sequence`.
:type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field
"""
self._data.extend(iterable)
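# Illustrative sketch (assumes the ``Byte`` field defined in this module): a
# SequencePointer behaves like a mutable sequence over its referenced
# Sequence, so the usual list operations act on the data object.
def _sequence_pointer_sketch() -> None:  # pragma: no cover
    pointer = SequencePointer([Byte(), Byte()], address=0x300)
    pointer.append(Byte())   # three items in the referenced Sequence now
    pointer.insert(0, Byte())
    pointer.pop()            # removes and returns the last item
    len(pointer)             # -> 3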
class ArrayPointer(SequencePointer):
""" The :class:`ArrayPointer` field is a :class:`SequencePointer` field
    which refers to an :class:`Array`.
An `ArrayPointer` field adapts and extends a :class:`SequencePointer`
field with the following features:
- **Append** a new :class:`Array` element to the referenced :class:`Array`
via :meth:`append()`.
- **Insert** a new :class:`Array` element before the *index*
into the referenced :class:`Array` via :meth:`insert()`.
- **Re-size** the referenced :class:`Array` via :meth:`resize()`.
:param template: template for the :class:`Array` element.
The *template* can be any :class:`Field` instance or any *callable*
that returns a :class:`Structure`, :class:`Sequence`, :class:`Array`
or any :class:`Field` instance.
:type template: Callable|Structure|Sequence|Field
:param int capacity: is the capacity of the :class:`Array` in number of
:class:`Array` elements.
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `Pointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = ArrayPointer(Byte)
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
[]
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [item for item in pointer]
[]
>>> pointer[:]
[]
>>> pointer.append()
>>> pointer[0]
Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')
>>> len(pointer)
1
>>> pointer.pop()
Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')
>>> pointer.insert(0)
>>> pointer.data
[Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')]
>>> pointer.remove(pointer[0])
>>> pointer.data
[]
>>> pointer.resize(10)
>>> len(pointer)
10
>>> pointer.clear()
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'ArrayPointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'ArrayPointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Array',
'name': 'data',
'size': 0,
'type': 'Array',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': []}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": []}'
>>> pointer.field_items()
[('field',
ArrayPointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('ArrayPointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'ArrayPointer': {'field': '0xffffffff'}}
"""
def __init__(self,
template: Callable | Structure | Sequence | Field,
capacity: int = 0,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
self._data: Array = Array(template, capacity)
def append(self) -> None:
""" Appends a new :class:`Array` element to the :class:`Array`."""
self._data.append()
def insert(self, index: int) -> None:
""" Inserts a new :class:`Array` element before the *index*
of the :class:`Array`.
:param int index: :class:`Array` index.
"""
self._data.insert(index)
def resize(self, capacity: int) -> None:
""" Re-sizes the :class:`Array` by appending new :class:`Array` elements
or removing :class:`Array` elements from the end.
:param int capacity: new capacity of the :class:`Array` in number of
:class:`Array` elements.
"""
if isinstance(self._data, Array):
self._data.resize(capacity)
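# Illustrative sketch (assumes the ``Byte`` field defined in this module): an
# ArrayPointer creates new elements from its template, so append() and
# insert() take no item and resize() grows or shrinks the referenced Array.
def _array_pointer_sketch() -> None:  # pragma: no cover
    pointer = ArrayPointer(Byte, capacity=4, address=0x400)
    pointer.resize(8)   # referenced Array now holds eight Byte elements
    pointer.append()    # one more element created from the template
    len(pointer)        # -> 9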
class StreamPointer(Pointer):
""" The :class:`StreamPointer` field is a :class:`Pointer` field which
refers to a :class:`Stream` field.
A `StreamPointer` field is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the referenced :class:`Stream` field.
- *sized*: ``len(self)`` returns the length of the referenced
:class:`Stream` field.
    - *indexable*: ``self[index]`` returns the *byte* at the *index*
      of the referenced :class:`Stream` field.
    - *iterable*: ``iter(self)`` iterates over the bytes of the referenced
      :class:`Stream` field.
:param int capacity: is the *capacity* of the :class:`Stream` field in bytes.
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StreamPointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
Stream(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value='')
>>> pointer.data_size
0
>>> len(pointer)
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.resize(10)
>>> pointer.data_size
10
>>> len(pointer)
10
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data().hex()
'00000000000000000000'
>>> pointer.deserialize_data()
Index(byte=10, bit=0, address=4294967305, base_address=4294967295, update=False)
>>> pointer.serialize_data()
b'KonFoo is '
>>> [byte for byte in pointer] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [hex(byte) for byte in pointer]
['0x4b', '0x6f', '0x6e', '0x46', '0x6f', '0x6f', '0x20', '0x69', '0x73', '0x20']
>>> pointer[5] # converts to int
111
>>> 111 in pointer
True
>>> 0x0 in pointer
False
>>> b'KonFoo' in pointer
True
>>> pointer[:6] # converts to bytes
b'KonFoo'
>>> pointer[3:6] # converts to bytes
b'Foo'
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StreamPointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StreamPointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'address': 4294967295,
'alignment': [10, 0],
'class': 'Stream10',
'index': [0, 0],
'name': 'data',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': '4b6f6e466f6f20697320'}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': '4b6f6e466f6f20697320'}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": "4b6f6e466f6f20697320"}'
>>> pointer.field_items()
[('field',
StreamPointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff')),
('data',
Stream(index=Index(byte=0, bit=0,
address=4294967295, base_address=4294967295,
update=False),
alignment=Alignment(byte_size=10, bit_offset=0),
bit_size=80,
value='4b6f6e466f6f20697320'))]
>>> pointer.to_list()
[('StreamPointer.field', '0xffffffff'),
('StreamPointer.data', '4b6f6e466f6f20697320')]
>>> pointer.to_dict()
{'StreamPointer': {'field': '0xffffffff', 'data': '4b6f6e466f6f20697320'}}
"""
def __init__(self,
capacity: int = 0,
address: int | None = None,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(template=None,
address=address,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
self._data: Stream = Stream(capacity)
def __contains__(self, key: int | bytes) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, index: int | slice) -> int | bytes:
return self._data[index]
def __iter__(self) -> Iterator[int]:
return iter(self._data)
def resize(self, capacity: int) -> None:
""" Re-sizes the :class:`Stream` field by appending zero bytes or
removing bytes from the end.
:param int capacity: :class:`Stream` capacity in number of bytes.
"""
if isinstance(self._data, Stream):
self._data.resize(capacity)
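# Illustrative sketch: a StreamPointer maps its bytestream onto the referenced
# Stream field and exposes the bytes through indexing and slicing. The byte
# values are made up for the example.
def _stream_pointer_sketch() -> None:  # pragma: no cover
    pointer = StreamPointer(capacity=4, address=0x500)
    pointer.bytestream = b'\xde\xad\xbe\xef'
    pointer.deserialize_data()   # maps the bytestream onto the Stream field
    pointer[0]                   # -> 222 (single index converts to int)
    pointer[:2]                  # -> b'\xde\xad' (slicing converts to bytes)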
class StringPointer(StreamPointer):
""" The :class:`StringPointer` field is a :class:`StreamPointer` field
which refers to a :class:`String` field.
:param int capacity: is the *capacity* of the :class:`String` field in bytes.
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StringPointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
String(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value='')
>>> pointer.data_size
0
>>> len(pointer)
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.resize(10)
>>> pointer.data_size
10
>>> len(pointer)
10
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data().hex()
'00000000000000000000'
>>> pointer.deserialize_data()
Index(byte=10, bit=0, address=4294967305, base_address=4294967295, update=False)
>>> pointer.serialize_data()
b'KonFoo is '
>>> [byte for byte in pointer] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [chr(byte) for byte in pointer] # converts to int
['K', 'o', 'n', 'F', 'o', 'o', ' ', 'i', 's', ' ']
>>> chr(pointer[5]) # converts to int -> chr
'o'
>>> ord(' ') in pointer
True
>>> 0x0 in pointer
False
>>> pointer[:6] # converts to bytes
b'KonFoo'
>>> pointer[3:6] # converts to bytes
b'Foo'
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StringPointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StringPointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'address': 4294967295,
'alignment': [10, 0],
'class': 'String10',
'index': [0, 0],
'name': 'data',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': 'KonFoo is '}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': 'KonFoo is '}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": "KonFoo is "}'
>>> pointer.field_items()
[('field',
StringPointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff')),
('data',
String(index=Index(byte=0, bit=0,
address=4294967295, base_address=4294967295,
update=False),
alignment=Alignment(byte_size=10, bit_offset=0),
bit_size=80,
value='KonFoo is '))]
>>> pointer.to_list()
[('StringPointer.field', '0xffffffff'),
('StringPointer.data', 'KonFoo is ')]
>>> pointer.to_dict()
{'StringPointer': {'field': '0xffffffff', 'data': 'KonFoo is '}}
"""
def __init__(self,
capacity: int = 0,
address: int | None = None,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(capacity=0,
address=address,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
self._data: String = String(capacity)
class AutoStringPointer(StringPointer):
""" The :class:`AutoStringPointer` field is a :class:`StringPointer` field
which refers to an auto-sized :class:`String` field.
:param int|None address: absolute address of the :attr:`data` object
referenced by the `Pointer` field.
:param int bit_size: is the *size* of the `Pointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `Pointer` field to the number of bytes,
can be between ``1`` and ``8``.
If no field *alignment* is set the `Pointer` field aligns itself
to the next matching byte size according to the *size* of the
`Pointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `Pointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = AutoStringPointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
String(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=64, bit_offset=0),
bit_size=512,
value='')
>>> pointer.data_size
64
>>> len(pointer)
64
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.resize(10)
>>> pointer.data_size
10
>>> len(pointer)
10
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data().hex()
'00000000000000000000'
>>> pointer.deserialize_data()
Index(byte=10, bit=0, address=4294967305, base_address=4294967295, update=False)
>>> pointer.serialize_data()
b'KonFoo is '
>>> [byte for byte in pointer] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [chr(byte) for byte in pointer] # converts to int
['K', 'o', 'n', 'F', 'o', 'o', ' ', 'i', 's', ' ']
>>> chr(pointer[5]) # converts to int -> chr
'o'
>>> ord(' ') in pointer
True
>>> 0x0 in pointer
False
>>> pointer[:6] # converts to bytes
b'KonFoo'
>>> pointer[3:6] # converts to bytes
b'Foo'
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'AutoStringPointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'AutoStringPointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'address': 4294967295,
'alignment': [10, 0],
'class': 'String10',
'index': [0, 0],
'name': 'data',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': 'KonFoo is '}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': 'KonFoo is '}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": "KonFoo is "}'
>>> pointer.field_items()
[('field',
AutoStringPointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff')),
('data',
String(index=Index(byte=0, bit=0,
address=4294967295, base_address=4294967295,
update=False),
alignment=Alignment(byte_size=10, bit_offset=0),
bit_size=80,
value='KonFoo is '))]
>>> pointer.to_list()
[('AutoStringPointer.field', '0xffffffff'),
('AutoStringPointer.data', 'KonFoo is ')]
>>> pointer.to_dict()
{'AutoStringPointer': {'field': '0xffffffff', 'data': 'KonFoo is '}}
"""
#: Block size in *bytes* to read for the :class:`String` field.
BLOCK_SIZE = 64
#: Maximal allowed address of the :class:`String` field.
MAX_ADDRESS = 0xffffffff
def __init__(self,
address: int | None = None,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(capacity=AutoStringPointer.BLOCK_SIZE,
address=address,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
@nested_option(True)
def read_from(self,
provider: Provider,
null_allowed: bool = False,
**options: Any) -> None:
if self._data is None:
pass
elif is_provider(provider):
if self._value < 0:
pass
elif null_allowed or self._value > 0:
self._data_stream = bytes()
self.resize(0)
for address in range(self.address,
self.MAX_ADDRESS,
self.BLOCK_SIZE):
count = clamp(self.BLOCK_SIZE,
0,
(self.MAX_ADDRESS - address))
self._data_stream += provider.read(address, count)
self.resize(len(self) + count)
index = self.deserialize_data()
# Incomplete data object
if index.bit != 0:
length = index.byte, index.bit
raise ContainerLengthError(self, length)
# Terminated?
if self.data.is_terminated():
self.resize(len(self.data.value) + 1)
break
else:
self._data_stream = bytes()
self.resize(0)
self.deserialize_data()
else:
raise ProviderTypeError(self, provider)
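# Sketch of feeding read_from() from an in-memory data source. The
# ``_BytesProviderSketch`` class below is a hypothetical helper written only
# for this example; it merely implements the read/write interface of the
# ``Provider`` base class as it is used throughout this module.
class _BytesProviderSketch(Provider):  # pragma: no cover
    def __init__(self, source: bytes) -> None:
        self._source = bytearray(source)
    def read(self, address: int = 0, count: int = 0) -> bytes:
        # Return *count* bytes of the data source starting at *address*.
        return bytes(self._source[address:address + count])
    def write(self, buffer: bytes = bytes(), address: int = 0, count: int = 0) -> None:
        # Overwrite *count* bytes of the data source starting at *address*.
        self._source[address:address + count] = buffer[:count]
# Intended use (values assumed for illustration):
#   provider = _BytesProviderSketch(bytes(4) + b'KonFoo\x00' + bytes(64))
#   pointer = AutoStringPointer(address=4)
#   pointer.read_from(provider)   # reads BLOCK_SIZE chunks until terminated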
class RelativePointer(Pointer):
""" The :class:`RelativePointer` field is a :class:`Pointer` field which
references its :attr:`data` object relative to a **base address** in the
*data source*.
.. important::
The :attr:`base_address` of a `RelativePointer` is defined by the field
:attr:`~Field.index` of the `RelativePointer` field.
:param template: template for the :attr:`data` object
referenced by the `RelativePointer` field.
:type template: Structure|Sequence|Field|None
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `RelativePointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = RelativePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data()
b''
>>> pointer.deserialize_data()
Index(byte=0, bit=0, address=4294967295, base_address=0, update=False)
>>> pointer.serialize_data()
b''
>>> pointer.describe()
{'address': 0,
'alignment': [4, 0],
'class': 'RelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'RelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff'}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': None}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": null}'
>>> pointer.field_items()
[('field',
RelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list()
[('RelativePointer.field', '0xffffffff')]
>>> pointer.to_dict()
{'RelativePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
template: Structure | Sequence | Field | None = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(template=template,
address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
@property
def address(self) -> int:
""" Returns the *data source* address of the :attr:`data` object
referenced by the `RelativePointer` field (read-only).
"""
return self._value + self.base_address
@property
def base_address(self) -> int:
""" Returns the *data source* base address of the :attr:`data` object
relative referenced by the `RelativePointer` field (read-only).
"""
return self.index.base_address
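    # Relationship sketch (values assumed for illustration): the absolute
    # *data source* address of a RelativePointer is its internal field value
    # plus the base address taken from the field's own index, e.g.
    #   pointer = RelativePointer(address=0x10)
    #   pointer.index_field(Index(0, 0, 0x100, 0x100, False))
    #   pointer.address        # -> 0x110 (value 0x10 + base_address 0x100)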
class StructureRelativePointer(RelativePointer):
""" The :class:`StructureRelativePointer` field is a :class:`RelativePointer`
which refers to a :class:`Structure`.
:param template: template for the :attr:`data` object
referenced by the `RelativePointer` field.
The *template* must be a :class:`Structure` instance.
:type template: Structure|None
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `RelativePointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StructureRelativePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
{}
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [name for name in pointer.keys()]
[]
>>> [member.value for member in pointer.values()]
[]
>>> [(name, member.value) for name, member in pointer.items()]
[]
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StructureRelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StructureRelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Structure',
'name': 'data',
'size': 0,
'type': 'Structure',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': {}}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": {}}'
>>> pointer.field_items()
[('field',
StructureRelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('StructureRelativePointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'StructureRelativePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
template: Structure | None = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
if template is None:
template = Structure()
elif not is_structure(template):
raise MemberTypeError(self, template)
super().__init__(template=template,
address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
def __contains__(self, key: str) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key: str) -> Structure | Sequence | Field:
return self._data[key]
def __iter__(self) -> Iterator[Structure | Sequence | Field]:
return iter(self._data)
def __getattr__(self, attr: str) -> Any:
return self._data[attr]
def items(self) -> ItemsView[str, Structure | Sequence | Field]:
return self._data.items()
def keys(self) -> KeysView[str]:
return self._data.keys()
def values(self) -> ValuesView[Structure | Sequence | Field]:
return self._data.values()
class SequenceRelativePointer(RelativePointer):
""" The :class:`SequenceRelativePointer` field is a :class:`RelativePointer`
which refers to a :class:`Sequence`.
A `SequenceRelativePointer` is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the referenced :class:`Sequence`.
- *sized*: ``len(self)`` returns the number of items in the referenced
:class:`Sequence`.
    - *indexable*: ``self[index]`` returns the *item* at the *index*
      of the referenced :class:`Sequence`.
    - *iterable*: ``iter(self)`` iterates over the *items* of the referenced
      :class:`Sequence`.
A `SequenceRelativePointer` supports the usual methods:
- **Append** an item to the referenced :class:`Sequence`
via :meth:`append()`.
- **Insert** an item before the *index* into the referenced :class:`Sequence`
via :meth:`insert()`.
- **Extend** the referenced :class:`Sequence` with items
via :meth:`extend()`.
- **Clear** the referenced :class:`Sequence` via :meth:`clear()`.
- **Pop** an item with the *index* from the referenced :class:`Sequence`
via :meth:`pop()`.
- **Remove** the first occurrence of an *item* from the referenced
:class:`Sequence` via :meth:`remove()`.
- **Reverse** all items in the referenced :class:`Sequence`
via :meth:`reverse()`.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the :class:`Sequence`.
    :type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field|None
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `RelativePointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = SequenceRelativePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
[]
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [item for item in pointer]
[]
>>> pointer[:]
[]
>>> pointer.append(Field())
>>> pointer[0]
Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)
>>> len(pointer)
1
>>> pointer.pop()
Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)
>>> pointer.insert(0, Field())
>>> pointer.data
[Field(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value=None)]
>>> pointer.remove(pointer[0])
>>> pointer.data
[]
>>> pointer.clear()
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'SequenceRelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'SequenceRelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Sequence',
'name': 'data',
'size': 0,
'type': 'Sequence',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': []}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": []}'
>>> pointer.field_items()
[('field',
SequenceRelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('SequenceRelativePointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'SequenceRelativePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field | None) = None,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(template=Sequence(iterable),
address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
def __contains__(self,
key: Structure | Sequence | Field) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self,
index: int | slice) -> Structure | Sequence | Field | list:
return self._data[index]
def __setitem__(self,
index: int,
item: Structure | Sequence | Field) -> None:
self._data[index] = item
def __delitem__(self,
index: int) -> None:
del self._data[index]
def append(self,
item: Structure | Sequence | Field) -> None:
""" Appends the *item* to the end of the :class:`Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.append(item)
def insert(self,
index: int,
item: Structure | Sequence | Field) -> None:
""" Inserts the *item* before the *index* into the :class:`Sequence`.
:param int index: :class:`Sequence` index.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.insert(index, item)
def pop(self,
index: int = -1) -> Structure | Sequence | Field:
""" Removes and returns the item at the *index* from the
:class:`Sequence`.
:param int index: :class:`Sequence` index.
"""
return self._data.pop(index)
def clear(self) -> None:
""" Remove all items from the :class:`Sequence`."""
self._data.clear()
def remove(self,
item: Structure | Sequence | Field) -> None:
""" Removes the first occurrence of an *item* from the :class:`Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
:type item: Structure|Sequence|Field
"""
self._data.remove(item)
def reverse(self) -> None:
""" In place reversing of the :class:`Sequence` items."""
self._data.reverse()
def extend(self, iterable: (Iterable[Structure | Sequence | Field] |
Structure | Sequence | Field)) -> None:
""" Extends the :class:`Sequence` by appending items from the *iterable*.
:param iterable: any *iterable* that contains items of :class:`Structure`,
:class:`Sequence`, :class:`Array` or :class:`Field` instances. If the
*iterable* is one of these instances itself then the *iterable* itself
is appended to the :class:`Sequence`.
:type iterable: Iterable[Structure|Sequence|Field]|Structure|Sequence|Field
"""
self._data.extend(iterable)
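# --- Editorial usage sketch; not part of the original module. It exercises only
# the list-like interface defined above, using the `Field` class the same way
# the class doctest does.
def _example_sequence_relative_pointer():
    """ Minimal sketch of the container interface of `SequenceRelativePointer`."""
    pointer = SequenceRelativePointer()
    pointer.append(Field())      # append an item to the referenced Sequence
    pointer.insert(0, Field())   # insert a second item at the front
    pointer.extend([Field()])    # extend with an iterable of items
    assert len(pointer) == 3
    pointer.pop()                # remove and return the last item
    pointer.remove(pointer[0])   # remove the first occurrence of an item
    pointer.clear()              # remove all remaining items
    return pointer.data          # -> []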
class ArrayRelativePointer(SequenceRelativePointer):
""" The :class:`ArrayRelativePointer` field is a
:class:`SequenceRelativePointer` which refers to a :class:`Array`.
An `ArrayRelativePointer` adapts and extends a :class:`SequenceRelativePointer`
with the following features:
- **Append** a new :class:`Array` element to the :class:`Array`
via :meth:`append()`.
- **Insert** a new :class:`Array` element before the *index*
into the :class:`Array` via :meth:`insert()`.
- **Re-size** the :class:`Array` via :meth:`resize()`.
:param template: template for the :class:`Array` element.
The *template* can be any :class:`Field` instance or any *callable*
that returns a :class:`Structure`, :class:`Sequence`, :class:`Array`
or any :class:`Field` instance.
:param int capacity: is the capacity of the :class:`Array` in number of
:class:`Array` elements.
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param data_order: byte order used to unpack and pack the :attr:`data` object
referenced by the `RelativePointer` field.
:type data_order: Byteorder|Literal['big', 'little']
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = ArrayRelativePointer(Byte)
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
[]
>>> pointer.data_size
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> len(pointer)
0
>>> [item for item in pointer]
[]
>>> pointer[:]
[]
>>> pointer.append()
>>> pointer[0]
Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')
>>> len(pointer)
1
>>> pointer.pop()
Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')
>>> pointer.insert(0)
>>> pointer.data
[Byte(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=1, bit_offset=0),
bit_size=8,
value='0x0')]
>>> pointer.remove(pointer[0])
>>> pointer.data
[]
>>> pointer.resize(10)
>>> len(pointer)
10
>>> pointer.clear()
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'ArrayRelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'ArrayRelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'class': 'Array',
'name': 'data',
'size': 0,
'type': 'Array',
'member': []}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': []}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": []}'
>>> pointer.field_items()
[('field',
ArrayRelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff'))]
>>> pointer.to_list(nested=True)
[('ArrayRelativePointer.field', '0xffffffff')]
>>> pointer.to_dict(nested=True)
{'ArrayRelativePointer': {'field': '0xffffffff'}}
"""
def __init__(self,
template: Callable | Structure | Sequence | Field,
capacity: int = 0,
address: int | None = None,
data_order: (Literal['big', 'little'] |
Byteorder) = BYTEORDER,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(address=address,
data_order=data_order,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
self._data = Array(template, capacity)
def append(self) -> None:
""" Appends a new :class:`Array` element to the :class:`Array`."""
self._data.append()
def insert(self, index: int) -> None:
""" Inserts a new :class:`Array` element before the *index*
of the :class:`Array`.
:param int index: :class:`Array` index.
"""
self._data.insert(index)
def resize(self, capacity: int) -> None:
""" Re-sizes the :class:`Array` by appending new :class:`Array` elements
or removing :class:`Array` elements from the end.
:param int capacity: new capacity of the :class:`Array` in number of
:class:`Array` elements.
"""
if isinstance(self._data, Array):
self._data.resize(capacity)
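# --- Editorial usage sketch; not part of the original module. `Byte` is the
# element template used in the class doctest above and is assumed to be
# importable from this package.
def _example_array_relative_pointer():
    """ Minimal sketch of growing and shrinking an `ArrayRelativePointer`."""
    pointer = ArrayRelativePointer(Byte)
    pointer.resize(4)        # referenced Array now holds four Byte elements
    pointer.append()         # append one more element created from the template
    assert len(pointer) == 5
    pointer.resize(2)        # drop elements from the end of the Array
    assert len(pointer) == 2
    return pointer.view_fields()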
class StreamRelativePointer(RelativePointer):
""" The :class:`StreamRelativePointer` field is a :class:`RelativePointer`
field which refers to a :class:`Stream` field.
A `StreamRelativePointer` field is:
- *containable*: ``item`` in ``self`` returns :data:`True` if *item* is part
of the referenced :class:`Stream` field.
- *sized*: ``len(self)`` returns the length of the referenced
:class:`Stream` field.
    - *indexable*: ``self[index]`` returns the *byte* at the *index* of the
      referenced :class:`Stream` field.
    - *iterable*: ``iter(self)`` iterates over the bytes of the referenced
      :class:`Stream` field.
:param int capacity: is the *capacity* of the :class:`Stream` field in bytes.
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StreamRelativePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.data
Stream(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value='')
>>> pointer.data_size
0
>>> len(pointer)
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.resize(10)
>>> pointer.data_size
10
>>> len(pointer)
10
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data().hex()
'00000000000000000000'
>>> pointer.deserialize_data()
Index(byte=10, bit=0, address=4294967305, base_address=0, update=False)
>>> pointer.serialize_data()
b'KonFoo is '
>>> [byte for byte in pointer] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [hex(byte) for byte in pointer]
['0x4b', '0x6f', '0x6e', '0x46', '0x6f', '0x6f', '0x20', '0x69', '0x73', '0x20']
>>> pointer[5] # converts to int
111
>>> 111 in pointer
True
>>> 0x0 in pointer
False
>>> pointer[:6] # converts to bytes
b'KonFoo'
>>> pointer[3:6] # converts to bytes
b'Foo'
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StreamRelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StreamRelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'address': 4294967295,
'alignment': [10, 0],
'class': 'Stream10',
'index': [0, 0],
'name': 'data',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': '4b6f6e466f6f20697320'}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': '4b6f6e466f6f20697320'}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": "4b6f6e466f6f20697320"}'
>>> pointer.field_items()
[('field',
StreamRelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff')),
('data',
Stream(index=Index(byte=0, bit=0,
address=4294967295, base_address=0,
update=False),
alignment=Alignment(byte_size=10, bit_offset=0),
bit_size=80,
value='4b6f6e466f6f20697320'))]
>>> pointer.to_list()
[('StreamRelativePointer.field', '0xffffffff'),
('StreamRelativePointer.data', '4b6f6e466f6f20697320')]
>>> pointer.to_dict()
{'StreamRelativePointer': {'field': '0xffffffff', 'data': '4b6f6e466f6f20697320'}}
"""
def __init__(self,
capacity: int = 0,
address: int | None = None,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(template=None,
address=address,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
self._data: Stream = Stream(capacity)
def __contains__(self, key: int | bytes) -> bool:
return key in self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key: int | slice) -> int | bytes:
return self._data[key]
def __iter__(self) -> Iterator[int]:
return iter(self._data)
def resize(self, capacity: int) -> None:
""" Re-sizes the :class:`Stream` field by appending zero bytes or
removing bytes from the end.
:param int capacity: :class:`Stream` capacity in number of bytes.
"""
if isinstance(self._data, Stream):
self._data.resize(capacity)
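# --- Editorial usage sketch; not part of the original module. It follows the
# byte stream pattern shown in the class doctest above.
def _example_stream_relative_pointer():
    """ Minimal sketch of byte access through a `StreamRelativePointer`."""
    pointer = StreamRelativePointer(capacity=4)
    pointer.bytestream = b'KonF'   # set the internal byte stream
    pointer.deserialize_data()     # unpack the referenced Stream field from it
    assert len(pointer) == 4
    assert pointer[0] == 0x4b      # indexable: returns the byte as an int
    assert 0x6f in pointer         # containable: membership test on the bytes
    return [hex(byte) for byte in pointer]   # iterable over the stream bytes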
class StringRelativePointer(StreamRelativePointer):
""" The :class:`StringRelativePointer` field is a
:class:`StreamRelativePointer` field which refers to a :class:`String` field.
:param int capacity: is the *capacity* of the :class:`String` field in bytes.
:param int|None address: relative address of the :attr:`data` object
referenced by the `RelativePointer` field.
:param int bit_size: is the *size* of the `RelativePointer` field in bits,
can be between ``1`` and ``64``.
:param int|None align_to: aligns the `RelativePointer` field to the number
of bytes, can be between ``1`` and ``8``.
If no field *alignment* is set the `RelativePointer` field aligns itself
to the next matching byte size according to the *size* of the
`RelativePointer` field.
:param field_order: byte order used to unpack and pack the :attr:`value`
of the `RelativePointer` field.
:type field_order: Byteorder|Literal['auto', 'big', 'little']
Example:
>>> pointer = StringRelativePointer()
>>> pointer.is_decimal()
True
>>> pointer.is_pointer()
True
>>> pointer.name
'Pointer32'
>>> pointer.alignment
Alignment(byte_size=4, bit_offset=0)
>>> pointer.byte_order
Byteorder.auto = 'auto'
>>> pointer.index
Index(byte=0, bit=0, address=0, base_address=0, update=False)
>>> pointer.index_field()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.bit_size
32
>>> pointer.signed
False
>>> pointer.min()
0
>>> pointer.max()
4294967295
>>> pointer.base_address
0
>>> pointer.address
0
>>> pointer.is_null()
True
>>> pointer.as_signed()
0
>>> pointer.as_unsigned()
0
>>> pointer.data
String(index=Index(byte=0, bit=0, address=0, base_address=0, update=False),
alignment=Alignment(byte_size=0, bit_offset=0),
bit_size=0,
value='')
>>> pointer.data_size
0
>>> len(pointer)
0
>>> pointer.data_byte_order
Byteorder.little = 'little'
>>> pointer.bytestream
''
>>> pointer.value
'0x0'
>>> bytes(pointer)
b'\\x00\\x00\\x00\\x00'
>>> int(pointer)
0
>>> float(pointer)
0.0
>>> hex(pointer)
'0x0'
>>> bin(pointer)
'0b0'
>>> oct(pointer)
'0o0'
>>> bool(pointer)
False
>>> pointer.deserialize(bytes.fromhex('00c0'))
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.value
'0xc000'
>>> pointer.value = 0x4000
>>> pointer.value
'0x4000'
>>> pointer.value = -0x1
>>> pointer.value
'0x0'
>>> pointer.value = 0x100000000
>>> pointer.value
'0xffffffff'
>>> bytestream = bytearray()
>>> bytestream
bytearray(b'')
>>> pointer.serialize(bytestream)
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> bytestream.hex()
'ffffffff'
>>> pointer.resize(10)
>>> pointer.data_size
10
>>> len(pointer)
10
>>> pointer.bytestream = b'KonFoo is Fun'
>>> pointer.bytestream
'4b6f6e466f6f2069732046756e'
>>> pointer.serialize_data().hex()
'00000000000000000000'
>>> pointer.deserialize_data()
Index(byte=10, bit=0, address=4294967305, base_address=0, update=False)
>>> pointer.serialize_data()
b'KonFoo is '
>>> [byte for byte in pointer] # converts to int
[75, 111, 110, 70, 111, 111, 32, 105, 115, 32]
>>> [chr(byte) for byte in pointer] # converts to int
['K', 'o', 'n', 'F', 'o', 'o', ' ', 'i', 's', ' ']
>>> chr(pointer[5]) # converts to int -> chr
'o'
>>> ord(' ') in pointer
True
>>> 0x0 in pointer
False
>>> pointer[:6] # converts to bytes
b'KonFoo'
>>> pointer[3:6] # converts to bytes
b'Foo'
>>> pointer.describe() #doctest: +SKIP
{'address': 0,
'alignment': [4, 0],
'class': 'StringRelativePointer',
'index': [0, 0],
'max': 4294967295,
'min': 0,
'name': 'StringRelativePointer',
'order': 'auto',
'signed': False,
'size': 32,
'type': 'Pointer',
'value': '0xffffffff',
'member': [
{'address': 4294967295,
'alignment': [10, 0],
'class': 'String10',
'index': [0, 0],
'name': 'data',
'order': 'auto',
'size': 80,
'type': 'Field',
'value': 'KonFoo is '}
]}
>>> pointer.index_fields()
Index(byte=4, bit=0, address=4, base_address=0, update=False)
>>> pointer.view_fields()
{'value': '0xffffffff', 'data': 'KonFoo is '}
>>> pointer.to_json()
'{"value": "0xffffffff", "data": "KonFoo is "}'
>>> pointer.field_items()
[('field',
StringRelativePointer(index=Index(byte=0, bit=0,
address=0, base_address=0,
update=False),
alignment=Alignment(byte_size=4, bit_offset=0),
bit_size=32,
value='0xffffffff')),
('data',
String(index=Index(byte=0, bit=0,
address=4294967295, base_address=0,
update=False),
alignment=Alignment(byte_size=10, bit_offset=0),
bit_size=80,
value='KonFoo is '))]
>>> pointer.to_list()
[('StringRelativePointer.field', '0xffffffff'),
('StringRelativePointer.data', 'KonFoo is ')]
>>> pointer.to_dict()
{'StringRelativePointer': {'field': '0xffffffff', 'data': 'KonFoo is '}}
"""
def __init__(self,
capacity: int = 0,
address: int | None = None,
bit_size: int = 32,
align_to: int | None = None,
field_order: (Literal['auto', 'big', 'little'] |
Byteorder) = 'auto') -> None:
super().__init__(capacity=0,
address=address,
bit_size=bit_size,
align_to=align_to,
field_order=field_order)
        self._data: String = String(capacity)
/Flask-AppBuilder-jack-3.3.4.tar.gz/Flask-AppBuilder-jack-3.3.4/flask_appbuilder/models/generic/filters.py
from flask_babel import lazy_gettext
from ..filters import BaseFilter, BaseFilterConverter
__all__ = [
"GenericFilterConverter",
"FilterNotContains",
"FilterEqual",
"FilterContains",
"FilterIContains",
"FilterNotEqual",
"FilterGreater",
"FilterSmaller",
"FilterStartsWith",
]
class FilterContains(BaseFilter):
name = lazy_gettext("Contains")
def apply(self, query, value):
return query.like(self.column_name, value)
class FilterIContains(BaseFilter):
"""
case insensitive like
"""
name = lazy_gettext("Contains (insensitive)")
def apply(self, query, value):
return query.ilike(self.column_name, value)
class FilterNotContains(BaseFilter):
name = lazy_gettext("Not Contains")
def apply(self, query, value):
return query.not_like(self.column_name, value)
class FilterEqual(BaseFilter):
name = lazy_gettext("Equal to")
def apply(self, query, value):
return query.equal(self.column_name, value)
class FilterNotEqual(BaseFilter):
name = lazy_gettext("Not Equal to")
def apply(self, query, value):
return query.not_equal(self.column_name, value)
class FilterGreater(BaseFilter):
name = lazy_gettext("Greater than")
def apply(self, query, value):
return query.greater(self.column_name, value)
class FilterSmaller(BaseFilter):
name = lazy_gettext("Smaller than")
def apply(self, query, value):
return query.smaller(self.column_name, value)
class FilterStartsWith(BaseFilter):
name = lazy_gettext("Start with")
def apply(self, query, value):
return query.starts_with(self.column_name, value)
class GenericFilterConverter(BaseFilterConverter):
"""
    Class for converting columns into a supported list of filters
    specific to the generic (non-SQLAlchemy) data models.
"""
conversion_table = (
("is_enum", [FilterEqual, FilterNotEqual]),
(
"is_text",
[
FilterContains,
FilterIContains,
FilterNotContains,
FilterEqual,
FilterNotEqual,
FilterStartsWith,
],
),
(
"is_string",
[
FilterContains,
FilterIContains,
FilterNotContains,
FilterEqual,
FilterNotEqual,
FilterStartsWith,
],
),
("is_integer", [FilterEqual, FilterNotEqual, FilterGreater, FilterSmaller]),
("is_date", [FilterEqual, FilterNotEqual, FilterGreater, FilterSmaller]),
) | PypiClean |
/AstroAugmentations-0.1.0.tar.gz/AstroAugmentations-0.1.0/astroaugmentations/utils/kernel_creation.py
import albumentations as A
import numpy as np
def create_vla_psf(
save=False, hours=1, t_int=1, frequency=1.4,
pixel_resolution=1.8, configuration="B", size=150):
"""Generates and saves a psf for the VLA.
Args:
name (str):
Output file path to save psf to.
hours (float>0):
Length of 'observation' (time synthesis).
t_int (float):
Time integration length (in hrs).
frequency (float):
Frequency of the observation
pixel_resolution (float):
Pixel width of the saved psf in (arcsec).
Default 1.8 arcsec to match FIRST cutouts.
configuration (str):
VLA configuration to use.
"""
### Reading in antennae positions ###
RawData = []
with open('./VLA_raw_antenna_positions.txt') as f:
for line in f: #Create Array of all data.
LineArray = line.split()
RawData.append(LineArray)
# Split dataset By orientation (West, East and North)
WAntennae = RawData[1:25]
EAntennae = RawData[25:49]
NAntennae = RawData[49:]
    # Split location data into numpy arrays for the chosen configuration of the satellites
    ArrayConfiguration = configuration
W = np.array([])
for i in WAntennae:
if ArrayConfiguration in i:
W = np.append(W,i[-4:])
#Shape each matrix, so that each row of data is for one receiver with data columns of Lx(ns), Ly(ns), Lz(ns) and R(m).
W = np.reshape(W,(len(W)//4,4)).astype('float64')
E = np.array([])
for i in EAntennae:
if ArrayConfiguration in i:
E = np.append(E,i[-4:])
E = np.reshape(E,(len(E)//4,4)).astype('float64')
N = np.array([])
for i in NAntennae:
if ArrayConfiguration in i:
N = np.append(N,i[-4:])
N = np.reshape(N,(len(N)//4,4)).astype('float64')
c = 299792458 #[m/s]
NDist = N[:,:3]*10**(-9)*c #[m]
EDist = E[:,:3]*10**(-9)*c #[m]
WDist = W[:,:3]*10**(-9)*c #[m]
N_m = NDist[:,:3]
E_m = EDist[:,:3]
W_m = WDist[:,:3]
antennae = np.concatenate((N_m,E_m))
antennae = np.concatenate((antennae,W_m))
### Synthesise UV Coverage ###
# Place coordinates into boxes to show which are sampled in a mgrid of my choosing. Then FT to save a kernel.
observation_intervals = np.arange(0, hours, t_int)
UV_coords = []
for i in range(antennae.shape[0]):
for j in range(antennae.shape[0]):
for h in observation_intervals:
if i!=j:
u, v = single_baseline(
antennae[i], antennae[j], HA=hours/2-h,
d_deg=34.0784, frequency=frequency)
UV_coords.append([u, v])
UV = np.stack(UV_coords)
### Grid UV Coverage ###
lims = [UV.min(), UV.max()]
uv_grid = np.mgrid[
lims[0]:lims[1]:(lims[1]-lims[0])//(size-1),
lims[0]:lims[1]:(lims[1]-lims[0])//(size-1)
]
u_resolution = (lims[1]-lims[0])//(size-1)
v_resolution = (lims[1]-lims[0])//(size-1)
k_list = np.asarray([
np.where(
(uv_grid[0]>u) & (uv_grid[0]<=u+u_resolution) &
(uv_grid[1]>v) & (uv_grid[1]<=v+v_resolution),
1, 0
) for u, v in UV])
weighted_uv_sampling = k_list.sum(axis=0)
psf = np.fft.fftshift(np.fft.fft2(weighted_uv_sampling))
# Save generated psf
    if isinstance(save, str):
np.save(save, psf)
else:
return psf
def single_baseline(antenna1, antenna2, frequency=1.4, HA=0, uv=True, d_deg=45):
    """Calculates the UV position of a single pair of antennae."""
c = 299792458 #units: [m/s]
frequency = frequency*10**9
baseline = antenna1-antenna2
if uv:
H_rad = 2*np.pi * HA/24 #units: [rad]
d = 2*np.pi * d_deg/360 #units: [rad]
baseline_u = (np.sin(H_rad)*baseline[0] + np.cos(H_rad)*baseline[1])*frequency/c
baseline_v = (
-np.sin(d)*np.cos(H_rad)*baseline[0] +
np.sin(d)*np.sin(H_rad)*baseline[1] +
np.cos(d)*baseline[2]
)*frequency/c
else:
baseline_u , baseline_v = baseline[0] , baseline[1]
return baseline_u, baseline_v #units: [lambda]
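# --- Editorial sketch; not part of the original module. It shows how
# `single_baseline` projects one antenna pair onto the UV plane; the antenna
# positions below are made-up coordinates in metres, and frequency is in GHz
# as in `create_vla_psf`.
def _example_single_baseline():
    antenna_a = np.array([0.0, 0.0, 0.0])
    antenna_b = np.array([120.0, 40.0, 0.0])
    u, v = single_baseline(antenna_a, antenna_b, frequency=1.4,
                           HA=0.5, d_deg=34.0784)
    return u, v  # (u, v) sample in units of wavelengths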
if __name__ == "__main__":
output_kernel = "./kernels/VLA_kernel"
create_vla_psf(
save = output_kernel,
hours = 1,
t_int = 1,
configuration = 'B'
)
print(f"> Generated default VLA PSF / kernel. Saved to:\n{output_kernel}.npy") | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/Asana-Math/DoubleStruck/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.AsanaMathJax_DoubleStruck={directory:"DoubleStruck/Regular",family:"AsanaMathJax_DoubleStruck",id:"ASANAMATHDOUBLESTRUCK",32:[0,0,249,0,0,""],8450:[709,20,708,22,669,"614 588c0 44 -122 79 -198 79c-61 0 -113 -15 -155 -40v-538c53 -36 120 -55 197 -55c71 0 157 26 203 60l8 -10l-27 -50c-58 -35 -157 -54 -218 -54c-240 0 -402 137 -402 364s184 365 411 365c93 0 183 -24 231 -41c-10 -47 -19 -100 -19 -152h-31v72zM215 127v464 c-57 -56 -86 -139 -86 -231c0 -97 31 -176 86 -233"],8461:[692,3,948,22,927,"432 -3c-52 2 -99 3 -145 3c-159 0 -265 -3 -265 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h148c46 0 93 0 145 3v-30l-48 -3c-42 -4 -51 -6 -51 -90v-178c12 -2 39 -2 83 -2h233c45 0 72 0 84 2v178c0 82 -8 86 -51 90l-48 3v30 c52 -3 99 -3 148 -3c46 0 93 0 145 3v-30l-48 -3c-24 -2 -36 -5 -42 -15c-6 -11 -9 -37 -9 -75v-449c0 -84 11 -89 51 -91l48 -2v-30c-52 2 -99 3 -145 3c-49 0 -96 -1 -148 -3v30l48 2c44 2 51 12 51 91v223c-12 1 -39 2 -84 2h-233c-44 0 -71 -1 -83 -2v-223 c0 -79 7 -89 51 -91l48 -2v-30zM195 31c38 0 43 14 43 89v449c0 75 -5 89 -43 89c-27 0 -32 -15 -32 -89v-449c0 -74 5 -89 32 -89"],8469:[692,3,951,17,934,"934 662l-45 -3c-41 -4 -50 -6 -50 -90v-569h-87l-471 581h-2v-461c0 -79 7 -89 50 -91l45 -2v-30c-45 1 -80 3 -120 3c-36 0 -76 -1 -116 -1c-42 -1 -87 -1 -121 -2v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c34 -1 79 -3 121 -3h120c18 0 36 2 54 3 l477 -590h4v467c0 82 -8 86 -51 90l-44 3v30c40 -1 80 -3 116 -3c40 0 75 2 120 3v-30zM189 31c37 4 44 9 44 89v449c0 79 -6 85 -44 89c-26 -3 -31 -15 -31 -89v-449c0 -74 5 -86 31 -89"],8473:[692,3,720,22,697,"432 -3c-52 2 -188 3 -234 3c-159 0 -176 -3 -176 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30h467c98 3 208 -40 208 -153c0 -125 -108 -215 -240 -215c-20 0 -40 2 -60 5l-10 37c22 -7 44 -10 66 -10c81 0 144 64 144 149c0 104 -63 149 -162 149 c-46 0 -74 -5 -102 -12v-522c0 -79 7 -89 51 -91l48 -2v-30zM195 31c38 0 43 14 43 89v449c0 68 -6 85 -44 89c-26 -4 -31 -16 -31 -89v-449c0 -74 5 -89 32 -89"],8474:[709,176,785,22,764,"408 -20c110 -12 208 -109 356 -79v-21l-84 -56c-32 7 -64 14 -106 27l-148 45c-33 10 -57 15 -80 15c-11 0 -19 -4 -26 -8l-59 -32l2 32c48 37 90 65 109 72l11 4v2h-6c-211 0 -355 137 -355 367c0 204 144 361 376 361c214 0 366 -120 366 -342 c0 -207 -162 -378 -356 -385v-2zM657 336c0 198 -89 331 -275 331c-47 0 -87 -9 -121 -25v-575c39 -27 88 -44 148 -44c174 0 248 129 248 313zM215 109v502c-58 -51 -86 -133 -86 -230c0 -100 27 -202 86 -272"],8477:[692,3,784,22,786,"402 371c150 0 191 87 191 145c0 108 -80 138 -158 138c-46 0 -74 -5 -102 -12v-522c0 -79 7 -89 51 -91l48 -2v-30c-52 2 -150 3 -196 3c-159 0 -214 -3 -214 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30h480c151 0 191 -80 191 -150 c0 -109 -98 -174 -195 -193l227 -296c15 -19 37 -24 61 -26v-30c-26 1 -49 3 -72 3s-45 -2 -68 -3l-277 363l6 12c9 -1 18 -1 27 -1zM195 31c38 0 43 14 43 89v449c0 78 -4 85 -42 89c-27 -3 -33 -15 -33 -89v-449c0 -74 5 -89 32 -89"],8484:[692,3,816,15,788,"788 173c-5 -55 -6 -119 -6 -176c-98 1 -202 3 -305 3c-104 0 -358 -2 -462 -3v28c32 39 66 77 97 117l302 387c30 39 64 80 86 119h-228c-138 0 -149 -14 -152 -47l-6 -72h-34c4 55 4 109 0 163c132 -1 265 -3 397 -3h210c52 0 44 2 96 3l4 -29l-486 -619h273 c149 0 167 4 174 72l6 57h34zM709 648h-84l-474 -604h90c27 32 55 64 81 98l302 387c30 39 64 80 85 119"],8508:[525,15,718,10,688,"76 138l-10 -5c1 -34 94 -156 151 -126c45 27 37 121 41 209v252l-36 -1c-4 
-195 17 -415 -23 -436c-29 -20 -105 45 -123 107zM688 525l-16 -119c-47 26 -138 22 -138 22c2 -2 -5 -152 -5 -224c-1 -89 2 -143 35 -146s63 35 81 92l26 -8c-43 -144 -98 -157 -128 -157 s-65 14 -83 58c-20 49 -1 366 -3 383l-171 4c-2 -16 10 -338 -10 -387c-18 -44 -39 -58 -70 -58c-29 0 -116 8 -151 152l26 9c17 -59 67 -96 100 -89c38 7 27 70 28 143s-3 228 -1 228c-15 -1 -64 14 -95 -20c-23 -25 -18 -53 -21 -76c-81 0 -144 129 26 148 c91 10 415 0 462 0c19 0 72 1 72 43c2 0 36 1 36 2"],8509:[485,230,494,-10,466,"264 -200v214l-146 447c-83 -2 -90 2 -117 4v-9c31 5 78 -12 93 -52c32 -52 156 -412 156 -412c-6 -24 -22 -62 -20 -136c0 -30 5 -58 34 -56zM466 419c5 -75 -98 -199 -184 -419c12 -29 25 -73 26 -127c0 -50 -11 -103 -44 -103c-30 0 -53 28 -54 94 c-2 74 14 112 20 136c0 0 -134 362 -167 415c-23 36 -49 32 -73 32v31c13 -4 110 -8 160 3c15 -69 77 -277 122 -418c32 64 93 210 103 317c5 55 -33 83 -32 83c2 -1 25 17 55 22c8 1 64 4 68 -66"],8510:[692,3,727,22,703,"703 517c-6 55 -14 123 -21 172c-303 7 -221 0 -415 0h-85c-51 1 -115 1 -160 3v-30l44 -2c41 -2 51 -12 51 -91v-449c0 -79 -10 -87 -51 -90l-44 -3v-30c33 1 76 3 117 3h148c46 0 93 0 145 -3v30l-48 3c-42 4 -51 6 -51 90v522c28 7 56 8 84 8c82 0 204 -3 221 -19 c10 -9 16 -43 21 -64l12 -50h32zM238 594v-474c0 -75 -5 -89 -43 -89c-27 0 -32 15 -32 89v449c0 79 6 87 38 91l12 -7c17 -10 25 -32 25 -59"],8511:[692,3,899,27,873,"660 27c38 0 43 14 43 89v474c0 27 -8 49 -25 59l-12 7c-32 -4 -38 -12 -38 -91v-449c0 -74 5 -89 32 -89zM185 31c38 0 43 14 43 89v474c0 27 -8 49 -25 59l-12 7c-32 -4 -38 -12 -38 -91v-449c0 -74 5 -89 32 -89zM595 120l-1 524c-27 2 -43 2 -83 2h-122 c-40 0 -56 0 -83 -2l1 -524c0 -79 5 -87 51 -90l48 -3v-30c-59 2 -181 3 -233 3s-87 -1 -146 -3v30l48 3c46 3 51 11 51 90v449c0 79 -5 87 -51 90l-48 3v30c72 -2 96 -3 146 -3c137 3 364 4 505 0c52 0 122 0 195 3v-30l-48 -3c-46 -3 -51 -11 -51 -90v-449 c0 -79 5 -87 51 -90l48 -3v-30c-59 2 -143 3 -195 3s-123 -1 -182 -3v30l48 3c46 3 51 11 51 90"],8512:[696,1,645,30,618,"72 9h47l245 361l-206 290h-43l213 -290zM618 194c-12 -70 -14 -101 -21 -195c0 0 -200 0 -271 1h-296l256 370c-53 73 -245 315 -252 326l551 -1c-1 -62 1 -77 11 -172h-25c-37 104 -68 106 -95 118c-25 6 -96 19 -287 19l203 -290l-193 -282c38 -1 21 1 130 -1 c107 -1 180 -1 213 17c38 19 42 61 50 90h26"],8517:[692,3,898,-39,888,"883 387c-28 -164 -147 -306 -277 -355c-129 -49 -216 -32 -349 -32h-136c-51 0 -115 -1 -160 -3l5 30l44 2c42 2 53 12 67 91l80 449c14 79 5 87 -36 90l-43 3l5 30c35 -1 82 -3 125 -3h197c65 0 130 2 174 3c206 6 339 -111 304 -305zM772 359c43 248 -91 293 -271 293 c-34 0 -86 -1 -115 -8l-105 -595c27 -7 78 -8 112 -8c50 0 109 3 167 28c121 52 188 155 212 290zM194 95l84 474c14 80 8 85 -28 89c-27 -3 -34 -15 -47 -89l-80 -449c-14 -79 -9 -87 22 -91l14 7c18 10 30 32 35 59"],8518:[694,10,667,-7,707,"189 -10c85 0 125 49 176 100l98 -90h53c29 168 108 613 109 617c7 27 31 34 78 34c3 16 6 32 4 32c-21 0 -191 11 -232 11c-6 0 -28 1 -36 -6l-57 -322c-62 117 -247 97 -345 -49c-108 -162 -4 -327 152 -327zM479 615c1 1 22 33 51 21c11 -4 24 -12 27 -21l-83 -471 c-29 -31 -77 -10 -76 8c0 2 53 302 81 463zM103 38c-37 35 -72 111 -39 214c18 68 60 112 101 136zM148 25l66 373c73 24 132 -16 158 -91l-32 -178c-67 -86 -127 -110 -192 -104"],8519:[463,20,546,24,522,"519 231h-269l-36 -205c77 -27 203 -7 260 95c4 7 8 10 15 10c20 0 12 -19 -16 -53c-151 -192 -515 -88 -438 173c66 222 345 270 452 143c32 -32 42 -108 32 -163zM283 418l-29 -165h217c48 179 -145 168 -188 165zM147 42l64 361c-180 -73 -226 -285 -64 -361"],8520:[669,0,388,-17,298,"176 616c7 43 67 73 96 37c10 -10 12 -23 10 -37c-9 -48 -71 -71 -97 -38c-10 10 -12 23 -9 38zM65 431c20 0 164 11 206 
11c6 0 20 1 27 -6l-68 -387c2 -12 11 -16 45 -17h18l-6 -32c-17 4 -285 3 -304 0l6 32h22c38 2 42 4 51 18l29 157c26 153 27 159 26 170 c-1 19 -22 22 -48 22h-12c3 16 6 32 8 32zM239 363c-2 1 -34 45 -76 0c-6 -10 -28 -146 -55 -298c40 -23 55 -8 78 -3"],8521:[669,210,409,-98,353,"245 616c8 43 68 73 98 37c9 -10 12 -23 9 -37c-12 -71 -119 -69 -107 0zM327 442c6 0 19 1 25 -6l-93 -516c-26 -67 -106 -122 -187 -130c-26 -2 -151 5 -168 61c-9 29 6 61 29 71c28 15 57 -4 61 -28c5 -17 3 -37 -37 -57c15 -8 66 -29 121 -11c15 90 89 464 94 554 c-4 9 -19 19 -70 19h-13l6 32c21 0 189 11 232 11zM296 363c-6 37 -55 30 -74 0l-95 -530c41 19 73 46 91 90"],120120:[700,3,887,15,866,"355 -3c-36 1 -143 4 -179 4c-32 0 -97 -2 -161 -4v30l37 2c24 1 43 25 54 50l178 405c31 70 65 144 92 216h142l242 -568c38 -94 57 -102 72 -103l34 -2v-30c-44 1 -88 3 -133 3c-35 0 -79 -2 -133 -3v30l47 2c22 1 41 9 41 20c0 15 -11 41 -21 65l-46 115h-288l-28 -67 c-9 -21 -36 -88 -36 -104c0 -21 18 -27 49 -29l37 -2v-30zM603 269l-126 298l-128 -298h254zM476 673h-72l-219 -511c-9 -21 -36 -88 -36 -104c0 -15 10 -22 26 -25c18 7 32 25 41 46"],120121:[692,3,739,26,705,"705 227c0 -98 -74 -236 -293 -230c-37 1 -73 3 -109 3h-109c-52 -1 -121 -1 -168 -3v30l43 2c42 2 52 12 52 91v449c0 79 -10 87 -52 90l-43 3v30c36 -1 85 -3 129 -3h144c58 0 114 2 193 3c93 2 179 -32 179 -138c0 -91 -85 -150 -167 -163v-2c102 -3 201 -45 201 -162 zM572 534c0 95 -67 118 -151 118c-27 0 -56 -1 -84 -8v-243c18 -1 41 -2 71 -2c87 0 164 36 164 135zM602 209c0 111 -70 155 -198 155c-28 0 -49 -1 -67 -2v-315c28 -7 57 -8 84 -8c111 0 181 64 181 170zM242 116v453c0 75 -5 85 -43 89c-27 -3 -32 -16 -32 -89v-453 c0 -77 6 -85 42 -89c28 4 33 16 33 89"],120123:[692,3,898,22,876,"876 387c0 -164 -94 -306 -215 -355c-121 -50 -214 -32 -343 -32h-136c-51 0 -115 -1 -160 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c35 -1 82 -3 125 -3h197c65 0 130 2 174 3c205 6 358 -111 358 -305zM769 359c0 248 -142 293 -322 293 c-34 0 -86 -1 -114 -8v-595c28 -7 80 -8 114 -8c50 0 108 3 162 28c111 52 160 155 160 290zM238 95v474c0 78 -5 85 -43 89c-27 -3 -32 -15 -32 -89v-449c0 -79 6 -87 38 -91l12 7c17 10 25 32 25 59"],120124:[692,3,727,22,689,"673 0c-303 -7 -208 0 -415 0h-107c-44 -1 -93 -2 -129 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h169c87 0 179 3 236 3c45 0 90 -1 129 -3c-11 -44 -16 -101 -16 -152h-35v65c0 13 -1 24 -9 27c-53 20 -132 20 -196 20 c-28 0 -56 0 -84 -7v-267c32 -2 68 -3 116 -3c60 0 95 4 106 11c5 4 7 13 8 26l5 52h30c-1 -41 -3 -79 -3 -116s2 -76 3 -111h-30l-5 59c-2 29 -3 36 -114 36c-48 0 -85 0 -116 -2v-280c28 -7 56 -8 84 -8c82 0 184 3 202 13c15 8 28 52 35 112h35 c-10 -48 -16 -110 -16 -164zM195 31c39 4 43 13 43 89v449c0 75 -5 89 -43 89c-27 0 -32 -15 -32 -89v-449c0 -73 5 -86 32 -89"],120125:[692,3,672,22,653,"432 -3c-52 2 -99 3 -145 3c-159 0 -265 -3 -265 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h169c67 0 159 3 216 3c45 0 90 -1 129 -3c-11 -44 -16 -101 -16 -152h-35v65c0 13 -1 24 -9 27c-53 20 -132 20 -176 20 c-28 0 -56 0 -84 -7v-267c32 -2 68 -3 116 -3c60 0 95 4 106 11c5 4 7 13 8 26l5 52h30c-1 -41 -3 -79 -3 -116s2 -76 3 -111h-30l-5 59c-2 29 -3 36 -114 36c-48 0 -85 0 -116 -2v-207c0 -79 7 -89 51 -91l48 -2v-30zM195 31c38 0 43 14 43 89v449c0 75 -5 89 -43 89 c-27 0 -32 -15 -32 -89v-449c0 -74 5 -89 32 -89"],120126:[709,20,762,22,728,"728 246l-29 -10c-10 -4 -11 -8 -11 -37v-168c-86 -25 -194 -51 -263 -51c-223 0 -403 116 -403 367c0 220 181 362 422 362c124 0 202 -28 248 -41c-12 -47 -20 -100 -20 -152h-31v53c0 47 -36 64 -83 80c-32 11 -75 18 -140 18c-75 0 -136 -19 -182 -54v-518 c53 -46 
126 -73 215 -73c77 0 135 17 140 35c4 12 5 16 5 41v101c0 23 -8 33 -34 35l-83 7v30c41 -2 82 -2 122 -2c41 0 82 0 123 2zM205 127v458c-50 -53 -76 -130 -76 -221c0 -95 26 -177 76 -237"],120128:[692,3,453,22,432,"432 -3c-52 2 -99 3 -145 3c-159 0 -265 -3 -265 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h148c46 0 93 0 145 3v-30l-48 -3c-42 -4 -51 -6 -51 -90v-449c0 -79 7 -89 51 -91l48 -2v-30zM195 31c38 0 43 14 43 89v449 c0 75 -5 89 -43 89c-27 0 -32 -15 -32 -89v-449c0 -74 5 -89 32 -89"],120129:[692,194,440,-15,419,"419 662l-48 -3c-42 -4 -51 -6 -51 -90v-452c0 -65 5 -148 -62 -231c-53 -65 -80 -80 -251 -80c-7 0 -13 1 -16 2l-6 76l11 7c24 -16 45 -20 70 -20c21 0 35 38 37 45l1 653c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h148c46 0 93 0 145 3v-30zM225 -17v586 c0 74 -5 89 -43 89c-27 0 -33 -15 -33 -89v-700c34 0 59 7 68 35c5 17 8 53 8 79"],120130:[692,3,842,22,836,"432 -3c-52 2 -99 3 -145 3c-159 0 -265 -3 -265 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h148c46 0 93 0 145 3v-30l-48 -3c-42 -4 -51 -6 -51 -90v-214l278 271c15 15 29 32 29 39v27c32 -1 60 -3 89 -3c28 0 56 2 80 3v-30 l-42 -2c-26 -1 -52 -15 -78 -38l-266 -240l330 -313c24 -23 44 -38 62 -40l21 -2v-30c-19 1 -43 3 -67 3c-23 0 -47 -2 -78 -3l-348 335l-10 -7v-205c0 -79 7 -89 51 -91l48 -2v-30zM195 31c38 0 43 14 43 89v449c0 75 -5 89 -43 89c-27 0 -32 -15 -32 -89v-449 c0 -74 5 -89 32 -89"],120131:[692,3,727,22,703,"703 172c-6 -55 -14 -123 -21 -172c-304 -7 -221 0 -415 0h-85c-51 -1 -115 -1 -160 -3v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 76 -3 117 -3h148c46 0 93 0 145 3v-30l-48 -3c-42 -4 -51 -6 -51 -90v-522c28 -7 56 -8 84 -8 c82 0 204 3 221 19c10 9 16 43 21 64l12 50h32zM238 95v474c0 75 -5 89 -43 89c-27 0 -32 -15 -32 -89v-449c0 -79 6 -87 38 -91l12 7c17 10 25 32 25 59"],120132:[692,13,1066,16,1047,"1047 -3c-52 2 -99 3 -145 3c-49 0 -96 -1 -134 -3v30l34 2c44 2 51 12 51 91v454h-2c-15 -27 -29 -55 -42 -82l-178 -378c-19 -41 -36 -85 -54 -127h-20l-277 587h-2v-454c0 -80 8 -89 51 -91l44 -2v-30c-44 1 -80 3 -120 3c-36 0 -76 -1 -116 -1c-42 -1 -88 -1 -121 -2 v30l44 2c41 2 51 12 51 91v449c0 79 -10 87 -51 90l-44 3v30c33 -1 79 -3 121 -3h95c32 0 64 2 95 3l264 -556l262 556c32 -1 63 -3 95 -3s64 2 93 3v-30l-42 -3c-24 -2 -36 -5 -42 -15c-6 -11 -9 -37 -9 -75v-449c0 -84 11 -89 51 -91l48 -2v-30zM189 31c39 4 43 14 43 89 v449c0 78 -5 85 -43 89c-27 -3 -32 -15 -32 -89v-449c0 -74 5 -86 32 -89"],120134:[709,20,785,22,764,"764 367c0 -215 -168 -387 -387 -387c-203 0 -355 140 -355 364c0 205 144 365 376 365c214 0 366 -120 366 -342zM657 336c0 198 -89 331 -275 331c-47 0 -87 -9 -121 -25v-575c39 -28 88 -45 148 -45c167 0 248 130 248 314zM215 109v502c-58 -51 -86 -133 -86 -230 c0 -100 27 -202 86 -272"],120138:[709,20,524,24,503,"211 408c85 -24 292 -5 292 -175c0 -171 -154 -253 -302 -253c-70 0 -138 22 -172 38c9 51 9 106 8 153h32l8 -67c5 -42 84 -80 163 -80c22 0 44 3 65 10v268c-102 21 -281 11 -281 181c0 155 136 226 256 226c80 0 140 -26 181 -42c-10 -51 -14 -94 -14 -141h-32l-6 57 c-3 28 -7 37 -44 60c-28 17 -70 24 -109 24c-15 0 -30 -2 -45 -6v-253zM165 425v213c-33 -23 -57 -62 -57 -117c0 -42 15 -75 57 -96zM419 178c0 56 -26 89 -68 108v-231c40 26 68 68 68 123"],120139:[694,3,737,18,720,"720 694c-5 -52 -7 -107 -7 -161h-31l-4 79c-1 24 -10 33 -40 33h-155c-3 -30 -4 -62 -4 -100v-423c0 -81 8 -88 50 -90l49 -3v-30c-52 2 -99 3 -145 3h-47c-3 -1 -94 -3 -101 -3c-19 0 -84 -1 -117 -2v30l44 2c41 2 51 12 51 91v449c0 42 -3 44 -9 76h-154 c-29 0 -39 -9 -40 -33l-4 -79h-31c0 54 -2 109 -7 161c71 0 269 -3 341 -3h145c72 0 144 3 216 3zM334 32c41 4 50 7 50 
90v423c0 38 -1 70 -4 100h-66c-8 -31 -5 -34 -5 -76v-449c0 -67 4 -83 25 -88"],120140:[692,22,907,12,889,"12 692c20 -1 201 -5 201 -5c13 0 76 2 76 2s94 0 146 3v-30l-48 -3c-42 -4 -51 -6 -51 -90v-344c0 -142 73 -183 206 -183c107 0 206 41 206 207v320c0 82 -8 86 -51 90l-44 3v30c40 -1 80 -3 116 -3c40 0 76 2 120 3v-30l-44 -3c-43 -4 -51 -6 -51 -90v-289 c0 -209 -78 -298 -273 -300c-165 -1 -410 -38 -410 234v355c0 38 -3 64 -9 75c-6 10 -18 13 -42 15l-48 3v30zM208 657c-40 -5 -42 -16 -42 -88v-344c0 -147 72 -196 165 -209c-58 33 -90 95 -90 198v355c0 70 -5 83 -33 88"],120141:[692,9,851,8,836,"836 662l-30 -3c-21 -2 -34 -24 -54 -72l-174 -421c-24 -57 -46 -114 -64 -175h-180s-38 116 -58 165l-188 462c-13 32 -27 40 -44 41l-36 3v30c43 -1 86 -3 130 -3h51c8 -1 82 0 82 0l133 3v-30l-56 -3c-19 -1 -39 -7 -30 -31l197 -507l183 473c21 54 0 63 -26 65l-52 3 v30c36 -1 71 -3 108 -3s72 2 108 3v-30zM174 659h-6c-19 0 -39 -7 -30 -31l239 -610h79c-15 49 -33 97 -50 138l-188 462c-13 32 -27 41 -44 41"],120142:[700,9,1119,8,1104,"1104 662l-32 -3c-22 -2 -33 -12 -38 -29l-153 -538c-8 -29 -19 -60 -27 -101h-50c-22 79 -46 137 -71 206l-125 339l-174 -442c-13 -34 -26 -68 -36 -103h-170l-142 618c-10 43 -29 48 -50 50l-28 3v30c40 -1 79 -3 120 -3h50c7 -1 76 0 76 0l126 3v-30l-42 -3 c-26 -2 -46 -7 -33 -66l99 -446h2l123 312c31 79 63 159 90 241h32c9 -40 27 -86 41 -126l155 -434h2l131 486c6 24 -8 30 -46 33l-40 3v30c35 -1 69 -3 105 -3s70 2 105 3v-30zM343 18l-137 591c-9 39 -26 49 -45 49c-22 0 -38 -11 -26 -65l132 -575h76"],120143:[700,3,783,14,765,"765 -3c-24 1 -48 3 -72 3h-140c-25 0 -39 -2 -63 -3l-16 24l-138 229c-82 -90 -159 -181 -202 -253c0 0 -34 3 -51 3c-18 0 -35 -2 -52 -3v30l26 3c31 4 47 33 65 56c44 57 113 130 187 208l-156 260c-33 55 -68 88 -92 90l-45 3l-2 26l271 27c24 -12 43 -42 57 -65 l112 -189c80 87 154 172 201 243h101v-30l-41 -3c-20 -1 -47 -42 -68 -68c-33 -42 -96 -108 -168 -184l190 -304c19 -30 42 -68 66 -70l30 -3v-30zM600 28l-317 526c-33 55 -68 89 -92 90l-36 2c3 -4 5 -7 7 -11l128 -216l199 -319c19 -30 42 -69 66 -70"],120144:[704,3,666,9,654,"654 679c-84 -97 -205 -266 -246 -355c-12 -26 -14 -51 -14 -75v-129c0 -82 8 -88 51 -90l48 -3v-30c-52 2 -99 3 -145 3c-49 0 -96 -1 -148 -3v30l48 3c43 2 51 10 51 90v142c0 16 -3 30 -9 40l-124 216c-52 90 -103 127 -128 130l-29 3v29l215 24c20 2 35 -16 52 -37 s31 -41 44 -65l97 -176l49 78c39 63 75 126 108 190h80v-15zM366 344l-100 174c-50 87 -99 124 -125 130c11 -15 20 -30 29 -46l172 -299"],120146:[463,14,602,42,596,"42 96c0 110 195 174 386 171c0 27 -1 56 -5 69c-40 124 -247 99 -308 48c21 -5 40 -9 47 -38c10 -67 -75 -72 -90 -25c-5 14 -4 60 39 91c104 74 280 72 354 -33l34 -58v-255c1 -6 7 -39 35 -39c40 0 36 61 36 78v40h26c0 -19 0 -74 -1 -78c-7 -44 -45 -75 -87 -73 c-38 3 -68 38 -72 80c-77 -134 -295 -81 -339 -60c-55 24 -55 66 -55 82zM272 220v-208c134 44 166 50 156 228c-89 0 -154 -20 -156 -20zM196 208c-61 -20 -132 -68 -113 -133c17 -54 111 -57 114 -60c0 54 0 193 -1 193"],120147:[694,10,667,18,649,"20 683c21 0 193 11 234 11c6 0 28 1 35 -6v-322c83 117 265 97 337 -49c80 -162 -59 -327 -210 -327c-43 0 -78 12 -109 43c-25 23 -48 55 -49 57l-114 -90h-53c0 168 0 613 -1 617c-2 27 -25 34 -72 34c0 16 0 32 2 32zM236 615c-1 1 -16 33 -47 21 c-12 -4 -26 -12 -31 -21v-471c24 -31 75 -10 77 8c1 2 1 302 1 463zM510 38c44 35 92 111 77 214c-6 68 -40 112 -77 136v-350zM463 25v373c-69 24 -135 -16 -174 -91v-178c52 -86 108 -110 174 -104"],120148:[456,16,546,29,517,"414 339c0 8 9 42 45 50c0 1 -14 10 -20 13c-31 16 -108 31 -162 12v-389c82 -19 174 -8 212 89c5 14 7 15 16 15c13 0 16 -5 9 -21c-42 -106 -156 -124 -212 -124c-136 0 -214 76 -232 100c-100 137 -10 350 200 372c65 5 242 
-9 237 -115c-5 -56 -65 -53 -85 -26 c-5 7 -7 14 -8 24zM192 42v356c-169 -79 -185 -270 0 -356"],120149:[694,10,667,17,649,"647 683c-21 0 -193 11 -234 11c-6 0 -28 1 -35 -6v-322c-83 117 -265 97 -337 -49c-80 -162 59 -327 210 -327c43 0 78 12 109 43c25 23 48 55 49 57l114 -90h53c0 168 0 613 1 617c2 27 25 34 72 34c0 16 0 32 -2 32zM431 615c21 9 36 31 78 0v-471 c-24 -31 -75 -10 -77 8c-1 2 -1 302 -1 463zM157 38c-44 35 -92 111 -77 214c6 68 40 112 77 136v-350zM204 25v373c69 24 135 -16 174 -91v-178c-52 -86 -108 -110 -174 -104"],120150:[462,20,546,28,518,"518 231h-270v-205c82 -27 205 -7 244 95c2 7 6 10 13 10c20 0 14 -19 -7 -53c-118 -192 -499 -88 -468 173c27 222 295 268 427 143c35 -34 61 -108 61 -163zM248 418v-165h217c17 179 -174 168 -217 165zM179 42v361c-105 -77 -133 -288 0 -361"],120151:[720,0,448,18,456,"18 0v32h22c38 2 50 4 56 18c1 4 1 186 1 349h-75v37h75c0 53 0 125 1 132c17 91 141 167 271 150c97 -12 115 -128 44 -128c-50 0 -52 65 -19 84c-48 11 -99 6 -133 -2c1 -2 1 -170 1 -236h129v-37h-130v-342c6 -18 7 -25 75 -25h16v-32c-15 3 -310 4 -334 0zM212 659 c-63 -28 -70 -89 -70 -165v-58h71c0 63 0 223 -1 223zM214 399h-72c0 -116 0 -337 1 -339c7 -21 63 -22 70 0c1 2 1 223 1 339"],120152:[460,214,602,38,576,"207 -174v181c-39 0 -53 -1 -58 -2c-50 -7 -71 -53 -71 -89c9 -36 45 -70 129 -90zM561 -75c0 -92 -158 -139 -276 -139c-82 0 -134 7 -191 36c-58 49 -94 150 13 201c0 9 -38 40 -38 89c0 30 14 63 37 86c-39 39 -49 76 -49 98c4 44 27 82 53 100c47 33 97 45 146 45 c98 0 135 -33 145 -35c2 0 63 54 116 54c36 0 59 -34 59 -56c0 -13 -5 -32 -32 -31c-14 0 -28 10 -28 31c0 18 12 32 9 32c-23 0 -72 -20 -104 -47c24 -25 53 -66 53 -94c0 -62 -58 -121 -156 -140c-173 -39 -218 93 -218 -23c0 -30 22 -52 36 -58c14 -8 6 -7 117 -8 c132 -1 242 -2 292 -84c10 -18 16 -38 16 -57zM389 346c-30 66 -97 72 -126 72v-245c46 -9 193 56 126 173zM208 414c-75 -15 -109 -66 -109 -117s35 -104 108 -120c0 45 1 181 1 237zM263 -181l28 -2c100 0 217 42 217 106c0 75 -139 87 -245 83v-187"],120153:[699,0,673,24,650,"98 332c0 326 12 317 -48 319h-25c0 16 -1 32 1 32c21 0 204 16 245 16v-350c30 53 86 105 191 91c56 -9 90 -36 103 -84c7 -21 6 -13 7 -163c0 -152 -2 -150 14 -156c7 -2 21 -4 42 -5h22v-32c-13 3 -214 3 -226 0v32h22c39 2 48 4 54 18v301c-18 79 -118 69 -176 18 c-63 -55 -53 -108 -53 -206c0 -111 -2 -119 14 -126c7 -2 21 -4 42 -5h22v-32c-20 3 -306 3 -324 0v32h29c38 2 36 4 42 18c1 4 2 18 2 282zM231 642c-1 1 -31 50 -70 0c0 -187 -3 -580 -2 -582c9 -25 64 -32 71 2c1 2 1 429 1 580"],120154:[669,0,388,42,346,"126 616c0 43 54 73 90 37c12 -10 16 -23 16 -37c0 -48 -58 -71 -90 -38c-12 10 -16 23 -16 38zM48 431c20 0 162 11 204 11c6 0 20 1 28 -6v-387c4 -12 14 -16 48 -17h18v-32c-18 4 -286 3 -304 0v32h22c38 2 42 4 48 18c0 4 1 13 1 157c0 153 0 159 -3 170 c-5 19 -26 22 -52 22h-12c0 16 0 32 2 32zM234 363c-2 1 -42 45 -76 0c-4 -10 -2 -146 -2 -298c44 -23 56 -8 78 -3v301"],120155:[669,210,409,-35,316,"177 616c0 43 55 73 91 37c11 -10 16 -23 16 -37c0 -70 -107 -71 -107 0zM290 442c6 0 19 1 26 -6l-2 -516c-15 -67 -85 -122 -165 -130c-25 -2 -151 5 -178 61c-14 29 -5 61 16 71c26 15 58 -4 66 -28c8 -17 10 -37 -27 -57c17 -8 71 -29 123 -11c-1 90 7 464 -4 554 c-5 9 -22 19 -73 19h-13v32c21 0 188 11 231 11zM261 363c-12 37 -54 30 -68 0l-1 -530c38 19 59 46 69 90v440"],120156:[698,0,639,25,619,"27 683c20 0 203 15 243 15l1 -488l157 141c10 10 13 18 14 29c0 3 -4 14 -30 19v32c7 -1 169 -5 184 0v-32c-65 -5 -121 -50 -166 -90l-38 -34l69 -100c87 -125 92 -141 144 -143h14v-32c-12 4 -185 2 -195 0v32c41 -1 45 25 22 56c-8 12 -99 143 -100 144l-75 -66v-116 c2 -18 31 -23 77 -18v-32c-17 3 -307 3 -323 0v32c40 -7 72 -4 76 18c1 4 2 18 2 282c0 296 2 
288 -7 304c-6 11 -39 14 -71 15c0 16 0 32 2 32zM212 615c-1 5 -43 43 -74 0c0 -187 -1 -553 0 -555c6 -18 57 -15 74 -6v561"],120157:[690,0,390,44,372,"46 683c82 1 171 13 248 5c0 -301 1 -634 2 -638c5 -14 37 -16 76 -18v-32c-20 3 -310 3 -328 0v32c38 2 70 4 76 18c1 4 2 18 2 282c0 296 1 287 -7 304c-6 12 -38 16 -71 15c0 16 0 32 2 32zM233 615c-1 8 -40 35 -71 0c-5 -10 -5 4 -5 -283c0 -187 0 -270 1 -272 c6 -20 58 -38 74 2"],120158:[466,0,977,25,959,"27 431c20 0 214 11 255 11c5 0 9 1 16 -6v-86c31 117 236 184 287 -5c67 172 242 141 282 32c12 -32 13 -39 14 -184c0 -152 -2 -150 14 -156c7 -2 43 -4 64 -5v-32c-13 3 -219 3 -231 0v32h22c38 2 48 4 54 18c1 4 2 12 2 137c0 132 17 231 -81 231 c-84 0 -121 -69 -132 -111c-5 -21 -5 -23 -5 -143c0 -112 -2 -120 14 -127c7 -2 43 -4 64 -5v-32c-13 3 -219 3 -231 0v32c38 -1 75 4 81 18c1 4 2 12 2 137c0 132 12 231 -86 231c-84 0 -118 -69 -129 -111c-5 -21 -5 -23 -5 -143c0 -112 -2 -120 14 -127c7 -2 35 -4 56 -5 v-32h-343v32h22c38 2 48 4 54 18c1 4 2 13 2 156c0 157 1 161 -7 178c-6 14 -36 20 -71 15c0 16 0 32 2 32zM140 62c3 -21 55 -23 85 -2c1 2 0 191 0 293c-30 23 -80 16 -85 0c0 -79 -1 -289 0 -291"],120159:[457,0,684,27,665,"140 62c3 -21 55 -23 85 -2c1 2 0 191 0 293c-30 23 -80 16 -85 0c0 -79 -1 -289 0 -291zM29 431c20 0 197 11 238 11c5 0 9 1 16 -6v-87c26 41 55 94 118 107c30 6 155 -4 179 -99c7 -23 6 -14 7 -164c0 -152 -2 -150 14 -156c7 -2 21 -4 42 -5h22v-32 c-12 4 -225 0 -226 0v32h22c38 2 48 4 54 18c1 4 2 12 2 136c0 113 19 238 -87 232c-86 0 -133 -68 -143 -119c-4 -17 -4 -27 -4 -136c0 -111 -2 -119 14 -126c10 -8 54 -4 64 -5v-32c-19 3 -314 3 -334 0v32c45 -1 74 3 76 18c1 4 2 13 2 156c0 157 2 161 -7 178 c-6 11 -37 16 -71 15"],120160:[462,11,602,27,572,"140 417c169 111 424 9 432 -183c2 -54 -18 -121 -61 -163c-148 -150 -439 -82 -479 104c-18 86 25 188 108 242zM439 374c-76 74 -214 39 -200 42l-1 -393c27 -9 122 -23 193 36c71 61 85 238 8 315zM160 387c-112 -75 -119 -253 0 -332c0 100 1 332 0 332"],120161:[442,194,681,29,666,"31 431c21 0 199 11 240 11c6 0 16 1 23 -6l1 -70c103 137 308 78 359 -79c46 -136 -45 -296 -190 -296c-36 0 -119 5 -168 73c-2 2 0 -112 0 -208c5 -14 15 -16 54 -18h22v-32c-20 3 -325 3 -343 0v32h22c38 2 48 4 54 18c1 4 2 17 2 253c0 267 3 258 -7 275 c-6 11 -39 14 -71 15c0 16 0 32 2 32zM467 28v380c-23 9 -38 11 -66 4c-80 -24 -101 -89 -106 -106v-178c30 -97 145 -100 172 -100zM513 38c18 28 26 30 46 72c46 92 27 234 -46 289v-361zM146 363v-497c1 -14 62 -29 85 2v495c-23 8 -37 30 -85 0"],120162:[442,194,681,22,660,"658 431c-21 0 -199 11 -240 11c-6 0 -16 1 -23 -6l-1 -70c-103 137 -308 78 -359 -79c-46 -136 45 -296 190 -296c36 0 119 5 168 73c2 2 0 -112 0 -208c-5 -14 -15 -16 -54 -18h-22v-32c20 3 325 3 343 0v32h-22c-38 2 -48 4 -54 18c-1 4 -2 17 -2 253 c0 267 -3 258 7 275c6 11 39 14 71 15c0 16 0 32 -2 32zM222 28v380c23 9 38 11 66 4c80 -24 101 -89 106 -106v-178c-30 -97 -145 -100 -172 -100zM176 38c-18 28 -26 30 -46 72c-46 92 -27 234 46 289v-361zM543 363v-497c-1 -14 -62 -29 -85 2v495c23 8 37 30 85 0"],120163:[442,0,509,27,497,"29 431c20 0 169 11 209 11c5 0 42 1 49 -6l1 -106c10 31 44 107 129 112c63 0 82 -41 80 -65c-3 -51 -81 -57 -87 -1c-2 40 37 43 12 43c-89 0 -134 -112 -134 -192v-170c6 -18 21 -25 89 -25v-32c-18 3 -333 3 -350 0v32c42 -1 70 1 72 18c1 4 2 13 2 156 c0 157 2 161 -7 178c-7 10 -35 14 -67 15c0 16 0 32 2 32zM222 363c-15 21 -55 38 -77 0v-303c14 -27 67 -28 76 2c1 2 1 219 1 301"],120164:[454,14,496,32,463,"44 -11c-4 0 -7 0 -11 6c0 93 -3 175 3 178c2 0 8 1 11 1c9 0 10 -2 14 -17c42 -150 169 -148 201 -145v179c-4 4 -92 16 -141 34c-90 35 -106 99 -70 156c53 84 253 99 333 32c20 17 38 35 43 35c4 0 7 0 11 -6v-141c-7 -7 -5 -6 
-13 -6c-10 0 -13 1 -13 17 c-7 85 -70 116 -162 116h-11v-150c58 -10 207 -40 223 -133c8 -47 -20 -93 -53 -118c-61 -45 -227 -70 -312 15l-21 -24c-18 -21 -22 -29 -32 -29zM187 425c-10 0 -75 -10 -103 -38c-16 -18 -19 -39 -7 -57c25 -34 110 -45 110 -45v140zM319 15c10 3 106 12 106 86 c0 14 -8 61 -106 83v-169"],120165:[615,11,499,23,482,"254 615h58v-184h150v-32h-150v-384c54 0 111 25 131 66c8 16 8 19 9 62v38h30v-39c-1 -38 0 -50 -14 -76c-57 -105 -323 -109 -350 25c-2 9 -2 10 -3 159v149h-92v23c123 7 231 100 231 193zM245 20v379h-82v-143c0 -151 0 -153 5 -169c14 -51 62 -63 77 -67"],120166:[442,11,699,23,675,"25 431c21 0 175 11 216 11c6 0 42 1 49 -6v-414c81 -13 209 7 230 112c2 8 2 12 3 114c0 137 15 148 -55 151h-19v32c8 0 34 3 148 11v-182c0 -194 -1 -201 7 -213c7 -12 39 -22 71 -15v-32l-148 -11v94c-75 -113 -292 -101 -351 -70c-34 17 -63 41 -72 80 c-1 146 -3 279 -10 290s-36 16 -71 16c0 16 0 32 2 32zM223 363c-15 33 -55 48 -82 1c-1 -7 -2 -245 0 -259c5 -52 53 -67 82 -77v335"],120167:[441,11,669,17,653,"323 -11c-73 0 -77 -1 -81 7c-44 32 -104 460 -225 403v41c19 -4 316 -2 332 1v-42c-39 5 -62 -7 -58 -28c1 -5 126 -343 127 -342c1 0 111 297 115 309c12 39 -23 61 -47 61v41c8 -3 156 -4 167 0v-41c-43 0 -70 -20 -82 -52c-4 -13 -131 -339 -137 -351 c-9 -9 -30 -7 -111 -7zM341 50c0 5 -115 315 -117 318c-14 28 -83 23 -83 1c0 -3 112 -307 117 -317c13 -27 83 -23 83 -2"],120168:[437,12,889,17,844,"670 437c9 -2 166 -4 174 0v-38c-70 0 -79 -38 -142 -228c-32 -95 -66 -173 -67 -175c-2 -9 -22 -12 -31 -2c-1 2 -60 190 -92 285c-38 -111 -104 -282 -106 -284c-5 -6 -31 -6 -108 -6c-78 0 -77 0 -81 6c-1 2 -30 87 -65 189c-70 213 -64 204 -77 209c-8 4 -38 5 -58 6 v38c18 -4 313 -3 330 0v-38c-18 0 -63 -4 -63 -27c0 -4 8 -41 53 -172c31 -91 55 -167 55 -168c0 0 104 284 105 288c0 0 -16 66 -34 73c-6 3 -39 5 -56 6v38c11 -4 191 -3 202 0v-38c-25 0 -54 -6 -60 -16c-5 -10 -8 -2 46 -157l42 -140c1 1 93 259 95 266 c2 28 -26 50 -62 47v38zM214 372c-12 19 -77 22 -82 -1c-4 -16 102 -318 104 -320c6 -23 83 -26 83 0c0 6 -101 314 -105 321"],120169:[431,0,704,15,676,"15 0v32c50 0 110 24 140 47c7 6 61 58 103 99c-76 104 -161 207 -174 213c-11 5 -43 8 -69 8v32c22 -4 351 -3 368 0v-32c-33 -5 -46 -21 -40 -32c1 -3 47 -66 56 -78c21 20 71 58 75 63c7 10 18 40 -13 47v32c8 -3 182 -4 193 0v-32c-86 -6 -145 -59 -187 -99l-42 -34 l153 -208c4 -4 24 -26 85 -26h13v-32c-21 4 -354 3 -371 0v32c20 -1 44 14 37 30c-1 2 -47 67 -68 94c-2 -2 -102 -84 -90 -107c3 -6 11 -15 21 -17v-32c-8 3 -163 3 -190 0zM266 371c-16 13 -115 18 -104 -4c2 -5 226 -298 231 -304c10 -28 93 -13 109 -1 c-1 1 -51 71 -112 154c-76 104 -121 152 -124 155"],120170:[431,204,700,17,674,"476 431c11 -3 191 -4 198 0v-32c-11 0 -73 -4 -101 -49c-7 -11 -254 -447 -256 -450c-27 -45 -120 -104 -200 -104c-78 0 -101 83 -89 115c15 38 77 35 82 -13c2 -37 -28 -44 -39 -46c39 -31 94 -30 143 12c26 22 66 44 89 108c-54 102 -202 410 -205 414 c-11 16 -44 12 -81 13v32c18 -4 323 -3 340 0v-32c-36 4 -66 -2 -51 -25c1 -4 112 -215 113 -214c1 0 98 186 100 192c7 25 -6 46 -43 47v32zM231 373c-20 20 -83 18 -83 -3l176 -333c1 1 43 80 43 82c-51 97 -132 250 -136 254"],120171:[447,0,560,12,548,"262 27c194 0 243 0 260 161h26c0 -19 -16 -160 -16 -188l-514 1c-4 3 -4 2 -6 11c0 4 0 8 2 9c0 1 202 281 278 387c-222 -8 -176 1 -212 -132v-7h-26c4 13 10 172 12 178h474c8 -8 8 -31 6 -36c-4 -4 -284 -383 -284 -384zM484 407c2 1 -64 1 -106 1 c-2 -1 -282 -381 -282 -382h118c76 104 270 380 270 381"],120792:[689,16,600,28,568,"139 621c168 165 421 13 429 -272c1 -81 -18 -181 -62 -243c-146 -223 -435 -122 -474 154c-17 129 25 281 107 361zM434 557c-74 110 -211 58 -198 63v-585c27 -14 121 -35 192 52c70 92 85 355 6 470zM173 
577c-78 -114 -89 -379 0 -495c0 149 1 495 0 495"],120793:[689,3,600,44,556,"154 543l-88 -30c-12 27 -18 46 -18 72c76 14 216 55 362 104l14 -10c-8 -115 -12 -180 -12 -262v-331c0 -30 24 -46 70 -48l74 -3v-38c-58 1 -114 2 -130 2c-14 0 -124 1 -160 0c-22 0 -150 -1 -222 -2v38l106 2c2 4 4 234 4 506zM194 557v-518h118v553"],120794:[679,3,600,30,570,"541 89v-44c-14 -4 -386 -2 -406 -2l41 46h365zM399 341l-95 -76c38 23 186 347 -101 321c178 83 399 -27 196 -245zM30 -3h540c-3 25 -3 38 -3 54c0 23 -3 38 -1 66h-367l210 176c80 67 124 142 124 208c0 104 -108 178 -257 178c-72 0 -126 -18 -195 -62l-22 -170 l45 -3l22 70c9 31 64 57 117 57c80 0 110 -40 110 -110c0 -77 -27 -161 -121 -241l-202 -171v-52"],120795:[679,17,600,36,564,"310 615c112 -59 131 -130 50 -224c238 170 40 242 -50 224zM348 325c36 -33 61 -79 61 -140c-1 -35 -10 -113 -51 -146c220 108 194 244 -10 286zM78 170c28 -55 75 -130 186 -130c77 0 115 62 115 137c0 77 -26 127 -117 127c-27 0 -50 -2 -74 -8l-9 5l23 76h42 c82 0 137 43 137 108c0 57 -47 93 -123 93c-75 0 -119 -33 -142 -104h-43l38 158c70 34 119 47 186 47c126 0 219 -61 219 -145c0 -63 -43 -109 -136 -150c54 -6 80 -12 110 -28c49 -28 74 -70 74 -125c0 -137 -155 -248 -348 -248c-59 0 -99 6 -149 24 c-17 82 -20 107 -31 153"],120796:[689,3,600,50,550,"392 638l-52 -11l-28 -56v-536h80v603zM50 156v40l220 451l188 42l8 -12v-436h80v-85h-86v-42c0 -47 18 -73 56 -76l34 -3v-38c-134 3 -204 3 -218 3c-12 0 -32 0 -64 -1c-14 0 -60 -1 -108 -2v38l62 3c34 2 54 29 54 76v42h-226zM276 241v299l-160 -299h160"],120797:[675,17,600,27,573,"286 9v8c-94 -12 -183 29 -232 117l-8 -2c15 -52 30 -88 35 -109c78 -28 155 -29 205 -14zM207 639h-76v-315c16 7 54 14 76 18v297zM264 554l-5 -159c69 27 49 35 100 35c147 0 214 -90 214 -204c0 -145 -178 -243 -371 -243c-55 0 -92 6 -136 22c-8 33 -16 59 -39 138 l38 18c41 -79 103 -119 187 -119c93 0 154 55 154 140c0 86 -64 142 -162 142c-53 0 -96 -12 -135 -41l-23 11v372l8 9c71 -4 146 -5 256 -7l214 7c-7 -23 -4 -46 -4 -66c0 -13 0 -27 2 -43l-8 -12c-102 2 -160 3 -173 3"],120798:[679,17,600,29,571,"224 255l3 34c-2 -12 -3 -24 -3 -34zM312 638c-343 -178 -306 -630 -34 -628c-226 88 -200 388 34 628zM467 679l46 -28l-17 -17c-12 1 -22 1 -27 1c-87 0 -167 -34 -215 -96c-36 -47 -53 -101 -63 -204c58 63 105 86 177 86c114 0 203 -75 203 -194 c0 -146 -124 -244 -279 -244c-87 0 -157 26 -200 79c-38 47 -63 125 -63 209c0 144 63 257 184 336c77 51 140 70 254 72zM296 332c-73 0 -109 -42 -109 -128c0 -104 45 -170 117 -170c69 0 107 52 107 142c0 93 -46 156 -115 156"],120799:[675,3,600,29,571,"495 625v23h-54l-323 -617h74c0 2 224 455 303 594zM571 671v-46l-315 -628c-12 0 -113 3 -169 0l-7 11l295 576l-5 6h-203c-58 0 -71 -8 -77 -53l-10 -66h-46v93c0 49 0 35 -5 102l13 9c52 -1 103 -2 120 -2c104 -4 172 -5 203 -5h34"],120800:[679,17,600,38,562,"200 346c76 -33 162 -58 212 -105c16 -24 36 -60 34 -93c60 181 -144 189 -204 223c-54 30 -87 46 -101 118c-4 105 83 152 147 160l24 15c-136 2 -233 -82 -225 -175c4 -46 45 -114 113 -143zM180 329c-82 41 -118 85 -122 160c0 115 100 190 254 190 c130 0 214 -58 214 -147c0 -58 -30 -97 -114 -145c50 -18 72 -30 98 -56c34 -34 52 -74 52 -122c0 -130 -120 -226 -282 -226c-142 0 -242 73 -242 176c0 73 46 129 142 170zM302 273l-72 28c-64 -48 -84 -71 -84 -125c0 -80 64 -140 152 -140c74 0 121 50 121 118 c0 59 -24 96 -117 119zM300 424l52 -20c54 38 72 68 72 111c0 66 -50 106 -126 106c-68 0 -107 -32 -107 -87c0 -50 23 -79 109 -110"],120801:[679,17,600,38,562,"325 646c94 -39 124 -129 124 -247c0 -144 -30 -256 -122 -336c256 93 252 559 -2 583zM128 -17l-46 29l11 19c17 -2 30 -3 38 -3c33 0 88 12 123 29c56 25 99 76 122 141c13 36 19 64 25 126l-76 -57c-24 -17 -48 -24 -88 -24c-127 
0 -199 71 -199 196 c0 148 101 240 263 240c170 0 261 -98 261 -278c0 -181 -101 -328 -266 -390c-54 -20 -88 -25 -168 -28zM292 625c-69 0 -111 -50 -111 -136c0 -97 46 -157 122 -157c72 0 105 39 105 125c0 108 -41 168 -116 168"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/DoubleStruck/Regular/Main.js"); | PypiClean |
/CoAPthon-4.0.2.tar.gz/CoAPthon-4.0.2/coapthon/layers/forwardLayer.py | import copy
from coapthon.messages.request import Request
from coapclient import HelperClient
from coapthon.messages.response import Response
from coapthon import defines
from coapthon.resources.remoteResource import RemoteResource
from coapthon.utils import parse_uri
__author__ = 'Giacomo Tanganelli'
class ForwardLayer(object):
"""
Class used by Proxies to forward messages.
"""
def __init__(self, server):
self._server = server
def receive_request(self, transaction):
"""
Setup the transaction for forwarding purposes on Forward Proxies.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction
"""
uri = transaction.request.proxy_uri
host, port, path = parse_uri(uri)
path = str("/" + path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
return self._forward_request(transaction, (host, port), path)
def receive_request_reverse(self, transaction):
"""
Setup the transaction for forwarding purposes on Reverse Proxies.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction
"""
path = str("/" + transaction.request.uri_path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
if path == defines.DISCOVERY_URL:
transaction = self._server.resourceLayer.discover(transaction)
else:
new = False
if transaction.request.code == defines.Codes.POST.number:
new_paths = self._server.root.with_prefix(path)
new_path = "/"
for tmp in new_paths:
if len(tmp) > len(new_path):
new_path = tmp
if path != new_path:
new = True
path = new_path
try:
resource = self._server.root[path]
except KeyError:
resource = None
if resource is None or path == '/':
# Not Found
transaction.response.code = defines.Codes.NOT_FOUND.number
else:
transaction.resource = resource
transaction = self._handle_request(transaction, new)
return transaction
@staticmethod
def _forward_request(transaction, destination, path):
"""
Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction
"""
client = HelperClient(destination)
request = Request()
request.options = copy.deepcopy(transaction.request.options)
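        # Drop hop-specific options copied from the incoming request (blockwise
        # transfer state, URI path, proxy options, observe); the relevant ones are
        # re-set below for the outgoing request to the destination server.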
del request.block2
del request.block1
del request.uri_path
del request.proxy_uri
del request.proxy_schema
# TODO handle observing
del request.observe
# request.observe = transaction.request.observe
request.uri_path = path
request.destination = destination
request.payload = transaction.request.payload
request.code = transaction.request.code
response = client.send_request(request)
client.stop()
transaction.response.payload = response.payload
transaction.response.code = response.code
transaction.response.options = response.options
return transaction
def _handle_request(self, transaction, new_resource):
"""
Forward requests. Used by reverse proxies to also create new virtual resources on the proxy
in case of created resources
:type new_resource: bool
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:param new_resource: if the request will generate a new resource
:return: the edited transaction
"""
client = HelperClient(transaction.resource.remote_server)
request = Request()
request.options = copy.deepcopy(transaction.request.options)
del request.block2
del request.block1
del request.uri_path
del request.proxy_uri
del request.proxy_schema
# TODO handle observing
del request.observe
# request.observe = transaction.request.observe
request.uri_path = "/".join(transaction.request.uri_path.split("/")[1:])
request.destination = transaction.resource.remote_server
request.payload = transaction.request.payload
request.code = transaction.request.code
response = client.send_request(request)
client.stop()
transaction.response.payload = response.payload
transaction.response.code = response.code
transaction.response.options = response.options
if response.code == defines.Codes.CREATED.number:
lp = transaction.response.location_path
del transaction.response.location_path
transaction.response.location_path = transaction.request.uri_path.split("/")[0] + "/" + lp
# TODO handle observing
if new_resource:
resource = RemoteResource('server', transaction.resource.remote_server, lp, coap_server=self,
visible=True,
observable=False,
allow_children=True)
self._server.add_resource(transaction.response.location_path, resource)
if response.code == defines.Codes.DELETED.number:
del self._server.root["/" + transaction.request.uri_path]
return transaction | PypiClean |
/Faker-Events-1.6.0.tar.gz/Faker-Events-1.6.0/faker_events/handlers.py | import json
from sys import stdout
from .text_color import eprint, Palatte
__all__ = ['Stream']
class Stream():
"""
A Handler for sending events to a Data Steam. By default events are writen
to standard out on the console.
Parameters
----------
stype: str
Stream Type. 'console', 'kafka' or 'kinesis'
host: str
Host to connect too (Used for Kafka)
name: str
Topic Name for Kafka or Stream Name for Kinesis
key: str
Partition Key to be used. (Required for Kinesis)
"""
def __init__(self,
stype: str = 'console',
host: str = None,
name: str = None,
key: str = None):
self.stype = stype
self.host = host
self.name = name
self.key = key
if stype == 'console':
self._setup_console()
elif stype == 'kafka':
self._setup_kafka()
elif stype == 'kinesis':
self._setup_kinesis()
else:
raise ValueError('Unknown stream type')
def _setup_console(self):
def send(message):
stdout.write(json.dumps(message) + '\n')
self.send = send
def _setup_kafka(self) -> None:
eprint("Logging to Kafka", Palatte.BLUE)
if self.host is None:
raise ValueError('A host name must be supplied with Kafka')
if self.name is None:
raise ValueError('A stream "name" must be supplied with kinesis')
from kafka import KafkaProducer
producer = KafkaProducer(
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
bootstrap_servers=[self.host])
def send_kafka(message):
producer.send(topic=self.name, value=message)
self.send = send_kafka
def _setup_kinesis(self):
eprint("Logging to Kinesis", Palatte.BLUE)
if self.name is None:
raise ValueError('A stream "name" must be supplied with kinesis')
if self.key is None:
raise ValueError('A partition key must be supplied with Kinesis')
import boto3
kinesis = boto3.client('kinesis')
def send(message):
formatted = json.dumps(message).encode()
kinesis.put_record(
StreamName=self.name,
Data=formatted,
PartitionKey=self.key,
)
self.send = send | PypiClean |
/NeuralPlayground-0.0.7.tar.gz/NeuralPlayground-0.0.7/neuralplayground/arenas/hafting_2008.py | import copy
from typing import Union
import matplotlib as mpl
import numpy as np
from neuralplayground.arenas import Simple2D
from neuralplayground.experiments import Hafting2008Data
class Hafting2008(Simple2D):
"""Arena resembling Hafting2008 experimental setting
Methods
----------
__init__
Initialise the class
reset(self):
Reset the environment variables
step(self, action):
        Increments the global step count of the agent in the environment and updates the position of the agent according
to the recordings of the specific chosen session
    Attributes (in addition to the ones in the Simple2D class)
---------
use_behavioral_data: bool
If True, then uses the animal trajectories recorded in Hafting 2008
experiment: neuralplayground.experiments.Hafting2008Data
Experiment class object with neural recordings and animal trajectories
"""
def __init__(
self,
use_behavioral_data: bool = False,
data_path: str = None,
recording_index: int = None,
environment_name: str = "Hafting2008",
verbose: bool = False,
experiment_class=Hafting2008Data,
**env_kwargs,
):
"""Initialise the class
Parameters
----------
use_behavioral_data: bool
If True, then uses the animal trajectories recorded in Hafting 2008
data_path: str
if None, fetch the data from the NeuralPlayground data repository,
else load data from given path
recording_index: int
if None, load data from default recording index of corresponding experiment class
environment_name: str
Name of the specific instantiation of the Hafting2008 class
verbose: bool
Set to True to show the information of the class
experiment_class:
Experiment class to be initialized
env_kwargs: dict
Leave empty in this class, the arena parameters and sampling values are set to resemble the experiment
For full control over these parameters use Simple2D class
"""
self.data_path = data_path
self.environment_name = environment_name
self.use_behavioral_data = use_behavioral_data
self.experiment = experiment_class(
data_path=self.data_path,
experiment_name=self.environment_name,
verbose=verbose,
recording_index=recording_index,
)
self.arena_limits = self.experiment.arena_limits
self.recording_list = self.experiment.recording_list
self.arena_x_limits, self.arena_y_limits = (
self.arena_limits[0, :],
self.arena_limits[1, :],
)
env_kwargs["arena_x_limits"] = self.arena_x_limits
env_kwargs["arena_y_limits"] = self.arena_y_limits
env_kwargs["agent_step_size"] = 1.0
env_kwargs["time_step_size"] = 1 / 50 # Taken from experiment, 50 Hz movement sampling
super().__init__(environment_name, **env_kwargs)
if self.use_behavioral_data:
self.state_dims_labels = [
"x_pos",
"y_pos",
"head_direction_x",
"head_direction_y",
]
def reset(self, random_state: bool = False, custom_state: np.ndarray = None):
"""Reset the environment variables. If using behavioral data, it will reset the position to the
initial position of the trajectory recorded in the experiment
Parameters
----------
random_state: bool
If True, sample a new position uniformly within the arena, use default otherwise
custom_state: np.ndarray
If given, use this array to set the initial state
Returns
----------
observation: ndarray
Because this is a fully observable environment, make_observation returns the state of the environment
            Array of the observation of the agent in the environment (could be modified as the environment evolves)
self.state: ndarray (2,)
Vector of the x and y coordinate of the position of the animal in the environment
"""
# Reset to first position recorded in this session
if self.use_behavioral_data:
self.pos, self.head_dir = (
self.experiment.position[0, :],
self.experiment.head_direction[0, :],
)
custom_state = np.concatenate([self.pos, self.head_dir])
return super().reset(random_state=False, custom_state=custom_state)
# Default reset
else:
return super().reset(random_state=random_state, custom_state=custom_state)
def set_animal_data(
self,
recording_index: int = 0,
tolerance: float = 1e-10,
keep_history: bool = True,
):
"""Set position and head direction to be used by the Arena Class,
See neuralplayground.experiments classes"""
self.experiment.set_animal_data(recording_index=recording_index, tolerance=tolerance)
if keep_history:
prev_hist = copy.copy(self.history)
self.reset()
self.history = prev_hist
else:
self.reset()
def show_data(self, full_dataframe: bool = False):
"""Print of available data recorded in the experiment
Parameters
----------
full_dataframe: bool
            if True, it will show all available experiments, a small sample otherwise
Returns
-------
recording_list: Pandas dataframe
            List of available experiments, with columns for rat_id, recording session and recorded variables
"""
self.experiment.show_data(full_dataframe=full_dataframe)
return self.experiment.show_data(full_dataframe=full_dataframe)
def plot_recording_tetr(
self,
recording_index: Union[int, tuple, list] = None,
save_path: Union[str, tuple, list] = None,
ax: Union[mpl.axes.Axes, tuple, list] = None,
tetrode_id: Union[str, tuple, list] = None,
bin_size: float = 2.0,
):
"""Check plot_recording_tetrode method from neuralplayground.experiments.Hafting2008Data"""
return self.experiment.plot_recording_tetr(recording_index, save_path, ax, tetrode_id, bin_size)
def recording_tetr(
self,
recording_index: Union[int, tuple, list] = None,
save_path: Union[str, tuple, list] = None,
tetrode_id: Union[str, tuple, list] = None,
bin_size: float = 2.0,
):
"""Check plot_recording_tetrode method from neuralplayground.experiments.Hafting2008Data"""
return self.experiment.recording_tetr(recording_index, save_path, tetrode_id, bin_size)
def plot_recorded_trajectory(
self,
recording_index: Union[int, tuple, list] = None,
save_path: Union[str, tuple, list] = None,
ax: Union[mpl.axes.Axes, tuple, list] = None,
plot_every: int = 20,
):
"""Check plot_trajectory method from neuralplayground.experiments.Hafting2008Data"""
return self.experiment.plot_trajectory(
recording_index=recording_index,
save_path=save_path,
ax=ax,
plot_every=plot_every,
)
def step(self, action: np.ndarray, normalize_step: bool = False, skip_every: int = 10):
"""Runs the environment dynamics. Increasing global counters.
Given some action, return observation, new state and reward.
If using behavioral data, the action argument is ignored, and instead inferred from the recorded trajectory
in the experiment.
Parameters
----------
action: ndarray (2,)
            Array containing the action of the agent, in this case the delta_x and delta_y increment to position
normalize_step: bool
If true, the action is normalized to have unit size, then scaled by the agent step size
skip_every: int
When using behavioral data, the next state will be the position and head direction
"skip_every" recorded steps after the current one, essentially reducing the sampling rate
Returns
-------
reward: float
The reward that the animal receives in this state
new_state: ndarray
            Updated state vector containing the x and y coordinates of the position and the head direction
observation: ndarray
Array of the observation of the agent in the environment
"""
if not self.use_behavioral_data:
return super().step(action)
# In this case, the action is ignored and computed from the step in behavioral data recorded from the experiment
if self.global_steps * skip_every >= self.experiment.position.shape[0] - 1:
self.global_steps = np.random.choice(np.arange(skip_every))
# Time elapsed since last reset
self.global_time = self.global_steps * self.time_step_size
# New state as "skip every" steps after the current one in the recording
new_state = (
self.experiment.position[self.global_steps * skip_every, :],
self.experiment.head_direction[self.global_steps * skip_every, :],
)
new_state = np.concatenate(new_state)
# Inferring action from recording
action = new_state - self.state
reward = self.reward_function(action, state=self.state)
transition = {
"action": action,
"state": self.state,
"next_state": new_state,
"reward": reward,
"step": self.global_steps,
}
self.history.append(transition)
self.state = new_state
observation = self.make_observation()
self._increase_global_step()
return observation, new_state, reward
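# Usage sketch for the Hafting2008 arena above (not part of the original module; the
# call pattern is an assumption and return shapes follow the docstrings above):
#   env = Hafting2008(use_behavioral_data=True)
#   obs, state = env.reset()
#   obs, state, reward = env.step(action=None)  # action is inferred from the recording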
if __name__ == "__main__":
hafting_data = Hafting2008Data(verbose=False)
hafting_data.show_readme() | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/model/attachment_endpoint_request.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.ats.model.attachment_request import AttachmentRequest
globals()['AttachmentRequest'] = AttachmentRequest
class AttachmentEndpointRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'model': (AttachmentRequest,), # noqa: E501
'remote_user_id': (str,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'model': 'model', # noqa: E501
'remote_user_id': 'remote_user_id', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, model, remote_user_id, *args, **kwargs): # noqa: E501
"""AttachmentEndpointRequest - a model defined in OpenAPI
Args:
model (AttachmentRequest):
remote_user_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.model = model
self.remote_user_id = remote_user_id
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, model, remote_user_id, *args, **kwargs): # noqa: E501
"""AttachmentEndpointRequest - a model defined in OpenAPI
Args:
model (AttachmentRequest):
remote_user_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.model: Union["AttachmentRequest"] = model
self.remote_user_id: Union[str] = remote_user_id | PypiClean |
/MNN-0.0.7-cp27-cp27mu-manylinux2010_x86_64.whl/MNNTools/MNN_FB/QuantizedFloatParam.py |
# namespace: MNN
import flatbuffers
class QuantizedFloatParam(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsQuantizedFloatParam(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = QuantizedFloatParam()
x.Init(buf, n + offset)
return x
# QuantizedFloatParam
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# QuantizedFloatParam
def Weight(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# QuantizedFloatParam
def WeightAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int8Flags, o)
return 0
# QuantizedFloatParam
def WeightLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# QuantizedFloatParam
def Bias(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# QuantizedFloatParam
def BiasAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# QuantizedFloatParam
def BiasLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# QuantizedFloatParam
def Scale(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# QuantizedFloatParam
def ScaleAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# QuantizedFloatParam
def ScaleLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# QuantizedFloatParam
def TensorScale(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# QuantizedFloatParam
def TensorScaleAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# QuantizedFloatParam
def TensorScaleLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
def QuantizedFloatParamStart(builder): builder.StartObject(4)
def QuantizedFloatParamAddWeight(builder, weight): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(weight), 0)
def QuantizedFloatParamStartWeightVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def QuantizedFloatParamAddBias(builder, bias): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(bias), 0)
def QuantizedFloatParamStartBiasVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizedFloatParamAddScale(builder, scale): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
def QuantizedFloatParamStartScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizedFloatParamAddTensorScale(builder, tensorScale): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(tensorScale), 0)
def QuantizedFloatParamStartTensorScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizedFloatParamEnd(builder): return builder.EndObject() | PypiClean |
/NeuroTools-0.3.1.tar.gz/NeuroTools-0.3.1/src/visual_logging.py | import zipfile, atexit, os
from NeuroTools import check_dependency
from datetime import datetime
from logging import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARN, WARNING, NOTSET
from time import sleep
if check_dependency('matplotlib'):
import matplotlib
matplotlib.use('Agg')
if check_dependency('pylab'):
import pylab
_filename = 'visual_log.zip'
_zipfile = None
_level = INFO
_last_timestamp = ''
def _remove_if_empty():
if len(_zipfile.namelist()) == 0 and os.path.exists(_filename):
os.remove(_filename)
def basicConfig(filename, level=INFO):
global _zipfile, _filename, _level
_filename = filename
_level = level
#_zipfile.close()
if os.path.exists(filename) and zipfile.is_zipfile(filename):
mode = 'a'
else:
mode = 'w'
_zipfile = zipfile.ZipFile(filename, mode=mode, compression=zipfile.ZIP_DEFLATED)
atexit.register(_zipfile.close)
atexit.register(_remove_if_empty)
def _reopen():
global _zipfile
if (_zipfile.fp is None) or _zipfile.fp.closed:
_zipfile = zipfile.ZipFile(_filename, mode='a', compression=zipfile.ZIP_DEFLATED)
def flush():
"""Until the zipfile is closed (normally on exit), the zipfile cannot
be accessed by other tools. Calling flush() closes the zipfile, which
will be reopened the next time a log function is called.
"""
_zipfile.close()
def _get_timestamp():
"""At the moment, it is not possible to create visual
logs at a rate of more than one/second."""
global _last_timestamp
timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
while timestamp == _last_timestamp:
sleep(0.1)
timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
_last_timestamp = timestamp
return timestamp
def _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs):
_reopen()
timestamp = _get_timestamp()
# create figure
pylab.clf()
if xdata is not None:
pylab.plot(xdata, ydata, **kwargs)
else:
if hasattr(ydata, 'shape') and len(ydata.shape) > 1:
pylab.matshow(ydata, **kwargs)
pylab.colorbar()
else:
pylab.plot(ydata)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
pylab.title(title)
# add it to the zipfile
fig_name = timestamp + '.png'
pylab.savefig(fig_name)
_zipfile.write(fig_name,
os.path.join(os.path.basename(os.path.splitext(_filename)[0]), fig_name))
os.remove(timestamp+'.png')
def debug(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= DEBUG:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def info(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= INFO:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def warning(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= WARNING:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def error(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= ERROR:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def critical(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= CRITICAL:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def exception(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= ERROR:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def log(level, ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs):
if _level <= level:
_plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs)
def test():
test_file = 'visual_logging_test.zip'
if os.path.exists(test_file):
os.remove(test_file)
basicConfig(test_file, level=DEBUG)
xdata = pylab.arange(0, 2*pylab.pi, 0.02*pylab.pi)
debug(pylab.sin(xdata), xdata, 'x', 'sin(x)', 'visual_logging test 1')
flush()
debug(0.5*pylab.sin(2*xdata-0.3), xdata, 'x', 'sin(2x-0.3)/2')
debug(pylab.sqrt(xdata), xdata, 'x', 'sqrt(x)')
flush()
zf = zipfile.ZipFile(test_file, 'r')
print zf.namelist()
assert len(zf.namelist()) == 3, zf.namelist()
zf.close()
# ==============================================================================
if __name__ == '__main__':
test() | PypiClean |
/ImageResolver-0.4.2.tar.gz/ImageResolver-0.4.2/imageresolver/plugins/opengraph.py | import re
import os
import logging
from operator import itemgetter
from imageresolver import FileExtensionResolver
class Plugin(object):
def get_image(self, url, soup):
ogtags = [{'type': 'facebook', 'attribute': 'property', 'name': 'og:image', 'value': 'content'},
{'type': 'facebook', 'attribute': 'rel', 'name': 'image_src', 'value': 'href'},
{'type': 'twitter', 'attribute': 'name', 'name': 'twitter:image', 'value': 'value'},
{'type': 'twitter', 'attribute': 'name', 'name': 'twitter:image', 'value': 'content'},
{'type': 'twitter', 'attribute': 'property', 'name': 'twitter:image', 'value': 'content'},
{'type': 'image', 'attribute': 'itemprop', 'name': 'image', 'value': 'content'}]
ogimages = []
for ogtag in ogtags:
tags = soup.find_all('meta', {ogtag['attribute']: ogtag['name']})
if tags:
try:
for image in tags:
                        # The image URL lives in the attribute named by ogtag['value']
                        # (e.g. 'content' or 'href'), not in a literal 'url' attribute.
                        url = FileExtensionResolver().resolve(image[ogtag['value']])
if url:
ogimages.append({'url': url, 'type': ogtag['type'], 'score': 0})
except KeyError as e:
pass
ogimages_len = len(ogimages)
# if more than 1 image, score and return the best one
if ogimages_len >= 1:
if ogimages_len == 1:
logger = logging.getLogger('ImageResolver')
logger.debug('Resolving using plugin ' + str(os.path.basename(__file__)) + ' ' + str(url))
resolved_image = ogimages[0]['url']
else:
for image in ogimages:
# sometimes opengraph tags don't have an actual image?
url = FileExtensionResolver().resolve(image['url'])
if not url:
image['score'] = -1
else:
if re.search('(large|big)', image['url'], re.IGNORECASE):
image['score'] += 1
if image['type'] == 'twitter':
image['score'] += 1
ogimages.sort(key=itemgetter('score'), reverse=True)
resolved_image = ogimages[0]['url']
if not re.search('^https?:', resolved_image):
if resolved_image.startswith('//'):
return 'https:' + resolved_image
else:
return resolved_image
return None | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/jquery/ui/jquery.ui.droppable.js | (function( $, undefined ) {
$.widget("ui.droppable", {
widgetEventPrefix: "drop",
options: {
accept: '*',
activeClass: false,
addClasses: true,
greedy: false,
hoverClass: false,
scope: 'default',
tolerance: 'intersect'
},
_create: function() {
var o = this.options, accept = o.accept;
this.isover = 0; this.isout = 1;
this.accept = $.isFunction(accept) ? accept : function(d) {
return d.is(accept);
};
//Store the droppable's proportions
this.proportions = { width: this.element[0].offsetWidth, height: this.element[0].offsetHeight };
// Add the reference and positions to the manager
$.ui.ddmanager.droppables[o.scope] = $.ui.ddmanager.droppables[o.scope] || [];
$.ui.ddmanager.droppables[o.scope].push(this);
(o.addClasses && this.element.addClass("ui-droppable"));
},
destroy: function() {
var drop = $.ui.ddmanager.droppables[this.options.scope];
for ( var i = 0; i < drop.length; i++ )
if ( drop[i] == this )
drop.splice(i, 1);
this.element
.removeClass("ui-droppable ui-droppable-disabled")
.removeData("droppable")
.unbind(".droppable");
return this;
},
_setOption: function(key, value) {
if(key == 'accept') {
this.accept = $.isFunction(value) ? value : function(d) {
return d.is(value);
};
}
$.Widget.prototype._setOption.apply(this, arguments);
},
_activate: function(event) {
var draggable = $.ui.ddmanager.current;
if(this.options.activeClass) this.element.addClass(this.options.activeClass);
(draggable && this._trigger('activate', event, this.ui(draggable)));
},
_deactivate: function(event) {
var draggable = $.ui.ddmanager.current;
if(this.options.activeClass) this.element.removeClass(this.options.activeClass);
(draggable && this._trigger('deactivate', event, this.ui(draggable)));
},
_over: function(event) {
var draggable = $.ui.ddmanager.current;
if (!draggable || (draggable.currentItem || draggable.element)[0] == this.element[0]) return; // Bail if draggable and droppable are same element
if (this.accept.call(this.element[0],(draggable.currentItem || draggable.element))) {
if(this.options.hoverClass) this.element.addClass(this.options.hoverClass);
this._trigger('over', event, this.ui(draggable));
}
},
_out: function(event) {
var draggable = $.ui.ddmanager.current;
if (!draggable || (draggable.currentItem || draggable.element)[0] == this.element[0]) return; // Bail if draggable and droppable are same element
if (this.accept.call(this.element[0],(draggable.currentItem || draggable.element))) {
if(this.options.hoverClass) this.element.removeClass(this.options.hoverClass);
this._trigger('out', event, this.ui(draggable));
}
},
_drop: function(event,custom) {
var draggable = custom || $.ui.ddmanager.current;
if (!draggable || (draggable.currentItem || draggable.element)[0] == this.element[0]) return false; // Bail if draggable and droppable are same element
var childrenIntersection = false;
this.element.find(":data(droppable)").not(".ui-draggable-dragging").each(function() {
var inst = $.data(this, 'droppable');
if(
inst.options.greedy
&& !inst.options.disabled
&& inst.options.scope == draggable.options.scope
&& inst.accept.call(inst.element[0], (draggable.currentItem || draggable.element))
&& $.ui.intersect(draggable, $.extend(inst, { offset: inst.element.offset() }), inst.options.tolerance)
) { childrenIntersection = true; return false; }
});
if(childrenIntersection) return false;
if(this.accept.call(this.element[0],(draggable.currentItem || draggable.element))) {
if(this.options.activeClass) this.element.removeClass(this.options.activeClass);
if(this.options.hoverClass) this.element.removeClass(this.options.hoverClass);
this._trigger('drop', event, this.ui(draggable));
return this.element;
}
return false;
},
ui: function(c) {
return {
draggable: (c.currentItem || c.element),
helper: c.helper,
position: c.position,
offset: c.positionAbs
};
}
});
$.extend($.ui.droppable, {
version: "1.8.10"
});
$.ui.intersect = function(draggable, droppable, toleranceMode) {
if (!droppable.offset) return false;
var x1 = (draggable.positionAbs || draggable.position.absolute).left, x2 = x1 + draggable.helperProportions.width,
y1 = (draggable.positionAbs || draggable.position.absolute).top, y2 = y1 + draggable.helperProportions.height;
var l = droppable.offset.left, r = l + droppable.proportions.width,
t = droppable.offset.top, b = t + droppable.proportions.height;
switch (toleranceMode) {
case 'fit':
return (l <= x1 && x2 <= r
&& t <= y1 && y2 <= b);
break;
case 'intersect':
return (l < x1 + (draggable.helperProportions.width / 2) // Right Half
&& x2 - (draggable.helperProportions.width / 2) < r // Left Half
&& t < y1 + (draggable.helperProportions.height / 2) // Bottom Half
&& y2 - (draggable.helperProportions.height / 2) < b ); // Top Half
break;
case 'pointer':
var draggableLeft = ((draggable.positionAbs || draggable.position.absolute).left + (draggable.clickOffset || draggable.offset.click).left),
draggableTop = ((draggable.positionAbs || draggable.position.absolute).top + (draggable.clickOffset || draggable.offset.click).top),
isOver = $.ui.isOver(draggableTop, draggableLeft, t, l, droppable.proportions.height, droppable.proportions.width);
return isOver;
break;
case 'touch':
return (
(y1 >= t && y1 <= b) || // Top edge touching
(y2 >= t && y2 <= b) || // Bottom edge touching
(y1 < t && y2 > b) // Surrounded vertically
) && (
(x1 >= l && x1 <= r) || // Left edge touching
(x2 >= l && x2 <= r) || // Right edge touching
(x1 < l && x2 > r) // Surrounded horizontally
);
break;
default:
return false;
break;
}
};
/*
This manager tracks offsets of draggables and droppables
*/
$.ui.ddmanager = {
current: null,
droppables: { 'default': [] },
prepareOffsets: function(t, event) {
var m = $.ui.ddmanager.droppables[t.options.scope] || [];
var type = event ? event.type : null; // workaround for #2317
var list = (t.currentItem || t.element).find(":data(droppable)").andSelf();
droppablesLoop: for (var i = 0; i < m.length; i++) {
if(m[i].options.disabled || (t && !m[i].accept.call(m[i].element[0],(t.currentItem || t.element)))) continue; //No disabled and non-accepted
for (var j=0; j < list.length; j++) { if(list[j] == m[i].element[0]) { m[i].proportions.height = 0; continue droppablesLoop; } }; //Filter out elements in the current dragged item
m[i].visible = m[i].element.css("display") != "none"; if(!m[i].visible) continue; //If the element is not visible, continue
m[i].offset = m[i].element.offset();
m[i].proportions = { width: m[i].element[0].offsetWidth, height: m[i].element[0].offsetHeight };
if(type == "mousedown") m[i]._activate.call(m[i], event); //Activate the droppable if used directly from draggables
}
},
drop: function(draggable, event) {
var dropped = false;
$.each($.ui.ddmanager.droppables[draggable.options.scope] || [], function() {
if(!this.options) return;
if (!this.options.disabled && this.visible && $.ui.intersect(draggable, this, this.options.tolerance))
dropped = dropped || this._drop.call(this, event);
if (!this.options.disabled && this.visible && this.accept.call(this.element[0],(draggable.currentItem || draggable.element))) {
this.isout = 1; this.isover = 0;
this._deactivate.call(this, event);
}
});
return dropped;
},
drag: function(draggable, event) {
//If you have a highly dynamic page, you might try this option. It renders positions every time you move the mouse.
if(draggable.options.refreshPositions) $.ui.ddmanager.prepareOffsets(draggable, event);
//Run through all droppables and check their positions based on specific tolerance options
$.each($.ui.ddmanager.droppables[draggable.options.scope] || [], function() {
if(this.options.disabled || this.greedyChild || !this.visible) return;
var intersects = $.ui.intersect(draggable, this, this.options.tolerance);
var c = !intersects && this.isover == 1 ? 'isout' : (intersects && this.isover == 0 ? 'isover' : null);
if(!c) return;
var parentInstance;
if (this.options.greedy) {
var parent = this.element.parents(':data(droppable):eq(0)');
if (parent.length) {
parentInstance = $.data(parent[0], 'droppable');
parentInstance.greedyChild = (c == 'isover' ? 1 : 0);
}
}
// we just moved into a greedy child
if (parentInstance && c == 'isover') {
parentInstance['isover'] = 0;
parentInstance['isout'] = 1;
parentInstance._out.call(parentInstance, event);
}
this[c] = 1; this[c == 'isout' ? 'isover' : 'isout'] = 0;
this[c == "isover" ? "_over" : "_out"].call(this, event);
// we just moved out of a greedy child
if (parentInstance && c == 'isout') {
parentInstance['isout'] = 0;
parentInstance['isover'] = 1;
parentInstance._over.call(parentInstance, event);
}
});
}
};
})(jQuery); | PypiClean |
/IF_LICENSE_PLATES_COULD_TALK-0.0.1.tar.gz/IF_LICENSE_PLATES_COULD_TALK-0.0.1/src/if_license_plates_could_talk/data/crime.py | import pandas as pd
import os
from . import utils
from . import population
from datetime import datetime
crime_categories = {
"crimes": ["Straftaten insgesamt"],
"fraud": [
"Betrug §§ 263, 263a, 264, 264a, 265, 265a, 265b StGB davon:", "Betrug §§ 263, 263a, 264, 264a, 265, 265a, 265b StGB",
"Urkundenfälschung §§ 267-271, 273-279, 281 StGB", "Betrug §§ 263, 263a, 264, 264a, 265, 265a-e StGB"],
"violence": ["Gefährliche und schwere Körperverletzung, Verstümmelung weiblicher Genitalien §§ 224, 226, 226a, 231 StGB",
"Vorsätzliche einfache Körperverletzung § 223 StGB",
"Gewaltkriminalität",
"Tätlicher Angriff auf Vollstreckungsbeamte und gleichstehende Personen §§ 114, 115 StGB",
"Vergewaltigung, sexuelle Nötigung und sexueller Übergriff im besonders schweren Fall einschl. mit Todesfolge §§ 177, 178 StGB",
"Mord, Totschlag und Tötung auf Verlangen"],
"theft": ["Raub, räuberische Erpressung und räuberischer Angriff auf Kraftfahrer §§ 249-252, 255, 316a StGB", "Raub, räuberische Erpressung auf/gegen Geldinstitute, Postfilialen und -agenturen",
"Raub, räuberische Erpressung auf/gegen sonstige Zahlstellen und Geschäfte",
"Handtaschenraub",
"Sonstige Raubüberfälle auf Straßen, Wegen oder Plätzen",
"Raubüberfälle in Wohnungen",
"Diebstahl ohne erschwerende Umstände §§ 242, 247, 248a-c StGB und zwar:",
"Einfacher Ladendiebstahl",
"Diebstahl unter erschwerenden Umständen §§ 243-244a StGB und zwar:",
"Wohnungseinbruchdiebstahl §§ 244 Abs. 1 Nr. 3 und Abs. 4, 244a StGB",
"Tageswohnungseinbruchdiebstahl §§ 244 Abs. 1 Nr. 3 und Abs. 4, 244a StGB",
"Diebstahl insgesamt und zwar:",
"Diebstahl insgesamt von Kraftwagen einschl. unbefugte Ingebrauchnahme",
"Diebstahl insgesamt von Mopeds und Krafträdern einschl. unbefugte Ingebrauchnahme",
"Diebstahl insgesamt von Fahrrädern einschl. unbefugte Ingebrauchnahme",
"Diebstahl insgesamt an/aus Kraftfahrzeugen",
"Taschendiebstahl insgesamt"
],
"drug": ["Rauschgiftdelikte (soweit nicht bereits mit anderer Schlüsselzahl erfasst)"]
}
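# Illustrative note: crime_filter(df, crime_categories["drug"]) (defined below) yields a
# boolean mask that keeps only the drug-offence rows of the raw BKA table.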
def crime_filter(df, categories, column="Straftat"):
"""Construct filter for crimes listed in [categories]
Args:
        df (DataFrame): table of crime records with one row per region and offence
        categories (list): crime category names (values of the "Straftat" column) to keep
        column (str, optional): column holding the offence name. Defaults to "Straftat".
    Returns:
        Series: boolean mask selecting the rows whose offence is listed in [categories]
"""
filt = df[column] == categories[0]
for cat in categories:
filt = filt | (df[column] == cat)
return filt
def year_to_path(year):
"""Compute path of data on crimes for the given year
Args:
year (int): year
Returns:
str: path to data file
"""
data_path = os.path.join(utils.path_to_data_dir(), "raw", "crime")
path = str(year)
files = os.listdir(os.path.join(data_path, "bka", path))
if len(files) > 0:
return os.path.join(data_path, "bka", path, files[0])
def prep_data_2013():
"""Preprocess data on crimes in 2013
Returns:
DataFrame: data on crimes in 2013
"""
df = pd.read_excel(year_to_path(2013), skiprows=6)[
["Unnamed: 1", "Unnamed: 2", "Fälle"]].dropna(subset=["Unnamed: 2"])
df.rename(columns={
"Unnamed: 1": "Straftat", "Unnamed: 2": "kreis_key", "Fälle": "crimes_2013"}, inplace=True)
cats = df.Straftat.unique()
df.kreis_key = utils.fix_key(df.kreis_key)
df = df[["Straftat", "kreis_key", "crimes_2013"]]
df_ges = pd.DataFrame()
for cat in crime_categories:
df_cat = df[crime_filter(df, crime_categories[cat])]
df_cat = df_cat.groupby("kreis_key").sum().reset_index()
df_cat = df_cat.rename(columns={"crimes_2013": f"{cat}_2013"})
if not df_ges.empty:
df_ges = df_ges.merge(df_cat, on="kreis_key", how="outer")
else:
df_ges = df_cat
df_ges = utils.fix_goettingen(df_ges, f"{cat}_2013")
df_ges = utils.fix_goettingen(df_ges, "crimes_2013")
return df_ges, list(cats)
def prep_data_14_20(year):
"""Preprocess data on crimes in the specified year
Args:
year (int): year in the range 2014-2020
Returns:
DataFrame: data on crimes in the given year
"""
crime_clm = f"crimes_{year}"
df = pd.read_csv(year_to_path(year), encoding="ISO-8859-1",
delimiter=";", skiprows=1, thousands=",")
cats = df.Straftat.unique()
df.rename(columns={"Gemeindeschlüssel": "kreis_key", "Anzahl erfasste Faelle": crime_clm,
"erfasste Fälle": crime_clm, "Gemeindeschluessel": "kreis_key", "erfasste Faelle": crime_clm}, inplace=True)
df.kreis_key = utils.fix_key(df.kreis_key)
df_ges = pd.DataFrame()
for cat in crime_categories:
df_cat = df[["kreis_key", "Straftat", crime_clm]
][crime_filter(df, crime_categories[cat])]
df_cat = df_cat.groupby("kreis_key").sum().reset_index()
df_cat = df_cat.rename(columns={crime_clm: f"{cat}_{year}"})
if not df_ges.empty:
df_ges = df_ges.merge(df_cat, on="kreis_key")
else:
df_ges = df_cat
if year <= 2016:
df_ges = utils.fix_goettingen(df_ges, f"{cat}_{year}")
return df_ges, list(cats)
def prep_data():
"""Preprocess crime data
Returns:
DataFrame: crime data in the years 2013-2020
"""
df, cats = prep_data_2013()
for i in range(2014, 2021):
df2, cats2 = prep_data_14_20(i)
df = df.merge(df2, on="kreis_key", how="outer")
cats = cats + cats2
cats_df = pd.DataFrame(pd.Series(cats).unique())
cats_df.to_csv(os.path.join(utils.path_to_data_dir(),
"processed", "crime", "categories.csv"))
# calculate crime rates
df_population = population.load_data()
df_crime_rates = df.merge(df_population, on="kreis_key")
years = list(filter(lambda y: f"population_{y}" in df_crime_rates.columns and f"crimes_{y}" in df_crime_rates.columns, range(2000, datetime.today(
).year+2)))
cols = ["kreis_key"]
for cat in crime_categories:
for year in years:
df_crime_rates[f"{cat}_pp_{year}"] = df_crime_rates[f"{cat}_{year}"] / \
df_crime_rates[f"population_{year}"]
cols = cols + [f"{cat}_{year}" for year in years]
cols = cols + [f"{cat}_pp_{year}" for year in years]
df_crime_rates = df_crime_rates[cols]
return df_crime_rates
def load_data():
"""Load crime data from csv
Returns:
DataFrame : data on crimes
"""
df = pd.read_csv(os.path.join(utils.path_to_data_dir(), "processed",
"crime", "crime.csv"), index_col=0)
df.kreis_key = utils.fix_key(df.kreis_key)
return df | PypiClean |
/Cfg-Loader-0.2.2.tar.gz/Cfg-Loader-0.2.2/cfg_loader/interpolator.py | import re
import string
from .exceptions import UnsetRequiredSubstitution, InvalidSubstitution
# Brace formatted syntax separators (c.f. https://docs.docker.com/compose/compose-file/#variable-substitution)
SEPARATOR_DEFAULT_IF_EMPTY = ':-'
SEPARATOR_DEFAULT_IF_UNSET = '-'
SEPARATOR_ERROR_IF_EMPTY = ':?'
SEPARATOR_ERROR_IF_UNSET = '?'
class SubstitutionTemplate(string.Template):
"""Class used to substitute environment variables in a string
It implements specification from docker-compose environ variable substitution
(c.f. https://docs.docker.com/compose/compose-file/#variable-substitution)
Examples with basic substitution
>>> template = SubstitutionTemplate('${VARIABLE}')
>>> template.substitute({'VARIABLE': 'value'})
'value'
>>> template.substitute({'VARIABLE': ''})
''
>>> template.substitute({})
Traceback (most recent call last):
...
KeyError: 'VARIABLE'
Examples with substitution if variable is empty or unset (separator: ":-")
>>> template = SubstitutionTemplate('${VARIABLE:-default}')
>>> template.substitute({'VARIABLE': 'value'})
'value'
>>> template.substitute({'VARIABLE': ''})
'default'
>>> template.substitute({})
'default'
Examples with substitution if variable is empty (separator: "-"):
>>> template = SubstitutionTemplate('${VARIABLE-default}')
>>> template.substitute({'VARIABLE': 'value'})
'value'
>>> template.substitute({'VARIABLE': ''})
''
>>> template.substitute({})
'default'
Examples with error raised if variable is unset (separator: "?")
>>> template = SubstitutionTemplate('${VARIABLE?err}')
>>> template.substitute({'VARIABLE': 'value'})
'value'
>>> template.substitute({'VARIABLE': ''})
''
>>> template.substitute({})
Traceback (most recent call last):
...
cfg_loader.exceptions.UnsetRequiredSubstitution: err
Examples with error raised if variable is empty or unset (separator: ":?")
>>> template = SubstitutionTemplate('${VARIABLE:?err}')
>>> template.substitute({'VARIABLE': 'value'})
'value'
>>> template.substitute({'VARIABLE': ''})
Traceback (most recent call last):
...
cfg_loader.exceptions.UnsetRequiredSubstitution: err
>>> template.substitute({})
Traceback (most recent call last):
...
cfg_loader.exceptions.UnsetRequiredSubstitution: err
"""
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) |
(?P<named>%(id)s) |
{(?P<braced>%(bid)s)} |
(?P<invalid>)
)
""" % {
'delim': re.escape('$'),
'id': r'[_a-z][_a-z0-9]*',
'bid': r'[_a-z][_a-z0-9]*(?:(?P<sep>:?[-?])[^}]*)?',
}
def substitute(self, mapping):
"""Substitute values indexed by mapping into `template`
:param mapping: Mapping containing values to substitute
:type mapping: dict
"""
def convert(mo):
named, braced = mo.group('named') or mo.group('braced'), mo.group('braced')
if braced is not None:
sep = mo.group('sep')
if sep:
return process_braced_group(braced, sep, mapping)
if named is not None:
val = mapping[named]
return '%s' % (val,)
if mo.group('escaped') is not None: # pragma: no branch
return self.delimiter
if mo.group('invalid') is not None: # pragma: no branch
raise ValueError('Invalid placeholder: {}'.format(self.template))
return self.pattern.sub(convert, self.template)
def process_braced_group(braced, sep, mapping):
"""Parse a braced formatted syntax and returns substituted value or raise error
It implements specification from docker-compose environ variable substitution
(c.f. https://docs.docker.com/compose/compose-file/#variable-substitution)
:param braced: Braced formatted syntax to substitute
:type braced: str
:param sep: Separator in the braced syntax
:type sep: str
:param mapping: Mapping with values to substitute
:type mapping: dict
"""
if sep == SEPARATOR_DEFAULT_IF_EMPTY:
var, _, default = braced.partition(SEPARATOR_DEFAULT_IF_EMPTY)
return mapping.get(var) or default
elif sep == SEPARATOR_DEFAULT_IF_UNSET:
var, _, default = braced.partition(SEPARATOR_DEFAULT_IF_UNSET)
return mapping.get(var, default)
elif sep == SEPARATOR_ERROR_IF_EMPTY:
var, _, err = braced.partition(SEPARATOR_ERROR_IF_EMPTY)
rv = mapping.get(var)
if not rv:
raise UnsetRequiredSubstitution(err)
return rv
elif sep == SEPARATOR_ERROR_IF_UNSET: # pragma: no branch
var, _, err = braced.partition(SEPARATOR_ERROR_IF_UNSET)
if var in mapping:
return mapping.get(var)
raise UnsetRequiredSubstitution(err)
class Interpolator:
"""Class used to substitute environment variables in complex object
:param substitution_mapping: Mapping with values to substitute
:type substitution_mapping: dict
Example
>>> interpolator = Interpolator(substitution_mapping={'VARIABLE': 'value'})
>>> interpolator.interpolate('${VARIABLE} in complex string')
'value in complex string'
>>> result = interpolator.interpolate_recursive({'key1': '${VARIABLE}', 'key2': ['element', '${EXTRA-default}']})
>>> result == {'key1': 'value', 'key2': ['element', 'default']}
True
"""
def __init__(self, substitution_mapping=None, substitution_template=SubstitutionTemplate):
self._substitution_template = substitution_template
self._substitution_mapping = substitution_mapping or {}
def interpolate(self, string):
"""Substitute environment variable in a string"""
try:
return self._substitution_template(string).substitute(self._substitution_mapping)
except ValueError as e:
raise InvalidSubstitution(e)
def interpolate_recursive(self, obj):
"""Substitute environment variable in an object"""
if isinstance(obj, str):
return self.interpolate(obj)
elif isinstance(obj, dict):
return {key: self.interpolate_recursive(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [self.interpolate_recursive(element) for element in obj]
return obj | PypiClean |
/Gaussian_Bionomial_Distributions-0.1.tar.gz/Gaussian_Bionomial_Distributions-0.1/Gaussian_Bionomial_Distributions/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | PypiClean |
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/Montreal-Forced-Aligner-3.0.0a3/docs/source/user_guide/corpus_creation/index.rst | .. _corpus_creation:
*************************
Corpus creation utilities
*************************
MFA now contains several command line utilities that help create corpora from scratch. The main workflow is as follows:
1. If the corpus is made up of long sound files that need segmenting, :ref:`segment the audio files using VAD <create_segments>`
2. If the corpus does not contain transcriptions, :ref:`transcribe utterances using existing acoustic models,
language models, and dictionaries <transcribing>`
3. Use the :ref:`Anchor annotator tool <anchor>` to manually correct errors in the transcriptions
4. As necessary, bootstrap better transcriptions:
1. :ref:`Train language model <training_lm>` with updated transcriptions
2. :ref:`Add pronunciation and silence probabilities to the dictionary <training_dictionary>`
.. toctree::
:hidden:
create_segments
train_ivector
diarize_speakers
transcribing
training_lm
training_dictionary
tokenize
train_tokenizer
anchor
| PypiClean |
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/rcsetup.py | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
# Don't let the original cycler collide with our validating cycler
from cycler import Cycler, cycler as ccycler
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = [
'GTK3Agg', 'GTK3Cairo', 'GTK4Agg', 'GTK4Cairo',
'MacOSX',
'nbAgg',
'QtAgg', 'QtCairo', 'Qt5Agg', 'Qt5Cairo',
'TkAgg', 'TkCairo',
'WebAgg',
'WX', 'WXAgg', 'WXCairo',
]
non_interactive_bk = ['agg', 'cairo',
'pdf', 'pgf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False, *,
_deprecated_since=None):
"""*valid* is a list of legal strings."""
self.key = key
self.ignorecase = ignorecase
self._deprecated_since = _deprecated_since
def func(s):
if ignorecase:
return s.lower()
else:
return s
self.valid = {func(k): k for k in valid}
def __call__(self, s):
if self._deprecated_since:
name, = (k for k, v in globals().items() if v is self)
_api.warn_deprecated(
self._deprecated_since, name=name, obj_type="function")
if self.ignorecase:
s = s.lower()
if s in self.valid:
return self.valid[s]
msg = (f"{s!r} is not a valid value for {self.key}; supported values "
f"are {[*self.valid.values()]}")
if (isinstance(s, str)
and (s.startswith('"') and s.endswith('"')
or s.startswith("'") and s.endswith("'"))
and s[1:-1] in self.valid):
msg += "; remove quotes surrounding your string"
raise ValueError(msg)
@lru_cache()
def _listify_validator(scalar_validator, allow_stringlist=False, *,
n=None, doc=None):
def f(s):
if isinstance(s, str):
try:
val = [scalar_validator(v.strip()) for v in s.split(',')
if v.strip()]
except Exception:
if allow_stringlist:
# Sometimes, a list of colors might be a single string
# of single-letter colornames. So give that a shot.
val = [scalar_validator(v.strip()) for v in s if v.strip()]
else:
raise
# Allow any ordered sequence type -- generators, np.ndarray, pd.Series
# -- but not sets, whose iteration order is non-deterministic.
elif np.iterable(s) and not isinstance(s, (set, frozenset)):
# The condition on this list comprehension will preserve the
# behavior of filtering out any empty strings (behavior was
# from the original validate_stringlist()), while allowing
# any non-string/text scalar values such as numbers and arrays.
val = [scalar_validator(v) for v in s
if not isinstance(v, str) or v]
else:
raise ValueError(
f"Expected str or other non-set iterable, but got {s}")
if n is not None and len(val) != n:
raise ValueError(
f"Expected {n} values, but there are {len(val)} values in {s}")
return val
try:
f.__name__ = "{}list".format(scalar_validator.__name__)
except AttributeError: # class instance.
f.__name__ = "{}List".format(type(scalar_validator).__name__)
f.__qualname__ = f.__qualname__.rsplit(".", 1)[0] + "." + f.__name__
f.__doc__ = doc if doc is not None else scalar_validator.__doc__
return f
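# Illustrative note (added, not part of the matplotlib source): validators
# built with _listify_validator accept either comma-separated rc strings or
# ordinary sequences and validate each element with the scalar validator, e.g.
#     validate_floatlist("1.0, 2.5")  ->  [1.0, 2.5]
#     validate_floatlist([1, "2"])    ->  [1.0, 2.0]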
def validate_any(s):
return s
validate_anylist = _listify_validator(validate_any)
def _validate_date(s):
try:
np.datetime64(s)
return s
except ValueError:
raise ValueError(
f'{s!r} should be a string that can be parsed by numpy.datetime64')
def validate_bool(b):
"""Convert b to ``bool`` or raise."""
if isinstance(b, str):
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
return False
else:
raise ValueError('Could not convert "%s" to bool' % b)
def validate_axisbelow(s):
try:
return validate_bool(s)
except ValueError:
if isinstance(s, str):
if s == 'line':
return 'line'
raise ValueError('%s cannot be interpreted as'
' True, False, or "line"' % s)
def validate_dpi(s):
"""Confirm s is string 'figure' or convert s to float or raise."""
if s == 'figure':
return s
try:
return float(s)
except ValueError as e:
raise ValueError(f'{s!r} is not string "figure" and '
f'could not convert {s!r} to float') from e
def _make_type_validator(cls, *, allow_none=False):
"""
Return a validator that converts inputs to *cls* or raises (and possibly
allows ``None`` as well).
"""
def validator(s):
if (allow_none and
(s is None or isinstance(s, str) and s.lower() == "none")):
return None
if cls is str and not isinstance(s, str):
_api.warn_deprecated(
"3.5", message="Support for setting an rcParam that expects a "
"str value to a non-str value is deprecated since %(since)s "
"and support will be removed %(removal)s.")
try:
return cls(s)
except (TypeError, ValueError) as e:
raise ValueError(
f'Could not convert {s!r} to {cls.__name__}') from e
validator.__name__ = f"validate_{cls.__name__}"
if allow_none:
validator.__name__ += "_or_None"
validator.__qualname__ = (
validator.__qualname__.rsplit(".", 1)[0] + "." + validator.__name__)
return validator
validate_string = _make_type_validator(str)
validate_string_or_None = _make_type_validator(str, allow_none=True)
validate_stringlist = _listify_validator(
validate_string, doc='return a list of strings')
validate_int = _make_type_validator(int)
validate_int_or_None = _make_type_validator(int, allow_none=True)
validate_float = _make_type_validator(float)
validate_float_or_None = _make_type_validator(float, allow_none=True)
validate_floatlist = _listify_validator(
validate_float, doc='return a list of floats')
def _validate_pathlike(s):
if isinstance(s, (str, os.PathLike)):
# Store value as str because savefig.directory needs to distinguish
# between "" (cwd) and "." (cwd, but gets updated by user selections).
return os.fsdecode(s)
else:
return validate_string(s) # Emit deprecation warning.
def validate_fonttype(s):
"""
Confirm that this is a Postscript or PDF font type that we know how to
convert to.
"""
fonttypes = {'type3': 3,
'truetype': 42}
try:
fonttype = validate_int(s)
except ValueError:
try:
return fonttypes[s.lower()]
except KeyError as e:
raise ValueError('Supported Postscript/PDF font types are %s'
% list(fonttypes)) from e
else:
if fonttype not in fonttypes.values():
raise ValueError(
'Supported Postscript/PDF font types are %s' %
list(fonttypes.values()))
return fonttype
_validate_standard_backends = ValidateInStrings(
'backend', all_backends, ignorecase=True)
_auto_backend_sentinel = object()
def validate_backend(s):
backend = (
s if s is _auto_backend_sentinel or s.startswith("module://")
else _validate_standard_backends(s))
return backend
def _validate_toolbar(s):
s = ValidateInStrings(
'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)(s)
if s == 'toolmanager':
_api.warn_external(
"Treat the new Tool classes introduced in v1.5 as experimental "
"for now; the API and rcParam may change in future versions.")
return s
def validate_color_or_inherit(s):
"""Return a valid color arg."""
if cbook._str_equal(s, 'inherit'):
return s
return validate_color(s)
def validate_color_or_auto(s):
if cbook._str_equal(s, 'auto'):
return s
return validate_color(s)
def validate_color_for_prop_cycle(s):
# N-th color cycle syntax can't go into the color cycle.
if isinstance(s, str) and re.match("^C[0-9]$", s):
raise ValueError(f"Cannot put cycle reference ({s!r}) in prop_cycler")
return validate_color(s)
def _validate_color_or_linecolor(s):
if cbook._str_equal(s, 'linecolor'):
return s
elif cbook._str_equal(s, 'mfc') or cbook._str_equal(s, 'markerfacecolor'):
return 'markerfacecolor'
elif cbook._str_equal(s, 'mec') or cbook._str_equal(s, 'markeredgecolor'):
return 'markeredgecolor'
elif s is None:
return None
    elif isinstance(s, str) and (len(s) == 6 or len(s) == 8):
stmp = '#' + s
if is_color_like(stmp):
return stmp
if s.lower() == 'none':
return None
elif is_color_like(s):
return s
raise ValueError(f'{s!r} does not look like a color arg')
def validate_color(s):
"""Return a valid color arg."""
if isinstance(s, str):
if s.lower() == 'none':
return 'none'
if len(s) == 6 or len(s) == 8:
stmp = '#' + s
if is_color_like(stmp):
return stmp
if is_color_like(s):
return s
# If it is still valid, it must be a tuple (as a string from matplotlibrc).
try:
color = ast.literal_eval(s)
except (SyntaxError, ValueError):
pass
else:
if is_color_like(color):
return color
raise ValueError(f'{s!r} does not look like a color arg')
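# Illustrative note (added): validate_color accepts named colors, hex strings
# with or without the leading '#', and tuple reprs coming from matplotlibrc:
#     validate_color("r")                ->  "r"
#     validate_color("00ff00")           ->  "#00ff00"
#     validate_color("(0.1, 0.2, 0.3)")  ->  (0.1, 0.2, 0.3)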
validate_colorlist = _listify_validator(
validate_color, allow_stringlist=True, doc='return a list of colorspecs')
def _validate_cmap(s):
_api.check_isinstance((str, Colormap), cmap=s)
return s
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError as e:
raise ValueError('not a valid aspect specification') from e
def validate_fontsize_None(s):
if s is None or s == 'None':
return None
else:
return validate_fontsize(s)
def validate_fontsize(s):
fontsizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large', 'smaller', 'larger']
if isinstance(s, str):
s = s.lower()
if s in fontsizes:
return s
try:
return float(s)
except ValueError as e:
raise ValueError("%s is not a valid font size. Valid font sizes "
"are %s." % (s, ", ".join(fontsizes))) from e
validate_fontsizelist = _listify_validator(validate_fontsize)
def validate_fontweight(s):
weights = [
'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black']
# Note: Historically, weights have been case-sensitive in Matplotlib
if s in weights:
return s
try:
return int(s)
except (ValueError, TypeError) as e:
raise ValueError(f'{s} is not a valid font weight.') from e
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
def _validate_mathtext_fallback(s):
_fallback_fonts = ['cm', 'stix', 'stixsans']
if isinstance(s, str):
s = s.lower()
if s is None or s == 'none':
return None
elif s.lower() in _fallback_fonts:
return s
else:
raise ValueError(
f"{s} is not a valid fallback font name. Valid fallback font "
f"names are {','.join(_fallback_fonts)}. Passing 'None' will turn "
"fallback off.")
def validate_whiskers(s):
try:
return _listify_validator(validate_float, n=2)(s)
except (TypeError, ValueError):
try:
return float(s)
except ValueError as e:
raise ValueError("Not a valid whisker value ['range', float, "
"(float, float)]") from e
def validate_ps_distiller(s):
if isinstance(s, str):
s = s.lower()
if s in ('none', None, 'false', False):
return None
else:
return ValidateInStrings('ps.usedistiller', ['ghostscript', 'xpdf'])(s)
# A validator dedicated to the named line styles, based on the items in
# ls_mapper, and a list of possible strings read from Line2D.set_linestyle
_validate_named_linestyle = ValidateInStrings(
'linestyle',
[*ls_mapper.keys(), *ls_mapper.values(), 'None', 'none', ' ', ''],
ignorecase=True)
def _validate_linestyle(ls):
"""
A validator for all possible line styles, the named ones *and*
the on-off ink sequences.
"""
if isinstance(ls, str):
try: # Look first for a valid named line style, like '--' or 'solid'.
return _validate_named_linestyle(ls)
except ValueError:
pass
try:
ls = ast.literal_eval(ls) # Parsing matplotlibrc.
except (SyntaxError, ValueError):
pass # Will error with the ValueError at the end.
def _is_iterable_not_string_like(x):
# Explicitly exclude bytes/bytearrays so that they are not
# nonsensically interpreted as sequences of numbers (codepoints).
return np.iterable(x) and not isinstance(x, (str, bytes, bytearray))
if _is_iterable_not_string_like(ls):
if len(ls) == 2 and _is_iterable_not_string_like(ls[1]):
# (offset, (on, off, on, off, ...))
offset, onoff = ls
else:
# For backcompat: (on, off, on, off, ...); the offset is implicit.
offset = 0
onoff = ls
if (isinstance(offset, Number)
and len(onoff) % 2 == 0
and all(isinstance(elem, Number) for elem in onoff)):
return (offset, onoff)
raise ValueError(f"linestyle {ls!r} is not a valid on-off ink sequence.")
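# Illustrative note (added): both named styles and on-off ink sequences pass
# validation, e.g.
#     _validate_linestyle("--")         ->  "--"
#     _validate_linestyle("1, 3")       ->  (0, (1, 3))   # implicit offset
#     _validate_linestyle((0, (3, 1)))  ->  (0, (3, 1))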
validate_fillstyle = ValidateInStrings(
'markers.fillstyle', ['full', 'left', 'right', 'bottom', 'top', 'none'])
validate_fillstylelist = _listify_validator(validate_fillstyle)
def validate_markevery(s):
"""
Validate the markevery property of a Line2D object.
Parameters
----------
s : None, int, (int, int), slice, float, (float, float), or list[int]
Returns
-------
None, int, (int, int), slice, float, (float, float), or list[int]
"""
# Validate s against type slice float int and None
if isinstance(s, (slice, float, int, type(None))):
return s
# Validate s against type tuple
if isinstance(s, tuple):
if (len(s) == 2
and (all(isinstance(e, int) for e in s)
or all(isinstance(e, float) for e in s))):
return s
else:
raise TypeError(
"'markevery' tuple must be pair of ints or of floats")
# Validate s against type list
if isinstance(s, list):
if all(isinstance(e, int) for e in s):
return s
else:
raise TypeError(
"'markevery' list must have all elements of type int")
raise TypeError("'markevery' is of an invalid type")
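# Illustrative note (added): accepted markevery forms include None, an int
# stride, an (offset, stride) pair, a slice, a float fraction, or an explicit
# list of indices, e.g. validate_markevery((0, 5)) -> (0, 5) and
# validate_markevery(0.1) -> 0.1.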
validate_markeverylist = _listify_validator(validate_markevery)
def validate_bbox(s):
if isinstance(s, str):
s = s.lower()
if s == 'tight':
return s
if s == 'standard':
return None
raise ValueError("bbox should be 'tight' or 'standard'")
elif s is not None:
# Backwards compatibility. None is equivalent to 'standard'.
raise ValueError("bbox should be 'tight' or 'standard'")
return s
def validate_sketch(s):
if isinstance(s, str):
s = s.lower()
if s == 'none' or s is None:
return None
try:
return tuple(_listify_validator(validate_float, n=3)(s))
except ValueError:
raise ValueError("Expected a (scale, length, randomness) triplet")
def _validate_greaterequal0_lessthan1(s):
s = validate_float(s)
if 0 <= s < 1:
return s
else:
raise RuntimeError(f'Value must be >=0 and <1; got {s}')
def _validate_greaterequal0_lessequal1(s):
s = validate_float(s)
if 0 <= s <= 1:
return s
else:
raise RuntimeError(f'Value must be >=0 and <=1; got {s}')
_range_validators = { # Slightly nicer (internal) API.
"0 <= x < 1": _validate_greaterequal0_lessthan1,
"0 <= x <= 1": _validate_greaterequal0_lessequal1,
}
def validate_hatch(s):
r"""
Validate a hatch pattern.
A hatch pattern string can have any sequence of the following
characters: ``\ / | - + * . x o O``.
"""
if not isinstance(s, str):
raise ValueError("Hatch pattern must be a string")
_api.check_isinstance(str, hatch_pattern=s)
unknown = set(s) - {'\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O'}
if unknown:
raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
return s
validate_hatchlist = _listify_validator(validate_hatch)
validate_dashlist = _listify_validator(validate_floatlist)
_prop_validators = {
'color': _listify_validator(validate_color_for_prop_cycle,
allow_stringlist=True),
'linewidth': validate_floatlist,
'linestyle': _listify_validator(_validate_linestyle),
'facecolor': validate_colorlist,
'edgecolor': validate_colorlist,
'joinstyle': _listify_validator(JoinStyle),
'capstyle': _listify_validator(CapStyle),
'fillstyle': validate_fillstylelist,
'markerfacecolor': validate_colorlist,
'markersize': validate_floatlist,
'markeredgewidth': validate_floatlist,
'markeredgecolor': validate_colorlist,
'markevery': validate_markeverylist,
'alpha': validate_floatlist,
'marker': validate_stringlist,
'hatch': validate_hatchlist,
'dashes': validate_dashlist,
}
_prop_aliases = {
'c': 'color',
'lw': 'linewidth',
'ls': 'linestyle',
'fc': 'facecolor',
'ec': 'edgecolor',
'mfc': 'markerfacecolor',
'mec': 'markeredgecolor',
'mew': 'markeredgewidth',
'ms': 'markersize',
}
def cycler(*args, **kwargs):
"""
Create a `~cycler.Cycler` object much like :func:`cycler.cycler`,
but includes input validation.
Call signatures::
cycler(cycler)
cycler(label=values[, label2=values2[, ...]])
cycler(label, values)
Form 1 copies a given `~cycler.Cycler` object.
Form 2 creates a `~cycler.Cycler` which cycles over one or more
properties simultaneously. If multiple properties are given, their
value lists must have the same length.
Form 3 creates a `~cycler.Cycler` for a single property. This form
exists for compatibility with the original cycler. Its use is
discouraged in favor of the kwarg form, i.e. ``cycler(label=values)``.
Parameters
----------
cycler : Cycler
Copy constructor for Cycler.
label : str
The property key. Must be a valid `.Artist` property.
For example, 'color' or 'linestyle'. Aliases are allowed,
such as 'c' for 'color' and 'lw' for 'linewidth'.
values : iterable
Finite-length iterable of the property values. These values
are validated and will raise a ValueError if invalid.
Returns
-------
Cycler
A new :class:`~cycler.Cycler` for the given properties.
Examples
--------
Creating a cycler for a single property:
>>> c = cycler(color=['red', 'green', 'blue'])
Creating a cycler for simultaneously cycling over multiple properties
(e.g. red circle, green plus, blue cross):
>>> c = cycler(color=['red', 'green', 'blue'],
... marker=['o', '+', 'x'])
"""
if args and kwargs:
raise TypeError("cycler() can only accept positional OR keyword "
"arguments -- not both.")
elif not args and not kwargs:
raise TypeError("cycler() must have positional OR keyword arguments")
if len(args) == 1:
if not isinstance(args[0], Cycler):
raise TypeError("If only one positional argument given, it must "
"be a Cycler instance.")
return validate_cycler(args[0])
elif len(args) == 2:
pairs = [(args[0], args[1])]
elif len(args) > 2:
raise TypeError("No more than 2 positional arguments allowed")
else:
pairs = kwargs.items()
validated = []
for prop, vals in pairs:
norm_prop = _prop_aliases.get(prop, prop)
validator = _prop_validators.get(norm_prop, None)
if validator is None:
raise TypeError("Unknown artist property: %s" % prop)
vals = validator(vals)
# We will normalize the property names as well to reduce
# the amount of alias handling code elsewhere.
validated.append((norm_prop, vals))
return reduce(operator.add, (ccycler(k, v) for k, v in validated))
class _DunderChecker(ast.NodeVisitor):
def visit_Attribute(self, node):
if node.attr.startswith("__") and node.attr.endswith("__"):
raise ValueError("cycler strings with dunders are forbidden")
self.generic_visit(node)
def validate_cycler(s):
"""Return a Cycler object from a string repr or the object itself."""
if isinstance(s, str):
# TODO: We might want to rethink this...
# While I think I have it quite locked down, it is execution of
# arbitrary code without sanitation.
# Combine this with the possibility that rcparams might come from the
# internet (future plans), this could be downright dangerous.
# I locked it down by only having the 'cycler()' function available.
# UPDATE: Partly plugging a security hole.
# I really should have read this:
# https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
# We should replace this eval with a combo of PyParsing and
# ast.literal_eval()
try:
_DunderChecker().visit(ast.parse(s))
s = eval(s, {'cycler': cycler, '__builtins__': {}})
except BaseException as e:
raise ValueError("'%s' is not a valid cycler construction: %s" %
(s, e)) from e
# Should make sure what comes from the above eval()
# is a Cycler object.
if isinstance(s, Cycler):
cycler_inst = s
else:
raise ValueError("object was not a string or Cycler instance: %s" % s)
unknowns = cycler_inst.keys - (set(_prop_validators) | set(_prop_aliases))
if unknowns:
raise ValueError("Unknown artist properties: %s" % unknowns)
# Not a full validation, but it'll at least normalize property names
# A fuller validation would require v0.10 of cycler.
checker = set()
for prop in cycler_inst.keys:
norm_prop = _prop_aliases.get(prop, prop)
if norm_prop != prop and norm_prop in cycler_inst.keys:
raise ValueError("Cannot specify both '{0}' and alias '{1}'"
" in the same prop_cycle".format(norm_prop, prop))
if norm_prop in checker:
raise ValueError("Another property was already aliased to '{0}'."
" Collision normalizing '{1}'.".format(norm_prop,
prop))
checker.update([norm_prop])
# This is just an extra-careful check, just in case there is some
# edge-case I haven't thought of.
assert len(checker) == len(cycler_inst.keys)
# Now, it should be safe to mutate this cycler
for prop in cycler_inst.keys:
norm_prop = _prop_aliases.get(prop, prop)
cycler_inst.change_key(prop, norm_prop)
for key, vals in cycler_inst.by_key().items():
_prop_validators[key](vals)
return cycler_inst
def validate_hist_bins(s):
valid_strs = ["auto", "sturges", "fd", "doane", "scott", "rice", "sqrt"]
if isinstance(s, str) and s in valid_strs:
return s
try:
return int(s)
except (TypeError, ValueError):
pass
try:
return validate_floatlist(s)
except ValueError:
pass
raise ValueError("'hist.bins' must be one of {}, an int or"
" a sequence of floats".format(valid_strs))
class _ignorecase(list):
"""A marker class indicating that a list-of-str is case-insensitive."""
def _convert_validator_spec(key, conv):
if isinstance(conv, list):
ignorecase = isinstance(conv, _ignorecase)
return ValidateInStrings(key, conv, ignorecase=ignorecase)
else:
return conv
# Mapping of rcParams to validators.
# Converters given as lists or _ignorecase are converted to ValidateInStrings
# immediately below.
# The rcParams defaults are defined in matplotlibrc.template, which gets copied
# to matplotlib/mpl-data/matplotlibrc by the setup script.
_validators = {
"backend": validate_backend,
"backend_fallback": validate_bool,
"toolbar": _validate_toolbar,
"interactive": validate_bool,
"timezone": validate_string,
"webagg.port": validate_int,
"webagg.address": validate_string,
"webagg.open_in_browser": validate_bool,
"webagg.port_retries": validate_int,
# line props
"lines.linewidth": validate_float, # line width in points
"lines.linestyle": _validate_linestyle, # solid line
"lines.color": validate_color, # first color in color cycle
"lines.marker": validate_string, # marker name
"lines.markerfacecolor": validate_color_or_auto, # default color
"lines.markeredgecolor": validate_color_or_auto, # default color
"lines.markeredgewidth": validate_float,
"lines.markersize": validate_float, # markersize, in points
"lines.antialiased": validate_bool, # antialiased (no jaggies)
"lines.dash_joinstyle": JoinStyle,
"lines.solid_joinstyle": JoinStyle,
"lines.dash_capstyle": CapStyle,
"lines.solid_capstyle": CapStyle,
"lines.dashed_pattern": validate_floatlist,
"lines.dashdot_pattern": validate_floatlist,
"lines.dotted_pattern": validate_floatlist,
"lines.scale_dashes": validate_bool,
# marker props
"markers.fillstyle": validate_fillstyle,
## pcolor(mesh) props:
"pcolor.shading": ["auto", "flat", "nearest", "gouraud"],
"pcolormesh.snap": validate_bool,
## patch props
"patch.linewidth": validate_float, # line width in points
"patch.edgecolor": validate_color,
"patch.force_edgecolor": validate_bool,
"patch.facecolor": validate_color, # first color in cycle
"patch.antialiased": validate_bool, # antialiased (no jaggies)
## hatch props
"hatch.color": validate_color,
"hatch.linewidth": validate_float,
## Histogram properties
"hist.bins": validate_hist_bins,
## Boxplot properties
"boxplot.notch": validate_bool,
"boxplot.vertical": validate_bool,
"boxplot.whiskers": validate_whiskers,
"boxplot.bootstrap": validate_int_or_None,
"boxplot.patchartist": validate_bool,
"boxplot.showmeans": validate_bool,
"boxplot.showcaps": validate_bool,
"boxplot.showbox": validate_bool,
"boxplot.showfliers": validate_bool,
"boxplot.meanline": validate_bool,
"boxplot.flierprops.color": validate_color,
"boxplot.flierprops.marker": validate_string,
"boxplot.flierprops.markerfacecolor": validate_color_or_auto,
"boxplot.flierprops.markeredgecolor": validate_color,
"boxplot.flierprops.markeredgewidth": validate_float,
"boxplot.flierprops.markersize": validate_float,
"boxplot.flierprops.linestyle": _validate_linestyle,
"boxplot.flierprops.linewidth": validate_float,
"boxplot.boxprops.color": validate_color,
"boxplot.boxprops.linewidth": validate_float,
"boxplot.boxprops.linestyle": _validate_linestyle,
"boxplot.whiskerprops.color": validate_color,
"boxplot.whiskerprops.linewidth": validate_float,
"boxplot.whiskerprops.linestyle": _validate_linestyle,
"boxplot.capprops.color": validate_color,
"boxplot.capprops.linewidth": validate_float,
"boxplot.capprops.linestyle": _validate_linestyle,
"boxplot.medianprops.color": validate_color,
"boxplot.medianprops.linewidth": validate_float,
"boxplot.medianprops.linestyle": _validate_linestyle,
"boxplot.meanprops.color": validate_color,
"boxplot.meanprops.marker": validate_string,
"boxplot.meanprops.markerfacecolor": validate_color,
"boxplot.meanprops.markeredgecolor": validate_color,
"boxplot.meanprops.markersize": validate_float,
"boxplot.meanprops.linestyle": _validate_linestyle,
"boxplot.meanprops.linewidth": validate_float,
## font props
"font.family": validate_stringlist, # used by text object
"font.style": validate_string,
"font.variant": validate_string,
"font.stretch": validate_string,
"font.weight": validate_fontweight,
"font.size": validate_float, # Base font size in points
"font.serif": validate_stringlist,
"font.sans-serif": validate_stringlist,
"font.cursive": validate_stringlist,
"font.fantasy": validate_stringlist,
"font.monospace": validate_stringlist,
# text props
"text.color": validate_color,
"text.usetex": validate_bool,
"text.latex.preamble": validate_string,
"text.hinting": ["default", "no_autohint", "force_autohint",
"no_hinting", "auto", "native", "either", "none"],
"text.hinting_factor": validate_int,
"text.kerning_factor": validate_int,
"text.antialiased": validate_bool,
"mathtext.cal": validate_font_properties,
"mathtext.rm": validate_font_properties,
"mathtext.tt": validate_font_properties,
"mathtext.it": validate_font_properties,
"mathtext.bf": validate_font_properties,
"mathtext.sf": validate_font_properties,
"mathtext.fontset": ["dejavusans", "dejavuserif", "cm", "stix",
"stixsans", "custom"],
"mathtext.default": ["rm", "cal", "it", "tt", "sf", "bf", "default",
"bb", "frak", "scr", "regular"],
"mathtext.fallback": _validate_mathtext_fallback,
"image.aspect": validate_aspect, # equal, auto, a number
"image.interpolation": validate_string,
"image.cmap": _validate_cmap, # gray, jet, etc.
"image.lut": validate_int, # lookup table
"image.origin": ["upper", "lower"],
"image.resample": validate_bool,
# Specify whether vector graphics backends will combine all images on a
# set of axes into a single composite image
"image.composite_image": validate_bool,
# contour props
"contour.negative_linestyle": _validate_linestyle,
"contour.corner_mask": validate_bool,
"contour.linewidth": validate_float_or_None,
# errorbar props
"errorbar.capsize": validate_float,
# axis props
# alignment of x/y axis title
"xaxis.labellocation": ["left", "center", "right"],
"yaxis.labellocation": ["bottom", "center", "top"],
# axes props
"axes.axisbelow": validate_axisbelow,
"axes.facecolor": validate_color, # background color
"axes.edgecolor": validate_color, # edge color
"axes.linewidth": validate_float, # edge linewidth
"axes.spines.left": validate_bool, # Set visibility of axes spines,
"axes.spines.right": validate_bool, # i.e., the lines around the chart
"axes.spines.bottom": validate_bool, # denoting data boundary.
"axes.spines.top": validate_bool,
"axes.titlesize": validate_fontsize, # axes title fontsize
"axes.titlelocation": ["left", "center", "right"], # axes title alignment
"axes.titleweight": validate_fontweight, # axes title font weight
"axes.titlecolor": validate_color_or_auto, # axes title font color
# title location, axes units, None means auto
"axes.titley": validate_float_or_None,
# pad from axes top decoration to title in points
"axes.titlepad": validate_float,
"axes.grid": validate_bool, # display grid or not
"axes.grid.which": ["minor", "both", "major"], # which grids are drawn
"axes.grid.axis": ["x", "y", "both"], # grid type
"axes.labelsize": validate_fontsize, # fontsize of x & y labels
"axes.labelpad": validate_float, # space between label and axis
"axes.labelweight": validate_fontweight, # fontsize of x & y labels
"axes.labelcolor": validate_color, # color of axis label
# use scientific notation if log10 of the axis range is smaller than the
# first or larger than the second
"axes.formatter.limits": _listify_validator(validate_int, n=2),
# use current locale to format ticks
"axes.formatter.use_locale": validate_bool,
"axes.formatter.use_mathtext": validate_bool,
# minimum exponent to format in scientific notation
"axes.formatter.min_exponent": validate_int,
"axes.formatter.useoffset": validate_bool,
"axes.formatter.offset_threshold": validate_int,
"axes.unicode_minus": validate_bool,
# This entry can be either a cycler object or a string repr of a
# cycler-object, which gets eval()'ed to create the object.
"axes.prop_cycle": validate_cycler,
# If "data", axes limits are set close to the data.
# If "round_numbers" axes limits are set to the nearest round numbers.
"axes.autolimit_mode": ["data", "round_numbers"],
"axes.xmargin": _range_validators["0 <= x <= 1"], # margin added to xaxis
"axes.ymargin": _range_validators["0 <= x <= 1"], # margin added to yaxis
'axes.zmargin': _range_validators["0 <= x <= 1"], # margin added to zaxis
"polaraxes.grid": validate_bool, # display polar grid or not
"axes3d.grid": validate_bool, # display 3d grid
# scatter props
"scatter.marker": validate_string,
"scatter.edgecolors": validate_string,
"date.epoch": _validate_date,
"date.autoformatter.year": validate_string,
"date.autoformatter.month": validate_string,
"date.autoformatter.day": validate_string,
"date.autoformatter.hour": validate_string,
"date.autoformatter.minute": validate_string,
"date.autoformatter.second": validate_string,
"date.autoformatter.microsecond": validate_string,
'date.converter': ['auto', 'concise'],
# for auto date locator, choose interval_multiples
'date.interval_multiples': validate_bool,
# legend properties
"legend.fancybox": validate_bool,
"legend.loc": _ignorecase([
"best",
"upper right", "upper left", "lower left", "lower right", "right",
"center left", "center right", "lower center", "upper center",
"center"]),
# the number of points in the legend line
"legend.numpoints": validate_int,
# the number of points in the legend line for scatter
"legend.scatterpoints": validate_int,
"legend.fontsize": validate_fontsize,
"legend.title_fontsize": validate_fontsize_None,
# color of the legend
"legend.labelcolor": _validate_color_or_linecolor,
# the relative size of legend markers vs. original
"legend.markerscale": validate_float,
"legend.shadow": validate_bool,
# whether or not to draw a frame around legend
"legend.frameon": validate_bool,
# alpha value of the legend frame
"legend.framealpha": validate_float_or_None,
## the following dimensions are in fraction of the font size
"legend.borderpad": validate_float, # units are fontsize
# the vertical space between the legend entries
"legend.labelspacing": validate_float,
# the length of the legend lines
"legend.handlelength": validate_float,
# the length of the legend lines
"legend.handleheight": validate_float,
# the space between the legend line and legend text
"legend.handletextpad": validate_float,
# the border between the axes and legend edge
"legend.borderaxespad": validate_float,
# the border between the axes and legend edge
"legend.columnspacing": validate_float,
"legend.facecolor": validate_color_or_inherit,
"legend.edgecolor": validate_color_or_inherit,
# tick properties
"xtick.top": validate_bool, # draw ticks on top side
"xtick.bottom": validate_bool, # draw ticks on bottom side
"xtick.labeltop": validate_bool, # draw label on top
"xtick.labelbottom": validate_bool, # draw label on bottom
"xtick.major.size": validate_float, # major xtick size in points
"xtick.minor.size": validate_float, # minor xtick size in points
"xtick.major.width": validate_float, # major xtick width in points
"xtick.minor.width": validate_float, # minor xtick width in points
"xtick.major.pad": validate_float, # distance to label in points
"xtick.minor.pad": validate_float, # distance to label in points
"xtick.color": validate_color, # color of xticks
"xtick.labelcolor": validate_color_or_inherit, # color of xtick labels
"xtick.minor.visible": validate_bool, # visibility of minor xticks
"xtick.minor.top": validate_bool, # draw top minor xticks
"xtick.minor.bottom": validate_bool, # draw bottom minor xticks
"xtick.major.top": validate_bool, # draw top major xticks
"xtick.major.bottom": validate_bool, # draw bottom major xticks
"xtick.labelsize": validate_fontsize, # fontsize of xtick labels
"xtick.direction": ["out", "in", "inout"], # direction of xticks
"xtick.alignment": ["center", "right", "left"],
"ytick.left": validate_bool, # draw ticks on left side
"ytick.right": validate_bool, # draw ticks on right side
"ytick.labelleft": validate_bool, # draw tick labels on left side
"ytick.labelright": validate_bool, # draw tick labels on right side
"ytick.major.size": validate_float, # major ytick size in points
"ytick.minor.size": validate_float, # minor ytick size in points
"ytick.major.width": validate_float, # major ytick width in points
"ytick.minor.width": validate_float, # minor ytick width in points
"ytick.major.pad": validate_float, # distance to label in points
"ytick.minor.pad": validate_float, # distance to label in points
"ytick.color": validate_color, # color of yticks
"ytick.labelcolor": validate_color_or_inherit, # color of ytick labels
"ytick.minor.visible": validate_bool, # visibility of minor yticks
"ytick.minor.left": validate_bool, # draw left minor yticks
"ytick.minor.right": validate_bool, # draw right minor yticks
"ytick.major.left": validate_bool, # draw left major yticks
"ytick.major.right": validate_bool, # draw right major yticks
"ytick.labelsize": validate_fontsize, # fontsize of ytick labels
"ytick.direction": ["out", "in", "inout"], # direction of yticks
"ytick.alignment": [
"center", "top", "bottom", "baseline", "center_baseline"],
"grid.color": validate_color, # grid color
"grid.linestyle": _validate_linestyle, # solid
"grid.linewidth": validate_float, # in points
"grid.alpha": validate_float,
## figure props
# figure title
"figure.titlesize": validate_fontsize,
"figure.titleweight": validate_fontweight,
# figure size in inches: width by height
"figure.figsize": _listify_validator(validate_float, n=2),
"figure.dpi": validate_float,
"figure.facecolor": validate_color,
"figure.edgecolor": validate_color,
"figure.frameon": validate_bool,
"figure.autolayout": validate_bool,
"figure.max_open_warning": validate_int,
"figure.raise_window": validate_bool,
"figure.subplot.left": _range_validators["0 <= x <= 1"],
"figure.subplot.right": _range_validators["0 <= x <= 1"],
"figure.subplot.bottom": _range_validators["0 <= x <= 1"],
"figure.subplot.top": _range_validators["0 <= x <= 1"],
"figure.subplot.wspace": _range_validators["0 <= x < 1"],
"figure.subplot.hspace": _range_validators["0 <= x < 1"],
"figure.constrained_layout.use": validate_bool, # run constrained_layout?
# wspace and hspace are fraction of adjacent subplots to use for space.
# Much smaller than above because we don't need room for the text.
"figure.constrained_layout.hspace": _range_validators["0 <= x < 1"],
"figure.constrained_layout.wspace": _range_validators["0 <= x < 1"],
# buffer around the axes, in inches.
'figure.constrained_layout.h_pad': validate_float,
'figure.constrained_layout.w_pad': validate_float,
## Saving figure's properties
'savefig.dpi': validate_dpi,
'savefig.facecolor': validate_color_or_auto,
'savefig.edgecolor': validate_color_or_auto,
'savefig.orientation': ['landscape', 'portrait'],
"savefig.format": validate_string,
"savefig.bbox": validate_bbox, # "tight", or "standard" (= None)
"savefig.pad_inches": validate_float,
# default directory in savefig dialog box
"savefig.directory": _validate_pathlike,
"savefig.transparent": validate_bool,
"tk.window_focus": validate_bool, # Maintain shell focus for TkAgg
# Set the papersize/type
"ps.papersize": _ignorecase(["auto", "letter", "legal", "ledger",
*[f"{ab}{i}"
for ab in "ab" for i in range(11)]]),
"ps.useafm": validate_bool,
# use ghostscript or xpdf to distill ps output
"ps.usedistiller": validate_ps_distiller,
"ps.distiller.res": validate_int, # dpi
"ps.fonttype": validate_fonttype, # 3 (Type3) or 42 (Truetype)
"pdf.compression": validate_int, # 0-9 compression level; 0 to disable
"pdf.inheritcolor": validate_bool, # skip color setting commands
# use only the 14 PDF core fonts embedded in every PDF viewing application
"pdf.use14corefonts": validate_bool,
"pdf.fonttype": validate_fonttype, # 3 (Type3) or 42 (Truetype)
"pgf.texsystem": ["xelatex", "lualatex", "pdflatex"], # latex variant used
"pgf.rcfonts": validate_bool, # use mpl's rc settings for font config
"pgf.preamble": validate_string, # custom LaTeX preamble
# write raster image data into the svg file
"svg.image_inline": validate_bool,
"svg.fonttype": ["none", "path"], # save text as text ("none") or "paths"
"svg.hashsalt": validate_string_or_None,
# set this when you want to generate hardcopy docstring
"docstring.hardcopy": validate_bool,
"path.simplify": validate_bool,
"path.simplify_threshold": _range_validators["0 <= x <= 1"],
"path.snap": validate_bool,
"path.sketch": validate_sketch,
"path.effects": validate_anylist,
"agg.path.chunksize": validate_int, # 0 to disable chunking
# key-mappings (multi-character mappings should be a list/tuple)
"keymap.fullscreen": validate_stringlist,
"keymap.home": validate_stringlist,
"keymap.back": validate_stringlist,
"keymap.forward": validate_stringlist,
"keymap.pan": validate_stringlist,
"keymap.zoom": validate_stringlist,
"keymap.save": validate_stringlist,
"keymap.quit": validate_stringlist,
"keymap.quit_all": validate_stringlist, # e.g.: "W", "cmd+W", "Q"
"keymap.grid": validate_stringlist,
"keymap.grid_minor": validate_stringlist,
"keymap.yscale": validate_stringlist,
"keymap.xscale": validate_stringlist,
"keymap.help": validate_stringlist,
"keymap.copy": validate_stringlist,
# Animation settings
"animation.html": ["html5", "jshtml", "none"],
# Limit, in MB, of size of base64 encoded animation in HTML
# (i.e. IPython notebook)
"animation.embed_limit": validate_float,
"animation.writer": validate_string,
"animation.codec": validate_string,
"animation.bitrate": validate_int,
# Controls image format when frames are written to disk
"animation.frame_format": ["png", "jpeg", "tiff", "raw", "rgba", "ppm",
"sgi", "bmp", "pbm", "svg"],
# Path to ffmpeg binary. If just binary name, subprocess uses $PATH.
"animation.ffmpeg_path": _validate_pathlike,
# Additional arguments for ffmpeg movie writer (using pipes)
"animation.ffmpeg_args": validate_stringlist,
# Path to convert binary. If just binary name, subprocess uses $PATH.
"animation.convert_path": _validate_pathlike,
# Additional arguments for convert movie writer (using pipes)
"animation.convert_args": validate_stringlist,
# Classic (pre 2.0) compatibility mode
# This is used for things that are hard to make backward compatible
# with a sane rcParam alone. This does *not* turn on classic mode
# altogether. For that use `matplotlib.style.use("classic")`.
"_internal.classic_mode": validate_bool
}
_hardcoded_defaults = { # Defaults not inferred from matplotlibrc.template...
# ... because they are private:
"_internal.classic_mode": False,
# ... because they are deprecated:
# No current deprecations.
# backend is handled separately when constructing rcParamsDefault.
}
_validators = {k: _convert_validator_spec(k, conv)
for k, conv in _validators.items()} | PypiClean |
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/Struct66.py | pairs="""
138 34
9 209
168 120
53 71
127 166
31 23
105 122
196 67
143 114
24 171
212 178
115 183
10 188
2 88
1 111
34 49
0 150
174 208
42 195
26 56
197 57
174 142
167 172
41 206
27 98
196 182
20 90
153 123
96 23
39 180
37 175
160 178
29 168
117 186
40 67
197 215
58 139
213 31
187 113
3 132
116 35
80 73
168 154
19 97
2 60
98 44
116 82
145 169
124 189
139 11
13 144
36 50
108 186
157 135
18 12
95 135
127 55
1 118
185 46
119 122
175 63
197 28
205 126
112 169
78 79
128 132
65 28
133 112
177 148
180 110
42 69
143 167
34 103
189 140
64 8
15 190
125 109
130 184
52 67
4 114
87 51
117 103
136 196
202 4
150 65
170 74
9 207
54 186
75 61
131 78
188 27
10 155
54 123
85 86
205 70
151 180
2 172
71 165
59 64
24 94
17 111
57 94
145 157
139 155
51 38
150 113
211 191
77 56
56 102
196 214
10 106
76 147
21 141
26 109
146 66
14 75
18 167
182 79
88 107
164 31
37 134
58 93
84 215
165 209
188 58
30 210
101 102
53 148
130 88
151 116
143 194
136 192
43 101
71 58
183 106
19 113
63 100
60 81
171 138
84 47
65 212
38 123
143 77
51 109
214 92
0 157
134 68
162 152
163 152
124 210
200 49
44 195
187 141
63 39
209 3
54 87
146 45
174 87
206 101
42 172
166 201
15 6
211 114
92 195
186 149
22 38
175 32
199 120
138 121
127 198
181 206
35 179
81 107
200 173
185 173
76 95
21 153
178 215
150 162
37 22
91 108
159 193
158 193
190 27
11 44
88 69
90 176
42 12
137 13
109 208
127 124
106 79
62 113
77 110
207 132
59 161
17 100
41 199
40 198
159 62
43 199
68 161
51 83
32 122
57 204
14 189
129 119
69 147
61 129
182 72
97 182
73 177
160 117
18 202
16 142
25 159
104 6
24 46
20 69
139 169
158 96
89 7
100 122
97 166
87 171
80 131
140 146
0 112
36 33
1 154
161 203
115 53
133 155
176 57
32 23
172 98
61 105
20 92
198 146
10 81
128 191
19 5
7 30
115 44
6 181
181 7
63 86
183 131
54 203
167 118
128 76
145 3
60 27
203 8
134 39
48 62
154 129
189 201
166 99
112 152
128 177
40 62
5 141
89 213
43 68
191 194
93 190
33 210
171 117
195 155
25 149
68 110
66 201
6 154
92 183
104 61
1 179
72 192
83 200
13 184
22 70
53 80
3 184
202 74
18 207
85 126
207 194
173 103
215 95
41 125
37 59
65 185
5 201
14 213
185 187
120 208
22 56
28 144
26 168
165 86
94 121
175 126
135 184
199 142
5 52
212 103
83 16
156 70
176 72
213 210
41 30
125 124
66 67
176 144
70 110
180 194
72 99
205 105
29 77
198 33
55 45
82 148
121 66
94 99
108 153
29 43
133 79
163 52
134 102
73 95
4 102
16 55
82 191
147 84
212 47
151 100
116 85
145 71
130 76
151 205
91 160
211 209
36 140
60 202
170 190
11 148
136 204
74 101
0 78
17 93
165 82
158 75
149 8
160 46
163 131
157 80
111 170
36 75
129 50
136 121
90 106
173 149
83 8
142 203
104 89
138 45
156 123
25 164
24 200
17 35
26 206
40 97
89 32
64 38
137 147
20 204
48 162
156 96
137 12
159 108
15 85
208 140
30 50
74 118
204 84
12 81
162 178
47 192
104 126
161 156
120 50
214 163
14 21
73 152
48 52
133 214
130 211
158 153
19 78
13 107
91 141
197 46
33 193
48 91
2 114
86 93
59 23
11 35
125 16
90 107
29 118
28 135
170 181
115 188
177 169
9 4
137 132
49 55
7 119
192 34
193 31
64 164
144 47
174 45
9 39
25 187
179 98
111 119
15 179
99 49
105 96
21 164
"""
waters="""
0.87352 0.31636 0.64904
0.55954 0.68996 0.27826
0.35802 0.0 0.41702
0.0625 0.31636 0.48547
0.25 0.19136 0.32325
0.75 0.0 0.80746
0.62817 0.0 0.21454
0.64368 0.31004 0.14791
0.14515 0.3032 0.98564
0.12649 0.31636 0.35097
0.5625 0.31636 0.51454
0.75 0.68365 0.43422
0.375 0.5 0.47093
0.25 0.31636 0.56578
0.75 0.0 0.99206
0.67052 0.0 0.2904
0.35485 0.3032 0.98564
0.75 0.5 0.31986
0.328 0.5 0.39034
0.75 0.1782 0.76204
0.43899 0.875 0.60803
0.875 0.0 0.94964
0.12183 0.0 0.14267
0.91882 0.375 0.08679
0.25 0.0 0.83534
0.95618 0.3032 0.90636
0.37817 0.0 0.14267
0.56102 0.125 0.39197
0.12352 0.18504 0.67191
0.35655 0.69681 0.21412
0.58118 0.375 0.08679
0.85485 0.3032 0.01436
0.85632 0.31004 0.14791
0.6875 0.5 0.96797
0.31397 0.5 0.81218
0.75 0.68996 0.35581
0.64515 0.69681 0.01436
0.06081 0.1782 0.16473
0.125 0.0 0.05037
0.06546 0.375 0.28775
0.68897 0.5 0.83179
0.45618 0.3032 0.09364
0.4375 0.68365 0.48547
0.31103 0.5 0.16821
0.6405 0.80865 0.46042
0.41882 0.625 0.91321
0.12817 0.0 0.78546
0.25 0.5 0.68015
0.85655 0.69681 0.78588
0.35632 0.31004 0.85209
0.58118 0.625 0.08679
0.25 0.0 0.00795
0.75 0.82181 0.76204
0.8125 0.0 0.50114
0.14515 0.69681 0.98564
0.41882 0.375 0.91321
0.25 0.0 0.19255
0.32948 0.0 0.7096
0.75 0.31636 0.43422
0.04382 0.3032 0.09364
0.43602 0.19136 0.40536
0.75 0.81496 0.12187
0.81103 0.5 0.83179
0.94046 0.31004 0.27826
0.0625 0.1782 0.02518
0.05954 0.31004 0.72174
0.56081 0.82181 0.83527
0.64346 0.69681 0.78588
0.18897 0.5 0.16821
0.3595 0.80865 0.53959
0.06081 0.82181 0.16473
0.8595 0.19136 0.46042
0.44046 0.31004 0.72174
0.93602 0.80865 0.59464
0.43454 0.375 0.28775
0.75 0.82181 0.03557
0.1405 0.80865 0.53959
0.25 0.82181 0.23797
0.75 0.19136 0.67675
0.62649 0.31636 0.64904
0.85802 0.0 0.58298
0.4375 0.31636 0.48547
0.93899 0.875 0.39197
0.25 0.1782 0.96443
0.25 0.68996 0.64419
0.82948 0.0 0.2904
0.87649 0.18504 0.32809
0.25 0.82181 0.96443
0.3125 0.0 0.49886
0.75 0.18504 0.12187
0.43899 0.125 0.60803
0.9392 0.82181 0.83527
0.56399 0.80865 0.59464
0.75 0.31004 0.35581
0.37183 0.0 0.78546
0.06102 0.875 0.60803
0.91882 0.625 0.08679
0.64346 0.3032 0.78588
0.56102 0.875 0.39197
0.43581 0.18504 0.79934
0.86098 0.5 0.26829
0.35655 0.3032 0.21412
0.25 0.1782 0.23797
0.18603 0.5 0.81218
0.75 0.0 0.16466
0.85632 0.68996 0.14791
0.56399 0.19136 0.59464
0.3595 0.19136 0.53959
0.95618 0.69681 0.90636
0.375 0.0 0.05037
0.14346 0.69681 0.21412
0.63903 0.5 0.26829
0.828 0.5 0.60966
0.85655 0.3032 0.78588
0.25 0.0 0.36311
0.6875 0.0 0.50114
0.87649 0.81496 0.32809
0.14368 0.68996 0.85209
0.43454 0.625 0.28775
0.68603 0.5 0.18782
0.45618 0.69681 0.09364
0.43581 0.81496 0.79934
0.81397 0.5 0.18782
0.0625 0.82181 0.02518
0.5625 0.1782 0.97482
0.4375 0.1782 0.02518
0.87183 0.0 0.21454
0.54382 0.3032 0.90636
0.0625 0.68365 0.48547
0.64368 0.68996 0.14791
0.1875 0.0 0.49886
0.75 0.0 0.63689
0.125 0.5 0.47093
0.672 0.5 0.60966
0.14346 0.3032 0.21412
0.06102 0.125 0.60803
0.44046 0.68996 0.72174
0.25 0.5 0.52332
0.35632 0.68996 0.85209
0.75 0.5 0.47669
0.5625 0.82181 0.97482
0.87817 0.0 0.85734
0.3125 0.5 0.03203
0.25 0.80865 0.32325
0.25 0.31004 0.64419
0.9375 0.31636 0.51454
0.54382 0.69681 0.90636
0.25 0.68365 0.56578
0.8595 0.80865 0.46042
0.08118 0.375 0.91321
0.93454 0.375 0.71226
0.94046 0.68996 0.27826
0.87352 0.68365 0.64904
0.9375 0.82181 0.97482
0.5642 0.81496 0.20067
0.625 0.5 0.52907
0.04382 0.69681 0.09364
0.93602 0.19136 0.59464
0.85485 0.69681 0.01436
0.87353 0.5 0.90239
0.0642 0.81496 0.79934
0.12647 0.5 0.09761
0.93454 0.625 0.71226
0.75 0.80865 0.67675
0.9375 0.1782 0.97482
0.93899 0.125 0.39197
0.56081 0.1782 0.83527
0.37352 0.68365 0.35097
0.4392 0.82181 0.16473
0.875 0.5 0.52907
0.55954 0.31004 0.27826
0.25 0.81496 0.87813
0.43602 0.80865 0.40536
0.14368 0.31004 0.85209
0.35485 0.69681 0.98564
0.93581 0.18504 0.20067
0.37649 0.18504 0.67191
0.9375 0.68365 0.51454
0.05954 0.68996 0.72174
0.62352 0.81496 0.32809
0.06546 0.625 0.28775
0.5642 0.18504 0.20067
0.56546 0.375 0.71226
0.64198 0.0 0.58298
0.1405 0.19136 0.53959
0.0642 0.18504 0.79934
0.08118 0.625 0.91321
0.9392 0.1782 0.83527
0.6405 0.19136 0.46042
0.625 0.0 0.94964
0.62352 0.18504 0.32809
0.06399 0.80865 0.40536
0.36098 0.5 0.73171
0.8125 0.5 0.96797
0.12649 0.68365 0.35097
0.5625 0.68365 0.51454
0.56546 0.625 0.71226
0.17052 0.0 0.7096
0.62647 0.5 0.90239
0.37353 0.5 0.09761
0.25 0.18504 0.87813
0.62183 0.0 0.85734
0.37352 0.31636 0.35097
0.1875 0.5 0.03203
0.37649 0.81496 0.67191
0.93581 0.81496 0.20067
0.4392 0.1782 0.16473
0.172 0.5 0.39034
0.4375 0.82181 0.02518
0.06399 0.19136 0.40536
0.64515 0.3032 0.01436
0.14198 0.0 0.41702
0.13903 0.5 0.73171
0.75 0.1782 0.03557
0.62649 0.68365 0.64904
0.12352 0.81496 0.67191
"""
coord = "relative"
cages="""
12 -0.25 0.0 -0.61171
15 -0.5806 0.0 -0.08484
12 -0.25 0.5 -0.28241
12 0.0 0.0 0.5
12 0.75 0.24015 0.24448
12 0.25 0.0 0.61171
12 0.5 -0.5 0.0
14 -0.25 -0.21278 -0.10073
15 -0.0806 0.0 0.08484
16 0.06793 0.0 0.28436
12 -0.75 -0.24015 -0.24448
12 -0.25 -0.24015 0.24448
12 0.5 0.0 0.5
16 -0.43801 -0.5 -0.60477
16 -0.43207 0.0 -0.28436
12 0.0 0.5 0.0
16 0.43207 0.0 0.28436
12 0.25 -0.26542 0.44186
12 0.49411 -0.5 0.18898
16 -0.06793 0.0 -0.28436
12 -0.25 -0.26542 -0.44186
12 0.25 0.5 0.28241
12 0.00589 0.5 0.18898
12 0.75 0.5 0.07333
16 0.06199 0.5 0.60477
16 0.43801 -0.5 0.60477
12 -0.49411 -0.5 -0.18898
14 0.25 0.21278 0.10073
12 -0.25 0.26542 -0.44186
12 0.25 0.26542 0.44186
12 -0.75 0.24015 -0.24448
15 0.0806 0.0 -0.08484
16 -0.06199 0.5 -0.60477
14 -0.25 0.21278 -0.10073
12 -0.00589 0.5 -0.18898
14 0.25 -0.21278 0.10073
15 0.5806 0.0 0.08484
12 -0.75 0.5 -0.07333
"""
bondlen = 3
cell = """
22.611830247419306 13.678368557348675 36.90820504451432
"""
density = 0.5655780712548233
from genice.cell import cellvectors
cell = cellvectors(a=22.611830247419306,
b=13.678368557348675,
c=36.90820504451432) | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/utils/FileOperations.py | from __future__ import print_function
import errno
import fnmatch
import glob
import os
import shutil
import stat
import tempfile
import time
from contextlib import contextmanager
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
WindowsError,
basestring,
raw_input,
)
from nuitka.PythonVersions import python_version
from nuitka.Tracing import general, my_print, options_logger
from .Importing import importFromInlineCopy
from .ThreadedExecutor import RLock, getThreadIdent
from .Utils import isMacOS, isWin32OrPosixWindows, isWin32Windows
# Locking seems to be only required for Windows mostly, but we can keep
# it for all.
file_lock = RLock()
# Use this in case of dead locks or even to see file operations being done.
_lock_tracing = False
@contextmanager
def withFileLock(reason="unknown"):
"""Acquire file handling lock.
Args:
reason: What is being done.
    Notes: This is most relevant on Windows, but it also prevents concurrent
    access from other threads in general, which could otherwise observe half
    written files.
"""
if _lock_tracing:
my_print(getThreadIdent(), "Want file lock for %s" % reason)
file_lock.acquire()
if _lock_tracing:
my_print(getThreadIdent(), "Acquired file lock for %s" % reason)
yield
if _lock_tracing:
my_print(getThreadIdent(), "Released file lock for %s" % reason)
file_lock.release()
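# Illustrative usage sketch (added, not part of the Nuitka source):
#
#     with withFileLock("copying extension module"):
#         shutil.copyfile(source_filename, target_filename)
#
# Concurrent callers serialize on the shared RLock, so other threads never
# observe a half-written target file.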
def areSamePaths(path1, path2):
    """Decide if two paths are the same.
Args:
path1: First path
path2: Second path
Returns:
Boolean value indicating if the two paths point to the
same path.
Notes:
        Case differences are ignored on platforms where that is the
        norm; once both paths are normalized and made absolute, the
        check reduces to a plain string comparison.
"""
path1 = os.path.normcase(os.path.abspath(os.path.normpath(path1)))
path2 = os.path.normcase(os.path.abspath(os.path.normpath(path2)))
return path1 == path2
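# Illustrative note (added): normalization makes lexically different spellings
# of the same location compare equal, e.g.
#     areSamePaths("/tmp/../tmp/a.py", "/tmp/a.py")  ->  True
# and on Windows the comparison is additionally case-insensitive.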
def haveSameFileContents(path1, path2):
# Local import, to avoid this for normal use cases.
import filecmp
return filecmp.cmp(path1, path2)
def getFileSize(path):
return os.path.getsize(path)
def relpath(path, start="."):
"""Make it a relative path, if possible.
Args:
path: path to work on
start: where to start from, defaults to current directory
Returns:
Changed path, pointing to the same path relative to current
directory if possible.
Notes:
On Windows, a relative path is not possible across device
names, therefore it may have to return the absolute path
instead.
"""
if start == ".":
start = os.curdir
try:
return os.path.relpath(path, start)
except ValueError:
# On Windows, paths on different devices prevent it to work. Use that
# full path then.
if isWin32OrPosixWindows():
return os.path.abspath(path)
raise
def isRelativePath(path):
if os.path.isabs(path):
return False
if path.startswith((".." + os.path.sep, "../")):
return False
return True
def makePath(path):
"""Create a directory if it doesn't exist.
Args:
path: path to create as a directory
Notes:
This also is thread safe on Windows, i.e. no race is
possible.
"""
with withFileLock("creating directory %s" % path):
if not os.path.isdir(path):
os.makedirs(path)
def makeContainingPath(filename):
target_dir = os.path.dirname(filename)
if not os.path.isdir(target_dir):
makePath(target_dir)
def isPathExecutable(path):
"""Is the given path executable."""
return os.path.isfile(path) and os.access(path, os.X_OK)
# Make sure we don't repeat this too much.
_real_path_windows_cache = {}
_powershell_path = None
def _getRealPathWindows(path):
# Slow on Python2, because we are using an external process.
# Singleton, pylint: disable=global-statement
global _powershell_path
if _powershell_path is None:
from .Execution import getExecutablePath
_powershell_path = getExecutablePath("powershell")
# Try to find it only once, otherwise ignore its absence, symlinks are not
# that important.
if _powershell_path is None:
_powershell_path = False
if path not in _real_path_windows_cache:
if _powershell_path:
from .Execution import check_output
result = check_output(
[
_powershell_path,
"-NoProfile",
"Get-Item",
path,
"|",
"Select-Object",
"-ExpandProperty",
"Target",
],
shell=False,
)
if str is not bytes:
result = result.decode("utf8")
_real_path_windows_cache[path] = os.path.join(
os.path.dirname(path), result.rstrip("\r\n")
)
else:
_real_path_windows_cache[path] = path
return _real_path_windows_cache[path]
def getDirectoryRealPath(path):
"""Get os.path.realpath with Python2 and Windows symlink workaround applied.
Args:
path: path to get realpath of
Returns:
path with symlinks resolved
Notes:
Workaround for Windows symlink is applied.
"""
path = os.path.realpath(path)
    # Attempt to resolve Windows symlinks, which os.path.realpath does not
    # handle on older Python versions.
if os.name == "nt":
if os.path.islink(path) or (not os.path.isdir(path) and os.path.exists(path)):
path = _getRealPathWindows(path)
return path
def listDir(path):
"""Give a sorted listing of a path.
Args:
path: directory to create a listing from
Returns:
Sorted list of tuples of full filename, and basename of
files in that directory.
Notes:
Typically the full name and the basename are both needed
so this function simply does both, for ease of use on the
calling side.
        This should be used because it makes sure to resolve symlinks
        to directories on Windows, which a naive "os.listdir" will not
        do by default.
"""
real_path = getDirectoryRealPath(path)
return sorted(
[(os.path.join(path, filename), filename) for filename in os.listdir(real_path)]
)
def getFileList(
path,
ignore_dirs=(),
ignore_filenames=(),
ignore_suffixes=(),
only_suffixes=(),
normalize=True,
):
"""Get all files below a given path.
Args:
path: directory to create a recursive listing from
ignore_dirs: Don't descend into these directory, ignore them
ignore_filenames: Ignore files named exactly like this
ignore_suffixes: Don't return files with these suffixes
only_suffixes: If not empty, limit returned files to these suffixes
Returns:
Sorted list of all filenames below that directory,
relative to it.
Notes:
This function descends into directories, but does
not follow symlinks.
"""
# We work with a lot of details here, pylint: disable=too-many-locals
result = []
# Normalize ignoredirs for better matching.
ignore_dirs = [os.path.normcase(ignore_dir) for ignore_dir in ignore_dirs]
ignore_filenames = [
os.path.normcase(ignore_filename) for ignore_filename in ignore_filenames
]
for root, dirnames, filenames in os.walk(path):
dirnames.sort()
filenames.sort()
# Normalize dirnames for better matching.
dirnames_normalized = [os.path.normcase(dirname) for dirname in dirnames]
for ignore_dir in ignore_dirs:
if ignore_dir in dirnames_normalized:
dirnames.remove(ignore_dir)
# Normalize filenames for better matching.
filenames_normalized = [os.path.normcase(filename) for filename in filenames]
for ignore_filename in ignore_filenames:
if ignore_filename in filenames_normalized:
filenames.remove(ignore_filename)
for filename in filenames:
if os.path.normcase(filename).endswith(ignore_suffixes):
continue
if only_suffixes and not os.path.normcase(filename).endswith(only_suffixes):
continue
fullname = os.path.join(root, filename)
if normalize:
fullname = os.path.normpath(fullname)
result.append(fullname)
return result
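# Illustrative usage sketch (added): collecting all Python sources below a
# hypothetical "plugins" tree while skipping byte-code caches could look like
#     getFileList("plugins", ignore_dirs=("__pycache__",), only_suffixes=(".py",))
# which returns normalized paths in per-directory sorted walk order.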
def getSubDirectories(path, ignore_dirs=()):
"""Get all directories below a given path.
Args:
path: directory to create a recursive listing from
ignore_dirs: directories named that like will be ignored
Returns:
Sorted list of all directories below that directory,
relative to it.
Notes:
This function descends into directories, but does
not follow symlinks.
"""
result = []
ignore_dirs = [os.path.normcase(ignore_dir) for ignore_dir in ignore_dirs]
for root, dirnames, _filenames in os.walk(path):
# Normalize dirnames for better matching.
dirnames_normalized = [os.path.normcase(dirname) for dirname in dirnames]
for ignore_dir in ignore_dirs:
if ignore_dir in dirnames_normalized:
dirnames.remove(ignore_dir)
dirnames.sort()
for dirname in dirnames:
result.append(os.path.join(root, dirname))
result.sort()
return result
def listDllFilesFromDirectory(path, prefix=None, suffixes=None):
"""Give a sorted listing of DLLs filenames in a path.
Args:
path: directory to create a DLL listing from
prefix: shell pattern to match filename start against, can be None
        suffixes: shell patterns to match filename end against, defaults to all platform DLL suffixes
Returns:
Sorted list of tuples of full filename, and basename of
DLLs in that directory.
Notes:
Typically the full name and the basename are both needed
so this function simply does both, for ease of use on the
calling side.
"""
# Accept None value as well.
prefix = prefix or ""
suffixes = suffixes or ("dll", "so.*", "so", "dylib")
pattern_list = [prefix + "*." + suffix for suffix in suffixes]
for fullpath, filename in listDir(path):
for pattern in pattern_list:
if fnmatch.fnmatch(filename, pattern):
yield fullpath, filename
break
def listExeFilesFromDirectory(path, prefix=None, suffixes=None):
"""Give a sorted listing of EXE filenames in a path.
Args:
        path: directory to create an EXE listing from
        prefix: shell pattern to match filename start against, can be None
        suffixes: shell patterns to match filename end against, can be None
    Returns:
        Sorted list of tuples of full filename, and basename of
        EXEs in that directory.
Notes:
Typically the full name and the basename are both needed
so this function simply does both, for ease of use on the
calling side.
"""
# Accept None value as well.
prefix = prefix or ""
    # On Windows, we check exe suffixes; on other platforms we check all filenames
    # matching the prefix, but they have to have the executable bit set.
if suffixes is None and isWin32OrPosixWindows():
suffixes = "exe", "bin"
if suffixes:
pattern_list = [prefix + "*." + suffix for suffix in suffixes]
else:
pattern_list = [prefix + "*"]
for fullpath, filename in listDir(path):
for pattern in pattern_list:
if fnmatch.fnmatch(filename, pattern):
if not isWin32OrPosixWindows() and not os.access(fullpath, os.X_OK):
continue
yield fullpath, filename
break
def getSubDirectoriesWithDlls(path):
"""Get all directories below a given path.
Args:
path: directory to create a recursive listing from
Returns:
Sorted tuple of all directories below that directory,
relative to it, that contain DLL files.
Notes:
This function descends into directories, but does
not follow symlinks.
"""
result = set()
for dll_sub_directory in _getSubDirectoriesWithDlls(path):
result.add(dll_sub_directory)
return tuple(sorted(result))
def _getSubDirectoriesWithDlls(path):
for sub_directory in getSubDirectories(path=path, ignore_dirs=("__pycache__",)):
if any(listDllFilesFromDirectory(sub_directory)) or _isMacOSFramework(
sub_directory
):
yield sub_directory
candidate = os.path.dirname(sub_directory)
        # Should be string identical, no normalization is done in "getSubDirectories"
while candidate != path:
yield candidate
candidate = os.path.dirname(candidate)
def _isMacOSFramework(path):
"""Decide if a folder is a framework folder."""
return isMacOS() and os.path.isdir(path) and path.endswith(".framework")
def isLink(path):
result = os.path.islink(path)
# Special handling for Junctions.
if not result and isWin32Windows():
import ctypes.wintypes
GetFileAttributesW = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributesW.restype = ctypes.wintypes.DWORD
GetFileAttributesW.argtypes = (ctypes.wintypes.LPCWSTR,)
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x00400
result = GetFileAttributesW(path)
if result != INVALID_FILE_ATTRIBUTES:
result = bool(result & FILE_ATTRIBUTE_REPARSE_POINT)
return result
def deleteFile(path, must_exist):
"""Delete a file, potentially making sure it exists.
Args:
path: file to delete
Notes:
This also is thread safe on Windows, i.e. no race is
possible.
"""
with withFileLock("deleting file %s" % path):
if isLink(path) or os.path.isfile(path):
try:
os.unlink(path)
except OSError:
if must_exist:
raise
elif must_exist:
raise OSError("Does not exist", path)
def splitPath(path):
"""Split path, skipping empty elements."""
return tuple(
element for element in os.path.split(path.rstrip(os.path.sep)) if element
)
def getFilenameExtension(path):
"""Get the filename extension.
Note: The extension is case normalized, i.e. it may actually be ".TXT"
rather than ".txt", use "changeFilenameExtension" if you want to replace
it with something else.
Note: For checks on extension, use hasFilenameExtension instead.
"""
return os.path.splitext(os.path.normcase(path))[1]
def changeFilenameExtension(path, extension):
"""Change the filename extension."""
return os.path.splitext(path)[0] + extension
def hasFilenameExtension(path, extensions):
"""Has a filename one of the given extensions.
    Note: The extensions should be normalized, i.e. lower case; they will also match
    other cases on platforms where the file system is case-insensitive.
"""
extension = getFilenameExtension(path)
if isinstance(extensions, basestring):
return extension == extensions
else:
return extension in extensions
def removeDirectory(path, ignore_errors):
"""Remove a directory recursively.
    On Windows, it happens that operations fail and succeed when retried,
    so we add a retry with a small delay, then another retry. This should make it
    much more stable during tests.
All kinds of programs that scan files might cause this, but they do
it hopefully only briefly.
"""
def onError(func, path, exc_info):
# Try again immediately, ignore what happened, pylint: disable=unused-argument
try:
func(path)
except OSError:
time.sleep(0.1)
func(path)
with withFileLock("removing directory %s" % path):
if os.path.exists(path):
try:
shutil.rmtree(path, ignore_errors=False, onerror=onError)
except OSError:
if ignore_errors:
shutil.rmtree(path, ignore_errors=ignore_errors)
else:
raise
def resetDirectory(path, ignore_errors):
removeDirectory(path=path, ignore_errors=ignore_errors)
makePath(path)
@contextmanager
def withTemporaryFile(suffix="", mode="w", delete=True, temp_path=None):
with tempfile.NamedTemporaryFile(
suffix=suffix, mode=mode, delete=delete, dir=temp_path
) as temp_file:
yield temp_file
def getFileContentByLine(filename, mode="r", encoding=None):
    # We read the whole file, to keep lock times minimal. We only deal with small
# files like this normally.
return getFileContents(filename, mode, encoding=encoding).splitlines()
def getFileContents(filename, mode="r", encoding=None):
"""Get the contents of a file.
Args:
filename: str with the file to be read
mode: "r" for str, "rb" for bytes result
        encoding: optional encoding to use when reading the file, e.g. "utf8"
Returns:
str or bytes - depending on mode.
"""
with withFileLock("reading file %s" % filename):
with openTextFile(filename, mode, encoding=encoding) as f:
return f.read()
def getFileFirstLine(filename, mode="r", encoding=None):
"""Get the contents of a file.
Args:
filename: str with the file to be read
mode: "r" for str, "rb" for bytes result
        encoding: optional encoding to use when reading the file, e.g. "utf8"
Returns:
str or bytes - depending on mode.
"""
with withFileLock("reading file %s" % filename):
with openTextFile(filename, mode, encoding=encoding) as f:
return f.readline()
def openTextFile(filename, mode, encoding=None):
if encoding is not None:
import codecs
return codecs.open(filename, mode, encoding=encoding)
else:
# Avoid deprecation warning, is now the default.
if python_version >= 0x370:
mode = mode.replace("U", "")
# Encoding was checked to be not needed.
return open(filename, mode) # pylint: disable=unspecified-encoding
def putTextFileContents(filename, contents, encoding=None):
"""Write a text file from given contents.
Args:
filename: str with the file to be created
contents: str or iterable of strings with what should be written into the file
        encoding: optional encoding to use when writing the file
Returns:
None
"""
def _writeContents(output_file):
if isinstance(contents, basestring):
print(contents, file=output_file, end="")
else:
for line in contents:
print(line, file=output_file)
with withFileLock("writing file %s" % filename):
with openTextFile(filename, "w", encoding=encoding) as output_file:
_writeContents(output_file)
@contextmanager
def withPreserveFileMode(filenames):
if type(filenames) is str:
filenames = [filenames]
old_modes = {}
for filename in filenames:
old_modes[filename] = os.stat(filename).st_mode
yield
for filename in filenames:
os.chmod(filename, old_modes[filename])
@contextmanager
def withMadeWritableFileMode(filenames):
if type(filenames) is str:
filenames = [filenames]
with withPreserveFileMode(filenames):
for filename in filenames:
os.chmod(filename, int("644", 8))
yield
def removeFileExecutablePermission(filename):
old_stat = os.stat(filename)
mode = old_stat.st_mode
mode &= ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if mode != old_stat.st_mode:
os.chmod(filename, mode)
def addFileExecutablePermission(filename):
old_stat = os.stat(filename)
mode = old_stat.st_mode
mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if mode != old_stat.st_mode:
os.chmod(filename, mode)
def renameFile(source_filename, dest_filename):
    # There is no way to safely update a file on Windows, but let's
    # try on Linux at least.
old_stat = os.stat(source_filename)
try:
os.rename(source_filename, dest_filename)
except OSError:
copyFile(source_filename, dest_filename)
os.unlink(source_filename)
os.chmod(dest_filename, old_stat.st_mode)
def copyTree(source_path, dest_path):
"""Copy whole directory tree, preserving attributes.
Args:
source_path: where to copy from
dest_path: where to copy to, may already exist
Notes:
This must be used over `shutil.copytree` which has troubles
with existing directories on some Python versions.
"""
if python_version >= 0x380:
# Python 3.8+ has dirs_exist_ok
return shutil.copytree(source_path, dest_path, dirs_exist_ok=True)
from distutils.dir_util import copy_tree
return copy_tree(source_path, dest_path)
def copyFileWithPermissions(source_path, dest_path):
"""Improved version of shutil.copy2.
    File systems might not allow transferring extended attributes, which we then
    ignore and only copy permissions.
"""
try:
shutil.copy2(source_path, dest_path)
except PermissionError as e:
if e.errno != errno.EACCES:
raise
source_mode = os.stat(source_path).st_mode
shutil.copy(source_path, dest_path)
os.chmod(dest_path, source_mode)
def copyFile(source_path, dest_path):
"""Improved version of shutil.copy
This handles errors with a chance to correct them, e.g. on Windows, files might be
locked by running program or virus checkers.
"""
while 1:
try:
shutil.copyfile(source_path, dest_path)
except PermissionError as e:
if e.errno != errno.EACCES:
raise
general.warning("Problem copying file %s:" % e)
try:
reply = raw_input("Retry? (YES/no) ") or "yes"
except EOFError:
reply = "no"
if reply.upper() == "YES":
continue
raise
break
def getWindowsDrive(path):
"""Windows drive for a given path."""
drive, _ = os.path.splitdrive(os.path.abspath(path))
return os.path.normcase(drive)
def isPathBelow(path, filename):
"""Is a path inside of a given directory path
Args:
path: location to be below
filename: candidate being checked
"""
if type(path) in (tuple, list):
for p in path:
if isPathBelow(path=p, filename=filename):
return True
return False
path = os.path.abspath(path)
filename = os.path.abspath(filename)
if isWin32Windows():
if getWindowsDrive(path) != getWindowsDrive(filename):
return False
return os.path.relpath(filename, path).split(os.path.sep)[0] != ".."
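# Illustrative usage sketch, not part of the original module; paths are
# arbitrary example values on a POSIX system. A tuple or list of candidate
# parent directories is also accepted:
#
#   isPathBelow("/usr/lib", "/usr/lib/python3/os.py")       # True
#   isPathBelow(("/usr/lib", "/opt"), "/opt/tool/bin/run")  # True
#   isPathBelow("/usr/lib", "/etc/passwd")                  # False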
def isPathBelowOrSameAs(path, filename):
"""Is a path inside of a given directory path or the same path as that directory."""
return isPathBelow(path, filename) or areSamePaths(path, filename)
def getWindowsShortPathName(filename):
"""Gets the short path name of a given long path.
Args:
filename - long Windows filename
Returns:
Path that is a short filename pointing at the same file.
Notes:
Originally from http://stackoverflow.com/a/23598461/200291
"""
import ctypes.wintypes
GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
GetShortPathNameW.argtypes = [
ctypes.wintypes.LPCWSTR,
ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD,
]
GetShortPathNameW.restype = ctypes.wintypes.DWORD
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = GetShortPathNameW(
os.path.abspath(filename), output_buf, output_buf_size
)
if needed == 0:
# Windows only code, pylint: disable=I0021,undefined-variable
# Permission denied.
if ctypes.GetLastError() == 5:
return filename
raise WindowsError(
ctypes.GetLastError(), ctypes.FormatError(ctypes.GetLastError())
)
if output_buf_size >= needed:
# Short paths should be ASCII. Don't return unicode without a need,
# as e.g. Scons hates that in environment variables.
if str is bytes:
return output_buf.value.encode("utf8")
else:
return output_buf.value
else:
output_buf_size = needed
def getExternalUsePath(filename, only_dirname=False):
"""Gets the externally usable absolute path for a given relative path.
Args:
filename - filename, potentially relative
Returns:
        Path that is an absolute and (on Windows) short filename pointing at the same file.
Notes:
This is only "os.path.abspath" except on Windows, where is converts
to a short path too.
"""
filename = os.path.abspath(filename)
if os.name == "nt":
if only_dirname:
dirname = getWindowsShortPathName(os.path.dirname(filename))
assert os.path.exists(dirname)
filename = os.path.join(dirname, os.path.basename(filename))
else:
filename = getWindowsShortPathName(filename)
return filename
def getLinkTarget(filename):
"""Return the path a link is pointing too, if any.
Args:
filename - check this path, need not be a filename
Returns:
(bool, link_target) - first value indicates if it is a link, second the link target
Notes:
This follows symlinks to the very end.
"""
is_link = False
while os.path.exists(filename) and os.path.islink(filename):
link_target = os.readlink(filename)
filename = os.path.join(os.path.dirname(filename), link_target)
is_link = True
return is_link, filename
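# Illustrative usage sketch, not part of the original module; the filename is
# an arbitrary example value. Chains of symlinks are followed to the very end:
#
#   is_link, target = getLinkTarget("/usr/bin/python3")
#   if is_link:
#       print("resolves to", target)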
def replaceFileAtomic(source_path, dest_path):
"""
Move ``src`` to ``dst``. If ``dst`` exists, it will be silently
overwritten.
Both paths must reside on the same filesystem for the operation to be
atomic.
"""
if python_version >= 0x300:
os.replace(source_path, dest_path)
else:
importFromInlineCopy("atomicwrites", must_exist=True).replace_atomic(
source_path, dest_path
)
def resolveShellPatternToFilenames(pattern):
"""Resolve shell pattern to filenames.
Args:
pattern - str
Returns:
list - filenames that matched.
"""
if "**" in pattern:
if python_version >= 0x350:
result = glob.glob(pattern, recursive=True)
else:
glob2 = importFromInlineCopy("glob2", must_exist=False)
if glob2 is None:
options_logger.sysexit(
"Using pattern with '**' is not supported before Python 3.5 unless glob2 is installed."
)
result = glob2.glob(pattern)
else:
result = glob.glob(pattern)
result = [os.path.normpath(filename) for filename in result]
result.sort()
return result
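# Illustrative usage sketch, not part of the original module; the pattern is an
# arbitrary example value. "**" enables recursive matching on Python 3.5+ (or
# with the bundled "glob2" on older versions):
#
#   for filename in resolveShellPatternToFilenames("data/**/*.json"):
#       print(filename)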
@contextmanager
def withDirectoryChange(path, allow_none=False):
"""Change current directory temporarily in a context."""
# spellchecker: ignore chdir
if path is not None or not allow_none:
old_cwd = os.getcwd()
os.chdir(path)
yield
if path is not None or not allow_none:
        os.chdir(old_cwd)
/KeralaPyApiV2-2.0.2020.tar.gz/KeralaPyApiV2-2.0.2020/pyrogram/client/methods/messages/edit_message_media.py
import os
from typing import Union
import pyrogram
from pyrogram.api import functions, types
from pyrogram.client.ext import BaseClient, utils
from pyrogram.client.types import (
InputMediaPhoto, InputMediaVideo, InputMediaAudio,
InputMediaAnimation, InputMediaDocument
)
from pyrogram.client.types.input_media import InputMedia
class EditMessageMedia(BaseClient):
async def edit_message_media(
self,
chat_id: Union[int, str],
message_id: int,
media: InputMedia,
reply_markup: "pyrogram.InlineKeyboardMarkup" = None
) -> "pyrogram.Message":
"""Edit animation, audio, document, photo or video messages.
If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, the
message type can be changed arbitrarily.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
media (:obj:`InputMedia`):
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
:obj:`Message`: On success, the edited message is returned.
Example:
.. code-block:: python
from pyrogram import InputMediaPhoto, InputMediaVideo, InputMediaAudio
# Replace the current media with a local photo
app.edit_message_media(chat_id, message_id, InputMediaPhoto("new_photo.jpg"))
# Replace the current media with a local video
app.edit_message_media(chat_id, message_id, InputMediaVideo("new_video.mp4"))
# Replace the current media with a local audio
app.edit_message_media(chat_id, message_id, InputMediaAudio("new_audio.mp3"))
"""
caption = media.caption
parse_mode = media.parse_mode
if isinstance(media, InputMediaPhoto):
if os.path.exists(media.media):
media = await self.send(
functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=types.InputMediaUploadedPhoto(
file=await self.save_file(media.media)
)
)
)
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=media.photo.id,
access_hash=media.photo.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaPhotoExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, media.file_ref, 2)
elif isinstance(media, InputMediaVideo):
if os.path.exists(media.media):
media = await self.send(
functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
                            thumb=None if media.thumb is None else await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=media.supports_streaming or None,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, media.file_ref, 4)
elif isinstance(media, InputMediaAudio):
if os.path.exists(media.media):
media = await self.send(
functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "audio/mpeg",
                            thumb=None if media.thumb is None else await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
types.DocumentAttributeAudio(
duration=media.duration,
performer=media.performer,
title=media.title
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, media.file_ref, 9)
elif isinstance(media, InputMediaAnimation):
if os.path.exists(media.media):
media = await self.send(
functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
                            thumb=None if media.thumb is None else await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=True,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
),
types.DocumentAttributeAnimated()
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, media.file_ref, 10)
elif isinstance(media, InputMediaDocument):
if os.path.exists(media.media):
media = await self.send(
functions.messages.UploadMedia(
peer=await self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "application/zip",
                            thumb=None if media.thumb is None else await self.save_file(media.thumb),
file=await self.save_file(media.media),
attributes=[
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
media = utils.get_input_media_from_file_id(media.media, media.file_ref, 5)
r = await self.send(
functions.messages.EditMessage(
peer=await self.resolve_peer(chat_id),
id=message_id,
media=media,
reply_markup=reply_markup.write() if reply_markup else None,
**await self.parser.parse(caption, parse_mode)
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return await pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
                )
/Data%20Lake-0.0.0.1.tar.gz/Data Lake-0.0.0.1/DataLake/DataLake.py
from notebookutils import mssparkutils
from delta.tables import *
from pyspark.sql.functions import *
import json
class DataLake(ABC):
events = None
string_validate_columns = ""
input_data = ""
mount_data = ""
json_load_insert_values = {}
colums_validate_merge = []
def __init__(self, storage_account_name, storage_account_access_key, storage_landing, storage_bronze,
input_data_param, mount_data_param, colums_validate_merge_param, new_name,database, target, origin):
self.storage_account_name=storage_account_name
self.storage_account_access_key=storage_account_access_key
self.storage_landing = storage_landing
self.storage_bronze = storage_bronze
self.input_data_param = input_data_param
self.mount_data_param = mount_data_param
self.colums_validate_merge_param = colums_validate_merge_param
self.new_name = new_name
self.table_name = new_name
self.table_exist = False
self.database = database
self.target = target
self.origin = origin
    @abstractmethod
def merge(self):
pass
    @abstractmethod
def initialize_variables(self):
pass
    @abstractmethod
def load_data(self):
pass
def initialize_config_storage(self):
spark.conf.set(
"fs.azure.account.key." + self.storage_account_name + ".blob.core.windows.net",
self.storage_account_access_key)
def validate_tables(self):
list_tables = spark.catalog.listTables(self.database)
for item in list_tables:
print(item)
print(type(item))
print(item.name)
self.table_exist = item.name.lower() == self.table_name.lower()
if self.table_exist:
break
def load_data_csv(self, input_data):
self.events = spark.read.format("csv").option("header", "true").load(input_data)
def create_colums_merge(self):
string_and = "and"
for item in self.colums_validate_merge:
condition = "{0}.{2} = {1}.{2}".format(self.origin, self.target, item)
if item is not "":
if self.string_validate_columns is "":
self.string_validate_columns = condition
else:
self.string_validate_columns = "{0} {1} {2}".format(self.string_validate_columns, string_and, condition)
        print(self.string_validate_columns)
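        # Illustrative example, not part of the original module: with
        # colums_validate_merge = ["id", "date"], origin = "src" and
        # target = "tgt", the resulting condition string is
        #   "src.id = tgt.id and src.date = tgt.date"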
def create_json_columns_pass(self):
insert_values = "{"
for item in self.colums_silver_array:
if item is not "":
insert_values = "{0}{1}".format(insert_values, '"{0}":"{1}.{0}", '.format(item, self.origin))
insert_values = insert_values[0: len(insert_values) - 2] + '}'
self.json_load_insert_values = json.loads(insert_values)
        print(self.json_load_insert_values)
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/onedrive/termstore/sets/collection.py
from office365.onedrive.termstore.sets.name import LocalizedName
from office365.onedrive.termstore.sets.set import Set
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.create_entity import CreateEntityQuery
class SetCollection(EntityCollection):
def __init__(self, context, resource_path=None, parent_group=None):
"""
:param office365.onedrive.termstore.groups.group.Group parent_group: The parent group that contains the set
"""
super(SetCollection, self).__init__(context, Set, resource_path)
self._parent_group = parent_group
def get_by_name(self, name):
"""
Returns the TermSet specified by its name.
:param str name: Term set name
:rtype: Set
"""
return self.single("displayName eq '{0}'".format(name))
def add(self, name, parent_group=None):
"""Create a new set object.
:param office365.onedrive.termstore.group.Group parent_group: The parent group that contains the set.
:param str name: Default name (in en-US localization).
"""
return_type = Set(self.context)
self.add_child(return_type)
def _group_loaded(set_create_info):
qry = CreateEntityQuery(self, set_create_info, return_type)
self.context.add_query(qry)
if self._parent_group is not None:
props = {
"localizedNames": ClientValueCollection(LocalizedName, [LocalizedName(name)])
}
self._parent_group.ensure_property("id", _group_loaded, props)
elif parent_group is not None:
props = {
"parentGroup": {"id": parent_group.id},
"localizedNames": ClientValueCollection(LocalizedName, [LocalizedName(name)])
}
parent_group.ensure_property("id", _group_loaded, props)
else:
raise TypeError("Parameter 'parent_group' is not set")
        return return_type
/GailBot_Testing_Suite-0.1a8-py3-none-any.whl/gailbot/services/organizer/settings/interface/googleInterface.py
from pydantic import BaseModel, ValidationError
from typing import Dict, Union
from .engineSettingInterface import EngineSettingInterface
from gailbot.core.utils.logger import makelogger
from gailbot.core.engines.google import Google
from gailbot.core.utils.general import copy, is_file, is_directory, make_dir, get_name, get_extension
from gailbot.configs import workspace_config_loader
API_KEY_DIR = workspace_config_loader().engine_ws.google_api
logger = makelogger("google_interface")
class ValidateGoogle(BaseModel):
engine: str
google_api_key: str
class Transcribe(BaseModel):
"""
NOTE: google does not support additional kwargs in transcription
"""
pass
class Init(BaseModel):
# the path to a file that stores the google api key
google_api_key: str
class GoogleInterface(EngineSettingInterface):
"""
Interface for the Google speech to text engine
"""
engine: str
init: Init = None
transcribe: Transcribe = None
def load_google_setting(setting: Dict[str, str]) -> Union[bool, EngineSettingInterface]:
""" given a dictionary, load the dictionary as a google setting
Args:
setting (Dict[str, str]): the dictionary that contains the setting data
Returns:
Union[bool , SettingInterface]: if the setting dictionary is validated
by the google setting interface,
return the google setting interface
as an instance of SettingInterface,
else return false
"""
logger.info(setting)
if not "engine" in setting.keys() or setting["engine"] != "google":
return False
try:
setting = setting.copy()
validate = ValidateGoogle(**setting)
if not is_directory(API_KEY_DIR):
make_dir(API_KEY_DIR)
# check that the api key is valid
assert Google.is_valid_google_api(setting["google_api_key"])
# save a copied version of the api key file to the workspace
copied_api = os.path.join(API_KEY_DIR, get_name(setting["google_api_key"]) + ".json")
setting["google_api_key"] = copy(setting["google_api_key"], copied_api)
google_set = dict ()
google_set["engine"] = setting.pop("engine")
google_set["init"] = dict()
google_set["transcribe"] = dict()
google_set["init"].update(setting)
google_setting = GoogleInterface(**google_set)
return google_setting
except ValidationError as e:
logger.error(e, exc_info=e)
        return False
/CephQeSdk-1.0.0.tar.gz/CephQeSdk-1.0.0/src/RhcsQeSdk/core/cli/ceph/orch/host.py
from copy import deepcopy
import RhcsQeSdk.core.cli.fabfile as fabfile
from RhcsQeSdk.core.cli.ceph.orch.label import Label
from RhcsQeSdk.core.cli.ceph.orch.maintanence import Maintanence
from RhcsQeSdk.core.utilities import core_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s"
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Host:
"""
This module provides a command line interface (CLI) to ceph orch host.
"""
def __init__(self, base_cmd):
self.base_cmd = base_cmd + " host"
self.label = Label(self.base_cmd)
self.maintanence = Maintanence(self.base_cmd)
def ls(self, **kw):
"""
Displays the current hosts and labels.
Args:
None
Returns:
Dict(str)
A mapping of host strings of the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
cmd = self.base_cmd + " ls"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def add_(self, **kw):
"""
To add new hosts to cluster.
Args:
kw (Dict): Key/value pairs that needs to be provided to the installer.
Example:
Supported keys:
host_name(str): name of host.
labels(str): name of label.(Optional).
ip_address(str): Ipaddress of host.
Returns:
Dict(str)
A mapping of host strings of the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
kw_copy = deepcopy(kw)
host_string = kw_copy.pop("host_string")
cmd_args = core_utils.build_cmd_args(kw=kw_copy)
cmd = self.base_cmd + f" add {host_string}" + cmd_args
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
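    # Illustrative usage sketch, not part of the original module; the base
    # command, host string and label are arbitrary example values, and the
    # exact flag formatting of extra keys depends on core_utils.build_cmd_args:
    #
    #   host = Host("ceph orch")
    #   host.add_(kw={"host_string": "node1 10.0.0.1", "labels": "mon"})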
def drain(self, **kw):
"""
To drain all daemons from specified host.
Args:
            kw (Dict): Key/value pairs that need to be provided to the installer.
Example:
Supported keys:
host_name(str): name of host.
force(bool): force drain.
Returns:
Dict(str)
A mapping of host strings of the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
host_name = kw.get("host_name", "")
force = "--force" if kw.get("force") else ""
cmd = self.base_cmd + f" drain {host_name} {force}"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def rm(self, **kw):
"""
To remove host.
Args:
            kw (Dict): Key/value pairs that need to be provided to the installer.
Example:
Supported keys:
host_name(str): name of host.
                  force(bool): Whether it's a forced operation or not (Optional).
offline(bool): if a host is offline(Optional).
Returns:
Dict(str)
A mapping of host strings of the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
kw_copy = deepcopy(kw)
host_name = kw_copy.pop("host_name", "")
cmd = self.base_cmd + f" rm {host_name}" + core_utils.build_cmd_args(kw=kw_copy)
logger.info(f"Running command {cmd}")
        return fabfile.run_command(cmd, config=kw.get("env_config"))
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/_items_/input_form/inputList/inputList.py
import json
class inputList(Famcy.FamcyInputBlock):
"""
Represents the block to display
paragraph.
"""
def __init__(self, **kwargs):
self.value = inputList.generate_template_content()
super(inputList, self).__init__(**kwargs)
self.init_block()
@classmethod
def generate_template_content(cls):
return {
"title": "inputList",
"desc": "",
"mandatory": False,
"value": [],
"returnValue": [],
"defaultValue": None,
"action_after_post": "clean", # (clean / save)
}
def init_block(self):
self.body = Famcy.div()
self.body["id"] = self.id
self.body["className"] = "inputList"
h3_temp = Famcy.h3()
p_temp = Famcy.p()
div_temp = Famcy.div()
div_temp["id"] = self.id + '_inputList'
div_temp["className"] = "inputList_holder"
sel_temp = Famcy.select()
div_temp.addElement(sel_temp)
script = Famcy.script()
script.innerHTML = 'generate_list("' + self.id + '")'
self.body.addElement(h3_temp)
self.body.addElement(p_temp)
self.body.addElement(div_temp)
self.body.addElement(script)
def render_inner(self):
self.body.children[2].children[0].children = []
if "---" not in self.value["value"]:
self.value["value"].insert(0, "---")
self.value["returnValue"].insert(0, "---")
for i, list_value in enumerate(self.value["value"]):
opt_temp = Famcy.option()
opt_temp["name"] = self.name
opt_temp["value"] = str(self.value["returnValue"][i]) if len(self.value["returnValue"]) == len(self.value["value"]) else str(list_value)
opt_temp.innerHTML = str(list_value)
self.body.children[2].children[0].addElement(opt_temp)
if self.value["defaultValue"]:
self.body["default_value"] = self.value["defaultValue"]
else:
self.body["default_value"] = "---"
if self.value["mandatory"]:
self.body["className"] = "required_list"
else:
if "required_list" in self.body.classList:
self.body.classList.remove("required_list")
self.body.children[0].innerHTML = self.value["title"]
self.body.children[1].innerHTML = self.value["desc"]
self.body.children[2].children[0]["after_action"] = self.value["action_after_post"]
        return self.body
/Mopidy-Spotmop-2.10.1.tar.gz/Mopidy-Spotmop-2.10.1/mopidy_spotmop/static/app/services/notify/service.js
/**
* Service to facilitate the creation and management of dialogs globally
**/
.factory("NotifyService", ['$rootScope', '$compile', '$interval', '$timeout', 'SettingsService', function( $rootScope, $compile, $interval, $timeout, SettingsService ){
// setup response object
return {
/**
* Create a new notification item
* @param message = string (body of message)
* @param duration = int (how long to hold message) optional
**/
notify: function( message, duration ){
if( typeof(duration) === 'undefined' )
var duration = 2500;
var notification = $('<notification class="notification default">'+message+'</notification>');
$('#notifications').append( notification );
			// hide it when we meet our duration
			// remember that we can disable hiding by passing duration=false
if( duration )
$timeout(
function(){
notification.fadeOut(200, function(){ notification.remove() } );
},
duration
);
},
/**
* Error message
			 * @param message = string (body of message)
* @param duration = int (how long to hold message) optional
**/
error: function( message, duration ){
if( typeof(duration) === 'undefined' )
var duration = 2500;
var notification = $('<notification class="notification error">'+message+'</notification>');
$('#notifications').append( notification );
			// hide it when we meet our duration
			// remember that we can disable hiding by passing duration=false
if( duration )
$timeout(
function(){
notification.fadeOut(200, function(){ notification.remove() } );
},
duration
);
},
/**
* When we want to notify the user that they need to pull finger and authenticate with Spotify
**/
spotifyAuthenticationError: function(){
this.error( 'Please authenticate with Spotify - you can find this under settings' );
},
/**
* When a shortcut is triggered, notify, growl styles
* @param icon = string (icon type to use)
**/
shortcut: function( icon ){
$('#notifications').find('notification.keyboard-shortcut').remove();
var notification = $('<notification class="notification keyboard-shortcut"><i class="fa fa-'+icon+'"></i></notification>');
$('#notifications').append( notification );
$timeout(
function(){
notification.fadeOut(200, function(){ notification.remove() } );
},
1500
);
},
/**
* HTML5 browser notifications
* @param title = string
* @param body = string
* @param icon = string (optional)
**/
browserNotify: function( title, body, icon ){
// handle null icon
if( typeof(icon) === 'undefined' ) icon = '';
// disabled by user
if( SettingsService.getSetting('notificationsDisabled') ) return false;
// Determine the correct object to use
var notification = window.Notification || window.mozNotification || window.webkitNotification;
// not supported
if ('undefined' === typeof notification) return false;
// The user needs to allow this
if ('undefined' !== typeof notification) notification.requestPermission(function(permission){});
var trackNotification = new notification(
title,
{
body: body,
dir: 'auto',
lang: 'EN',
tag: 'spotmopNotification',
icon: icon
}
);
return true;
}
};
}])
/**
* Behavior for the notification itself
**/
.directive("notification", function(){
return {
restrict: 'AE',
link: function($scope, $element, $attrs){
console.log( $element );
}
}
});
/Furious-GUI-0.2.4.tar.gz/Furious-GUI-0.2.4/Furious/Core/Configuration.py
from Furious.Core.Intellisense import Intellisense
from Furious.Widget.Widget import MessageBox
from Furious.Utility.Constants import APPLICATION_NAME
from Furious.Utility.Utility import Base64Encoder, Protocol, bootstrapIcon
from Furious.Utility.Translator import gettext as _
import copy
import ujson
import functools
import urllib.parse
# '/' will be quoted for V2rayN compatibility.
quote = functools.partial(urllib.parse.quote, safe='')
unquote = functools.partial(urllib.parse.unquote)
urlunparse = functools.partial(urllib.parse.urlunparse)
class UnsupportedServerExport(Exception):
pass
class Configuration:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def toJSON(text):
return ujson.loads(text)
@staticmethod
def corruptedError(errorBox, isAsync=False):
assert isinstance(errorBox, MessageBox)
errorBox.setIcon(MessageBox.Icon.Critical)
errorBox.setWindowTitle(_('Server configuration corrupted'))
errorBox.setText(
_(
f'{APPLICATION_NAME} cannot restore your server configuration. '
f'It may have been tampered with.'
)
)
errorBox.setInformativeText(
_(f'The configuration content has been cleared by {APPLICATION_NAME}.')
)
if isAsync:
# Show the MessageBox asynchronously
errorBox.open()
else:
# Show the MessageBox and wait for user to close it
errorBox.exec()
@staticmethod
def export(remark, ob):
if Intellisense.getCoreType(ob) == XrayCore.name():
def hasKey(obj, key):
return obj.get(key) is not None
def getStreamNetSettings(protocol, streamObject, netType):
kwargs = {}
netobj = streamObject[
ProxyOutboundObject.getStreamNetworkSettingsName(netType)
]
if netType == 'tcp':
try:
if protocol == Protocol.VMess:
kwargs['type'] = netobj['header']['type']
if protocol == Protocol.VLESS:
kwargs['headerType'] = netobj['header']['type']
except Exception:
# Any non-exit exceptions
pass
elif netType == 'kcp':
try:
# Get order matters here
if protocol == Protocol.VMess:
if hasKey(netobj, 'seed'):
kwargs['path'] = netobj['seed']
kwargs['type'] = netobj['header']['type']
if protocol == Protocol.VLESS:
if hasKey(netobj, 'seed'):
kwargs['seed'] = netobj['seed']
kwargs['headerType'] = netobj['header']['type']
except Exception:
# Any non-exit exceptions
pass
elif netType == 'ws':
try:
# Get order matters here
if hasKey(netobj, 'path'):
kwargs['path'] = quote(netobj['path'])
if netobj['headers']['Host']:
kwargs['host'] = quote(netobj['headers']['Host'])
except Exception:
# Any non-exit exceptions
pass
elif netType == 'h2' or netType == 'http':
try:
# Get order matters here
if hasKey(netobj, 'path'):
kwargs['path'] = quote(netobj['path'])
kwargs['host'] = quote(','.join(netobj['host']))
except Exception:
# Any non-exit exceptions
pass
elif netType == 'quic':
try:
# Get order matters here
if protocol == Protocol.VMess:
if hasKey(netobj, 'security'):
kwargs['host'] = netobj['security']
if hasKey(netobj, 'key'):
kwargs['path'] = quote(netobj['key'])
kwargs['type'] = netobj['header']['type']
if protocol == Protocol.VLESS:
if hasKey(netobj, 'security'):
kwargs['quicSecurity'] = netobj['security']
if hasKey(netobj, 'key'):
kwargs['path'] = quote(netobj['key'])
kwargs['headerType'] = netobj['header']['type']
except Exception:
# Any non-exit exceptions
pass
elif netType == 'grpc':
if protocol == Protocol.VMess:
if hasKey(netobj, 'serviceName'):
kwargs['path'] = netobj['serviceName']
if protocol == Protocol.VLESS:
if hasKey(netobj, 'serviceName'):
kwargs['serviceName'] = netobj['serviceName']
return kwargs
def getStreamTLSSettings(streamObject, tlsType):
if tlsType == 'none':
return {}
kwargs = {}
tlsobj = streamObject[
ProxyOutboundObject.getStreamTLSSettingsName(tlsType)
]
if tlsType == 'reality' or tlsType == 'tls':
if tlsobj.get('fingerprint'):
kwargs['fp'] = tlsobj['fingerprint']
if tlsobj.get('serverName'):
kwargs['sni'] = tlsobj['serverName']
if tlsobj.get('alpn'):
kwargs['alpn'] = quote(','.join(tlsobj['alpn']))
if tlsType == 'reality':
# More kwargs for reality
if tlsobj.get('publicKey'):
kwargs['pbk'] = tlsobj['publicKey']
if tlsobj.get('shortId'):
kwargs['sid'] = tlsobj['shortId']
if tlsobj.get('spiderX'):
kwargs['spx'] = quote(tlsobj['spiderX'])
return kwargs
# Begin export
coreProtocol = Intellisense.getCoreProtocol(ob)
if coreProtocol == Protocol.VMess or coreProtocol == Protocol.VLESS:
proxyOutbound = None
for outbound in ob['outbounds']:
if outbound['tag'] == 'proxy':
proxyOutbound = outbound
break
if proxyOutbound is None:
raise Exception('No proxy outbound found')
proxyServer = proxyOutbound['settings']['vnext'][0]
proxyServerUser = proxyServer['users'][0]
proxyStream = proxyOutbound['streamSettings']
proxyStreamNet = proxyStream['network']
proxyStreamTLS = proxyStream['security']
if coreProtocol == Protocol.VMess:
return (
'vmess://'
+ Base64Encoder.encode(
ujson.dumps(
{
'v': '2',
'ps': quote(remark),
'add': proxyServer['address'],
'port': proxyServer['port'],
'id': proxyServerUser['id'],
'aid': proxyServerUser['alterId'],
'scy': proxyServerUser['security'],
'net': proxyStreamNet,
'tls': proxyStreamTLS,
# kwargs
**getStreamNetSettings(
coreProtocol, proxyStream, proxyStreamNet
),
**getStreamTLSSettings(proxyStream, proxyStreamTLS),
},
ensure_ascii=False,
escape_forward_slashes=False,
).encode()
).decode()
)
if coreProtocol == Protocol.VLESS:
flowArg = {}
if proxyServerUser.get('flow'):
flowArg['flow'] = proxyServerUser['flow']
netloc = f'{proxyServerUser["id"]}@{proxyServer["address"]}:{proxyServer["port"]}'
query = '&'.join(
f'{key}={value}'
for key, value in {
'encryption': proxyServerUser['encryption'],
'type': proxyStreamNet,
'security': proxyStreamTLS,
# kwargs
**flowArg,
**getStreamNetSettings(
coreProtocol, proxyStream, proxyStreamNet
),
**getStreamTLSSettings(proxyStream, proxyStreamTLS),
}.items()
)
return urlunparse(['vless', netloc, '', '', query, quote(remark)])
if coreProtocol == Protocol.Shadowsocks:
proxyOutbound = None
for outbound in ob['outbounds']:
if outbound['tag'] == 'proxy':
proxyOutbound = outbound
break
if proxyOutbound is None:
raise Exception('No proxy outbound found')
proxyServer = proxyOutbound['settings']['servers'][0]
method = proxyServer['method']
password = proxyServer['password']
address = proxyServer['address']
port = proxyServer['port']
netloc = f'{quote(method)}:{quote(password)}@{address}:{port}'
return urlunparse(['ss', netloc, '', '', '', quote(remark)])
else:
raise UnsupportedServerExport('Unsupported core protocol export')
class OutboundObject:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build(self):
raise NotImplementedError
class ProxyOutboundObject(OutboundObject):
def __init__(
self,
protocol,
remote_host,
remote_port,
uuid_,
encryption,
type_,
security,
**kwargs,
):
super().__init__()
self.protocol = protocol
self.remote_host = remote_host
self.remote_port = remote_port
self.uuid_ = uuid_
self.encryption = encryption
self.type_ = type_
self.security = security
self.kwargs = kwargs
def getTLSSettings(self, security):
TLSObject = {}
if security == 'reality' or security == 'tls':
            # Note: If the default value 'chrome' is specified, some share links fail.
            # Leave the default value empty.
fp = self.kwargs.get('fp')
sni = self.kwargs.get('sni', self.remote_host)
# Protect "xxx," format
alpn = list(
filter(
lambda x: x != '',
unquote(self.kwargs.get('alpn', '')).split(','),
)
)
if fp:
TLSObject['fingerprint'] = fp
if sni:
TLSObject['serverName'] = sni
if alpn:
TLSObject['alpn'] = alpn
if security == 'reality':
# More args for reality
pbk = self.kwargs.get('pbk')
sid = self.kwargs.get('sid', '')
spx = self.kwargs.get('spx', '')
if pbk:
TLSObject['publicKey'] = pbk
TLSObject['shortId'] = sid
TLSObject['spiderX'] = unquote(spx)
return TLSObject
@staticmethod
def getStreamTLSSettingsName(security):
# tlsSettings, realitySettings
return f'{security}Settings'
@staticmethod
def getStreamNetworkSettingsName(type_):
if type_ == 'h2':
return 'httpSettings'
else:
return f'{type_}Settings'
def getStreamNetworkSettings(self):
# Note:
# V2rayN share standard doesn't require unquote. Still
# unquote value according to (VMess AEAD / VLESS) standard
if self.type_ == 'tcp':
TcpObject = {}
if self.kwargs.get('headerType', 'none'):
TcpObject['header'] = {
'type': self.kwargs.get('headerType', 'none'),
}
return TcpObject
elif self.type_ == 'kcp':
KcpObject = {
# Extension. From V2rayN
'uplinkCapacity': 12,
'downlinkCapacity': 100,
}
if self.kwargs.get('headerType', 'none'):
KcpObject['header'] = {
'type': self.kwargs.get('headerType', 'none'),
}
if self.kwargs.get('seed'):
KcpObject['seed'] = self.kwargs.get('seed')
return KcpObject
elif self.type_ == 'ws':
WebSocketObject = {}
if self.kwargs.get('path', '/'):
WebSocketObject['path'] = unquote(self.kwargs.get('path', '/'))
if self.kwargs.get('host'):
WebSocketObject['headers'] = {
'Host': unquote(self.kwargs.get('host')),
}
return WebSocketObject
elif self.type_ == 'h2' or self.type_ == 'http':
HttpObject = {}
if self.kwargs.get('host', self.remote_host):
# Protect "xxx," format
HttpObject['host'] = list(
filter(
lambda x: x != '',
unquote(self.kwargs.get('host', self.remote_host)).split(','),
)
)
if self.kwargs.get('path', '/'):
HttpObject['path'] = unquote(self.kwargs.get('path', '/'))
return HttpObject
elif self.type_ == 'quic':
QuicObject = {}
if self.kwargs.get('quicSecurity', 'none'):
QuicObject['security'] = self.kwargs.get('quicSecurity', 'none')
if self.kwargs.get('key'):
QuicObject['key'] = unquote(self.kwargs.get('key'))
if self.kwargs.get('headerType', 'none'):
QuicObject['header'] = {
'type': self.kwargs.get('headerType', 'none'),
}
return QuicObject
elif self.type_ == 'grpc':
GRPCObject = {}
if self.kwargs.get('serviceName'):
GRPCObject['serviceName'] = self.kwargs.get('serviceName')
if self.kwargs.get('mode', 'gun'):
GRPCObject['multiMode'] = self.kwargs.get('mode', 'gun') == 'multi'
return GRPCObject
def getUserObject(self):
if self.protocol == 'vmess':
UserObject = {
'id': self.uuid_,
'security': self.encryption,
# Extension
'email': '[email protected]',
}
# For VMess(V2rayN share standard) only.
if self.kwargs.get('aid') is not None:
UserObject['alterId'] = self.kwargs.get('aid')
return UserObject
if self.protocol == 'vless':
UserObject = {
'id': self.uuid_,
'encryption': self.encryption,
# Extension
'email': '[email protected]',
}
if self.kwargs.get('flow'):
# flow is empty, TLS. Otherwise, XTLS.
UserObject['flow'] = self.kwargs.get('flow')
return UserObject
return {}
def build(self):
myJSON = {
'tag': 'proxy',
'protocol': self.protocol,
'settings': {
'vnext': [
{
'address': self.remote_host,
'port': self.remote_port,
'users': [
self.getUserObject(),
],
},
]
},
'streamSettings': {
'network': self.type_,
'security': self.security,
},
'mux': {
'enabled': False,
'concurrency': -1,
},
}
if self.security != 'none':
# tlsSettings, realitySettings
myJSON['streamSettings'][
ProxyOutboundObject.getStreamTLSSettingsName(self.security)
] = self.getTLSSettings(self.security)
# Stream network settings
myJSON['streamSettings'][
ProxyOutboundObject.getStreamNetworkSettingsName(self.type_)
] = self.getStreamNetworkSettings()
return myJSON
class ProxyOutboundObjectSS(OutboundObject):
def __init__(self, method, password, address, port, **kwargs):
super().__init__(**kwargs)
self.method = method
self.password = password
self.address = address
self.port = int(port)
def build(self):
myJSON = {
'tag': 'proxy',
'protocol': 'shadowsocks',
'settings': {
'servers': [
{
'address': self.address,
'port': self.port,
'method': self.method,
'password': self.password,
'ota': False,
},
]
},
'streamSettings': {
'network': 'tcp',
},
'mux': {
'enabled': False,
'concurrency': -1,
},
}
return myJSON
class Outbounds:
def __init__(self, proxyOutboundObject):
self.proxyOutboundObject = proxyOutboundObject
def build(self):
return [
# Proxy
self.proxyOutboundObject.build(),
# Direct
{
'tag': 'direct',
'protocol': 'freedom',
'settings': {},
},
# Block
{
'tag': 'block',
'protocol': 'blackhole',
'settings': {
'response': {
'type': 'http',
}
},
},
]
class XrayCoreConfiguration:
DEFAULT_CONF = {
# Default log configuration
'log': {
'access': '',
'error': '',
'loglevel': 'warning',
},
# Default inbounds configuration
'inbounds': [
{
'tag': 'socks',
'port': 10808,
'listen': '127.0.0.1',
'protocol': 'socks',
'sniffing': {
'enabled': True,
'destOverride': [
'http',
'tls',
],
},
'settings': {
'auth': 'noauth',
'udp': True,
'allowTransparent': False,
},
},
{
'tag': 'http',
'port': 10809,
'listen': '127.0.0.1',
'protocol': 'http',
'sniffing': {
'enabled': True,
'destOverride': [
'http',
'tls',
],
},
'settings': {
'auth': 'noauth',
'udp': True,
'allowTransparent': False,
},
},
],
}
@staticmethod
def getDefaultJSON():
return copy.deepcopy(XrayCoreConfiguration.DEFAULT_CONF)
@staticmethod
def build(proxyOutboundObject):
# log, inbounds
myJSON = XrayCoreConfiguration.getDefaultJSON()
# Add outbounds
myJSON['outbounds'] = Outbounds(proxyOutboundObject).build()
# Add empty routing
myJSON['routing'] = {}
return myJSON | PypiClean |
/BabitMF_GPU-0.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/bmf/builder/bmf_graph.py | import json
import os
import sys
import threading
import time
from bmf.lib._bmf import engine
from .bmf_modules import bmf_modules
from .bmf_node import BmfNode, BmfEdge
from .bmf_stream import BmfStream
from .ff_filter import get_filter_para
from .graph_config import NodeConfig, GraphConfigEncoder, GraphConfig, StreamConfig, ModuleConfig, MetaConfig
from ..ffmpeg_engine.engine import FFmpegEngine
from ..python_sdk import Log, LogLevel, Timestamp
## @defgroup pyAPI API in Python
if sys.version_info.major == 2:
from Queue import Queue
else:
from queue import Queue
class BmfCallBackType:
LATEST_TIMESTAMP = 0
class GraphMode:
NORMAL = 'Normal' # indicate normal mode
SERVER = 'Server' # indicate server mode
GENERATOR = 'Generator' # indicate generator mode
SUBGRAPH = 'Subgraph' # indicate subgraph
PUSHDATA = 'Pushdata' # indicate push data
FFMPEG = 'ffmpeg'
C_ENGINE = 'c_engine'
## @ingroup pyAPI
## @defgroup grphClass BmfGraph
###@{
# BMF graph class
###@}
class BmfGraph:
global_node_id_ = 0
global_added_id_ = 0
server_input_name = "server_input"
node_id_mutex_ = threading.Lock()
logbuffer_ = None
av_log_list_ = list()
def __init__(self, option=None):
if option is None:
option = {}
self.mode = GraphMode.NORMAL
self.nodes_ = []
self.option_ = option
# ignore graph output stream
self.no_output_stream_ = option.get('no_output_stream', True)
# graph input and output streams
self.input_streams_ = []
self.output_streams_ = []
# save pre_created streams in SERVER mode
self.node_streams_ = []
# engine graph
self.exec_graph_ = None
self.graph_config_ = None
self.update_graph_ = None
# engine pre_allocated modules
self.pre_module = {}
# save created modules for sync mode
self.sync_mode_ = {}
# callbacks set by user
self.user_callbacks = {}
self.cb_lock = threading.RLock()
if BmfGraph.logbuffer_ is not None:
BmfGraph.logbuffer_.close()
## @ingroup pyAPI
## @ingroup grphClass
###@{
# set new graph options before run
# @param option: the option patch for the graph
def set_option(self, option=None):
###@}
if option is None:
return
for key in option:
self.option_[key] = option[key]
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To get a global log buffer (list) which includes all the logs coming from the ffmpeg libraries
#
    # @param level: ffmpeg av log level, "info" by default. It's optional and can also be set to:
# "quiet","panic","fatal","error","warning","info","verbose","debug","trace"
# @return A list object in python
    # @note Should be called BEFORE the graph runs since it notifies the ffmpeg module that the log buffer is needed;
    # the buffer will be cleared each time this function is called
def get_av_log_buffer(self, level='info'):
###@}
# ffmpeg log config
from bmf.lib._bmf.sdk import LogBuffer
BmfGraph.av_log_list_.clear()
BmfGraph.logbuffer_ = LogBuffer(BmfGraph.av_log_list_, level)
return BmfGraph.av_log_list_
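    # Illustrative usage sketch, not part of the original module:
    #
    #   graph = BmfGraph()
    #   av_logs = graph.get_av_log_buffer(level='warning')
    #   # ... build and run the graph; av_logs then accumulates ffmpeg log lines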
## @ingroup pyAPI
## @ingroup grphClass
###@{
# get sync module by given alias
# @param alias: a node tag given by user while building graph pipeline
def get_module(self, alias):
###@}
select_node = None
# find node by alias
for node in self.nodes_:
if "alias" in node.get_option() and node.get_option(
)["alias"] == alias:
select_node = node
break
# alias not correct
if select_node is None:
raise Exception('cannot find node according to alias')
# create sync module
if alias not in self.sync_mode_:
sync_mode = select_node.create_sync_module()
self.sync_mode_[alias] = sync_mode
return self.sync_mode_[alias]
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To set up a user-defined callback in the graph. The callback can be triggered from a module
    # @param cb_type: a value defined by the user to distinguish which callback to call when there are multiple callbacks
# @param cb: the function for this callback
def add_user_callback(self, cb_type, cb):
###@}
self.cb_lock.acquire()
cb_list = self.user_callbacks.get(cb_type, [])
if len(cb_list) == 0:
self.user_callbacks[cb_type] = cb_list
if cb is not None:
cb_list.append(cb)
self.cb_lock.release()
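    # Illustrative usage sketch, not part of the original module; the handler
    # below is an arbitrary example:
    #
    #   def on_latest_timestamp(para):
    #       print("latest timestamp:", para)
    #       return bytes("", "ASCII")
    #
    #   graph = BmfGraph()
    #   graph.add_user_callback(BmfCallBackType.LATEST_TIMESTAMP, on_latest_timestamp)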
## @ingroup pyAPI
## @ingroup grphClass
###@{
# Remove the user defined callback from the callback list
# @param cb_type: a value can be defined by user to distinguish which is the one to call in multiple callbacks
# @param cb: the function for this callback
def remove_user_callback(self, cb_type, cb):
###@}
self.cb_lock.acquire()
cb_list = self.user_callbacks.get(cb_type, [])
cb_list.remove(cb)
self.cb_lock.release()
def clear_user_callback(self, cb_type, cb):
self.cb_lock.acquire()
self.user_callbacks[cb_type] = []
self.cb_lock.release()
def callback_for_engine(self, cb_type, para):
# TODO: here we locked all types, can optimize to lock one type
self.cb_lock.acquire()
res = bytes("", "ASCII")
cb_list = self.user_callbacks.get(cb_type, [])
for cb in cb_list:
if cb is not None:
res = cb(para)
break
self.cb_lock.release()
return res
@staticmethod
def generate_node_id():
BmfGraph.node_id_mutex_.acquire()
result = BmfGraph.global_node_id_
BmfGraph.global_node_id_ += 1
BmfGraph.node_id_mutex_.release()
return result
@staticmethod
def generate_add_id():
BmfGraph.node_id_mutex_.acquire()
result = BmfGraph.global_added_id_
BmfGraph.global_added_id_ += 1
BmfGraph.node_id_mutex_.release()
return result
def add_node(self, node):
if node is not None:
self.nodes_.append(node)
def module(self,
module_info,
option=None,
module_path="",
entry="",
input_manager='immediate',
pre_module=None,
scheduler=0,
stream_alias=None):
if option is None:
option = {}
if isinstance(module_info, str):
return BmfNode(
{
"name": module_info,
"type": "",
"path": module_path,
"entry": entry
}, option, self, input_manager, pre_module,
scheduler).stream(stream_alias=stream_alias)
return BmfNode(module_info, option, self, input_manager, pre_module,
scheduler).stream(stream_alias=stream_alias)
## @ingroup moduleAPI
###@{
# A graph function to provide a build-in decoder BMF stream
# Include av demuxer and decoder
# @param decoder_para: the parameters for the decoder
# @return A BMF stream(s)
def decode(self,
decoder_para,
type="",
path="",
entry="",
stream_alias=None):
###@}
module_info = {
"name": bmf_modules['ff_decoder'],
"type": type,
"path": path,
"entry": entry
}
return BmfNode(module_info, decoder_para, self,
'immediate').stream(stream_alias=stream_alias)
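    # Illustrative sketch (file name and option keys are assumptions, not a
    # guaranteed parameter set): build a decoder stream from an input file.
    #   video = graph.decode({'input_path': 'input.mp4'})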
def download(self,
download_para,
type="",
path="",
entry="",
stream_alias=None):
module_info = {
"name": 'download',
"type": type,
"path": path,
"entry": entry
}
return BmfNode(module_info, download_para, self,
'immediate').stream(stream_alias=stream_alias)
def py_module(self,
name,
option=None,
module_path="",
entry="",
input_manager='immediate',
pre_module=None,
scheduler=0,
stream_alias=None):
if option is None:
option = {}
return self.module(
{
"name": name,
"type": "python",
"path": module_path,
"entry": entry
},
option,
input_manager=input_manager,
pre_module=pre_module,
scheduler=scheduler,
stream_alias=stream_alias)
def go_module(self,
name,
option=None,
module_path="",
entry="",
input_manager="immediate",
pre_module=None,
scheduler=0,
stream_alias=None):
if option is None:
option = {}
return self.module(
{
"name": name,
"type": "go",
"path": module_path,
"entry": entry
},
option,
input_manager=input_manager,
pre_module=pre_module,
scheduler=scheduler,
stream_alias=stream_alias)
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # Using the graph to build a c/c++ implemented module stream, loaded by the module library path
    # and entry
# @param name: the module name
# @param option: the parameters for the module
# @param module_path: the path to load the module
# @param entry: the call entry of the module
# @param input_manager: select the input manager for this module, immediate by default
# @return Stream(s) of the module
def c_module(self,
name,
option=None,
module_path="",
entry="",
input_manager="immediate",
pre_module=None,
scheduler=0,
stream_alias=None):
###@}
if option is None:
option = {}
return self.module(
{
"name": name,
"type": "c++",
"path": module_path,
"entry": entry
},
option,
input_manager=input_manager,
pre_module=pre_module,
scheduler=scheduler,
stream_alias=stream_alias)
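    # Illustrative sketch (module name, library path and entry point are
    # assumptions): load a c++ module from a shared library as a stream.
    #   processed = graph.c_module('my_module',
    #                              option={'key': 'value'},
    #                              module_path='./libmy_module.so',
    #                              entry='my_module:MyModule')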
def anullsrc(self, *args, **kwargs):
stream_alias = None
type = ""
path = ""
entry = ""
if 'stream_alias' in kwargs:
stream_alias = kwargs['stream_alias']
del kwargs['stream_alias']
if 'type' in kwargs:
type = kwargs['type']
del kwargs['type']
if 'path' in kwargs:
path = kwargs['path']
del kwargs['path']
if 'entry' in kwargs:
entry = kwargs['entry']
del kwargs['entry']
para = get_filter_para(*args, **kwargs)
if para is not None and len(para) > 0:
option = {'name': 'anullsrc', 'para': para}
module_info = {
"name": bmf_modules['ff_filter'],
"type": type,
"path": path,
"entry": entry
}
# create node
return BmfNode(module_info, option, self,
'immediate').stream(stream_alias=stream_alias)
def input_stream(self, name):
stream = BmfStream(name, self, name)
self.input_streams_.append(stream)
return stream
def fill_packet(self, name, packet, block=False):
if self.exec_graph_ is not None:
# pq = Queue()
# pq.put(packet)
self.exec_graph_.add_input_stream_packet(name, packet, block)
def fill_eos(self, name):
if self.exec_graph_ is not None:
self.exec_graph_.add_eos_packet(name)
def poll_packet(self, name, block=False):
if self.exec_graph_ is not None:
return self.exec_graph_.poll_output_stream_packet(name, block)
else:
time.sleep(1)
@staticmethod
def get_node_output_stream_map(node):
stream_map = {}
for edge in node.get_outgoing_edges():
stream_map[edge.get_upstream_stream().get_notify(
)] = edge.get_upstream_stream()
return stream_map
@staticmethod
def all_stream_has_notify(stream_map):
for notify in stream_map.keys():
if not isinstance(notify, str):
return False
return True
@staticmethod
def all_stream_has_index(stream_map):
max_index = -1
for notify in stream_map.keys():
if not isinstance(notify, int):
return False, 0
else:
max_index = max(max_index, notify)
return True, max_index
@staticmethod
def generate_node_stream_config(stream_map, node):
streams = []
if len(stream_map) == 0:
return streams
        # all streams have notify
if BmfGraph.all_stream_has_notify(stream_map):
for (_, stream) in stream_map.items():
stream_config = StreamConfig()
stream_config.set_identifier(stream.get_identifier())
if stream.get_alias() is None:
stream_config.set_alias("")
else:
stream_config.set_alias(stream.get_alias())
streams.append(stream_config)
return streams
# all streams don't have notify, use stream index as notify
ret, max_index = BmfGraph.all_stream_has_index(stream_map)
if ret:
for index in range(max_index + 1):
stream_config = StreamConfig()
if index in stream_map.keys():
if stream_map[index].get_alias() is None:
stream_config.set_alias("")
else:
stream_config.set_alias(stream_map[index].get_alias())
stream_config.set_identifier(
stream_map[index].get_identifier())
streams.append(stream_config)
else:
                    # just generate a unique name and hold the position
stream_config.set_identifier(node.generate_stream_name())
stream_config.set_alias("")
streams.append(stream_config)
return streams
print('failed to generate node stream config for ', node.get_type(),
node.get_id())
return streams
@staticmethod
def generate_module_info_config(module_info_dict):
module_info_config = ModuleConfig()
# set module name
if module_info_dict.get('name'):
module_info_config.set_name(module_info_dict['name'])
else:
module_info_config.set_name('')
# set module type
if module_info_dict.get('type'):
module_info_config.set_type(module_info_dict['type'])
else:
module_info_config.set_type('')
# set module path
if module_info_dict.get('path'):
module_info_config.set_path(module_info_dict['path'])
else:
module_info_config.set_path('')
# set module entry
if module_info_dict.get('entry'):
module_info_config.set_entry(module_info_dict['entry'])
else:
module_info_config.set_entry('')
return module_info_config
@staticmethod
def generate_meta_info_config(pre_module, callback_dict):
meta_info_config = MetaConfig()
# set pre_module
if pre_module is not None:
meta_info_config.set_premodule_id(pre_module.uid())
# set callback function
for key, callback in callback_dict.items():
callback_binding = "{}:{}".format(key, callback[0])
meta_info_config.add_callback_binding(callback_binding)
return meta_info_config
@staticmethod
def generate_node_config(node):
input_stream_map = node.get_input_streams()
output_stream_map = BmfGraph.get_node_output_stream_map(node)
node_config = NodeConfig()
# set node id
node_config.set_id(node.get_id())
# set option
node_config.set_option(node.get_option())
# set module info
node_config.set_module_info(
BmfGraph.generate_module_info_config(node.get_module_info()))
# set meta info
node_config.set_meta_info(
BmfGraph.generate_meta_info_config(node.get_pre_module(),
node.get_user_callback()))
# set alias
node_config.set_alias(node.get_option().get('alias', ''))
# set scheduler index
node_config.set_scheduler(node.get_scheduler())
# set input manager
node_config.set_input_manager(node.get_input_manager())
# set input streams
node_config.set_input_streams(
BmfGraph.generate_node_stream_config(input_stream_map, node))
# set output streams
node_config.set_output_streams(
BmfGraph.generate_node_stream_config(output_stream_map, node))
return node_config
def dump_graph(self, graph_config):
dump = self.option_.get('dump_graph', 0)
graph_str = json.dumps(obj=graph_config.__dict__,
ensure_ascii=False,
indent=4,
cls=GraphConfigEncoder)
# print(graph_str)
Log.log(LogLevel.DEBUG, graph_str)
if dump == 1:
if 'graph_name' in self.option_:
file_name = 'original_' + self.option_['graph_name'] + '.json'
else:
file_name = 'original_graph.json'
f = open(file_name, 'w')
f.write(graph_str)
f.close()
def generate_graph_config(self):
graph_config = GraphConfig()
# set option
graph_config.set_option(self.option_)
# set input stream
for stream in self.input_streams_:
stream_config = StreamConfig()
stream_config.set_identifier(stream.get_name())
if stream.get_alias() is None:
stream_config.set_alias("")
else:
stream_config.set_alias(stream.get_alias())
graph_config.add_input_stream(stream_config)
# set output stream
for stream in self.output_streams_:
stream_config = StreamConfig()
stream_config.set_identifier(stream.get_name())
if stream.get_alias() is None:
stream_config.set_alias("")
else:
stream_config.set_alias(stream.get_alias())
graph_config.add_output_stream(stream_config)
# node config
for node in self.nodes_:
node_config = BmfGraph.generate_node_config(node)
graph_config.add_node_config(node_config)
# graph pre_allocated module
graph_pre_module = {}
for node in self.nodes_:
if node.get_pre_module() is not None:
graph_pre_module[node.get_id()] = node.get_pre_module()
# set graph mode
graph_config.set_mode(self.mode)
return graph_config, graph_pre_module
def parse_output_streams(self, streams):
if streams is not None:
if isinstance(streams, BmfStream):
                # create an edge connecting the stream and the graph output stream
graph_output_stream = BmfStream(streams.get_name(), None, 0)
edge = BmfEdge(streams, graph_output_stream)
streams.get_node().add_outgoing_edge(edge)
self.output_streams_.append(graph_output_stream)
elif isinstance(streams, list):
for stream in streams:
if stream is not None:
graph_output_stream = BmfStream(
stream.get_name(), None, 0)
edge = BmfEdge(stream, graph_output_stream)
stream.get_node().add_outgoing_edge(edge)
self.output_streams_.append(graph_output_stream)
def get_graph_config(self):
return self.graph_config_
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To run a graph from a graph config
    # @param graph_config: the graph config object
# @return The name list of output streams in this graph
def run_by_config(self, graph_config):
###@}
self.dump_graph(graph_config)
graph_config_str = graph_config.dump()
# print(self.callback_for_engine)
self.exec_graph_ = engine.Graph(
graph_config_str, False,
graph_config.get_option().get('optimize_graph', True))
self.exec_graph_.start()
        # if the graph has no input stream, 'close' will wait for all nodes to finish
        # else, we need to fill packets into the input stream and close the graph manually
if len(self.input_streams_) == 0 and len(self.output_streams_) == 0:
self.exec_graph_.close()
elif len(self.output_streams_) > 0:
# return output stream name which is used to poll packets
output_streams_name = []
for stream in self.output_streams_:
output_streams_name.append(stream.get_name())
return output_streams_name
## @ingroup pyAPI
## @ingroup grphClass
###@{
# To generate the graph config only, without running
# @param streams: the input stream list of the module
# @param is_sub_graph: bool value to indicate whether it's a sub graph, False by default
# @param mode: to set the graph mode, NORMAL by default, other option bmf_graph.GraphMode
# @param file_name: output file name with extension
def generate_config_file(self,
streams=None,
is_sub_graph=False,
mode=GraphMode.NORMAL,
is_blocked=True,
file_name="original_graph.json"):
###@}
self.mode = mode
# in server mode, graph has output_stream
if self.mode == GraphMode.SERVER:
self.no_output_stream_ = False
# init graph output streams, support multi outputs
# ignore output stream for main graph as default
if not self.no_output_stream_ or is_sub_graph:
self.parse_output_streams(streams)
# in server mode, we should create an input_stream for graph, and also add it to the first node
if self.mode == GraphMode.SERVER:
if len(self.input_streams_) == 0:
stream = self.input_stream(self.server_input_name)
else:
stream = self.input_streams_[0]
# self.nodes_[0].input_streams_[0] = stream
self.nodes_[0].init_input_stream_and_edge(stream, 0)
for node in self.nodes_:
node.set_input_manager('server')
# parse graph config
self.graph_config_, self.pre_module = self.generate_graph_config()
if file_name != "":
# save config file
f = open(file_name, 'w')
f.write(self.graph_config_.dump())
f.close()
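    # Illustrative sketch (the file name is an assumption): dump the built
    # pipeline to a JSON config without executing it.
    #   graph.generate_config_file(file_name='my_graph.json')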
## @ingroup pyAPI
###@{
# To run the graph until it's finished
# @param streams: the input stream list of the module
# @param is_sub_graph: bool value to indicate whether it's a sub graph, False by default
# @param mode: to set the graph mode, NORMAL by default, other option bmf_graph.GraphMode
def run(self,
streams=None,
is_sub_graph=False,
mode=GraphMode.NORMAL,
is_blocked=True):
###@}
file_name = ""
if 'dump_graph' in self.option_ and self.option_['dump_graph'] == 1:
file_name = "original_graph.json"
self.generate_config_file(streams=streams,
is_sub_graph=is_sub_graph,
mode=mode,
is_blocked=is_blocked,
file_name=file_name)
graph_config_str = self.graph_config_.dump()
print(graph_config_str)
# call engine
self.exec_graph_ = engine.Graph(
graph_config_str, False, self.option_.get('optimize_graph', True))
self.exec_graph_.start()
        # if the graph has no input stream, 'close' will wait for all nodes to finish
        # else, we need to fill packets into the input stream and close the graph manually
if len(self.input_streams_) == 0 and len(self.output_streams_) == 0:
if is_blocked:
self.exec_graph_.close()
else:
print("start to run without block")
elif len(self.output_streams_) > 0:
# return output stream name which is used to poll packets
output_streams_name = []
for stream in self.output_streams_:
output_streams_name.append(stream.get_name())
return output_streams_name
return None
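    # Illustrative end-to-end sketch (module parameters are assumptions):
    # decode an input, connect further processing modules, then run blocking
    # until all nodes are finished.
    #   video = graph.decode({'input_path': 'input.mp4'})
    #   # ... connect processing/encoding modules on `video` here ...
    #   graph.run()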
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # Run the graph without waiting for it to close; the user should call close() themselves
def run_wo_block(self,
streams=None,
is_sub_graph=False,
mode=GraphMode.NORMAL):
###@}
return self.run(streams, is_sub_graph, mode, False)
def runFFmpegByConfig(self, config_path):
start_time = time.time()
self.graph_config_ = GraphConfig(config_path)
ffmpeg_engine = FFmpegEngine()
command = ""
if (ffmpeg_engine.is_valid_for_ffmpeg(self.graph_config_)):
# self.dump_graph(self.graph_config_)
command = ffmpeg_engine.get_ffmpeg_command(self.graph_config_)
command = command + " -y"
# do graph optimization
print("ffmpeg command: ", command)
os.system(command)
end_time = time.time()
ffmpeg_time = (end_time - start_time)
return ffmpeg_time
def start(self, stream, is_sub_graph=False):
self.output_streams_.append(stream)
        # create an edge connecting the stream and the graph output stream
graph_output_stream = BmfStream(stream.get_name(), None, 0)
edge = BmfEdge(stream, graph_output_stream)
stream.get_node().add_outgoing_edge(edge)
if stream is not None:
self.mode = GraphMode.GENERATOR
# parse graph config
self.graph_config_, self.pre_module = self.generate_graph_config()
# for sub-graph, don't start executing
if is_sub_graph:
return
# create and run graph
graph_config_str = self.graph_config_.dump()
self.exec_graph_ = engine.Graph(graph_config_str, False, True)
self.exec_graph_.start()
while True:
pkt = self.exec_graph_.poll_output_stream_packet(
stream.get_name(), True)
if pkt is not None and pkt.defined():
if pkt.timestamp == Timestamp.EOF:
break
yield pkt
self.exec_graph_.close()
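    # Illustrative generator-mode sketch (the stream variable and handler are
    # assumptions): iterate packets of one output stream as they are produced.
    #   for pkt in graph.start(my_output_stream):
    #       process(pkt)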
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To generate the graph for dynamically removing a node; this graph should be different from the running main graph.
    # @param option: json style description of which node to remove
    # e.g. {'alias': 'decode1'}
def dynamic_remove(self, option):
###@}
alias_name = option.get('alias', '')
if len(alias_name) == 0:
            Log.log(LogLevel.ERROR,
                    "an alias name is required for removing")
return False
self.graph_ = BmfGraph(option)
remove_node = BmfNode(alias_name, option, self, 'immediate')
self.graph_.add_node(self)
self.graph_config_, pre_module = self.generate_graph_config()
for node_config in self.graph_config_.nodes:
node_config.set_action('remove')
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To generate the graph for dynamically adding a node; this graph should be different from the running main graph.
    # @param module_stream: the stream(s) of the new node
    # @param inputs: a json style description for the input to be connected with this new node
    # e.g. {'alias': 'layout', 'streams': 1}
    # it means the input of this node will be the node with alias "layout", with 1 stream linked
    # @param outputs: a json style description for the output to be connected with this new node
def dynamic_add(self, module_stream, inputs=None, outputs=None):
###@}
nb_links = 0
add_id = 0
self.graph_config_, pre_module = self.generate_graph_config()
if inputs is not None:
if module_stream.get_node().get_graph().graph_config_ is None:
module_stream.get_node().get_graph(
).graph_config_, tmp = module_stream.get_node().get_graph(
).generate_graph_config()
Log.log(LogLevel.ERROR,
"generate graph config for none graph config")
tail_config = None
for node_config in module_stream.get_node().get_graph(
).graph_config_.nodes:
node_config.set_action('add')
tail_config = node_config
out_link_module_alias = ''
if outputs is not None:
out_link_module_alias = outputs.get('alias', '')
nb_links = outputs.get('streams', 0)
if tail_config is None:
Log.log(LogLevel.ERROR,
"the output node config can't be found")
return False
add_id = self.generate_add_id()
for i in range(nb_links):
stream_config = StreamConfig()
out_link_name = out_link_module_alias + "." + str(
add_id) + "_" + str(i)
stream_config.set_identifier(out_link_name)
stream_config.set_alias(out_link_name)
tail_config.add_output_stream(stream_config)
if inputs is not None:
in_link_module_alias = inputs.get('alias', '')
nb_links = inputs.get('streams', 0)
ncfg = None
for node_config in module_stream.get_node().get_graph(
).graph_config_.nodes:
if len(node_config.get_input_streams()) == 0:
ncfg = node_config
break
if ncfg is None:
Log.log(LogLevel.ERROR, "the input node config can't be found")
return False
add_id = self.generate_add_id()
for i in range(nb_links):
stream_config = StreamConfig()
in_link_name = in_link_module_alias + "." + str(
add_id) + "_" + str(i)
stream_config.set_identifier(in_link_name)
stream_config.set_alias(in_link_name)
ncfg.add_input_stream(stream_config)
graph_config_str = self.graph_config_.dump()
self.exec_graph_ = engine.Graph(
graph_config_str, False, self.option_.get('optimize_graph', True))
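    # Illustrative sketch (alias names, stream counts, option keys and the
    # direct BmfGraph({}) construction are assumptions): describe the new node
    # in a separate graph, declare how it links into the running graph, then
    # apply it via update().
    #   update_graph = BmfGraph({})
    #   new_stream = update_graph.module('my_filter_module', {'para': 'value'})
    #   update_graph.dynamic_add(new_stream,
    #                            inputs={'alias': 'decode1', 'streams': 1},
    #                            outputs={'alias': 'layout', 'streams': 1})
    #   main_graph.update(update_graph)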
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To generate the graph for dynamically resetting node options; this graph should be different from the running main graph.
    # @param option: json style description of the node parameters to be reset
    # e.g. {'alias': 'encode1',
# 'output_path': output_path,
# 'video_params': {
# 'codec': 'h264',
# 'width': 320,
# 'height': 240,
# 'crf': 23,
# 'preset': 'veryfast'
# }
# }
def dynamic_reset(self, option):
###@}
alias_name = option.get('alias', '')
if len(alias_name) == 0:
            Log.log(LogLevel.ERROR, "an alias name is required for reset")
return False
self.graph_ = BmfGraph(option)
reset_node = BmfNode("", option, self)
self.graph_.add_node(self)
self.graph_config_, pre_module = self.generate_graph_config()
for node_config in self.graph_config_.nodes:
node_config.set_action('reset')
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # Final action to apply the dynamic add/remove/reset of nodes to the currently running graph.
# @param update_graph: the graph generated by previous dynamic_add(), dynamic_remove() or dynamic_reset()
def update(self, update_graph):
###@}
if update_graph is None or update_graph.graph_config_ is None:
Log.log(LogLevel.ERROR,
"the graph for update is not created properly")
return False
graph_config_str = update_graph.graph_config_.dump()
self.exec_graph_.update(graph_config_str, False)
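    # Illustrative sketch (alias, option keys and the direct BmfGraph({})
    # construction are assumptions): reset options of a running node from a
    # helper graph and push the change into the running engine.
    #   reset_graph = BmfGraph({})
    #   reset_graph.dynamic_reset({'alias': 'encode1',
    #                              'output_path': 'out.mp4'})
    #   main_graph.update(reset_graph)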
def status(self):
if self.exec_graph_ is not None:
return self.exec_graph_.status()
return None
## @ingroup pyAPI
## @ingroup grphClass
###@{
    # To close the graph, blocking until all the tasks are finished.
def close(self):
###@}
if self.exec_graph_ is not None:
self.exec_graph_.close()
## @ingroup pyAPI
## @ingroup grphClass
###@{
# Force close the running graph even if the whole pipeline in the graph is not finished
def force_close(self):
###@}
if self.exec_graph_ is not None:
self.exec_graph_.force_close()
def generateConfig(self, file_name):
self.graph_config_, graph_pre_module = self.generate_graph_config()
print(self.graph_config_)
self.dump_graph(self.graph_config_)
graph_str = json.dumps(obj=self.graph_config_.__dict__,
ensure_ascii=False,
indent=4,
cls=GraphConfigEncoder)
f = open(file_name, 'w')
f.write(graph_str)
f.close()
    # there can be errors during bmf_graph destruction; it may use the thread_queue thread to handle destruction,
    # which can cause a hang
# def __del__(self):
# if self.exec_graph_ is not None:
# self.exec_graph_.close() | PypiClean |
/Antares_Launcher-1.3.0.tar.gz/Antares_Launcher-1.3.0/doc/source/development_guide.rst | *****************
Development Guide
*****************
This part of the documentation is for developers.
The aim is to have a design doc ready to be shared with any other developer. It should greatly help people getting into the project.
The goal is to give you more information about the module: what exactly it does and why, and finally to give you an overview of the
global architecture of the code.
Advanced Options:
+---------------------------------------+----------------------------------------------------+-------------------------------------+
| `--log-dir=JOB_LOG_DIR` | directory where the logs of the jobs will be found | register inside the directory |
| | | `JOB-LOGS` |
+---------------------------------------+----------------------------------------------------+-------------------------------------+
| `-n N_CPU`, `--n-cores=N_CPU` | number of cores to be used for a single job, | |
| | max 12 | |
+---------------------------------------+----------------------------------------------------+-------------------------------------+
| `--ssh-settings-file=JSON_SSH_CONFIG` | path to the configuration file for the ssh | `JSON_SSH_CONFIG=.\ssh_config.json` |
| | connection | |
+---------------------------------------+----------------------------------------------------+-------------------------------------+
| `-k` | Kill a job on remote server arg: JOBID | |
+---------------------------------------+----------------------------------------------------+-------------------------------------+
*****************
Binary generation
*****************
.. mdinclude:: ./binary_generation.md
**************
Doc generation
**************
.. mdinclude:: ../README.md
*************
Class diagram
*************
Here below, one can find the Class diagram for a global view.
.. image:: ./schema/antares_launcher_diagram.png
:align: center
****************
Sequence diagram
****************
Here is the UML sequence diagram. From that global representation, a more detailed diagram for developers could be derived.
.. image:: ./schema/final_full_antares_flow_chart.png
:align: center
| PypiClean |
/MultiSim-0.10.0.tar.gz/MultiSim-0.10.0/multisim/parts/mixingvalve.py | import numpy as np
import pandas as pd
from ..simenv import SimEnv
from .. import precomp_funs as _pf
class MixingValve(SimEnv):
"""
type: MixingValve class.
The MixingValve **mixes or separates** a flow. The flow on the 2-end-side
    is mixed/separated by the factors n1 and n2, with **n1 + n2 = 1** and
    **n1 >= 0** and **n2 >= 0**.
    When mixing, the temperatures and mass flows of the respective streams are
    mixed by the rule *dm_out = dm_in1 + dm_in2*.
    When separating, one stream is separated into two streams with the
    same temperature and the massflows *dm_in = n1*dm_out1 + n2*dm_out2*.
    The resulting flow of mixing/separating is calculated after each timestep
    and intermediate step, depending on the given control algorithm and the
    measured values in the specified measuring port.
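    A small numeric illustration of the mixing rule (values are arbitrary and
    only meant as a sketch): with dm_in1 = 0.2 kg/s at 40 degC and
    dm_in2 = 0.3 kg/s at 60 degC, the outlet carries dm_out = 0.5 kg/s at a
    mass flow averaged temperature of roughly
    (0.2 * 40 + 0.3 * 60) / 0.5 = 52 degC.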
    The MixingValve class does not contain a differential method, as it only
    passes the values of the part connected to its 'in'-port(s) to its
    'out'-port(s) and the values of the part connected to its 'out'-port(s) to
    its 'in'-port(s), only applying the mixing/separating. Thus it is
    not involved in solving the equations using the specified solver algorithm.
    Parameters:
    -----------
    name: string
        Name of the part.
    mix_or_split: string, default: 'mix'
        Specifies if the MixingValve is supposed to mix or separate streams.
        It can be set to 'mix' for mixing or 'split' for separating. When
        'mix' is set, there are two inlet ports 'in1' and 'in2' and one outlet
        port 'out' which have to be connected. When 'split' is set, there is
        one inlet port 'in' and two outlet ports 'out1' and 'out2' which have
        to be connected.
"""
def __init__(self, name, master_cls, mix_or_split='mix', **kwargs):
self._models = master_cls
self.constr_type = 'Valve_3w' # define construction type
base_err = ( # define leading base error message
'While adding {0} `{1}` to the simulation '
'environment, the following error occurred:\n'
).format(self.constr_type, str(name))
arg_err = ( # define leading error for missing/incorrect argument
'Missing argument or incorrect type/value: {0}\n\n'
)
self._base_err = base_err # save to self to access it in methods
self._arg_err = arg_err # save to self to access it in methods
self.name = name
self._unit = '[%]' # unit of the actuator
self.part_id = self._models.num_parts - 1
self.kind = mix_or_split
# save smallest possible float number for avoiding 0-division:
self._tiny = self._models._tiny
# even though this part is not using numeric solving, number of
# gridpoints are specified anyways:
self.num_gp = 3
# preallocate grids:
self.T = np.zeros(3, dtype=np.float64)
self._T_init = np.zeros_like(self.T) # init temp for resetting env.
# preallocate T ports array (here only used for dimension checking)
self._T_port = np.zeros_like(self.T)
self.dm = np.zeros(3)
# self.U = np.zeros(3)
# preallocate grids for port connection parameters
        # cross section area of wall of connected pipe, fluid cross section
        # area, gridspacing and lambda of wall of connected pipe
self._A_wll_conn_p = np.zeros_like(self._T_port)
self._A_fld_conn_p = np.zeros_like(self._T_port)
self._port_gsp = np.full_like(self._T_port, self._tiny)
self._lam_wll_conn_p = np.full_like(self._T_port, self._tiny)
self._lam_port_fld = np.full_like(self._T_port, self._tiny)
# port_definition (first, second and last array element):
self.port_num = 3
# Index to own value array to get values of own ports, meaning if I
# index a FLATTENED self.T.flat with self._port_own_idx, I need to
# get values accoring to the order given in self.port_names.
# That is, this must yield the value of the cell in self.T, which is
# belonging to the port 'in':
# self.T.flat[self._port_own_idx[self.port_names.index('in')]]
self._port_own_idx = np.array(
(0, 1, self.T.shape[0] - 1), dtype=np.int32
)
self._port_own_idx_2D = self._port_own_idx # save for compatibility
"""port_array"""
self.port_ids = np.array((), dtype=np.int32)
# set to read-only to avoid manipulation, same for port_name by using
# tuple:
# self._port_own_idx.flags.writeable = False
# preallocate port values to avoid allocating in loop:
self._port_vals = np.zeros(self.port_num)
# preallocate list to mark ports which have already been solved in
# topology (to enable creating subnets)
self._solved_ports = list()
# port setup depending on mixer or separator valve
# mixing or separating factors for each port are saved in the dict
# port_factors, with the factor 1 being a tuple (can't be changed!):
if mix_or_split == 'mix':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('in', 'in', 'out'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port in1 # port in2
) # port out
elif mix_or_split == 'split':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('out', 'out', 'in'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port out1 # port out2
) # port in
else:
            err_str = 'mix_or_split has to be set to \'mix\' or \'split\'!'
raise ValueError(err_str)
# make dict for easy lookup of portfactors with memory views:
self.port_factors = dict(
{
'A': self.pf_arr[0:1],
'B': self.pf_arr[1:2],
'AB': self.pf_arr[2:3],
}
)
# construct partname+portname to get fast access to own ports:
dummy_var = list(self.port_names)
for i in range(self.port_num):
dummy_var[i] = self.name + ';' + dummy_var[i]
self._own_ports = tuple(dummy_var)
# preallocate result grids with one row. An estimate of total rows will
# be preallocated before simulation start in initialize_sim. massflow
# grid is preallocated in set_initial_cond:
self.res = np.zeros((1, self.port_num))
self.res_dm = np.zeros((2, self.port_num))
# set if type has to be solved numeric:
self.solve_numeric = False
# if port arrays shall be collapsed to amount of ports to improve speed
self.collapse_arrays = False
self._collapsed = False # bool checker if already collapsed
# determine if part is treated as hydraulic compensator
self.hydr_comp = False
# if part can be a parent part of a primary flow net:
self._flow_net_parent = False
# add each flow channel of part to hydr_comps (will be removed once its
# massflow solving method is completely integrated in flow_net.
# remaining parts except real hydr comps will be used to generate an
# error):
self._models._hydr_comps.add(self.name)
# if the topology construction method has to stop when it reaches the
# part to solve more ports from other sides before completely solving
# the massflow of it. This will be set to false as soon as only one
# port to solve is remaining:
self.break_topology = False
# count how many ports are still open to be solved by topology. If
# break topology is True, this is used to set it to False if 1 is
# reached.
self._cnt_open_prts = self.port_num # not required here
self._port_heatcond = True # if heatcond. over ports is enabled
# determine if part has the capability to affect massflow (dm) by
# diverting flow through ports or adding flow through ports:
self.affect_dm = True
# if the massflow (dm) has the same value in all cells of the part
# (respectively in each flow channel for parts with multiple flows):
self.dm_invariant = False
# if the part has multiple separated flow channels which do NOT mix
# (like a heat exchanger for exampe):
self.multiple_flows = False
# bool checker if flows were updated in update_flownet to avoid
# processing flows in get_diff each time (array for referencing):
self._process_flows = np.array([True])
# if the part CAN BE controlled by the control algorithm:
self.is_actuator = True
self._actuator_CV = self.pf_arr[:] # set array to be controlled
self._actuator_CV_name = 'port_opening'
# if the part HAS TO BE controlled by the control algorithm:
self.control_req = True
# if the part needs a special control algorithm (for parts with 2 or
# more controllable inlets/outlets/...):
self.actuator_special = True
# initialize bool if control specified:
self.ctrl_defined = False
# if the parts get_diff method is solved with memory views entirely and
# thus has arrays which are extended by +2 (+1 at each end):
self.enlarged_memview = False
# if the part has a special plot method which is defined within the
# part's class:
self.plot_special = True
# save initialization status:
self.initialized = False
# save memory address of T
self._memadd_T = self.T.__array_interface__['data'][0]
# preallocate massflow grid:
if self.dm_invariant:
# if there is the same massflow everywhere in the part
self.dm = np.zeros(1)
else:
self.dm = np.zeros(self.port_num)
# and also preallocate grid for massflow through ports:
if not self.hydr_comp:
# if part is no hydraulic compensator, dm ports grid is simply a
# memory view to massflow grid
self._dm_port = self.dm[:]
self._dm_io = self.dm[:]
else:
# if part is a hydraulic compensator, dm ports is separate from dm
self._dm_port = np.zeros_like(self.T)
self._dm_io = np.zeros_like(self.T)
# save all kind of info stuff to dicts:
# topology info:
self.info_topology = dict()
# IMPORTANT: THIS VARIABLE **MUST NOT BE INHERITED BY SUB-CLASSES**!!
# If sub-classes are inherited from this part, this bool checker AND
# the following variables MUST BE OVERWRITTEN!
# ist the diff function fully njitted AND are all input-variables
# stored in a container?
self._diff_fully_njit = False
# self._diff_njit = pipe1D_diff # handle to njitted diff function
# input args are created in simenv _create_diff_inputs method
def init_part(self, *, port_A_init, **kwargs):
"""
Initialize 3-way valve with specifications, material and initial
conditions. Initial condition for a 3-way valve is the relative port
opening of port A in values from 0...1.
"""
# get material properties and pipe specifications:
self._get_specs_n_props(**kwargs)
# gridspacing is saved in an array of length port_num to save the
# gridspacing of connected parts for heat flux calculation. this array
# is pre-filled with an estimate of 1.1 times the DN outer diameter but
# will be overwritten and filled by get_port_connections() method with
# connected part values, if any numeric parts are connected.
# therefore get the info topology key:
if 'in' in self.info_topology:
key = 'in'
else:
key = 'all_ports'
self.grid_spacing = np.full_like(
self._T_port, self.info_topology[key]['pipe_specs']['d_o'] * 1.1
)
# delete used kwargs:
del kwargs['material']
del kwargs['pipe_specs']
# assert in and out values
err_str = (
'Only values `0 <= port_A_init <= 1` are allowed as '
'initial values for mixing or splitting valves!'
)
assert 0 <= port_A_init <= 1, err_str
# set starting values to port factors:
if self.kind == 'mix':
self.pf_arr[0] = port_A_init
self.pf_arr[1] = 1 - port_A_init
else:
# self.pf_arr[1] = port_A_init
# self.pf_arr[2] = 1 - port_A_init
""" TODO: change to same idx mix split"""
self.pf_arr[0] = port_A_init
self.pf_arr[1] = 1 - port_A_init
self._pf_init = port_A_init # backup for resetting
# # if set to steady state:
# if kwargs:
# if 'set_steadystate' in kwargs:
# assert (type(kwargs['set_steadystate']) ==
# bool), ('\'set_steadystate\' can only be True or '
# 'False!')
# self.ctrl_defined = kwargs['set_steadystate']
# if valve has to be controlled (default) and thus is NOT set to
# static, it needs a lower and upper limit for the values to set:
if 'no_control' not in kwargs or (
'no_control' in kwargs and kwargs['no_control'] is False
):
err_str = (
self._base_err
+ self._arg_err.format('lower_limit, upper_limit')
+ 'The part was set to be an actuator and need a control with '
'`no_control=False`, thus `lower_limit` and `upper_limit` '
'in {0} have to be passed to clip the controller action on '
'the actuator to the limits.\n'
'The limits have to be given as integer or float values with '
'`lower_limit < upper_limit`.'
).format(self._unit)
assert 'lower_limit' in kwargs and 'upper_limit' in kwargs, err_str
self._lims = np.array( # set limits to array
[kwargs['lower_limit'], kwargs['upper_limit']],
dtype=np.float64,
)
self._llim = self._lims[0] # also save to single floats
self._ulim = self._lims[1] # also save to single floats
assert 0 <= self._lims[0] < self._lims[1] <= 1, (
err_str + ' For Valve_3w limits are additionally restricted '
'to `0 <= lower_limit < upper_limit <= 1`.'
)
# if part does not need control (static or given values):
elif 'no_control' in kwargs and kwargs['no_control'] is True:
# if part is static:
if 'const_val' in kwargs:
# check for correct type:
err_str = (
'If valve ' + self.name + ' is set to static with '
'`const_val=array`, array has to be a 1D numpy '
'array with 2 values! To set array values over '
'a predefined timespan, use `val_given=time_array` '
'instead!'
)
assert type(kwargs['const_val']) == np.ndarray and kwargs[
'const_val'
].shape == (2,), err_str
                self.pf_arr[0:2] = kwargs['const_val']
raise ValueError('with const val reset to init not working')
# delete used kwargs to enable checking at the end:
del kwargs['const_val']
elif 'val_given' in kwargs:
# check for correct type:
err_str = (
'If valve ' + self.name + ' is set with predefined '
'values over a timespan, `val_given=time_array` '
'has to be given! `time_array` has to be a Pandas '
'Series with the index column filled with '
'timestamps which have to outlast the simulation '
'timeframe! The valve setting to set has to be '
'given in the first column (index 0) for branch A '
'and in the second column (index 1) for branch B. '
'To set a constant valve opening, use `const_val` '
'instead!'
)
err_str = (
'A check for pandas series needs to be here,'
'also checking the timestamp! The check for the '
'correct duration of the timestamp needs to be '
'done during sim init!'
)
assert (
type(kwargs['val_given']) == pd.core.series.Series
), err_str
raise TypeError('Timeindex etc. not yet defined!!!')
# delete used kwargs to enable checking at the end:
del kwargs['val_given']
self.val_given = True
self.control_req = False
self.ctrl_defined = True
else:
err_str = (
'If `no_control=True` is defined for valve '
+ self.name
+ ', the valve opening has either to be'
' given with `const_val` as a constant opening or '
'with `val_given` as time dependent Panda Series!'
)
assert (
'const_val' not in kwargs and 'val_given' not in kwargs
), err_str
else:
err_str = (
'An error during the initialization of '
+ self.name
+ ' occurred! Please check the spelling and type of all '
'arguments passed to the parts `set_initial_cond()`!'
)
# construct list of differential input argument names IN THE CORRECT
# ORDER!!!
# regex to remove strings: [a-zA-Z_]*[ ]*=self.
self._input_arg_names_sorted = [
'ports_all',
'_port_link_idx',
'_dm_io',
'T',
]
# update init status:
self.initialized = True
def _reset_to_init_cond(self):
# set starting values to port factors:
if self.kind == 'mix':
self.pf_arr[0] = self._pf_init
self.pf_arr[1] = 1 - self._pf_init
else:
# self.pf_arr[1] = port_A_init
# self.pf_arr[2] = 1 - port_A_init
""" TODO: change to same idx mix split"""
self.pf_arr[0] = self._pf_init
self.pf_arr[1] = 1 - self._pf_init
def _get_flow_routine(
self, port, parent_port=None, subnet=False, **kwargs
):
"""
Returns the massflow calculation routine for the port of the current
part to the topology construction. The massflow calculation routine has
to look like:
routine = (memory_view_to_target_port,
operation_id,
memory_view_to_port1, memory_view_to_port2, ...)
with target_port being the port which has to be calculated and port1
and port2 being the other/source ports which **don't** have to be
calculated with this routine! These source ports **must be given**
when the routine is called.
Parameters:
-----------
port : string
Port name of the port which shall be calculated (target port).
"""
# get topology connection conditions (target index, source part/port
# identifiers, source index and algebraic sign for passed massflow):
trgt_idx, src_part, src_port, src_idx, alg_sign = self._get_topo_cond(
port, parent_port
)
# 3wValve, no ports solved yet
if self._cnt_open_prts == 3:
# The following connection requirement(s) have to be checked:
# 1: all ports (A, B and AB) of a mixing valve MUST NOT be on
# the pressure side of a pump.
# 2: entry ports (A and B) of a mixing valve MUST NOT be on the
# suction side of a pump. This means a mixing valve can only
# be solved coming from port AB.
# 3: all ports (A, B and AB) of a splitting valve MUST NOT be
# on the suction side of a pump.
# 4: exit ports (A and B) of a splitting valve MUST NOT be on
# the pressure side of a pump. This means a splitting valve
# can only be solved coming from port AB.
# 5: two parts of the non numeric solving kind MUST NOT be
# connected directly. At least one numeric part has to be in
# between.
# check connection requirement(s):
# prepare error strings:
err_str1 = (
'Part ' + self.name + ' is directly connected to '
'the pressure side of a pump. Mixing valves may '
'only be connected to the suction side of a pump '
'with port AB!'
)
err_str2 = (
'Part ' + self.name + ' is connected to the '
'suction side of a pump with port A or B. '
'Mixing valves may only be connected to the '
'suction side of a pump with port AB!'
)
err_str3 = (
'Part ' + self.name + ' is directly connected to the '
'suction side of a pump. Splitting valves may only be '
'connected to the pressure side of a pump with port '
'AB!'
)
err_str4 = (
'Part ' + self.name + ' is connected to the '
'pressure side of a pump with port A or B. '
'Splitting valves may only be connected to the '
                'pressure side of a pump with port AB!'
)
if self.kind == 'mix':
# assert condition 1:
assert kwargs['pump_side'] != 'pressure', err_str1
# assert condition 2:
assert port == 'AB', err_str2
else:
# assert condition 3:
assert kwargs['pump_side'] != 'suction', err_str3
# assert condition 4:
assert port == 'AB', err_str4
# assert condition 5:
err_str5 = (
'Non numeric Part ' + self.name + ' is connected to '
'non numeric part ' + src_part + '. Two non '
'numeric parts must not be connected directly! '
'Insert a numeric part in between to set up a '
'correct topology!'
)
assert self._models.parts[src_part].solve_numeric, err_str5
# if valve is getting the massflow from another part (then port
# AB is solved as the first port), it can simply be copied
# from it: operation id 0 (positive) or - 1 (negative)
if alg_sign == 'positive':
operation_id = 0
else:
operation_id = -1
# add operation instructions to tuple (memory view to target
# massflow array cell, operation id and memory view source port's
# massflow array cells)
op_routine = tuple()
# construct memory view to target massflow array cell and append to
# op routine tuple
op_routine += (self._dm_io.reshape(-1)[trgt_idx],)
# add operation id:
op_routine += (operation_id,)
# add memory view to source massflow array cell:
op_routine += (
self._models.parts[src_part]._dm_io.reshape(-1)[src_idx],
)
else:
# get massflow calculation routine for the case that port
# A or B needs to be solved using the massflow from port AB
# and valve opening (stored in port factors array).
# operation id of a 3w valve for this case is ALWAYS 3, since
# AB must be given and A or B can be calculated by multiplying
# the respective port opening factor with AB. no negative
# of product needed, since AB positive massflow sign is
# contrary to A and B
operation_id = 3
# get source port index and create memory view to it:
src1_idx_start = self._port_own_idx[self.port_names.index('AB')]
src1_idx = slice(src1_idx_start, src1_idx_start + 1)
# second source "port" index is the index to the port factor
# array cell of port:
src2_idx_start = self.port_names.index(port)
src2_idx = slice(src2_idx_start, src2_idx_start + 1)
# add operation instructions to tuple (memory view to target
# massflow array cell, operation id, memory view to the
# source port's massflow array cell and memory view to the
# TARGET PORT'S port factor array cell):
op_routine = (
self._dm_io.reshape(-1)[trgt_idx],
operation_id,
self._dm_io.reshape(-1)[src1_idx],
self.pf_arr[src2_idx],
)
# update solved ports list and counter stop break:
self._solved_ports.append(port)
self._cnt_open_prts = self.port_num - len(self._solved_ports)
# this stays always False for 3w Valve!
self.break_topology = False
# remove part from hydr_comps if completely solved:
if self._cnt_open_prts == 0:
self._models._hydr_comps.remove(self.name)
# save topology parameters to dict for easy information lookup:
net = 'Subnet' if subnet else 'Flownet'
operation_routine = (
'Negative of sum'
if operation_id == -1
else 'Sum'
if operation_id == 1
else 'Pass on value'
if operation_id == 0
else ('Multiplication ' 'with port factor')
if operation_id == 3
else 'Error'
)
src_part = src_part if src_part is not None else self.name
source_ports = (
tuple(('AB', 'pf_arr[' + port + ']'))
if operation_id == 3
else src_port
if operation_id == 0
else tuple(set(self.port_names) - set(port))
)
# add port dict for current port and fill it:
if port not in self.info_topology:
self.info_topology[port] = dict()
self.info_topology[port].update(
{
'Net': net,
'Massflow': self._dm_io.reshape(-1)[trgt_idx],
'Calculation routine': operation_routine,
'Source part': src_part,
'Source port(s)': source_ports,
'Connected part': (
self._models.port_links[self.name + ';' + port].split(';')[
0
]
),
'Connected port': (
self._models.port_links[self.name + ';' + port].split(';')[
1
]
),
'Parent pump/part': kwargs['parent_pump'],
'Pump side': kwargs['pump_side'],
}
)
return op_routine
def _process_cv(self, ctrl_inst):
# 3w_valve_direct!
# n1 value (port A) with clipping to ]llim,ulim[:
self.pf_arr[0] = (
self._llim
if ctrl_inst.cv < self._llim
else self._ulim
if ctrl_inst.cv > self._ulim
else ctrl_inst.cv
)
# n2 value (port B):
self.pf_arr[1] = 1 - self.pf_arr[0]
def solve(self, timestep):
"""
Mixing Valve solve method:
--------------------------
The mass flow averaged mean of the values of the other parts ports
connected to the 'in1' and 'in2' ports is passed to the 'out'
port, taking the arithmetic mean of the in-ports temperatures to
get approximate material properties at the out port. the value of
the 'out' port is passed to 'in1' and 'in2' unchanged.
This is approximately correct, as the temperature values affect the
heat conduction and in-/ or outflowing massflow of connected parts
while the valve part itself is approximated as infinitely small
containing no mass.
Splitting Valve solve method:
-----------------------------
The arithmetic mean of the values of the other parts ports
connected to the 'out1' and 'out2' ports is passed to the 'in'
port, while the value of the 'in' port is passed to 'out1' and
'out2' unchanged.
This is approximately correct, as the temperature values affect the
heat conduction and in-/ or outflowing massflow of connected parts
while the valve part itself is approximated as infinitely small
containing no mass.
"""
# save massflow to results grid:
self.res_dm[self.stepnum] = self._dm_io
# get kind and then call numba jitted function (calling numba function
# which also does the selecting is SLOWER!)
if self.kind == 'mix':
# numba compiled function to solve mixing (includes getting ports)
_pf.solve_mix(
self._models.ports_all,
self._port_link_idx,
self._dm_io,
self.T,
)
else:
# numba compiled function to solve splitting(incl. getting ports)
_pf.solve_split(
self._models.ports_all, self._port_link_idx, self.T
)
# copy results to results grid:
self.res[self.stepnum] = self.T
def draw_part(self, axis, timestep, draw):
"""
Draws the current part in the plot environment, using vector
transformation to rotate the part drawing.
"""
# get and calculate all the information if not drawing (save all to a
# hidden dict):
if not draw:
# create hidden plot dict:
__pt = dict()
# get part start position from plot info dict:
__pt['pos_start'] = self.info_plot['path'][0]['start_coordinates']
# assert that orientation is in info dict and correct type:
orient_list = ['left', 'middle', 'right']
err_str = (
'For plotting of 3w-valves the orientation of the '
'valve must be given! Please pass the orientation as '
'`orientation=\'string\'` to the 3w-valve\'s '
'`set_plot_shape()`-method with string being one of '
'the following: ' + str(orient_list)
)
assert 'orientation' in self.info_plot, err_str
assert self.info_plot['orientation'] in orient_list, err_str
# get orientation:
__pt['orient'] = self.info_plot['orientation']
# get direction vector from info dict:
__pt['vec_dir'] = self.info_plot['path'][0]['vector']
# get part rotation angle from the drawing direction vector (vector
# from part start to part end in drawing):
__pt['rot_angle'] = self._models._angle_to_x_axis(__pt['vec_dir'])
# get length of part:
__pt['vec_len'] = np.sqrt(
(__pt['vec_dir'] * __pt['vec_dir']).sum()
)
# construct all drawing vectors for zero-rotation and one port on
# the left side, one port on the bottom side and one port on the
# right side (standard orientation 'left'). all vectors start from
# the center of the part which is given
# by the end position of vertex_coords.
# construct left port (upper vertice and lower vertice):
__pt['vec_l_u'] = np.array([-1, 0.5]) * __pt['vec_len']
__pt['vec_l_l'] = np.array([-1, -0.5]) * __pt['vec_len']
# construct right port (upper vertice and lower vertice):
__pt['vec_r_u'] = np.array([1, 0.5]) * __pt['vec_len']
__pt['vec_r_l'] = np.array([1, -0.5]) * __pt['vec_len']
# construct middle port (left vertice and right vertice):
__pt['vec_m_l'] = np.array([-0.5, -1]) * __pt['vec_len']
__pt['vec_m_r'] = np.array([0.5, -1]) * __pt['vec_len']
# get rotation angle due to orientation (to x unit vector (1 0)):
if __pt['orient'] == 'left':
# standard rotation
__pt['orient_angle'] = 0
elif __pt['orient'] == 'right':
# flipped standard rotation
__pt['orient_angle'] = 180 / 180 * np.pi
elif __pt['orient'] == 'middle':
# middle port on the left
__pt['orient_angle'] = -90 / 180 * np.pi
# get total rotation angle:
__pt['rot_angle'] += __pt['orient_angle']
# rotate all vectors:
__pt['vec_l_u'] = self._models._rotate_vector(
__pt['vec_l_u'], __pt['rot_angle']
)
__pt['vec_l_l'] = self._models._rotate_vector(
__pt['vec_l_l'], __pt['rot_angle']
)
__pt['vec_r_u'] = self._models._rotate_vector(
__pt['vec_r_u'], __pt['rot_angle']
)
__pt['vec_r_l'] = self._models._rotate_vector(
__pt['vec_r_l'], __pt['rot_angle']
)
__pt['vec_m_l'] = self._models._rotate_vector(
__pt['vec_m_l'], __pt['rot_angle']
)
__pt['vec_m_r'] = self._models._rotate_vector(
__pt['vec_m_r'], __pt['rot_angle']
)
# construct all points:
__pt['pos_center'] = __pt['pos_start'] + __pt['vec_dir']
__pt['pos_l_u'] = __pt['pos_center'] + __pt['vec_l_u']
__pt['pos_l_l'] = __pt['pos_center'] + __pt['vec_l_l']
__pt['pos_r_u'] = __pt['pos_center'] + __pt['vec_r_u']
__pt['pos_r_l'] = __pt['pos_center'] + __pt['vec_r_l']
__pt['pos_m_l'] = __pt['pos_center'] + __pt['vec_m_l']
__pt['pos_m_r'] = __pt['pos_center'] + __pt['vec_m_r']
# construct x- and y-grid for lines (from center to l_u to l_l to
# r_u to r_l to center to m_l to m_r to center):
__pt['x_grid'] = np.array(
[
__pt['pos_center'][0],
__pt['pos_l_u'][0],
__pt['pos_l_l'][0],
__pt['pos_r_u'][0],
__pt['pos_r_l'][0],
__pt['pos_center'][0],
__pt['pos_m_l'][0],
__pt['pos_m_r'][0],
__pt['pos_center'][0],
]
)
__pt['y_grid'] = np.array(
[
__pt['pos_center'][1],
__pt['pos_l_u'][1],
__pt['pos_l_l'][1],
__pt['pos_r_u'][1],
__pt['pos_r_l'][1],
__pt['pos_center'][1],
__pt['pos_m_l'][1],
__pt['pos_m_r'][1],
__pt['pos_center'][1],
]
)
# replace port coordinates since they are wrong for more complex
# parts:
if __pt['orient'] == 'left':
# get middle and right port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_m_l'] + __pt['vec_m_r']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_r_u'] + __pt['vec_r_l']) / 2
)
elif __pt['orient'] == 'middle':
# get left and right port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_l_u'] + __pt['vec_l_l']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_r_u'] + __pt['vec_r_l']) / 2
)
elif __pt['orient'] == 'right':
# get left and middle port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_l_u'] + __pt['vec_l_l']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_m_l'] + __pt['vec_m_r']) / 2
)
# get the free ports (the ports where the position is not coming
# from):
free_ports = list(self.port_names)
free_ports.remove(self.info_plot['auto_connection']['own_port'])
# now get the free ports depending on invert status:
if 'invert' not in self.info_plot or not self.info_plot['invert']:
p1 = free_ports[0]
p2 = free_ports[1]
elif self.info_plot['invert']:
p1 = free_ports[1]
p2 = free_ports[0]
# set them to the ports:
self.info_plot[p1]['coordinates'] = __pt['p1_coords']
self.info_plot[p2]['coordinates'] = __pt['p2_coords']
# get the connected part;ports:
# p1_conn_p = self._models.port_links[self.name + ';' + free_ports[0]]
# p2_conn_p = self._models.port_links[self.name + ';' + free_ports[1]]
# # split them up:
# p1_conn_part, p1_conn_port = p1_conn_p.split(';')
# p2_conn_part, p2_conn_port = p2_conn_p.split(';')
# # now run their set plot shape with that new information again:
# NetPlotter.set_plot_shape(p1_conn_part, p1_conn_port,
# self._models.parts[p1_conn_part].
# info_plot['vertex_coordinates'],
# linewidth=self._models.parts[p1_conn_part].
# info_plot['path_linewidth'])
# NetPlotter.set_plot_shape(p2_conn_part, p2_conn_port,
# self._models.parts[p2_conn_part].
# info_plot['vertex_coordinates'],
# linewidth=self._models.parts[p2_conn_part].
# info_plot['path_linewidth'])
# get annotation text properties:
# get offset vector depending on rotation of pump to deal with
# none-quadratic form of textbox to avoid overlapping. only in the
# range of +/-45° of pos. and neg. x-axis an offset vec length of
# -20 is allowed, else -30:
offset = (
20
if (
0 <= __pt['rot_angle'] <= 45 / 180 * np.pi
or 135 / 180 * np.pi
<= __pt['rot_angle']
<= 225 / 180 * np.pi
or __pt['rot_angle'] >= 315 / 180 * np.pi
)
else 30
)
# get text offset from bottom point of pump by vector rotation:
__pt['txt_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
__pt['txtA_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
__pt['txtB_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
# finally save hidden dict to self:
self.__pt = __pt
# only draw if true:
if draw:
# add lines to plot
axis.plot(
self.__pt['x_grid'],
self.__pt['y_grid'],
color=[0, 0, 0],
linewidth=self.info_plot['path_linewidth'],
zorder=5,
)
# construct name and massflow strings for ports A and B:
txt = self.name
txtA = (
'A'
+ r'\n$\dot{m} = $'
+ str(self.res_dm[timestep, 0])
+ r'$\,$kg/s'
)
txtB = (
'B'
+ r'\n$\dot{m} = $'
+ str(self.res_dm[timestep, 1])
+ r'$\,$kg/s'
)
axis.annotate(
txt,
xy=(self.__pt['pos_center']),
xytext=self.__pt['txt_offset'],
textcoords='offset points',
ha='center',
va='center',
)
axis.annotate(
txtA,
xy=(self.info_plot['A']['coordinates']),
xytext=self.__pt['txtA_offset'],
textcoords='offset points',
ha='center',
va='center',
)
axis.annotate(
txtB,
xy=(self.info_plot['B']['coordinates']),
xytext=self.__pt['txtB_offset'],
textcoords='offset points',
ha='center',
va='center',
)
# construct name and massflow string:
# txt = (self.name + '\n$\dot{m} = $' + str(self.res_dm[timestep][0])
# + 'kg/s')
# get offset vector depending on rotation of pump to deal with
# none-quadratic form of textbox to avoid overlapping. only in the
# range of +/-45° of pos. and neg. x-axis an offset vec length of -20
# is allowed, else -30:
# offset = (-20 if (0 <= rot_angle <= 45/180*np.pi
# or 135/180*np.pi <= rot_angle <= 225/180*np.pi
# or rot_angle >= 315/180*np.pi) else -30)
# # get text offset from bottom point of pump by vector rotation:
# txt_offset = tuple(self._models._rotate_vector(np.array([0, offset]),
# rot_angle))
# axis.annotate(txt, xy=(pos_bot),
# xytext=txt_offset, textcoords='offset points',
# ha='center', va='center') | PypiClean |
/MambuPy-2.0.0b22.tar.gz/MambuPy-2.0.0b22/mambupy/rest1to2/mambubranch.py | from mambupy.rest.mambubranch import MambuBranch as MambuBranch1
from mambupy.rest.mambubranch import MambuBranches as MambuBranches1
from mambupy.rest1to2.mambustruct import MambuStruct
from mambupy.rest.mamburestutils import MambuStructIterator
class MambuBranch(MambuStruct, MambuBranch1):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def preprocess(self):
from mambupy.rest1to2 import mambuuser
self.mambuusersclass = mambuuser.MambuUsers
try:
self.address = self.addresses[0]
for name, item in self.addresses[0].items():
try:
self.addresses[0][name] = item.strip()
self.address[name] = item.strip()
except AttributeError:
pass
except (IndexError, AttributeError):
pass
def postprocess(self):
try:
for name, item in self.addresses[0].items():
try:
if name == "indexInList":
continue
self.addresses[0][name] = str(self.addresses[0][name])
self.address[name] = str(self.address[name])
except AttributeError:
pass
except (IndexError, AttributeError):
pass
def setUsers(self, *args, **kwargs):
try:
self.mambuusersclass
except AttributeError:
from .mambuuser import MambuUsers
self.mambuusersclass = MambuUsers
usrs = [
us
for us in self.mambuusersclass(branchId=self["id"], *args, **kwargs)
if us["userState"] == "ACTIVE"
]
self["users"] = usrs
return 1
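# Illustrative sketch (the branch id is an assumption): fetch a branch and
# attach its active users to it.
#   branch = MambuBranch(entid='myBranchId')
#   branch.setUsers()
#   print(branch['users'])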
class MambuBranches(MambuStruct, MambuBranches1):
def __init__(self, *args, **kwargs):
if "mambuclassname" in kwargs:
mambuclassname = kwargs.pop("mambuclassname")
else:
mambuclassname = "MambuBranch"
if "mambuclass1" in kwargs:
mambuclass1 = kwargs.pop("mambuclass1")
else:
mambuclass1 = MambuBranch
super().__init__(
mambuclassname=mambuclassname,
mambuclass1=mambuclass1, *args, **kwargs)
def __iter__(self):
return MambuStructIterator(self.wrapped2)
def __repr__(self):
return super().__repr__() | PypiClean |
/FDmitry_Server-1.0.1.tar.gz/FDmitry_Server-1.0.1/server/server/database.py | from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Text
from sqlalchemy.orm import mapper, sessionmaker
import datetime
class ServerStorage:
'''
    Wrapper class for working with the server database.
    Uses an SQLite database, implemented with SQLAlchemy ORM
    using the classical mapping approach.
'''
class AllUsers:
        '''Class mapping the table of all users.'''
def __init__(self, username, passwd_hash):
self.name = username
self.last_login = datetime.datetime.now()
self.passwd_hash = passwd_hash
self.pubkey = None
self.id = None
class ActiveUsers:
        '''Class mapping the table of active users.'''
def __init__(self, user_id, ip_address, port, login_time):
self.user = user_id
self.ip_address = ip_address
self.port = port
self.login_time = login_time
self.id = None
class LoginHistory:
        '''Class mapping the login history table.'''
def __init__(self, name, date, ip, port):
self.id = None
self.name = name
self.date_time = date
self.ip = ip
self.port = port
class UsersContacts:
        '''Class mapping the table of user contacts.'''
def __init__(self, user, contact):
self.id = None
self.user = user
self.contact = contact
class UsersHistory:
        '''Class mapping the table of user activity statistics.'''
def __init__(self, user):
self.id = None
self.user = user
self.sent = 0
self.accepted = 0
def __init__(self, path):
        # Create the database engine
self.database_engine = create_engine(
f'sqlite:///{path}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
        # Create the MetaData object
self.metadata = MetaData()
        # Create the users table
users_table = Table('Users', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True),
Column('last_login', DateTime),
Column('passwd_hash', String),
Column('pubkey', Text)
)
        # Create the active users table
active_users_table = Table(
'Active_users', self.metadata, Column(
'id', Integer, primary_key=True), Column(
'user', ForeignKey('Users.id'), unique=True), Column(
'ip_address', String), Column(
'port', Integer), Column(
'login_time', DateTime))
        # Create the login history table
user_login_history = Table('Login_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', ForeignKey('Users.id')),
Column('date_time', DateTime),
Column('ip', String),
Column('port', String)
)
        # Create the user contacts table
contacts = Table('Contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('contact', ForeignKey('Users.id'))
)
        # Create the user statistics table
users_history_table = Table('History', self.metadata,
Column('id', Integer, primary_key=True),
Column('user', ForeignKey('Users.id')),
Column('sent', Integer),
Column('accepted', Integer)
)
        # Create the tables
self.metadata.create_all(self.database_engine)
        # Create the mappings
mapper(self.AllUsers, users_table)
mapper(self.ActiveUsers, active_users_table)
mapper(self.LoginHistory, user_login_history)
mapper(self.UsersContacts, contacts)
mapper(self.UsersHistory, users_history_table)
        # Create the session
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
        # If the active users table contains any records, they must be
        # removed
self.session.query(self.ActiveUsers).delete()
self.session.commit()
def user_login(self, username, ip_address, port, key):
        '''
        Method executed when a user logs in; records the login in the database
        and updates the user's public key if it has changed.
        '''
        # Query the users table to check whether a user with this
        # name exists
rez = self.session.query(self.AllUsers).filter_by(name=username)
        # If the username is already present in the table, update the last login
        # time and check the key. If the client has sent a new key,
        # save it.
if rez.count():
user = rez.first()
user.last_login = datetime.datetime.now()
if user.pubkey != key:
user.pubkey = key
        # If not found, raise an exception
else:
            raise ValueError('User is not registered.')
        # Now we can create a record in the active users table about this
        # login.
new_active_user = self.ActiveUsers(
user.id, ip_address, port, datetime.datetime.now())
self.session.add(new_active_user)
        # and save it to the login history
history = self.LoginHistory(
user.id, datetime.datetime.now(), ip_address, port)
self.session.add(history)
        # Save the changes
self.session.commit()
def add_user(self, name, passwd_hash):
        '''
        User registration method.
        Takes a name and a password hash, and creates a record in the statistics table.
        '''
user_row = self.AllUsers(name, passwd_hash)
self.session.add(user_row)
self.session.commit()
history_row = self.UsersHistory(user_row.id)
self.session.add(history_row)
self.session.commit()
def remove_user(self, name):
        '''Method that removes a user from the database.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
self.session.query(self.LoginHistory).filter_by(name=user.id).delete()
self.session.query(self.UsersContacts).filter_by(user=user.id).delete()
self.session.query(
self.UsersContacts).filter_by(
contact=user.id).delete()
self.session.query(self.UsersHistory).filter_by(user=user.id).delete()
self.session.query(self.AllUsers).filter_by(name=name).delete()
self.session.commit()
def get_hash(self, name):
        '''Method that returns the user's password hash.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.passwd_hash
def get_pubkey(self, name):
        '''Method that returns the user's public key.'''
user = self.session.query(self.AllUsers).filter_by(name=name).first()
return user.pubkey
def check_user(self, name):
        '''Method that checks whether a user exists.'''
if self.session.query(self.AllUsers).filter_by(name=name).count():
return True
else:
return False
def user_logout(self, username):
        '''Method that records a user disconnecting.'''
        # Query the user that is leaving us
user = self.session.query(
self.AllUsers).filter_by(
name=username).first()
        # Remove them from the active users table.
self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
        # Commit the changes
self.session.commit()
def process_message(self, sender, recipient):
        '''Method that records a message transfer in the statistics table.'''
        # Get the sender's and recipient's IDs
sender = self.session.query(
self.AllUsers).filter_by(
name=sender).first().id
recipient = self.session.query(
self.AllUsers).filter_by(
name=recipient).first().id
        # Query the history rows and increment the counters
sender_row = self.session.query(
self.UsersHistory).filter_by(
user=sender).first()
sender_row.sent += 1
recipient_row = self.session.query(
self.UsersHistory).filter_by(
user=recipient).first()
recipient_row.accepted += 1
self.session.commit()
def add_contact(self, user, contact):
        '''Method that adds a contact for a user.'''
        # Get the user IDs
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
        # Check that it is not a duplicate and that the contact can exist
        # (we trust the user field)
if not contact or self.session.query(
self.UsersContacts).filter_by(
user=user.id,
contact=contact.id).count():
return
        # Create the object and add it to the database
contact_row = self.UsersContacts(user.id, contact.id)
self.session.add(contact_row)
self.session.commit()
    # Function that removes a contact from the database
def remove_contact(self, user, contact):
        '''Method that removes a user's contact.'''
        # Get the user IDs
user = self.session.query(self.AllUsers).filter_by(name=user).first()
contact = self.session.query(
self.AllUsers).filter_by(
name=contact).first()
        # Check that the contact can exist (we trust the user
        # field)
if not contact:
return
        # Delete the requested row
self.session.query(self.UsersContacts).filter(
self.UsersContacts.user == user.id,
self.UsersContacts.contact == contact.id
).delete()
self.session.commit()
def users_list(self):
        '''Method that returns the list of known users with their last login time.'''
        # Query the rows of the users table.
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login
)
        # Return a list of tuples
return query.all()
def active_users_list(self):
        '''Method that returns the list of active users.'''
        # Query a join of the tables and collect tuples of name, address, port
        # and time.
query = self.session.query(
self.AllUsers.name,
self.ActiveUsers.ip_address,
self.ActiveUsers.port,
self.ActiveUsers.login_time
).join(self.AllUsers)
        # Return a list of tuples
return query.all()
def login_history(self, username=None):
        '''Method that returns the login history.'''
        # Query the login history
query = self.session.query(self.AllUsers.name,
self.LoginHistory.date_time,
self.LoginHistory.ip,
self.LoginHistory.port
).join(self.AllUsers)
        # If a username was specified, filter by it
if username:
query = query.filter(self.AllUsers.name == username)
        # Return a list of tuples
return query.all()
def get_contacts(self, username):
        '''Method that returns a user's contact list.'''
        # Query the specified user
user = self.session.query(self.AllUsers).filter_by(name=username).one()
        # Query their contact list
query = self.session.query(self.UsersContacts, self.AllUsers.name). \
filter_by(user=user.id). \
join(self.AllUsers, self.UsersContacts.contact == self.AllUsers.id)
        # select only the usernames and return them.
return [contact[1] for contact in query.all()]
def message_history(self):
        '''Method that returns message statistics.'''
query = self.session.query(
self.AllUsers.name,
self.AllUsers.last_login,
self.UsersHistory.sent,
self.UsersHistory.accepted
).join(self.AllUsers)
        # Return a list of tuples
return query.all()
# Debugging
if __name__ == '__main__':
    test_db = ServerStorage('../server_database.db3')
    # Users must be registered before they can log in; the hashes and keys
    # below are placeholder debug values.
    test_db.add_user('test1', 'test_hash_1')
    test_db.add_user('test2', 'test_hash_2')
    test_db.user_login('test1', '192.168.1.113', 8080, 'test_key_1')
    test_db.user_login('test2', '192.168.1.113', 8081, 'test_key_2')
print(test_db.users_list())
# print(test_db.active_users_list())
# test_db.user_logout('McG')
# print(test_db.login_history('re'))
# test_db.add_contact('test2', 'test1')
# test_db.add_contact('test1', 'test3')
# test_db.add_contact('test1', 'test6')
# test_db.remove_contact('test1', 'test3')
test_db.process_message('test1', 'test2')
print(test_db.message_history()) | PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/db/migrations/operations/base.py | from djmodels.db import router
from djmodels.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
class Operation:
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
# Should this operation be considered safe to elide and optimize across?
elidable = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Return a 3-tuple of class import path (or just name if it lives
under djmodels.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Output a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Return True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Return True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Return whether or not a model may be migrated.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def reduce(self, operation, app_label=None):
"""
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
"""
if self.elidable:
return [operation]
elif operation.elidable:
return [self]
return False
def _get_model_tuple(self, remote_model, app_label, model_name):
if remote_model == RECURSIVE_RELATIONSHIP_CONSTANT:
return app_label, model_name.lower()
elif '.' in remote_model:
return tuple(remote_model.lower().split('.'))
else:
return app_label, remote_model.lower()
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
) | PypiClean |
/dragonflow-4.0.0.tar.gz/dragonflow-4.0.0/doc/source/specs/remote_device_communication.rst | ..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
===========================
Remote Device Communication
===========================
https://blueprints.launchpad.net/dragonflow/+spec/remote-device-communication
This spec proposes the solution of communicating to a remote device which
is not managed by Dragonflow.
Problem Description
===================
In the common scenario, a VM needs to communicate not only with another VM
but also with a physical machine. However, the virtual or physical machine
may not be managed by Dragonflow; in this spec we call such a machine a
remote device. If a VM in Dragonflow wants to communicate with a remote
device, Dragonflow needs to know some info about the remote device.
Usually we would deploy a VTEP for the virtual or physical machine in the DC
network, such as an Open vSwitch VXLAN port, a VTEP TOR (top of rack) switch
or a physical router which supports VTEP. So if Dragonflow knows the correct
VTEP IP, a VM in Dragonflow could reach the remote device over the overlay
network.
The remote device may belong to a tenant or have no tenant info at all.
It could be managed by another cloud OS; how the remote device learns the
location of the VM in Dragonflow and accesses it is out of the scope of this
spec.
Proposed Change
===============
To resolve the problem, the general idea is that we should provide the info
about the remote device to Dragonflow. We can invoke the Neutron API
create_port and supply the info of the remote device; the plugin will assign
a specific chassis name for the remote device and publish the create_port
message. After a chassis receives the message, it will create the
corresponding tunnel port to the remote chassis and install the forwarding
rules.
Neutron Plugin
--------------
When we invoke the create_port Neutron API provided by the Neutron plugin in
Dragonflow, it will process the request as follows:
1. We put the info that indicates the Neutron port is a remote device port
   into the binding_profile field so that the Neutron plugin can recognize it
   (a usage sketch follows this list):
binding_profile = {"port_key": "remote_port",
"host_ip": remote_chassis_ip}
2. When the Neutron plugin finds that it is a remote port by the
   binding_profile field in the create_port message, it will assign the
   remote_chassis_ip as the chassis name of the remote port, because the
   remote_chassis_ip should be unique in the DC network. Then it will store
   the lport in the DF DB and publish the message with the corresponding
   topic; if the lport belongs to some tenant, we could use the tenant_id as
   the topic.
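The sketch below shows how such a remote port could be registered with
python-neutronclient; the network id, the credentials and the exact
attribute name carrying the binding profile are illustrative assumptions
rather than part of this spec.
::
    from neutronclient.v2_0 import client
    neutron = client.Client(username='admin', password='secret',
                            tenant_name='admin',
                            auth_url='http://controller:5000/v2.0')
    body = {
        'port': {
            'network_id': 'NETWORK_UUID',  # network the remote device sits on
            'binding:profile': {           # the binding_profile described above
                'port_key': 'remote_port',
                'host_ip': '192.0.2.10',   # VTEP IP of the remote chassis
            },
        }
    }
    neutron.create_port(body)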
DF Local Controller
-------------------
The DF local controller will process the above notification message:
1. The DF local controller will analyse the create_port message and detect
   that it is a remote device port by the specific chassis name; it will also
   fetch the remote tunnel IP from the chassis name.
2. The local controller will check whether the local chassis has a tunnel
   port to the specific remote chassis; if not, it will create the tunnel
   port and establish the tunnel to the remote chassis.
3. After the tunnel port has been created, the local controller will notify
   the Apps with the create_lport message; the port will be considered a
   normal remote port, as in the current implementation.
On the other hand, when the remote device port is deleted from the local
cache, the local controller no longer needs to communicate with the remote
chassis, so it should delete the corresponding tunnel port and the
forwarding rules.
| PypiClean |
/OTRXMPPChannel-1.0.4.tar.gz/OTRXMPPChannel-1.0.4/README.rst | XMPP-OTR channel for Python
===========================
This is a Python library for communicating with XMPP destinations using
OTR (`Off-the-Record Messaging`_) encryption.
Features
--------
- Your internet application can talk securely to you on your PC or
smartphone using readily-available chat software with OTR support
- OTRv2
- Send to and receive from multiple destinations, with or without
fingerprint verification
- Pure python (no libotr dependency)
Installation
------------
::
$ sudo pip install --pre xmpppy # xmpppy is tagged as an "rc" version
$ sudo pip install otrxmppchannel
Example
-------
::
import time
from otrxmppchannel import OTRXMPPChannel
    from otrxmppchannel.connection import (OTR_TRUSTED, OTR_UNTRUSTED,
                                           OTR_UNENCRYPTED, OTR_UNKNOWN)
# Load the base64-encoded OTR DSA key. Constructing the object without
# a key will generate one and provide it via ValueError exception.
privkey = open('.otrprivkey', 'r').read()
class MyOTRChannel(OTRXMPPChannel):
def on_receive(self, message, from_jid, otr_state):
if otr_state == OTR_TRUSTED:
state = 'trusted'
elif otr_state == OTR_UNTRUSTED:
state = 'UNTRUSTED!'
elif otr_state == OTR_UNENCRYPTED:
state = 'UNENCRYPTED!'
else:
state = 'UNKNOWN OTR STATUS!'
print('received %s from %s (%s)' % (message, from_jid, state))
    mychan = MyOTRChannel(
'[email protected]/datadiode',
'supersecret',
[
(
'[email protected]',
'33eb6b01c97ceba92bd6b5e3777189c43f8d6f03'
),
'[email protected]'
],
privkey
)
mychan.send('') # Force OTR setup
time.sleep(3) # Wait a bit for OTR setup to complete
mychan.send('This message should be encrypted')
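If you do not have a key yet, the comment at the top of the example notes
that constructing the channel without a key generates a new one and delivers
it via a ValueError. A minimal sketch of that idea (the exact exception
payload format is an assumption):
::
    from otrxmppchannel import OTRXMPPChannel
    try:
        OTRXMPPChannel('[email protected]/datadiode', 'supersecret', [])
    except ValueError as e:
        # Assumption: the exception message carries the freshly generated,
        # base64-encoded private key.
        open('.otrprivkey', 'w').write(str(e))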
Notes
-----
- XMPP invitations are not handled
- It seems to take roughly 3 seconds to set up an OTR session. Messages
sent before the session is ready may be lost.
- The private key serialization format is specific to pure-python-otr.
Conversions from other formats are not handled.
Dependencies
------------
- `xmpppy`_ (>= 0.4.1)
- `pure-python-otr`_ (>= 1.0.0)
Author
------
- `Mike Gogulski`_ - https://github.com/mikegogulski
Donations
---------
If you found this software useful and would like to encourage its
maintenance and further development, please consider making a donation
to the Bitcoin address ``1MWFhwdFVEhB3X4eVsm9WxwvAhaxQqNbJh``.
License
-------
This is free and unencumbered public domain software. For more
information, see http://unlicense.org/ or the accompanying UNLICENSE
file.
.. _Off-the-Record Messaging: https://otr.cypherpunks.ca/
.. _xmpppy: http://xmpppy.sourceforge.net/
.. _pure-python-otr: https://github.com/afflux/pure-python-otr
.. _Mike Gogulski: mailto:[email protected] | PypiClean |
/gramaddict-3.2.5.tar.gz/gramaddict-3.2.5/README.md | <p align="center">
<img src="https://github.com/GramAddict/bot/raw/master/res/logo.png" alt="logo">
<br />
<h1 align="center">GramAddict</h1>
<br />
<p align="center">Looking for Instagram automation? I'm proud to present you a <b>100% free and open source Instagram bot</b>. This bot will allow you to grow your following and engagement by liking, following, commenting and sending PM automatically with your Android phone/tablet/emulator. <b>No root required.</b> <p>
<p align="center">
<a href="https://github.com/gramaddict/bot/blob/master/LICENSE">
<img src="https://img.shields.io/github/license/gramaddict/bot?style=flat" alt=""/>
</a>
<a href="https://www.python.org/">
<img src="https://img.shields.io/badge/built%20with-Python3-red.svg?style=flat" alt=""/>
</a>
<a href="https://github.com/GramAddict/bot/pulls">
<img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat" alt=""/>
</a>
<a href="https://github.com/GramAddict/bot/issues">
<img src="https://img.shields.io/github/issues/gramaddict/bot?style=flat" alt=""/>
</a>
<a href="https://github.com/GramAddict/bot/pulls">
<img src="https://img.shields.io/github/issues-pr/gramaddict/bot?style=flat" alt=""/>
</a>
<a href="https://github.com/GramAddict/bot/stargazers">
<img src="https://img.shields.io/github/stars/gramaddict/bot?style=flat" alt="">
</a>
<a href="https://img.shields.io/github/last-commit/gramaddict/bot/master?style=flat">
<img src="https://img.shields.io/github/last-commit/GramAddict/bot/master?style=flat" alt="">
</a>
<a href="https://pypi.org/project/gramaddict/">
<img src="https://img.shields.io/pypi/dm/gramaddict" alt="">
</a>
<a href="https://github.com/GramAddict/bot#backers">
<img src="https://img.shields.io/opencollective/backers/gramaddict?style=flat" alt="">
</a>
<a href="https://discord.gg/NK8PNEFGFF">
<img src="https://img.shields.io/discord/771481743471017994?style=flat" alt="">
</a>
<a href="https://duckduckgo.com/?q=where+can+i+find+old+ig+version+apk&t=newext&atb=v376-1&df=y&ia=web">
<img src="https://img.shields.io/badge/works_on_ig_version-226.1.0.16.117-orange" alt="">
</a>
</p>
<br />
# Why should I automate my Instagram account?
It's very hard nowadays to grow an account. Have you ever been on the explore page? IG will show your posts only to close accounts & accounts you interact with most, so you will never be on the explore page. Nobody will see your beautiful photos, your hilarious memes, or whatever you are posting. But now you can do something about that! With GramAddict you can __get noticed easily__, and if you really deserve it, your account will __grow super fast__!
## So, do I need to publish good content to grow?
Of course you have to! This bot will mainly help you get the __visibility you deserve__, which Instagram is not giving you.
## Ok but I don't know how to start this journey...
Don't worry about that now, it's not that hard to start botting with GramAddict! I tried to make it as noob-friendly as possible. In any case, if you follow the docs with all the steps provided, you will make it! You are also invited to join our community and ask for any help!
## There is a community around this project?
Yes! We are on __discord__ and we have a lot of __active users__ - you can ask us whatever you want!
<a href="https://discord.gg/NK8PNEFGFF">
<img src="https://img.shields.io/discord/771481743471017994?style=flat" alt="">
</a>
## I saw there are a lot of similar projects on GitHub, why should I choose this one?
You're right, there are plenty of other bot solutions. Most of them use API requests. However, you will get your account banned if you use the API (for 1-30 days)!
There's also a very similar project to this one, yet it's not free and the dev doesn't care much about the community. They removed a few lines from the core and ask you for a subscription plan in order to use those features. Furthermore, you don't even know what the real code does during execution - it's encrypted. What if they are stealing something else? Who knows? I don't.
This bot is __free to use__ and __open source__, you won't get charged for a few extra lines, __EVER__.
I'm focused on the community and not on finding ways to get money out of this project. __I really care about the community__ and every dev should too. 🤗
## So this bot does not use API?
__No__, this bot works through __adb__ and is powered by __uiautomator2__, which is a testing wrapper for Android devices. In fact, your device (or an emulator) is used for doing the botting sh*it. That's very secure compared to other botting systems out there. I also made it look as human as possible. You can think of the bot as a __stupid__ friend using your phone and doing things for you. The example below is quite old but will show you what we are talking about.
<p align="center">
<img src="https://github.com/GramAddict/bot/raw/master/res/demo.gif">
</p>
## Urra! I'll never get banned anymore with this bot!
No no no, wait! 🤦
You have to configure the bot to do things as a human would. Do you really spend all day scrolling on Instagram? If yes, contact a medic :P
You can't perform too many actions or Instagram will block you. That happens even if you do it by hand... Have you ever added a lot of people in a row and got blocked for that action? Or spammed people with a link in a PM?
__DON'T F*CKING DO THAT__
__Be gentle and anonymous and nobody will notice that you're botting, trust me__.
## Do I need a computer for running the bot?
Yes, but you can also run it [directly on your phone](https://docs.gramaddict.org/#/termux)!!
In any case, you can decide to use a physical device or an emulator. If you're on Windows I suggest you use [Memu](https://www.memuplay.com/); on macOS we found that [Android Studio](https://developer.android.com/studio) works well (installable with [homebrew](https://formulae.brew.sh/cask/android-studio)).
For hosting the bot you can use:
- your computer (with Windows, macOS, or Linux)
- a Raspberry (which is a cheap little pc Linux based)
## Cool! What can I do with this bot?
There are lots of __cool features__ you can use __for free__!
- Works without rooting
- Works with both emulators and physical devices
- Can be used stand-alone (without the use of a computer)
- Includes realistic random human-like delays and actions
- Can watch stories while interacting
- Comment post with emojis and [spintax logic](https://github.com/GramAddict/docs/blob/main/configuration.md#spintax-support)
- Send PM
- Type like a human (letter by letter by faking using suggestions. For example, you won't type `H - e - l - l - o` letter by letter but something like `H - He - Hello`)
- Browse carousels and watch their contents
- Watch videos for a specific amount of time
- Scheduler
- Getting tasty telegram reports
- Supports multiple actions in one session
- Lots of customizable limits to keep your account safe from soft bans
- Available interactions
- interact with a user's followers or following
- interact with a top or recent hashtag's post likers
- interact with a top or recent hashtag post
- interact with a top or recent place's post likers
- interact with a top or recent place post
- interact with user's post likers
- interact with a single blogger
- interact with your feed
- interact with users from a list (*.txt)
- interact with posts from links inside a list (*.txt)
- unfollow any followers
- unfollow any followers, followed by bot
- unfollow any followers, followed by bot, who don't follow you back
- unfollow from a list (*.txt)
- scrape mode for collecting usernames instead of interacting with them (you will find more information about that in the doc)
- Lots of available filters to customize who you interact with
- you can blacklist people to avoid interacting with them
- you can whitelist people to not remove them when you unfollow people
- biography main characters and language
- profile name main characters
- private / public / business / non business account
- number of posts / followers / following
... and more!
<br />
Full documentation available on [docs.gramaddict.org](https://docs.gramaddict.org)
## Telegram reports? What's that?
You can get __reports__ through telegram from your bot activities!
[Follow this guide to accomplish that](https://docs.gramaddict.org/#/configuration?id=telegram-reports).
<img src="https://github.com/GramAddict/bot/raw/master/res/telegram-reports.png" width="200">
> In this case trends are negative because I use this account only for tuning this bot, and it's private...
> I didn't accept anyone lately so the trends are meh :P
Cool, isn't it? 💦
# Quick start
Now that you're here, you still don't know how to install the whole thing.
I'll try to help you accomplish that with a short tutorial. The full one is available [here](https://docs.gramaddict.org/#/quickstart).
## What do you need:
- a computer (with Windows, macOS or Linux)
- Python installed on your machine (with pip)
- Adb tools
- a physical device or an emulator (with Android 4.4+)
### Step 1: Install Python (>=3.6):
>Python 3.10 is currently not supported!
If you're on Linux or macOS you should have Python and Git installed, just check that your Python version is >= 3.6.
On Windows you have to [install it](https://www.python.org/downloads/release/python-397/) for sure.
Failed? [Detailed tutorial here](https://docs.gramaddict.org/#/quickstart?id=step-1-install-python-if-not-installed).
>A little reminder: we refer to python with __python3__. You may also have python2 installed if you're on Linux for example. If you're on Windows you can use __python__ instead of python3.
>Check which python alias you have to use by typing `python -V` or `python3 -V` or `py -V` and use the right one for the rest of the tutorial.
>Check that pip3 is installed by typing `pip3 -V`
>If that returns an error you have to install it! How? [Google is your best friend!](https://www.google.com/search?q=how+to+install+pip) :P
### Step 2: Install GramAddict:
You can install GramAddict in two ways: with __pip__ or with __git__.
It is good practice to create a virtual environment when you install a new package. That will save you from a lot of problems!
#### Create a virtual environment
We create a virtual environment called `.venv` and activate it:
- create a directory where you will create the new environment
- write in console: `python3 -m venv .venv`
> We use `venv` instead of `virtualenv` because `venv` is shipped directly with python3 and you don't have to install anything 🤪
- activate the .venv:
- `source .venv/bin/activate` on Linux/macOS
- `.venv\Scripts\activate.bat` on Windows cmd
- `.venv\Scripts\activate.ps1` on Windows PowerShell
> If you activate the venv correctly, you will see a little (.venv) on the left side of the command line!
#### With pip (I suggest you this way):
- install the package: `pip3 install GramAddict`
- check if it's installed: `gramaddict --version`
- if everything is fine you will get the GramAddict version installed 🥳
#### With git:
> __Warning:__ console commands like `gramaddict init`, `gramaddict dump` and `gramaddict run` won't work if you installed the bot with git.
- clone the project: `git clone https://github.com/GramAddict/bot.git gramaddict`
- enter the gramaddict folder: `cd gramaddict`
- install the requirements: `pip3 install -r requirements.txt`
### Step 3: Install adb:
Adb stands for [Android Debug Bridge](https://developer.android.com/studio/command-line/adb). It's needed for making this bot work properly. I think this is the hardest part to accomplish, but don't give up! You can do it, with my help. 💪
1. download [this package](https://developer.android.com/studio/releases/platform-tools) and unzip it somewhere and remind the path
> __Warning:__ place it somewhere you're sure you will never delete it, otherwise the bot won't work anymore!
2. add platform-tools path to the PATH environment variable
- if you're on __Linux/macOS__ that's pretty easy:
- open ~/.bash_profile with any text editor you like
- add the following line with the full path to the platform-tools directory: export PATH=~/Library/Android/sdk/platform-tools/:$PATH. This path may be different depending on the way you installed platform-tools
- save file and restart Terminal
- on __Windows__ there are more steps to accomplish that:
- open Windows Explorer (you can press WINKEY+E) and right-click "My Computer" on left side
- in the pop-up menu, click `Properties`
- in the `System Properties` window, click the `Advanced` tab, and then click `Environment Variables`
- in the `System Variables` window, highlight `Path`, and click `Edit`
- in the `Edit System Variables` window, press on `New`
- enter the full path for the folder platform-tools you unzipped before
- press all the Ok and restart Command Prompt
3. check that everything is fine
- write `adb version`, you should get something like that:
> C:\Users\dedil>adb version
> Android Debug Bridge version 1.0.41
> Version 30.0.5-6877874
### Step 4: Set up the device:
**Physical device**
1. First thing you have to do is to [enable developer options and USB debugging](https://developer.android.com/studio/debug/dev-options#enable).
2. connect your phone to your computer with a USB cable
3. device will ask you to allow connection. Press "Connect"
**Emulator for Mac OS**
1. Install the [Homebrew](https://brew.sh)
2. Install the [Cask](https://cask.readthedocs.io/en/latest/index.html) by running `brew install cask`
3. Add the Cask application folder to your `$PATH`, e.g. `echo 'export PATH="$HOME/.cask/bin:$PATH"' >> ~/.bash_profile` and open a new terminal or reload with `source ~/.bash_profile`
4. Install the [Android Studio](https://formulae.brew.sh/cask/android-studio)
5. Run the Android Studio and click on 'More Actions -> Virtual Device Manager', then select the device and image, I found out that Pixel 2 API 28 combo works well.
6. Run the virtual device and install the Instagram app on it, don't forget to log in.
**Verify the device with adb**
Type `adb devices` in terminal
- a list of devices attached should be displayed, if not you did something wrong
> List of devices attached
> A0B1CD2345678901 device
- this is your device ID, you have to use it only if you have more than one device connected at the same time
### Step 5: Start the bot:
This bot works only if your Instagram is in [English](https://help.instagram.com/111923612310997).
1. initialize uiautomator2: `uiautomator2 init`
2. initialize GramAddict: `gramaddict init your_ig_account_name_here`
> __Warning:__ this works only if you installed the bot with pip; if you used git you have to create the accounts folder and the youraccountname folder manually.
 - that script will create all the files you need to configure and start this bot
- you will find them inside the folder `accounts/youraccountname/`
```sh
gramaddict/
run.py
accounts/
youraccountname/
config.yml
telegram.yml
filters.yml
whitelist.txt
blacklist.txt
comments_list.txt
pm_list.txt
```
3. now that you have all the requirements you have to configure the whole thing by [following this guide](https://docs.gramaddict.org/#/configuration)
4. now you're done, and you can finally start the bot: `python3 run.py --config accounts/yourusername/config.yml` or `gramaddict run --config accounts/yourusername/config.yml`
Failed? [Check this out!](https://docs.gramaddict.org/#/quickstart?id=troubleshooting)
# Bot crashes, what should I do?
The script isn't perfect and may fail sometimes. If this is the case you can open a ticket on our [discord channel](https://discord.gg/NK8PNEFGFF). That way you won't share your Instagram account name with anyone 😈. We'll be very happy to help you!
# WOW! You dedicated so much time to this project! Why you did that for free??
Well, there used to be three of us a long time ago, but suddenly my two friends left. It has been an opportunity for me to improve my Python skills, and that's why I didn't leave the project and keep maintaining it.
But of course donations are very welcome, so if you think I did a great job you can buy me a beer :)
<a href="https://www.buymeacoffee.com/mastrolube" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" hspace="10" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
# Can I do something to help this project grow?
On GitHub there is a `star system` which indicates how good a project is. If you like this project it will be amazing if you can press the little star at the top! ⭐
# Contributors
This project exists thanks to all of our Contributors [[Contribute](https://docs.gramaddict.org/#/contributing)].
<a href="https://github.com/gramaddict/bot/graphs/contributors"><img src="https://opencollective.com/gramaddict/contributors.svg?width=890&button=false" /></a>
<br />
# Backers
Thank you to everyone that supports us financially! 🙏 [[Become a backer](https://opencollective.com/gramaddict#backer)]
<a href="https://opencollective.com/gramaddict#backers" target="_blank"><img src="https://opencollective.com/gramaddict/backers.svg?width=890"></a>
<br />
# Talk botty with us
<p>
<a href="https://discord.gg/NK8PNEFGFF">
<img hspace="3" alt="Join us on Discord" src="https://github.com/GramAddict/bot/raw/master/res/discord.png" height=84/>
</a>
</p>
---
> **Disclaimer**<a name="disclaimer" />: This project comes with no guarantee or warranty. You are responsible for whatever happens from using this project. It is possible to get soft or hard banned by using this project if you are not careful.
| PypiClean |
/CubicReport-0.4.18.tar.gz/CubicReport-0.4.18/geraldo/generators/pdf.py | import datetime, os
from base import ReportGenerator
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus import Paragraph, KeepInFrame
from reportlab.lib.units import cm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.fonts import addMapping
try:
# Try to import pyPdf, a library to combine lots of PDF files
# at once. It is important to improve Geraldo's performance
    # on memory consumption when generating large files.
# http://pypi.python.org/pypi/pyPdf/
import pyPdf
except ImportError:
pyPdf = None
DEFAULT_TEMP_DIR = '/tmp/'
from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
from geraldo.barcodes import BarCode
from geraldo.cache import make_hash_key, get_cache_backend, CACHE_DISABLED
from geraldo.charts import BaseChart
from geraldo.exceptions import AbortEvent
class PDFGenerator(ReportGenerator):
"""This is a generator to output a PDF using ReportLab library with
preference by its Platypus API"""
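    # Typical usage (a sketch only; assumes a geraldo Report subclass named
    # MyReport and an iterable of objects to render):
    #   from geraldo.generators import PDFGenerator
    #   report = MyReport(queryset=my_objects)
    #   report.generate_by(PDFGenerator, filename='/tmp/report.pdf')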
filename = None
canvas = None
return_canvas = False
multiple_canvas = False #bool(pyPdf)
temp_files = None
temp_file_name = None
temp_files_counter = 0
temp_files_max_pages = 10
temp_directory = DEFAULT_TEMP_DIR
mimetype = 'application/pdf'
def __init__(self, report, filename=None, canvas=None, return_canvas=False,
multiple_canvas=None, temp_directory=None, cache_enabled=None,
**kwargs):
super(PDFGenerator, self).__init__(report, **kwargs)
self.filename = filename
self.canvas = canvas
self.return_canvas = return_canvas
self.temp_directory = temp_directory or self.temp_directory
# Cache enabled
if cache_enabled is not None:
self.cache_enabled = cache_enabled
elif self.cache_enabled is None:
self.cache_enabled = bool(self.report.cache_status)
# Sets multiple_canvas with default value if None
if multiple_canvas is not None:
self.multiple_canvas = multiple_canvas
# Sets multiple_canvas as False if a canvas has been informed as argument
# nor if return_canvas attribute is setted as True
if canvas or self.return_canvas or self.return_pages:
self.multiple_canvas = False
# Initializes multiple canvas controller variables
elif self.multiple_canvas:
self.temp_files = []
# Just a unique name (current time + id of this object + formatting string for counter + PDF extension)
self.temp_file_name = datetime.datetime.now().strftime('%Y%m%d%H%M%s') + str(id(self)) + '_%s.pdf'
def execute(self):
"""Generates a PDF file using ReportLab pdfgen package."""
super(PDFGenerator, self).execute()
# Check the cache
if self.cached_before_render():
return
# Initializes the temporary PDF canvas (just to be used as reference)
if not self.canvas:
self.start_canvas()
# Prepare additional fonts
self.prepare_additional_fonts()
# Calls the before_print event
self.report.do_before_print(generator=self)
# Render pages
self.render_bands()
# Returns rendered pages
if self.return_pages:
return self._rendered_pages
# Check the cache
if self.cached_before_generate():
return
# Calls the "after render" event
self.report.do_before_generate(generator=self)
# Initializes the definitive PDF canvas
self.start_pdf()
# Generate the report pages (here it happens)
self.generate_pages()
# Calls the after_print event
self.report.do_after_print(generator=self)
# Multiple canvas files combination
if self.multiple_canvas:
self.combine_multiple_canvas()
else:
# Returns the canvas
if self.return_canvas:
return self.canvas
# Saves the canvas - only if it didn't return it
self.close_current_canvas()
# Store in the cache
self.store_in_cache()
def get_hash_key(self, objects):
"""Appends pdf extension to the hash_key"""
return super(PDFGenerator, self).get_hash_key(objects) + '.pdf'
def store_in_cache(self):
if not self.cache_enabled or self.report.cache_status == CACHE_DISABLED:
return
        # Gets canvas content to store in the cache
if isinstance(self.filename, basestring):
fp = file(self.filename, 'rb')
content = fp.read()
fp.close()
elif hasattr(self.filename, 'read') and callable(self.filename.read):
content = self.filename.read()
else:
return False
return super(PDFGenerator, self).store_in_cache(content)
def start_canvas(self, filename=None):
"""Sets the PDF canvas"""
# Canvas for multiple canvas
if self.multiple_canvas:
filename = os.path.join(
self.temp_directory,
filename or self.temp_file_name%(self.temp_files_counter),
)
# Appends this filename to the temp files list
self.temp_files.append(filename)
# Increments the counter for the next file
self.temp_files_counter += 1
self.canvas = Canvas(filename=filename, pagesize=self.report.page_size)
# Canvas for single canvas
else:
filename = filename or self.filename
self.canvas = Canvas(filename=filename, pagesize=self.report.page_size)
def close_current_canvas(self):
"""Saves and close the current canvas instance"""
self.canvas.save()
def combine_multiple_canvas(self):
"""Combine multiple PDF files at once when is working with multiple canvas"""
if not self.multiple_canvas or not pyPdf or not self.temp_files:
return
readers = []
def append_pdf(input, output):
for page_num in range(input.numPages):
output.addPage(input.getPage(page_num))
output = pyPdf.PdfFileWriter()
for f_name in self.temp_files:
reader = pyPdf.PdfFileReader(file(f_name, 'rb'))
readers.append(reader)
append_pdf(reader, output)
if isinstance(self.filename, basestring):
fp = file(self.filename, 'wb')
else:
fp = self.filename
output.write(fp)
# Closes and clear objects
fp.close()
for r in readers: del r
del output
def start_pdf(self):
"""Initializes the PDF document with some properties and methods"""
# Set PDF properties
self.canvas.setTitle(self.report.title)
self.canvas.setAuthor(self.report.author)
self.canvas.setSubject(self.report.subject)
self.canvas.setKeywords(self.report.keywords)
def render_page_header(self):
"""Generate the report page header band if it exists"""
if not self.report.band_page_header:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_header.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_header,
top_position=self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_top),
update_top=False,
)
def render_page_footer(self):
"""Generate the report page footer band if it exists"""
if not self.report.band_page_footer:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_footer.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_footer,
top_position=self.calculate_size(self.report.margin_bottom) +\
self.calculate_size(self.report.band_page_footer.height),
update_top=False,
)
def calculate_top(self, *args):
ret = args[0]
for i in args[1:]:
ret -= i
return ret
def get_top_pos(self):
"""Since the coordinates are bottom-left on PDF, we have to use this to get
the current top position, considering also the top margin."""
ret = self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_top) - self._current_top_position
if self.report.band_page_header:
ret -= self.calculate_size(self.report.band_page_header.height)
return ret
def make_paragraph(self, text, style=None):
"""Uses the Paragraph class to return a new paragraph object"""
return Paragraph(text, style)
def wrap_paragraph_on(self, paragraph, width, height):
"""Wraps the paragraph on the height/width informed"""
paragraph.wrapOn(self.canvas, width, height)
def wrap_barcode_on(self, barcode, width, height):
"""Wraps the barcode on the height/width informed"""
barcode.wrapOn(self.canvas, width, height)
# Stylizing
def set_fill_color(self, color):
"""Sets the current fill on canvas. Used for fonts and shape fills"""
self.canvas.setFillColor(color)
def set_stroke_color(self, color):
"""Sets the current stroke on canvas"""
self.canvas.setStrokeColor(color)
def set_stroke_width(self, width):
"""Sets the stroke/line width for shapes"""
self.canvas.setLineWidth(width)
def make_paragraph_style(self, band, style=None):
"""Merge report default_style + band default_style + widget style"""
d_style = self.report.default_style.copy()
if band.default_style:
for k,v in band.default_style.items():
d_style[k] = v
if style:
for k,v in style.items():
d_style[k] = v
return ParagraphStyle(name=datetime.datetime.now().strftime('%H%M%S'), **d_style)
def keep_in_frame(self, widget, width, height, paragraphs, mode, persistent=False):
keep = KeepInFrame(width, height, paragraphs, mode=mode)
keep.canv = self.canvas
keep.wrap(self.calculate_size(widget.width), self.calculate_size(widget.height))
if persistent:
widget.keep = keep
return keep
# METHODS THAT ARE TOTALLY SPECIFIC TO THIS GENERATOR AND MUST
# OVERRIDE THE SUPERCLASS EQUIVALENT ONES
def generate_pages(self):
"""Specific method that generates the pages"""
self._generation_datetime = datetime.datetime.now()
for num, page in enumerate([page for page in self._rendered_pages if page.elements]):
self._current_page_number = num + 1
# Multiple canvas support (closes current and creates a new
            # one if it reaches the max pages per temp file)
if num and self.multiple_canvas and num % self.temp_files_max_pages == 0:
self.close_current_canvas()
del self.canvas
self.start_canvas()
# Loop at band widgets
for element in page.elements:
# Widget element
if isinstance(element, Widget):
widget = element
# Set element colors
self.set_fill_color(widget.font_color)
self.generate_widget(widget, self.canvas, num)
# Graphic element
elif isinstance(element, Graphic):
graphic = element
# Set element colors
self.set_fill_color(graphic.fill_color)
self.set_stroke_color(graphic.stroke_color)
self.set_stroke_width(graphic.stroke_width)
self.generate_graphic(graphic, self.canvas)
self.canvas.showPage()
# Multiple canvas support (closes the current one)
if self.multiple_canvas:
self.close_current_canvas()
del self.canvas
def generate_widget(self, widget, canvas=None, page_number=0):
"""Renders a widget element on canvas"""
if isinstance(widget, SystemField):
# Sets system fields
widget.fields['report_title'] = self.report.title
widget.fields['page_number'] = page_number + 1
widget.fields['page_count'] = self.get_page_count()
widget.fields['current_datetime'] = self._generation_datetime
widget.fields['report_author'] = self.report.author
# Calls the before_print event
try:
widget.do_before_print(generator=self)
except AbortEvent:
return
# Exits if is not visible
if not widget.visible:
return
# This includes also the SystemField above
if isinstance(widget, Label):
para = Paragraph(widget.text, self.make_paragraph_style(widget.band, widget.style))
para.wrapOn(canvas, widget.width, widget.height)
if widget.truncate_overflow:
keep = self.keep_in_frame(
widget,
self.calculate_size(widget.width),
self.calculate_size(widget.height),
[para],
mode='truncate',
)
keep.drawOn(canvas, widget.left, widget.top)
elif isinstance(widget, SystemField):
para.drawOn(canvas, widget.left, widget.top - para.height)
else:
para.drawOn(canvas, widget.left, widget.top)
# Calls the after_print event
widget.do_after_print(generator=self)
def generate_graphic(self, graphic, canvas=None):
"""Renders a graphic element"""
canvas = canvas or self.canvas
# Calls the before_print event
try:
graphic.do_before_print(generator=self)
except AbortEvent:
return
# Exits if is not visible
if not graphic.visible:
return
if isinstance(graphic, RoundRect):
canvas.roundRect(
graphic.left,
graphic.top,
graphic.width,
graphic.height,
graphic.radius,
graphic.stroke,
graphic.fill,
)
elif isinstance(graphic, Rect):
canvas.rect(
graphic.left,
graphic.top,
graphic.width,
graphic.height,
graphic.stroke,
graphic.fill,
)
elif isinstance(graphic, Line):
canvas.line(
graphic.left,
graphic.top,
graphic.right,
graphic.bottom,
)
elif isinstance(graphic, Circle):
canvas.circle(
graphic.left_center,
graphic.top_center,
graphic.radius,
graphic.stroke,
graphic.fill,
)
elif isinstance(graphic, Arc):
canvas.arc(
graphic.left,
graphic.top,
graphic.right,
graphic.bottom,
graphic.start_angle,
graphic.extent,
)
elif isinstance(graphic, Ellipse):
canvas.ellipse(
graphic.left,
graphic.top,
graphic.right,
graphic.bottom,
graphic.stroke,
graphic.fill,
)
elif isinstance(graphic, Image) and graphic.image:
canvas.drawInlineImage(
graphic.image,
graphic.left,
graphic.top,
graphic.width,
graphic.height,
preserveAspectRatio=not graphic.stretch,
)
elif isinstance(graphic, BarCode):
barcode = graphic.render()
if barcode:
barcode.drawOn(canvas, graphic.left, graphic.top)
elif isinstance(graphic, BaseChart):
drawing = graphic.render()
if drawing:
drawing.drawOn(canvas, graphic.left, graphic.top)
else:
return
# Calls the after_print event
graphic.do_after_print(generator=self)
def prepare_additional_fonts(self):
"""This method loads additional fonts and register them using ReportLab
PDF metrics package.
Just supports TTF fonts, for a while."""
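        # An illustrative example of the report.additional_fonts structures this
        # method accepts (the paths and family names are assumptions):
        #   {'DejaVu': [
        #        {'file': '/path/DejaVuSans.ttf', 'name': 'DejaVu',
        #         'bold': False, 'italic': False},
        #        ('DejaVu-Bold', '/path/DejaVuSans-Bold.ttf', True, False),
        #    ],
        #    'FreeSans': '/path/FreeSans.ttf'}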
if not self.report.additional_fonts:
return
for font_family_name, fonts_or_file in self.report.additional_fonts.iteritems():
# Supports font family with many styles (i.e: normal, italic, bold, bold-italic, etc.)
if isinstance(fonts_or_file, (list, tuple, dict)):
for font_item in fonts_or_file:
# List of tuples with format like ('font-name', 'font-file', True/False bold, True/False italic)
if isinstance(font_item, (list, tuple)):
font_name, font_file, is_bold, is_italic = font_item
pdfmetrics.registerFont(TTFont(font_name, font_file))
addMapping(font_family_name, is_bold, is_italic, font_name)
# List of dicts with format like {'file': '', 'name': '', 'bold': False, 'italic': False}
elif isinstance(font_item, dict):
pdfmetrics.registerFont(TTFont(font_item['name'], font_item['file']))
addMapping(font_family_name, font_item.get('bold', False),
font_item.get('italic', False), font_item['name'])
# Old style: font name and file path
else:
pdfmetrics.registerFont(TTFont(font_family_name, fonts_or_file)) | PypiClean |
/EasyDataSetToBOP-1.0.2.tar.gz/EasyDataSetToBOP-1.0.2/easydatasettobop/bop_dataset_utils/misc.py | import os
import sys
import datetime
import pytz
import math
import subprocess
import numpy as np
from scipy.spatial import distance
from easydatasettobop.bop_dataset_utils import transform
def log(s):
"""A logging function.
:param s: String to print (with the current date and time).
"""
# Use UTC time for logging.
utc_now = pytz.utc.localize(datetime.datetime.utcnow())
# pst_now = utc_now.astimezone(pytz.timezone("America/Los_Angeles"))
utc_now_str = '{}/{}|{:02d}:{:02d}:{:02d}'.format(
utc_now.month, utc_now.day, utc_now.hour, utc_now.minute, utc_now.second)
# sys.stdout.write('{}: {}\n'.format(time.strftime('%m/%d|%H:%M:%S'), s))
sys.stdout.write('{}: {}\n'.format(utc_now_str, s))
sys.stdout.flush()
def ensure_dir(path):
"""Ensures that the specified directory exists.
:param path: Path to the directory.
"""
if not os.path.exists(path):
os.makedirs(path)
def get_symmetry_transformations(model_info, max_sym_disc_step):
"""Returns a set of symmetry transformations for an object model.
:param model_info: See files models_info.json provided with the datasets.
:param max_sym_disc_step: The maximum fraction of the object diameter which
the vertex that is the furthest from the axis of continuous rotational
symmetry travels between consecutive discretized rotations.
:return: The set of symmetry transformations.
"""
# Discrete symmetries.
trans_disc = [{'R': np.eye(3), 't': np.array([[0, 0, 0]]).T}] # Identity.
if 'symmetries_discrete' in model_info:
for sym in model_info['symmetries_discrete']:
sym_4x4 = np.reshape(sym, (4, 4))
R = sym_4x4[:3, :3]
t = sym_4x4[:3, 3].reshape((3, 1))
trans_disc.append({'R': R, 't': t})
# Discretized continuous symmetries.
trans_cont = []
if 'symmetries_continuous' in model_info:
for sym in model_info['symmetries_continuous']:
axis = np.array(sym['axis'])
offset = np.array(sym['offset']).reshape((3, 1))
# (PI * diam.) / (max_sym_disc_step * diam.) = discrete_steps_count
discrete_steps_count = int(np.ceil(np.pi / max_sym_disc_step))
# Discrete step in radians.
discrete_step = 2.0 * np.pi / discrete_steps_count
for i in range(1, discrete_steps_count):
R = transform.rotation_matrix(i * discrete_step, axis)[:3, :3]
t = -R.dot(offset) + offset
trans_cont.append({'R': R, 't': t})
# Combine the discrete and the discretized continuous symmetries.
trans = []
for tran_disc in trans_disc:
if len(trans_cont):
for tran_cont in trans_cont:
R = tran_cont['R'].dot(tran_disc['R'])
t = tran_cont['R'].dot(tran_disc['t']) + tran_cont['t']
trans.append({'R': R, 't': t})
else:
trans.append(tran_disc)
return trans
def project_pts(pts, K, R, t):
"""Projects 3D points.
:param pts: nx3 ndarray with the 3D points.
:param K: 3x3 ndarray with an intrinsic camera matrix.
:param R: 3x3 ndarray with a rotation matrix.
:param t: 3x1 ndarray with a translation vector.
:return: nx2 ndarray with 2D image coordinates of the projections.
"""
assert (pts.shape[1] == 3)
P = K.dot(np.hstack((R, t)))
pts_h = np.hstack((pts, np.ones((pts.shape[0], 1))))
pts_im = P.dot(pts_h.T)
pts_im /= pts_im[2, :]
return pts_im[:2, :].T
class Precomputer(object):
""" Caches pre_Xs, pre_Ys for a 30% speedup of depth_im_to_dist_im()
"""
xs, ys = None, None
pre_Xs, pre_Ys = None, None
depth_im_shape = None
K = None
@staticmethod
def precompute_lazy(depth_im, K):
""" Lazy precomputation for depth_im_to_dist_im() if depth_im.shape or K changes
:param depth_im: hxw ndarray with the input depth image, where depth_im[y, x]
is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y],
or 0 if there is no such 3D point (this is a typical output of the
Kinect-like sensors).
:param K: 3x3 ndarray with an intrinsic camera matrix.
:return: hxw ndarray (Xs/depth_im, Ys/depth_im)
"""
    # Recompute xs/ys whenever the image shape changes, and pre_Xs/pre_Ys
    # whenever the shape or the intrinsics K change.
    shape_changed = depth_im.shape != Precomputer.depth_im_shape
    if shape_changed:
      Precomputer.depth_im_shape = depth_im.shape
      Precomputer.xs, Precomputer.ys = np.meshgrid(
        np.arange(depth_im.shape[1]), np.arange(depth_im.shape[0]))
    if shape_changed or not np.all(K == Precomputer.K):
      Precomputer.K = K
      Precomputer.pre_Xs = (Precomputer.xs - K[0, 2]) / np.float64(K[0, 0])
      Precomputer.pre_Ys = (Precomputer.ys - K[1, 2]) / np.float64(K[1, 1])
return Precomputer.pre_Xs, Precomputer.pre_Ys
def depth_im_to_dist_im_fast(depth_im, K):
"""Converts a depth image to a distance image.
:param depth_im: hxw ndarray with the input depth image, where depth_im[y, x]
is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y],
or 0 if there is no such 3D point (this is a typical output of the
Kinect-like sensors).
:param K: 3x3 ndarray with an intrinsic camera matrix.
:return: hxw ndarray with the distance image, where dist_im[y, x] is the
distance from the camera center to the 3D point [X, Y, Z] that projects to
pixel [x, y], or 0 if there is no such 3D point.
"""
# Only recomputed if depth_im.shape or K changes.
pre_Xs, pre_Ys = Precomputer.precompute_lazy(depth_im, K)
dist_im = np.sqrt(
np.multiply(pre_Xs, depth_im) ** 2 +
np.multiply(pre_Ys, depth_im) ** 2 +
depth_im.astype(np.float64) ** 2)
return dist_im
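# Illustrative usage sketch (added; not part of the original toolkit): converting
# a tiny synthetic depth image with a made-up camera matrix. Pixels with depth 0
# stay 0 in the distance image.
def _example_depth_to_dist():
  K = np.array([[500.0, 0.0, 2.0], [0.0, 500.0, 1.5], [0.0, 0.0, 1.0]])
  depth_im = np.zeros((3, 4))
  depth_im[1, 2] = 1000.0  # a single valid depth value
  return depth_im_to_dist_im_fast(depth_im, K)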
def depth_im_to_dist_im(depth_im, K):
"""Converts a depth image to a distance image.
:param depth_im: hxw ndarray with the input depth image, where depth_im[y, x]
is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y],
or 0 if there is no such 3D point (this is a typical output of the
Kinect-like sensors).
:param K: 3x3 ndarray with an intrinsic camera matrix.
:return: hxw ndarray with the distance image, where dist_im[y, x] is the
distance from the camera center to the 3D point [X, Y, Z] that projects to
pixel [x, y], or 0 if there is no such 3D point.
"""
xs, ys = np.meshgrid(
np.arange(depth_im.shape[1]), np.arange(depth_im.shape[0]))
Xs = np.multiply(xs - K[0, 2], depth_im) * (1.0 / K[0, 0])
Ys = np.multiply(ys - K[1, 2], depth_im) * (1.0 / K[1, 1])
dist_im = np.sqrt(
Xs.astype(np.float64) ** 2 +
Ys.astype(np.float64) ** 2 +
depth_im.astype(np.float64) ** 2)
# dist_im = np.linalg.norm(np.dstack((Xs, Ys, depth_im)), axis=2) # Slower.
return dist_im
def clip_pt_to_im(pt, im_size):
"""Clips a 2D point to the image frame.
:param pt: 2D point (x, y).
:param im_size: Image size (width, height).
:return: Clipped 2D point (x, y).
"""
return [min(max(pt[0], 0), im_size[0] - 1),
min(max(pt[1], 0), im_size[1] - 1)]
def calc_2d_bbox(xs, ys, im_size=None, clip=False):
"""Calculates 2D bounding box of the given set of 2D points.
:param xs: 1D ndarray with x-coordinates of 2D points.
:param ys: 1D ndarray with y-coordinates of 2D points.
:param im_size: Image size (width, height) (used for optional clipping).
:param clip: Whether to clip the bounding box (default == False).
:return: 2D bounding box (x, y, w, h), where (x, y) is the top-left corner
and (w, h) is width and height of the bounding box.
"""
bb_min = [xs.min(), ys.min()]
bb_max = [xs.max(), ys.max()]
if clip:
assert (im_size is not None)
bb_min = clip_pt_to_im(bb_min, im_size)
bb_max = clip_pt_to_im(bb_max, im_size)
return [bb_min[0], bb_min[1], bb_max[0] - bb_min[0], bb_max[1] - bb_min[1]]
def calc_3d_bbox(xs, ys, zs):
"""Calculates 3D bounding box of the given set of 3D points.
:param xs: 1D ndarray with x-coordinates of 3D points.
:param ys: 1D ndarray with y-coordinates of 3D points.
:param zs: 1D ndarray with z-coordinates of 3D points.
  :return: 3D bounding box (x, y, z, w, h, d), where (x, y, z) is the minimum
    corner (smallest coordinates) and (w, h, d) is width, height and depth of the bounding box.
"""
bb_min = [xs.min(), ys.min(), zs.min()]
bb_max = [xs.max(), ys.max(), zs.max()]
return [bb_min[0], bb_min[1], bb_min[2],
bb_max[0] - bb_min[0], bb_max[1] - bb_min[1], bb_max[2] - bb_min[2]]
def iou(bb_a, bb_b):
"""Calculates the Intersection over Union (IoU) of two 2D bounding boxes.
:param bb_a: 2D bounding box (x1, y1, w1, h1) -- see calc_2d_bbox.
:param bb_b: 2D bounding box (x2, y2, w2, h2) -- see calc_2d_bbox.
:return: The IoU value.
"""
# [x1, y1, width, height] --> [x1, y1, x2, y2]
tl_a, br_a = (bb_a[0], bb_a[1]), (bb_a[0] + bb_a[2], bb_a[1] + bb_a[3])
tl_b, br_b = (bb_b[0], bb_b[1]), (bb_b[0] + bb_b[2], bb_b[1] + bb_b[3])
# Intersection rectangle.
tl_inter = max(tl_a[0], tl_b[0]), max(tl_a[1], tl_b[1])
br_inter = min(br_a[0], br_b[0]), min(br_a[1], br_b[1])
# Width and height of the intersection rectangle.
w_inter = br_inter[0] - tl_inter[0]
h_inter = br_inter[1] - tl_inter[1]
if w_inter > 0 and h_inter > 0:
area_inter = w_inter * h_inter
area_a = bb_a[2] * bb_a[3]
area_b = bb_b[2] * bb_b[3]
iou = area_inter / float(area_a + area_b - area_inter)
else:
iou = 0.0
return iou
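# Worked example (added): boxes (0, 0, 10, 10) and (5, 5, 10, 10) overlap in a
# 5x5 square, so IoU = 25 / (100 + 100 - 25) = 1/7 ≈ 0.143.
def _example_iou():
  return iou([0, 0, 10, 10], [5, 5, 10, 10])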
def transform_pts_Rt(pts, R, t):
"""Applies a rigid transformation to 3D points.
:param pts: nx3 ndarray with 3D points.
:param R: 3x3 ndarray with a rotation matrix.
:param t: 3x1 ndarray with a translation vector.
:return: nx3 ndarray with transformed 3D points.
"""
assert (pts.shape[1] == 3)
pts_t = R.dot(pts.T) + t.reshape((3, 1))
return pts_t.T
def calc_pts_diameter(pts):
"""Calculates the diameter of a set of 3D points (i.e. the maximum distance
between any two points in the set).
:param pts: nx3 ndarray with 3D points.
:return: The calculated diameter.
"""
diameter = -1.0
for pt_id in range(pts.shape[0]):
pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])
pts_diff = pt_dup - pts[pt_id:, :]
max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())
if max_dist > diameter:
diameter = max_dist
return diameter
def calc_pts_diameter2(pts):
"""Calculates the diameter of a set of 3D points (i.e. the maximum distance
between any two points in the set). Faster but requires more memory than
calc_pts_diameter.
:param pts: nx3 ndarray with 3D points.
:return: The calculated diameter.
"""
dists = distance.cdist(pts, pts, 'euclidean')
diameter = np.max(dists)
return diameter
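# Illustrative usage sketch (added): the two diameter functions should agree.
# The points below are four corners of a unit cube, whose diameter is sqrt(3).
def _example_pts_diameter():
  pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
  return calc_pts_diameter(pts), calc_pts_diameter2(pts)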
def overlapping_sphere_projections(radius, p1, p2):
"""Checks if projections of two spheres overlap (approximated).
:param radius: Radius of the two spheres.
:param p1: [X1, Y1, Z1] center of the first sphere.
:param p2: [X2, Y2, Z2] center of the second sphere.
:return: True if the projections of the two spheres overlap.
"""
if p1[2] == 0 or p2[2] == 0:
return False
# 2D projections of centers of the spheres.
proj1 = (p1 / p1[2])[:2]
proj2 = (p2 / p2[2])[:2]
# Distance between the center projections.
proj_dist = np.linalg.norm(proj1 - proj2)
# The max. distance of the center projections at which the sphere projections,
# i.e. sphere silhouettes, still overlap (approximated).
proj_dist_thresh = radius * (1.0 / p1[2] + 1.0 / p2[2])
return proj_dist < proj_dist_thresh
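# Worked example (added): two spheres of radius 100 centered at Z=1000 with
# centers 150 apart project 0.15 apart in normalized image coordinates, while
# the overlap threshold is 100 * (1/1000 + 1/1000) = 0.2, so the function
# above would report an overlap.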
def get_error_signature(error_type, n_top, **kwargs):
"""Generates a signature for the specified settings of pose error calculation.
:param error_type: Type of error.
:param n_top: Top N pose estimates (with the highest score) to be evaluated
for each object class in each image.
:return: Generated signature.
"""
error_sign = 'error=' + error_type + '_ntop=' + str(n_top)
if error_type == 'vsd':
if kwargs['vsd_tau'] == float('inf'):
vsd_tau_str = 'inf'
else:
vsd_tau_str = '{:.3f}'.format(kwargs['vsd_tau'])
error_sign += '_delta={:.3f}_tau={}'.format(
kwargs['vsd_delta'], vsd_tau_str)
return error_sign
def get_score_signature(correct_th, visib_gt_min):
"""Generates a signature for a performance score.
:param visib_gt_min: Minimum visible surface fraction of a valid GT pose.
:return: Generated signature.
"""
eval_sign = 'th=' + '-'.join(['{:.3f}'.format(t) for t in correct_th])
eval_sign += '_min-visib={:.3f}'.format(visib_gt_min)
return eval_sign
def run_meshlab_script(meshlab_server_path, meshlab_script_path, model_in_path,
model_out_path, attrs_to_save):
"""Runs a MeshLab script on a 3D model.
meshlabserver depends on X server. To remove this dependence (on linux), run:
1) Xvfb :100 &
2) export DISPLAY=:100.0
3) meshlabserver <my_options>
:param meshlab_server_path: Path to meshlabserver.exe.
:param meshlab_script_path: Path to an MLX MeshLab script.
:param model_in_path: Path to the input 3D model saved in the PLY format.
:param model_out_path: Path to the output 3D model saved in the PLY format.
:param attrs_to_save: Attributes to save:
- vc -> vertex colors
- vf -> vertex flags
- vq -> vertex quality
- vn -> vertex normals
- vt -> vertex texture coords
- fc -> face colors
- ff -> face flags
- fq -> face quality
- fn -> face normals
- wc -> wedge colors
- wn -> wedge normals
- wt -> wedge texture coords
"""
meshlabserver_cmd = [meshlab_server_path, '-s', meshlab_script_path, '-i',
model_in_path, '-o', model_out_path]
if len(attrs_to_save):
meshlabserver_cmd += ['-m'] + attrs_to_save
log(' '.join(meshlabserver_cmd))
if subprocess.call(meshlabserver_cmd) != 0:
    exit(-1)
# /ChemDataExtractor-IDE-1.3.2.tar.gz/ChemDataExtractor-IDE-1.3.2/chemdataextractor/cli/dict.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import click
from ..nlp.lexicon import ChemLexicon
from ..nlp.tokenize import ChemWordTokenizer
from ..nlp.tag import DictionaryTagger
from ..nlp.cem import CsDictCemTagger, CiDictCemTagger, STOPLIST, STOP_SUB, STOP_TOKENS
try:
from html import unescape
except ImportError:
from six.moves.html_parser import HTMLParser
unescape = HTMLParser().unescape
NG_RE = re.compile(r'([\[\(](\d\d?CI|USAN|r?INN|BAN|JAN|USP)(\d\d?CI|USAN|r?INN|BAN|JAN|USP|[:\-,]|spanish|latin)*[\)\]])+$', re.I | re.U)
START_RE = re.compile(r'^(anhydrous|elemental|amorphous|conjugated|colloidal|activated) ', re.I | re.U)
END_RE = re.compile(r'[\[\(]((crude )?product|substance|solution|anhydrous|derivative|analog|salt|modified|discontinued|injectable|anesthetic|pharmaceutical|natural|nonionic|european|ester|dye|tablets?|mineral|VAN|hydrolyzed)[\)\]]$', re.I | re.U)
RATIO_RE = re.compile(r'[\[\(]((\d\d?)(:(\d\d?|\?|\d\.\d))+)[\)\]]$', re.I | re.U)
NUM_END_RE = re.compile(r' (\d+)$', re.U)
ALPHANUM_END_RE = re.compile(r' ([A-Za-z]\d*)$', re.U)
BRACKET_RE = re.compile(r'^\(([^\(\)]+)\)$', re.I | re.U)
GREEK_WORDS = {
'Alpha': 'Α', # \u0391
'Beta': 'Β', # \u0392
'Gamma': 'Γ', # \u0393
'Delta': 'Δ', # \u0394
'Epsilon': 'Ε', # \u0395
'Zeta': 'Ζ', # \u0396
'Eta': 'Η', # \u0397
'Theta': 'Θ', # \u0398
'Iota': 'Ι', # \u0399
'Kappa': 'Κ', # \u039a
'Lambda': 'Λ', # \u039b
'Mu': 'Μ', # \u039c
'Nu': 'Ν', # \u039d
'Xi': 'Ξ', # \u039e
'Omicron': 'Ο', # \u039f
'Pi': 'Π', # \u03a0
'Rho': 'Ρ', # \u03a1
'Sigma': 'Σ', # \u03a3
'Tau': 'Τ', # \u03a4
'Upsilon': 'Υ', # \u03a5
'Phi': 'Φ', # \u03a6
'Chi': 'Χ', # \u03a7
'Psi': 'Ψ', # \u03a8
'Omega': 'Ω', # \u03a9
'alpha': 'α', # \u03b1
'beta': 'β', # \u03b2
'gamma': 'γ', # \u03b3
'delta': 'δ', # \u03b4
'epsilon': 'ε', # \u03b5
'zeta': 'ζ', # \u03b6
'eta': 'η', # \u03b7
'theta': 'θ', # \u03b8
'iota': 'ι', # \u03b9
'kappa': 'κ', # \u03ba
'lambda': 'λ', # \u03bb
'mu': 'μ', # \u03bc
'nu': 'ν', # \u03bd
'xi': 'ξ', # \u03be
'omicron': 'ο', # \u03bf
'pi': 'π', # \u03c0
'rho': 'ρ', # \u03c1
'sigma': 'σ', # \u03c3
'tau': 'τ', # \u03c4
'upsilon': 'υ', # \u03c5
'phi': 'φ', # \u03c6
'chi': 'χ', # \u03c7
'psi': 'ψ', # \u03c8
'omega': 'ω', # \u03c9
}
UNAMBIGUOUS_GREEK_WORDS = {
'Alpha': 'Α', # \u0391
'Beta': 'Β', # \u0392
'Gamma': 'Γ', # \u0393
'Delta': 'Δ', # \u0394
'Epsilon': 'Ε', # \u0395
'Kappa': 'Κ', # \u039a
'Lambda': 'Λ', # \u039b
'Sigma': 'Σ', # \u03a3
'Upsilon': 'Υ', # \u03a5
'Omega': 'Ω', # \u03a9
'alpha': 'α', # \u03b1
'beta': 'β', # \u03b2
'gamma': 'γ', # \u03b3
'delta': 'δ', # \u03b4
'epsilon': 'ε', # \u03b5
'kappa': 'κ', # \u03ba
'lambda': 'λ', # \u03bb
'sigma': 'σ', # \u03c3
'upsilon': 'υ', # \u03c5
'omega': 'ω', # \u03c9
}
DOT_GREEK_RE = re.compile(r'\.(%s)\.' % '|'.join(re.escape(s) for s in GREEK_WORDS.keys()), re.U)
GREEK_RE = re.compile(r'([\daA\W]|^)(%s)([\d\W]|$)' % '|'.join(re.escape(s) for s in GREEK_WORDS.keys()), re.U)
UNAMBIGUOUS_GREEK_RE = re.compile(r'(%s)' % '|'.join(re.escape(s) for s in UNAMBIGUOUS_GREEK_WORDS.keys()), re.U)
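# Added note: DOT_GREEK_RE matches a Greek word wrapped in dots (e.g. '.beta.');
# GREEK_RE matches a Greek word only when preceded by a digit, 'a'/'A', a
# non-word character or the start of the string and followed by a digit, a
# non-word character or the end; UNAMBIGUOUS_GREEK_RE matches the subset of
# Greek words that the module treats as safe to replace without context.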
@click.group(name='dict')
@click.pass_context
def dict_cli(ctx):
"""Chemical dictionary commands."""
pass
def _process_name(name):
"""Fix issues with Jochem names."""
# Unescape HTML entities
name = unescape(name)
# Remove bracketed stuff on the end
name = NG_RE.sub('', name).strip() # Nomenclature groups
name = END_RE.sub('', name).strip(', ') # Words
name = RATIO_RE.sub('', name).strip(', ') # Ratios
# Remove stuff off start
name = START_RE.sub('', name).strip()
# Remove balanced start and end brackets if none in between
    name = BRACKET_RE.sub(r'\g<1>', name)
# Un-invert CAS style names
comps = name.split(', ')
if len(comps) == 2:
if comps[1].endswith('-'):
name = comps[0]
name = '%s%s' % (comps[1], name)
elif len(comps) > 2:
name = comps[0]
for i in range(1, len(comps)):
if comps[i].endswith('-'):
name = '%s%s' % (comps[i], name)
else:
name = '%s %s' % (name, comps[i])
return name
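# Illustrative usage sketch (added; not part of the original CLI). It shows how
# _process_name strips nomenclature annotations and un-inverts CAS-style names;
# the input below is hypothetical.
def _example_process_name():
    # '[USAN]' is stripped by NG_RE and the trailing '1-chloro-' component is
    # moved to the front, giving something like '1-chloro-Benzene'.
    return _process_name('Benzene, 1-chloro- [USAN]')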
def _filter_name(name):
"""Filter words when adding to Dictionary. Return True if name should be added."""
# Remove if length 3 or less
if len(name) <= 3:
return False
# Remove if starts with IL-
if name.startswith('IL-'):
return False
lowname = name.lower()
# Remove if contains certain sequences
if any(c in lowname for c in STOP_SUB):
return False
# Remove if (case-insensitive) exact match to stoplist
if lowname in STOPLIST:
return False
comps = re.split('[ -]', lowname)
# Remove if just single character + digits separated by spaces or hyphens (or the word compound)
if all(c.isdigit() or len(c) == 1 or c == 'compound' for c in comps):
return False
    # Remove if exactly two components: 3 or fewer letters then 3 or fewer digits (e.g. 'ab 12')
if len(comps) == 2 and len(comps[0]) <= 3 and comps[0].isalpha() and len(comps[1]) <= 3 and comps[1].isdigit():
return False
    # Remove if just greek characters and numbers
if re.match('^[Α-Ωα-ω0-9]+$', name):
return False
# Filter registry numbers? No real size benefit in DAWG.
# if REG_RE.search(name):
# keep = False
# Handle this at the token level
# if name.endswith(' derivative') or name.endswith(' analog') or name.endswith(' solution'):
# keep = False
# Filter this after matching and expanding boundaries
# if name.startswith('-') or name.endswith('-'):
# keep = False
# Filter this after matching and expanding boundaries
# if not bracket_level(name) == 0:
# print(name)
return True
def _filter_tokens(tokens):
    """Return True if the tokenized name contains no stop tokens."""
    keep = True
for token in tokens:
if token in STOP_TOKENS:
keep = False
return keep
def _get_variants(name):
"""Return variants of chemical name."""
names = [name]
oldname = name
# Map greek words to unicode characters
if DOT_GREEK_RE.search(name):
wordname = name
while True:
m = DOT_GREEK_RE.search(wordname)
if m:
wordname = wordname[:m.start(1)-1] + m.group(1) + wordname[m.end(1)+1:]
else:
break
symbolname = name
while True:
m = DOT_GREEK_RE.search(symbolname)
if m:
symbolname = symbolname[:m.start(1)-1] + GREEK_WORDS[m.group(1)] + symbolname[m.end(1)+1:]
else:
break
names = [wordname, symbolname]
else:
while True:
m = GREEK_RE.search(name)
if m:
name = name[:m.start(2)] + GREEK_WORDS[m.group(2)] + name[m.end(2):]
else:
break
while True:
m = UNAMBIGUOUS_GREEK_RE.search(name)
if m:
name = name[:m.start(1)] + GREEK_WORDS[m.group(1)] + name[m.end(1):]
else:
break
if not name == oldname:
names.append(name)
newnames = []
for name in names:
# If last word \d+, add variants with hyphen and no space preceding
if NUM_END_RE.search(name):
            newnames.append(NUM_END_RE.sub(r'-\g<1>', name))
            newnames.append(NUM_END_RE.sub(r'\g<1>', name))
        # If last word [A-Za-z]\d* add variants with hyphen preceding.
        if ALPHANUM_END_RE.search(name):
            newnames.append(ALPHANUM_END_RE.sub(r'-\g<1>', name))
names.extend(newnames)
return names
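# Illustrative usage sketch (added; not part of the original CLI): Greek words
# are mapped to their unicode symbols, so a hypothetical input such as
# 'beta-carotene' should yield both the spelled-out and the symbol form,
# ['beta-carotene', 'β-carotene'].
def _example_get_variants():
    return _get_variants('beta-carotene')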
tokenizer = ChemWordTokenizer(split_last_stop=False)
def _make_tokens(name):
    """Return a list of token lists: the cleaned and filtered variants of a chemical name."""
tokenized_names = []
name = _process_name(name)
if _filter_name(name):
for name in _get_variants(name):
if _filter_name(name):
tokens = tokenizer.tokenize(name)
if _filter_tokens(tokens):
tokenized_names.append(tokens)
#print(tokenized_names)
return tokenized_names
@dict_cli.command()
@click.argument('jochem', type=click.File('r', encoding='utf8'))
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Dictionary file.', default=click.get_text_stream('stdout'))
@click.option('--csoutput', '-c', type=click.File('w', encoding='utf8'), help='Case-sensitive dictionary file.', default=click.get_text_stream('stdout'))
@click.pass_obj
def prepare_jochem(ctx, jochem, output, csoutput):
"""Process and filter jochem file to produce list of names for dictionary."""
click.echo('chemdataextractor.dict.prepare_jochem')
for i, line in enumerate(jochem):
print('JC%s' % i)
if line.startswith('TM '):
if line.endswith(' @match=ci\n'):
for tokens in _make_tokens(line[3:-11]):
output.write(' '.join(tokens))
output.write('\n')
else:
for tokens in _make_tokens(line[3:-1]):
csoutput.write(' '.join(tokens))
csoutput.write('\n')
@dict_cli.command()
@click.argument('include', type=click.File('r', encoding='utf8'))
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.pass_obj
def prepare_include(ctx, include, output):
"""Process and filter include file to produce list of names for dictionary."""
click.echo('chemdataextractor.dict.prepare_include')
for i, line in enumerate(include):
print('IN%s' % i)
for tokens in _make_tokens(line.strip()):
output.write(u' '.join(tokens))
output.write(u'\n')
@dict_cli.command()
@click.argument('inputs', type=click.File('r', encoding='utf8'), nargs=-1)
@click.option('--output', help='Output model file.', required=True)
@click.option('--cs/--no-cs', help='Whether case-sensitive.', default=False)
@click.pass_obj
def build(ctx, inputs, output, cs):
"""Build chemical name dictionary."""
click.echo('chemdataextractor.dict.build')
dt = DictionaryTagger(lexicon=ChemLexicon(), case_sensitive=cs)
names = []
for input in inputs:
for line in input:
tokens = line.split()
names.append(tokens)
dt.build(words=names)
dt.save(output)
@dict_cli.command()
@click.argument('model', required=True)
@click.option('--cs/--no-cs', default=False)
@click.option('--corpus', '-c', type=click.File('r', encoding='utf8'), required=True)
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.pass_obj
def tag(ctx, model, cs, corpus, output):
"""Tag chemical entities and write CHEMDNER annotations predictions file."""
click.echo('chemdataextractor.dict.tag')
tagger = CsDictCemTagger(model=model) if cs else CiDictCemTagger(model=model)
for line in corpus:
sentence = []
goldsentence = []
for t in line.split():
token, tag = t.rsplit(u'/', 1)
goldsentence.append((token, tag))
sentence.append(token)
if sentence:
tokentags = tagger.tag(sentence)
for i, tokentag in enumerate(tokentags):
goldtokentag = goldsentence[i]
if goldtokentag[1] not in {u'B-CM', u'I-CM'} and tokentag[1] in {u'B-CM', u'I-CM'}:
print(line)
print(tokentag[0])
output.write(u' '.join(u'/'.join(tokentag) for tokentag in tagger.tag(sentence)))
output.write(u'\n')
else:
            output.write(u'\n')
# /aimm_simulator-2.0.3.tar.gz/aimm_simulator-2.0.3/src/AIMM_simulator/AIMM_simulator.py
__version__='2.0.2'
'''The AIMM simulator emulates a cellular radio system roughly following 5G concepts and channel models.'''
from os.path import basename
from sys import stderr,stdout,exit,version as pyversion
from math import hypot,atan2,pi as math_pi
from time import time,sleep
from collections import deque
try:
import numpy as np
except:
print('numpy not found: please do "pip install numpy"',file=stderr)
exit(1)
try:
import simpy
except:
print('simpy not found: please do "pip install simpy"',file=stderr)
exit(1)
from .NR_5G_standard_functions import SINR_to_CQI,CQI_to_64QAM_efficiency
from .UMa_pathloss_model import UMa_pathloss
def np_array_to_str(x):
' Formats a 1-axis np.array as a tab-separated string '
return np.array2string(x,separator='\t').replace('[','').replace(']','')
def _nearest_weighted_point(x,pts,w=1.0):
'''
Internal use only.
Given a point x of shape (dim,), where dim is typically 2 or 3,
an array of points pts of shape (npts,dim),
and a vector of weights w of the same length as pts,
  return a tuple (minimum weighted distance, index) for the point minimizing
  w[i]*d[i], where d[i] is the distance from x to point i.
For the application to cellular radio systems, we let pts be the
cell locations, and then if we set
w[i]=p[i]**(-1/alpha),
where p[i] is the transmit power of cell i, and alpha>=2 is the pathloss
exponent, then this algorithm will give us the index of the cell providing
largest received power at the point x.
'''
weighted_distances=w*np.linalg.norm(pts-x,axis=1)
imin=np.argmin(weighted_distances)
if 0: # dbg
print('x=',x)
print('pts=',pts)
print('weighted_distances=',weighted_distances)
return weighted_distances[imin],imin
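# Illustrative usage sketch (added; not part of the original module): choosing
# the cell with the largest received power under a simple power-law pathloss
# model. The cell positions, powers and UE position below are made up.
def _example_nearest_weighted_point():
  cells=np.array([[0.0,0.0],[1000.0,0.0]])
  powers=np.array([10.0,40.0]) # transmit powers (linear units)
  alpha=3.5 # pathloss exponent
  x=np.array([450.0,0.0]) # UE position
  # With w[i]=p[i]**(-1/alpha), the minimizer of w[i]*d[i] is the cell giving
  # the largest received power; here the stronger but farther cell wins.
  dist,i=_nearest_weighted_point(x,cells,w=powers**(-1.0/alpha))
  return i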
def to_dB(x):
return 10.0*np.log10(x)
def from_dB(x):
return np.power(10.0,x/10.0)
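# Worked example (added): to_dB(100.0) is 20.0 and from_dB(20.0) is 100.0;
# with a 1 mW reference, from_dB(30.0)=1000.0 mW corresponds to 30 dBm = 1 W.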
class Cell:
'''
  Class representing a single Cell (gNB). As instances are created, they are automatically given indices starting from 0. This index is available as the data member ``cell.i``. The variable ``Cell.i`` is always the current number of cells.
Parameters
----------
sim : Sim
Simulator instance which will manage this Cell.
interval : float
Time interval between Cell updates.
bw_MHz : float
Channel bandwidth in MHz.
n_subbands : int
Number of subbands.
xyz : [float, float, float]
Position of cell in metres, and antenna height.
h_BS : float
Antenna height in metres; only used if xyz is not provided.
power_dBm : float
Transmit power in dBm.
MIMO_gain_dB : float
Effective power gain from MIMO in dB. This is no more than a crude way to
estimate the performance gain from using MIMO. A typical value might be 3dB for 2x2 MIMO.
pattern : array or function
If an array, then a 360-element array giving the antenna gain in dB in 1-degree increments (0=east, then counterclockwise). Otherwise, a function giving the antenna gain in dB in the direction theta=(180/pi)*atan2(y,x).
f_callback :
A function with signature ``f_callback(self,kwargs)``, which will be called
at each iteration of the main loop.
verbosity : int
Level of debugging output (0=none).
'''
i=0
def __init__(s,
sim,
interval=10.0,
bw_MHz=10.0,
n_subbands=1,
xyz=None,
h_BS=20.0,
power_dBm=30.0,
MIMO_gain_dB=0.0,
pattern=None,
f_callback=None,
f_callback_kwargs={},
verbosity=0):
# default scene 1000m x 1000m, but keep cells near the centre
s.i=Cell.i; Cell.i+=1
s.sim=sim
s.interval=interval
s.bw_MHz=bw_MHz
s.n_subbands=n_subbands
s.subband_mask=np.ones(n_subbands) # dtype is float, to allow soft masking
s.rbs=simpy.Resource(s.sim.env,capacity=50)
s.power_dBm=power_dBm
s.pattern=pattern
s.f_callback=f_callback
s.f_callback_kwargs=f_callback_kwargs
s.MIMO_gain_dB=MIMO_gain_dB
s.attached=set()
s.reports={'cqi': {}, 'rsrp': {}, 'throughput_Mbps': {}}
# rsrp_history[i] will be the last 10 reports of rsrp received
# at this cell from UE[i] (no timestamps, just for getting trend)
s.rsrp_history={}
if xyz is not None:
s.xyz=np.array(xyz)
else: # random cell locations
s.xyz=np.empty(3)
s.xyz[:2]=100.0+900.0*s.sim.rng.random(2)
s.xyz[2]=h_BS
if verbosity>1: print(f'Cell[{s.i}] is at',s.xyz,file=stderr)
s.verbosity=verbosity
# every time we make a new Cell, we have to check whether
# we have a hetnet or not...
s.sim._set_hetnet()
#s.sim.env.process(s.loop()) # start Cell main loop
def set_f_callback(s,f_callback,**kwargs):
' Add a callback function to the main loop of this Cell '
s.f_callback=f_callback
s.f_callback_kwargs=kwargs
def loop(s):
'''
Main loop of Cell class. Default: do nothing.
'''
while True:
if s.f_callback is not None: s.f_callback(s,**s.f_callback_kwargs)
yield s.sim.env.timeout(s.interval)
def __repr__(s):
return f'Cell(index={s.i},xyz={s.xyz})'
def get_nattached(s):
'''
Return the current number of UEs attached to this Cell.
'''
return len(s.attached)
def get_xyz(s):
'''
Return the current position of this Cell.
'''
return s.xyz
def set_xyz(s,xyz):
'''
Set a new position for this Cell.
'''
s.xyz=np.array(xyz)
s.sim.cell_locations[s.i]=s.xyz
print(f'Cell[{s.i}] is now at {s.xyz}',file=stderr)
def get_power_dBm(s):
'''
Return the transmit power in dBm currently used by this cell.
'''
return s.power_dBm
def set_power_dBm(s,p):
'''
Set the transmit power in dBm to be used by this cell.
'''
s.power_dBm=p
s.sim._set_hetnet()
def boost_power_dBm(s,p,mn=None,mx=None):
'''
Increase or decrease (if p<0) the transmit power in dBm to be used by this cell.
If mn is not ``None``, then the power will not be set if it falls below mn.
If mx is not ``None``, then the power will not be set if it exceeds mx.
Return the new power.
'''
if p<0.0:
if mn is not None and s.power_dBm+p>=mn:
s.power_dBm+=p
return s.power_dBm
if p>0.0:
if mx is not None and s.power_dBm+p<=mx:
s.power_dBm+=p
return s.power_dBm
s.power_dBm+=p
return s.power_dBm
def get_rsrp(s,i):
'''
Return last RSRP reported to this cell by UE[i].
'''
if i in s.reports['rsrp']:
return s.reports['rsrp'][i][1]
return -np.inf # no reports
def get_rsrp_history(s,i):
'''
    Return an array of the last 10 RSRP values reported to this cell by UE[i].
'''
if i in s.rsrp_history:
return np.array(s.rsrp_history[i])
return -np.inf*np.ones(10) # no recorded history
def set_MIMO_gain(s,MIMO_gain_dB):
'''
Set the MIMO gain in dB to be used by this cell.
'''
s.MIMO_gain_dB=MIMO_gain_dB
def get_UE_throughput(s,ue_i): # FIXME do we want an array over subbands?
'''
Return the total current throughput in Mb/s of UE[i] in the simulation.
The value -np.inf indicates that there is no current report.
'''
reports=s.reports['throughput_Mbps']
if ue_i in reports: return reports[ue_i][1]
return -np.inf # special value to indicate no report
def get_UE_CQI(s,ue_i):
'''
Return the current CQI of UE[i] in the simulation, as an array across all subbands. An array of NaNs is returned if there is no report.
'''
reports=s.reports['cqi']
return reports[ue_i][1] if ue_i in reports else np.nan*np.ones(s.n_subbands)
def get_RSRP_reports(s):
'''
Return the current RSRP reports to this cell, as a list of tuples (ue.i, rsrp).
'''
reports=s.reports['rsrp']
return [(ue.i,reports[ue.i][1]) if ue.i in reports else (ue.i,-np.inf) for ue in s.sim.UEs]
def get_RSRP_reports_dict(s):
'''
Return the current RSRP reports to this cell, as a dictionary ue.i: rsrp.
'''
reports=s.reports['rsrp']
return dict((ue.i,reports[ue.i][1]) if ue.i in reports else (ue.i,-np.inf) for ue in s.sim.UEs)
def get_average_throughput(s):
'''
Return the average throughput over all UEs attached to this cell.
'''
reports,k=s.reports['throughput_Mbps'],0
ave=np.zeros(s.n_subbands)
for ue_i in reports:
k+=1
#ave+=(reports[ue_i][1][0]-ave)/k
ave+=(np.sum(reports[ue_i][1])-ave)/k
return np.sum(ave)
def set_pattern(s,pattern):
'''
Set the antenna radiation pattern.
'''
s.pattern=pattern
def set_subband_mask(s,mask):
'''
Set the subband mask to ``mask``.
'''
#print('set_subband_mask',s.subband_mask.shape,len(mask),file=stderr)
assert s.subband_mask.shape[0]==len(mask)
s.subband_mask=np.array(mask)
def get_subband_mask(s):
'''
Get the current subband mask.
'''
return s.subband_mask
def monitor_rbs(s):
while True:
if s.rbs.queue:
if s.verbosity>0: print(f'rbs at {s.sim.env.now:.2f} ={s.rbs.count}')
yield s.sim.env.timeout(5.0)
# END class Cell
class UE:
'''
  Represents a single UE. As instances are created, they are automatically given indices starting from 0. This index is available as the data member ``ue.i``. The static (class-level) variable ``UE.i`` is always the current number of UEs.
Parameters
----------
sim : Sim
The Sim instance which will manage this UE.
xyz : [float, float, float]
Position of UE in metres, and antenna height.
h_UT : float
Antenna height of user terminal in metres; only used if xyz is not provided.
reporting_interval : float
Time interval between UE reports being sent to the serving cell.
f_callback :
A function with signature ``f_callback(self,kwargs)``, which will be called at each iteration of the main loop.
f_callback_kwargs :
kwargs for previous function.
pathloss_model
An instance of a pathloss model. This must be a callable object which
takes two arguments, each a 3-vector. The first represent the transmitter
location, and the second the receiver location. It must return the
pathloss in dB along this signal path.
If set to ``None`` (the default), a standard urban macrocell model
is used.
See further ``NR_5G_standard_functions_00.py``.
'''
i=0
def __init__(s,sim,xyz=None,reporting_interval=1.0,pathloss_model=None,h_UT=2.0,f_callback=None,f_callback_kwargs={},verbosity=0):
s.sim=sim
s.i=UE.i; UE.i+=1
s.serving_cell=None
s.f_callback=f_callback
s.f_callback_kwargs=f_callback_kwargs
# next will be a record of last 10 serving cell ids,
# with time of last attachment.
# 0=>current, 1=>previous, etc. -1 => not valid)
# This is for use in handover algorithms
s.serving_cell_ids=deque([(-1,None)]*10,maxlen=10)
s.reporting_interval=reporting_interval
if xyz is not None:
s.xyz=np.array(xyz,dtype=float)
else:
s.xyz=250.0+500.0*s.sim.rng.random(3)
s.xyz[2]=h_UT
if verbosity>1: print(f'UE[{s.i}] is at',s.xyz,file=stderr)
# We assume here that the UMa_pathloss model needs to be instantiated,
# but other user-provided models are already instantiated,
# and provide callable objects...
if pathloss_model is None:
s.pathloss=UMa_pathloss(fc_GHz=s.sim.params['fc_GHz'],h_UT=s.sim.params['h_UT'],h_BS=s.sim.params['h_BS'])
if verbosity>1: print(f'Using 5G standard urban macrocell pathloss model.',file=stderr)
else:
s.pathloss=pathloss_model
if s.pathloss.__doc__ is not None:
if verbosity>1: print(f'Using user-specified pathloss model "{s.pathloss.__doc__}".',file=stderr)
else:
print(f'Using user-specified pathloss model.',file=stderr)
s.verbosity=verbosity
s.noise_power_dBm=-140.0
s.cqi=None
s.sinr_dB=None
# Keith Briggs 2022-10-12 loops now started in Sim.__init__
#s.sim.env.process(s.run_subband_cqi_report())
#s.sim.env.process(s.loop()) # this does reports to all cells
def __repr__(s):
return f'UE(index={s.i},xyz={s.xyz},serving_cell={s.serving_cell})'
def set_f_callback(s,f_callback,**kwargs):
' Add a callback function to the main loop of this UE '
s.f_callback=f_callback
s.f_callback_kwargs=kwargs
def loop(s):
' Main loop of UE class '
if s.verbosity>1:
print(f'Main loop of UE[{s.i}] started')
stdout.flush()
while True:
if s.f_callback is not None: s.f_callback(s,**s.f_callback_kwargs)
s.send_rsrp_reports()
s.send_subband_cqi_report() # FIXME merge these two reports
#print(f'dbg: Main loop of UE class started'); exit()
yield s.sim.env.timeout(s.reporting_interval)
def get_serving_cell(s):
'''
Return the current serving Cell object (not index) for this UE instance.
'''
ss=s.serving_cell
if ss is None: return None
return s.serving_cell
def get_serving_cell_i(s):
'''
Return the current serving Cell index for this UE instance.
'''
ss=s.serving_cell
if ss is None: return None
return s.serving_cell.i
def get_xyz(s):
'''
Return the current position of this UE.
'''
return s.xyz
def set_xyz(s,xyz,verbose=False):
'''
Set a new position for this UE.
'''
s.xyz=np.array(xyz)
if verbose: print(f'UE[{s.i}] is now at {s.xyz}',file=stderr)
def attach(s,cell,quiet=True):
'''
Attach this UE to a specific Cell instance.
'''
cell.attached.add(s.i)
s.serving_cell=cell
s.serving_cell_ids.appendleft((cell.i,s.sim.env.now,))
if not quiet and s.verbosity>0:
print(f'UE[{s.i:2}] is attached to cell[{cell.i}]',file=stderr)
def detach(s,quiet=True):
'''
Detach this UE from its serving cell.
'''
if s.serving_cell is None: # Keith Briggs 2022-08-08 added None test
return
s.serving_cell.attached.remove(s.i)
# clear saved reports from this UE...
reports=s.serving_cell.reports
for x in reports:
if s.i in reports[x]: del reports[x][s.i]
if not quiet and s.verbosity>0:
print(f'UE[{s.i}] detached from cell[{s.serving_cell.i}]',file=stderr)
s.serving_cell=None
def attach_to_strongest_cell_simple_pathloss_model(s):
'''
Attach to the cell delivering the strongest signal
at the current UE position. Intended for initial attachment only.
Uses only a simple power-law pathloss model. For proper handover
behaviour, use the MME module.
'''
celli=s.sim.get_strongest_cell_simple_pathloss_model(s.xyz)
s.serving_cell=s.sim.cells[celli]
s.serving_cell.attached.add(s.i)
if s.verbosity>0:
print(f'UE[{s.i:2}] ⟵⟶ cell[{celli}]',file=stderr)
def attach_to_nearest_cell(s):
'''
Attach this UE to the geographically nearest Cell instance.
Intended for initial attachment only.
'''
dmin,celli=_nearest_weighted_point(s.xyz[:2],s.sim.cell_locations[:,:2])
if 0: # dbg
print(f'_nearest_weighted_point: celli={celli} dmin={dmin:.2f}')
for cell in s.sim.cells:
d=np.linalg.norm(cell.xyz-s.xyz)
print(f'Cell[{cell.i}] is at distance {d:.2f}')
s.serving_cell=s.sim.cells[celli]
s.serving_cell.attached.add(s.i)
if s.verbosity>0:
print(f'UE[{s.i:2}] ⟵⟶ cell[{celli}]',file=stderr)
def get_CQI(s):
'''
Return the current CQI of this UE, as an array across all subbands.
'''
return s.cqi
def get_SINR_dB(s):
'''
Return the current SINR of this UE, as an array across all subbands.
The return value ``None`` indicates that there is no current report.
'''
return s.sinr_dB
def send_rsrp_reports(s,threshold=-120.0):
'''
Send RSRP reports in dBm to all cells for which it is over the threshold.
Subbands not handled.
'''
# antenna pattern computation added Keith Briggs 2021-11-24.
for cell in s.sim.cells:
pl_dB=s.pathloss(cell.xyz,s.xyz) # 2021-10-29
antenna_gain_dB=0.0
if cell.pattern is not None:
vector=s.xyz-cell.xyz # vector pointing from cell to UE
angle_degrees=(180.0/math_pi)*atan2(vector[1],vector[0])
antenna_gain_dB=cell.pattern(angle_degrees) if callable(cell.pattern) \
else cell.pattern[int(angle_degrees)%360]
rsrp_dBm=cell.power_dBm+antenna_gain_dB+cell.MIMO_gain_dB-pl_dB
rsrp=from_dB(rsrp_dBm)
if rsrp_dBm>threshold:
cell.reports['rsrp'][s.i]=(s.sim.env.now,rsrp_dBm)
if s.i not in cell.rsrp_history:
cell.rsrp_history[s.i]=deque([-np.inf,]*10,maxlen=10)
cell.rsrp_history[s.i].appendleft(rsrp_dBm)
def send_subband_cqi_report(s):
'''
For this UE, send an array of CQI reports, one for each subband; and a total throughput report, to the serving cell.
What is sent is a 2-tuple (current time, array of reports).
For RSRP reports, use the function ``send_rsrp_reports``.
    Also saves the per-subband CQIs in s.cqi, and returns the throughput value.
'''
if s.serving_cell is None: return 0.0 # 2022-08-08 detached
interference=from_dB(s.noise_power_dBm)*np.ones(s.serving_cell.n_subbands)
for cell in s.sim.cells:
pl_dB=s.pathloss(cell.xyz,s.xyz)
antenna_gain_dB=0.0
if cell.pattern is not None:
vector=s.xyz-cell.xyz # vector pointing from cell to UE
angle_degrees=(180.0/math_pi)*atan2(vector[1],vector[0])
antenna_gain_dB=cell.pattern(angle_degrees) if callable(cell.pattern) \
else cell.pattern[int(angle_degrees)%360]
if cell.i==s.serving_cell.i: # wanted signal
rsrp_dBm=cell.MIMO_gain_dB+antenna_gain_dB+cell.power_dBm-pl_dB
else: # unwanted interference
received_interference_power=antenna_gain_dB+cell.power_dBm-pl_dB
interference+=from_dB(received_interference_power)*cell.subband_mask
rsrp=from_dB(rsrp_dBm)
s.sinr_dB=to_dB(rsrp/interference) # scalar/array
s.cqi=cqi=SINR_to_CQI(s.sinr_dB)
spectral_efficiency=np.array([CQI_to_64QAM_efficiency(cqi_i) for cqi_i in cqi])
now=float(s.sim.env.now)
# per-UE throughput...
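    # Added note: the dot product below sums spectral efficiency over the
    # subbands enabled by the serving cell's mask; dividing by n_subbands
    # gives the per-subband bandwidth share, and dividing by the number of
    # attached UEs assumes resources are shared equally among them.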
    throughput_Mbps=s.serving_cell.bw_MHz*(spectral_efficiency@s.serving_cell.subband_mask)/s.serving_cell.n_subbands/len(s.serving_cell.attached)
s.serving_cell.reports['cqi'][s.i]=(now,cqi)
s.serving_cell.reports['throughput_Mbps'][s.i]=(now,throughput_Mbps,)
return throughput_Mbps
def run_subband_cqi_report(s): # FIXME merge this with rsrp reporting
while True:
#if s.serving_cell is not None: # UE must be attached 2022-08-08
s.send_subband_cqi_report()
yield s.sim.env.timeout(s.reporting_interval)
# END class UE
class Sim:
'''
Class representing the complete simulation.
Parameters
----------
params : dict
A dictionary of additional global parameters which need to be accessible to downstream functions. In the instance, these parameters will be available as ``sim.params``. If ``params['profile']`` is set to a non-empty string, then a code profile will be performed and the results saved to the filename given by the string. There will be some execution time overhead when profiling.
'''
def __init__(s,params={'fc_GHz':3.5,'h_UT':2.0,'h_BS':20.0},show_params=True,rng_seed=0):
s.__version__=__version__
s.params=params
# set default values for operating frequenct, user terminal height, and
# base station height...
if 'fc_GHz' not in params: params['fc_GHz']=3.5
if 'h_UT' not in params: params['h_UT']=2.0
if 'h_BS' not in params: params['h_BS']=20.0
s.env=simpy.Environment()
s.rng=np.random.default_rng(rng_seed)
s.loggers=[]
s.scenario=None
s.ric=None
s.mme=None
s.hetnet=None # unknown at this point; will be set to True or False
s.cells=[]
s.UEs=[]
s.events=[]
s.cell_locations=np.empty((0,3))
np.set_printoptions(precision=2,linewidth=200)
pyv=pyversion.replace('\n','') #[:pyversion.index('(default')]
print(f'python version={pyv}',file=stderr)
print(f'numpy version={np.__version__}',file=stderr)
print(f'simpy version={simpy.__version__}',file=stderr)
print(f'AIMM simulator version={s.__version__}',file=stderr)
if show_params:
print(f'Simulation parameters:',file=stderr)
for param in s.params:
print(f" {param}={s.params[param]}",file=stderr)
def _set_hetnet(s):
# internal function only - decide whether we have a hetnet
powers=set(cell.get_power_dBm() for cell in s.cells)
s.hetnet=len(powers)>1 # powers are not all equal
def wait(s,interval=1.0):
'''
Convenience function to avoid low-level reference to env.timeout().
``loop`` functions in each class must yield this.
'''
return s.env.timeout(interval)
def make_cell(s,**kwargs):
'''
Convenience function: make a new Cell instance and add it to the simulation; parameters as for the Cell class. Return the new Cell instance. It is assumed that Cells never move after being created (i.e. the initial xyz[1] stays the same throughout the simulation).
'''
s.cells.append(Cell(s,**kwargs))
xyz=s.cells[-1].get_xyz()
s.cell_locations=np.vstack([s.cell_locations,xyz])
return s.cells[-1]
def make_UE(s,**kwargs):
'''
Convenience function: make a new UE instance and add it to the simulation; parameters as for the UE class. Return the new UE instance.
'''
s.UEs.append(UE(s,**kwargs))
return s.UEs[-1]
def get_ncells(s):
'''
Return the current number of cells in the simulation.
'''
return len(s.cells)
def get_nues(s):
'''
Return the current number of UEs in the simulation.
'''
return len(s.UEs)
def get_UE_position(s,ue_i):
'''
Return the xyz position of UE[i] in the simulation.
'''
return s.UEs[ue_i].xyz
def get_average_throughput(s):
'''
Return the average throughput over all UEs attached to all cells.
'''
ave,k=0.0,0
for cell in s.cells:
k+=1
ave+=(cell.get_average_throughput()-ave)/k
return ave
def add_logger(s,logger):
'''
Add a logger to the simulation.
'''
assert isinstance(logger,Logger)
s.loggers.append(logger)
def add_loggers(s,loggers):
'''
Add a sequence of loggers to the simulation.
'''
for logger in loggers:
assert isinstance(logger,Logger)
s.loggers.append(logger)
def add_scenario(s,scenario):
'''
Add a Scenario instance to the simulation.
'''
assert isinstance(scenario,Scenario)
s.scenario=scenario
def add_ric(s,ric):
'''
Add a RIC instance to the simulation.
'''
assert isinstance(ric,RIC)
s.ric=ric
def add_MME(s,mme):
'''
Add an MME instance to the simulation.
'''
assert isinstance(mme,MME)
s.mme=mme
def add_event(s,event):
s.events.append(event)
def get_serving_cell(s,ue_i):
if ue_i<len(s.UEs): return s.UEs[ue_i].serving_cell
return None
def get_serving_cell_i(s,ue_i):
if ue_i<len(s.UEs): return s.UEs[ue_i].serving_cell.i
return None
def get_nearest_cell(s,xy):
'''
Return the index of the geographical nearest cell (in 2 dimensions)
to the point xy.
'''
return _nearest_weighted_point(xy[:2],s.cell_locations[:,:2],w=1.0)[1]
def get_strongest_cell_simple_pathloss_model(s,xyz,alpha=3.5):
'''
Return the index of the cell delivering the strongest signal
at the point xyz (in 3 dimensions), with pathloss exponent alpha.
Note: antenna pattern is not used, so this function is deprecated,
but is adequate for initial UE attachment.
'''
p=np.array([from_dB(cell.get_power_dBm()) for cell in s.cells])
return _nearest_weighted_point(xyz,s.cell_locations,w=p**(-1.0/alpha))[1]
def get_best_rsrp_cell(s,ue_i,dbg=False):
'''
Return the index of the cell delivering the highest RSRP at UE[i].
Relies on UE reports, and ``None`` is returned if there are not enough
reports (yet) to determine the desired output.
'''
k,best_rsrp=None,-np.inf
cell_rsrp_reports=dict((cell.i,cell.reports['rsrp']) for cell in s.cells)
for cell in s.cells:
if ue_i not in cell_rsrp_reports[cell.i]: continue # no reports for this UE
time,rsrp=cell_rsrp_reports[cell.i][ue_i] # (time, subband reports)
if dbg: print(f"get_best_rsrp_cell at {float(s.env.now):.0f}: cell={cell.i} UE={ue_i} rsrp=",rsrp,file=stderr)
ave_rsrp=np.average(rsrp) # average RSRP over subbands
if ave_rsrp>best_rsrp: k,best_rsrp=cell.i,ave_rsrp
return k
def _start_loops(s):
# internal use only - start all main loops
for logger in s.loggers:
s.env.process(logger.loop())
if s.scenario is not None:
s.env.process(s.scenario.loop())
if s.ric is not None:
s.env.process(s.ric.loop())
if s.mme is not None:
s.env.process(s.mme.loop())
for event in s.events: # TODO ?
s.env.process(event)
for cell in s.cells: # 2022-10-12 start Cells
s.env.process(cell.loop())
for ue in s.UEs: # 2022-10-12 start UEs
#print(f'About to start main loop of UE[{ue.i}]..')
s.env.process(ue.loop())
#s.env.process(UE.run_subband_cqi_report())
#sleep(2); exit()
def run(s,until):
s._set_hetnet()
s.until=until
print(f'Sim: starting run for simulation time {until} seconds...',file=stderr)
s._start_loops()
t0=time()
if 'profile' in s.params and s.params['profile']:
# https://docs.python.org/3.6/library/profile.html
# to keep python 3.6 compatibility, we don't use all the
# features for profiling added in 3.8 or 3.9.
profile_filename=s.params['profile']
print(f'profiling enabled: output file will be {profile_filename}.',file=stderr)
import cProfile,pstats,io
pr=cProfile.Profile()
pr.enable()
s.env.run(until=until) # this is what is profiled
pr.disable()
strm=io.StringIO()
ps=pstats.Stats(pr,stream=strm).sort_stats('tottime')
ps.print_stats()
tbl=strm.getvalue().split('\n')
profile_file=open(profile_filename,'w')
for line in tbl[:50]: print(line,file=profile_file)
profile_file.close()
print(f'profile written to {profile_filename}.',file=stderr)
else:
s.env.run(until=until)
print(f'Sim: finished main loop in {(time()-t0):.2f} seconds.',file=stderr)
#print(f'Sim: hetnet={s.hetnet}.',file=stderr)
if s.mme is not None:
s.mme.finalize()
if s.ric is not None:
s.ric.finalize()
for logger in s.loggers:
logger.finalize()
# END class Sim
class Scenario:
'''
Base class for a simulation scenario. The default does nothing.
Parameters
----------
sim : Sim
Simulator instance which will manage this Scenario.
func : function
Function called to perform actions.
interval : float
Time interval between actions.
verbosity : int
Level of debugging output (0=none).
'''
def __init__(s,sim,func=None,interval=1.0,verbosity=0):
s.sim=sim
s.func=func
s.verbosity=verbosity
s.interval=interval
def loop(s):
'''
Main loop of Scenario class. Should be overridden to provide different functionalities.
'''
while True:
if s.func is not None: s.func(s.sim)
yield s.sim.env.timeout(s.interval)
# END class Scenario
class Logger:
'''
Represents a simulation logger. Multiple loggers (each with their own file) can be used if desired.
Parameters
----------
sim : Sim
The Sim instance which will manage this Logger.
func : function
    Function called to perform the logging action.
header : str
Arbitrary text to write to the top of the logfile.
f : file object
An open file object which will be written or appended to.
logging_interval : float
Time interval between logging actions.
'''
def __init__(s,sim,func=None,header='',f=stdout,logging_interval=10,np_array_to_str=np_array_to_str):
s.sim=sim
s.func=s.default_logger if func is None else func
s.f=f
s.np_array_to_str=np_array_to_str
s.logging_interval=float(logging_interval)
if header: s.f.write(header)
def default_logger(s,f=stdout):
for cell in s.sim.cells:
for ue_i in cell.reports['cqi']:
rep=cell.reports['cqi'][ue_i]
if rep is None: continue
cqi=s.np_array_to_str(rep[1])
f.write(f'{cell.i}\t{ue_i}\t{cqi}\n')
def loop(s):
'''
Main loop of Logger class.
Can be overridden to provide custom functionality.
'''
while True:
s.func(f=s.f)
yield s.sim.env.timeout(s.logging_interval)
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
# END class Logger
class MME:
'''
  Represents an MME, for handling UE handovers.
Parameters
----------
sim : Sim
Sim instance which will manage this Scenario.
interval : float
Time interval between checks for handover actions.
verbosity : int
Level of debugging output (0=none).
strategy : str
Handover strategy; possible values are ``strongest_cell_simple_pathloss_model`` (default), or ``best_rsrp_cell``.
anti_pingpong : float
If greater than zero, then a handover pattern x->y->x between cells x and y is not allowed within this number of seconds. Default is 0.0, meaning pingponging is not suppressed.
'''
def __init__(s,sim,interval=10.0,strategy='strongest_cell_simple_pathloss_model',anti_pingpong=30.0,verbosity=0):
s.sim=sim
s.interval=interval
s.strategy=strategy
s.anti_pingpong=anti_pingpong
s.verbosity=verbosity
print(f'MME: using handover strategy {s.strategy}.',file=stderr)
def do_handovers(s):
'''
Check whether handovers are required, and do them if so.
Normally called from loop(), but can be called manually if required.
'''
for ue in s.sim.UEs:
if ue.serving_cell is None: continue # no handover needed for this UE. 2022-08-08 added None test
oldcelli=ue.serving_cell.i # 2022-08-26
CQI_before=ue.serving_cell.get_UE_CQI(ue.i)
previous,tm=ue.serving_cell_ids[1]
if s.strategy=='strongest_cell_simple_pathloss_model':
celli=s.sim.get_strongest_cell_simple_pathloss_model(ue.xyz)
elif s.strategy=='best_rsrp_cell':
celli=s.sim.get_best_rsrp_cell(ue.i)
if celli is None:
celli=s.sim.get_strongest_cell_simple_pathloss_model(ue.xyz)
else:
print(f'MME.loop: strategy {s.strategy} not implemented, quitting!',file=stderr)
exit()
if celli==ue.serving_cell.i: continue
if s.anti_pingpong>0.0 and previous==celli:
if s.sim.env.now-tm<s.anti_pingpong:
if s.verbosity>2:
print(f't={float(s.sim.env.now):8.2f} handover of UE[{ue.i}] suppressed by anti_pingpong heuristic.',file=stderr)
continue # not enough time since we were last on this cell
ue.detach(quiet=True)
ue.attach(s.sim.cells[celli])
ue.send_rsrp_reports() # make sure we have reports immediately
ue.send_subband_cqi_report()
if s.verbosity>1:
CQI_after=ue.serving_cell.get_UE_CQI(ue.i)
print(f't={float(s.sim.env.now):8.2f} handover of UE[{ue.i:3}] from Cell[{oldcelli:3}] to Cell[{ue.serving_cell.i:3}]',file=stderr,end=' ')
print(f'CQI change {CQI_before} -> {CQI_after}',file=stderr)
def loop(s):
'''
Main loop of MME.
'''
yield s.sim.env.timeout(0.5*s.interval) # stagger the intervals
print(f'MME started at {float(s.sim.env.now):.2f}, using strategy="{s.strategy}" and anti_pingpong={s.anti_pingpong:.0f}.',file=stderr)
while True:
s.do_handovers()
yield s.sim.env.timeout(s.interval)
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
# END class MME
class RIC:
'''
Base class for a RIC, for hosting xApps. The default does nothing.
Parameters
----------
sim : Sim
Simulator instance which will manage this Scenario.
interval : float
Time interval between RIC actions.
verbosity : int
Level of debugging output (0=none).
'''
def __init__(s,sim,interval=10,verbosity=0):
s.sim=sim
s.interval=interval
s.verbosity=verbosity
def finalize(s):
'''
Function called at end of simulation, to implement any required finalization actions.
'''
pass
def loop(s):
'''
Main loop of RIC class. Must be overridden to provide functionality.
'''
print(f'RIC started at {float(s.sim.env.now):.2}.',file=stderr)
while True:
yield s.sim.env.timeout(s.interval)
# END class RIC
if __name__=='__main__': # a simple self-test
np.set_printoptions(precision=4,linewidth=200)
class MyLogger(Logger):
def loop(s):
while True:
for cell in s.sim.cells:
if cell.i!=0: continue # cell[0] only
for ue_i in cell.reports['cqi']:
if ue_i!=0: continue # UE[0] only
rep=cell.reports['cqi'][ue_i]
if not rep: continue
xy= s.np_array_to_str(s.sim.UEs[ue_i].xyz[:2])
cqi=s.np_array_to_str(cell.reports['cqi'][ue_i][1])
tp= s.np_array_to_str(cell.reports['throughput_Mbps'][ue_i][1])
s.f.write(f'{s.sim.env.now:.1f}\t{xy}\t{cqi}\t{tp}\n')
yield s.sim.env.timeout(s.logging_interval)
def test_01(ncells=4,nues=9,n_subbands=2,until=1000.0):
sim=Sim()
for i in range(ncells):
sim.make_cell(n_subbands=n_subbands,MIMO_gain_dB=3.0,verbosity=0)
sim.cells[0].set_xyz((500.0,500.0,20.0)) # fix cell[0]
for i in range(nues):
ue=sim.make_UE(verbosity=1)
if 0==i: # force ue[0] to attach to cell[0]
ue.set_xyz([501.0,502.0,2.0],verbose=True)
ue.attach_to_nearest_cell()
scenario=Scenario(sim,verbosity=0)
logger=MyLogger(sim,logging_interval=1.0)
ric=RIC(sim)
sim.add_logger(logger)
sim.add_scenario(scenario)
sim.add_ric(ric)
sim.run(until=until)
test_01() | PypiClean |
# /Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/gui/qt/address_list.py
from enum import IntEnum
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtWidgets import QAbstractItemView, QComboBox, QLabel, QMenu
from electrum.i18n import _
from electrum.util import block_explorer_URL, profiler
from electrum.plugin import run_hook
from electrum.bitcoin import is_address
from electrum.wallet import InternalAddressCorruption
from .util import MyTreeView, MONOSPACE_FONT, ColorScheme, webopen
class AddressList(MyTreeView):
class Columns(IntEnum):
TYPE = 0
ADDRESS = 1
LABEL = 2
COIN_BALANCE = 3
FIAT_BALANCE = 4
NUM_TXS = 5
filter_columns = [Columns.TYPE, Columns.ADDRESS, Columns.LABEL, Columns.COIN_BALANCE]
def __init__(self, parent=None):
super().__init__(parent, self.create_menu, stretch_column=self.Columns.LABEL)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.show_change = 0
self.show_used = 0
self.change_button = QComboBox(self)
self.change_button.currentIndexChanged.connect(self.toggle_change)
for t in [_('All'), _('Receiving'), _('Change')]:
self.change_button.addItem(t)
self.used_button = QComboBox(self)
self.used_button.currentIndexChanged.connect(self.toggle_used)
for t in [_('All'), _('Unused'), _('Funded'), _('Used')]:
self.used_button.addItem(t)
self.setModel(QStandardItemModel(self))
self.update()
def get_toolbar_buttons(self):
return QLabel(_("Filter:")), self.change_button, self.used_button
def on_hide_toolbar(self):
self.show_change = 0
self.show_used = 0
self.update()
def save_toolbar_state(self, state, config):
config.set_key('show_toolbar_addresses', state)
def refresh_headers(self):
fx = self.parent.fx
if fx and fx.get_fiat_address_config():
ccy = fx.get_currency()
else:
ccy = _('Fiat')
headers = {
self.Columns.TYPE: _('Type'),
self.Columns.ADDRESS: _('Address'),
self.Columns.LABEL: _('Label'),
self.Columns.COIN_BALANCE: _('Balance'),
self.Columns.FIAT_BALANCE: ccy + ' ' + _('Balance'),
self.Columns.NUM_TXS: _('Tx'),
}
self.update_headers(headers)
def toggle_change(self, state):
if state == self.show_change:
return
self.show_change = state
self.update()
def toggle_used(self, state):
if state == self.show_used:
return
self.show_used = state
self.update()
@profiler
def update(self):
self.wallet = self.parent.wallet
current_address = self.current_item_user_role(col=self.Columns.LABEL)
if self.show_change == 1:
addr_list = self.wallet.get_receiving_addresses()
elif self.show_change == 2:
addr_list = self.wallet.get_change_addresses()
else:
addr_list = self.wallet.get_addresses()
self.model().clear()
self.refresh_headers()
fx = self.parent.fx
set_address = None
for address in addr_list:
num = self.wallet.get_address_history_len(address)
label = self.wallet.labels.get(address, '')
c, u, x = self.wallet.get_addr_balance(address)
balance = c + u + x
is_used_and_empty = self.wallet.is_used(address) and balance == 0
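            # Added note: self.show_used indexes the "used" filter combo box
            # (0=All, 1=Unused, 2=Funded, 3=Used), matching the items added to
            # used_button in __init__.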
if self.show_used == 1 and (balance or is_used_and_empty):
continue
if self.show_used == 2 and balance == 0:
continue
if self.show_used == 3 and not is_used_and_empty:
continue
balance_text = self.parent.format_amount(balance, whitespaces=True)
# create item
if fx and fx.get_fiat_address_config():
rate = fx.exchange_rate()
fiat_balance = fx.value_str(balance, rate)
else:
fiat_balance = ''
labels = ['', address, label, balance_text, fiat_balance, "%d"%num]
address_item = [QStandardItem(e) for e in labels]
# align text and set fonts
for i, item in enumerate(address_item):
item.setTextAlignment(Qt.AlignVCenter)
if i not in (self.Columns.TYPE, self.Columns.LABEL):
item.setFont(QFont(MONOSPACE_FONT))
item.setEditable(i in self.editable_columns)
address_item[self.Columns.FIAT_BALANCE].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
# setup column 0
if self.wallet.is_change(address):
address_item[self.Columns.TYPE].setText(_('change'))
address_item[self.Columns.TYPE].setBackground(ColorScheme.YELLOW.as_color(True))
else:
address_item[self.Columns.TYPE].setText(_('receiving'))
address_item[self.Columns.TYPE].setBackground(ColorScheme.GREEN.as_color(True))
address_item[self.Columns.LABEL].setData(address, Qt.UserRole)
# setup column 1
if self.wallet.is_frozen_address(address):
address_item[self.Columns.ADDRESS].setBackground(ColorScheme.BLUE.as_color(True))
if self.wallet.is_beyond_limit(address):
address_item[self.Columns.ADDRESS].setBackground(ColorScheme.RED.as_color(True))
# add item
count = self.model().rowCount()
self.model().insertRow(count, address_item)
address_idx = self.model().index(count, self.Columns.LABEL)
if address == current_address:
set_address = QPersistentModelIndex(address_idx)
self.set_current_idx(set_address)
# show/hide columns
if fx and fx.get_fiat_address_config():
self.showColumn(self.Columns.FIAT_BALANCE)
else:
self.hideColumn(self.Columns.FIAT_BALANCE)
self.filter()
def create_menu(self, position):
from electrum.wallet import Multisig_Wallet
is_multisig = isinstance(self.wallet, Multisig_Wallet)
can_delete = self.wallet.can_delete_address()
selected = self.selected_in_column(self.Columns.ADDRESS)
if not selected:
return
multi_select = len(selected) > 1
addrs = [self.model().itemFromIndex(item).text() for item in selected]
menu = QMenu()
if not multi_select:
idx = self.indexAt(position)
if not idx.isValid():
return
col = idx.column()
item = self.model().itemFromIndex(idx)
if not item:
return
addr = addrs[0]
addr_column_title = self.model().horizontalHeaderItem(self.Columns.LABEL).text()
addr_idx = idx.sibling(idx.row(), self.Columns.LABEL)
column_title = self.model().horizontalHeaderItem(col).text()
copy_text = self.model().itemFromIndex(idx).text()
if col == self.Columns.COIN_BALANCE or col == self.Columns.FIAT_BALANCE:
copy_text = copy_text.strip()
menu.addAction(_("Copy {}").format(column_title), lambda: self.place_text_on_clipboard(copy_text))
menu.addAction(_('Details'), lambda: self.parent.show_address(addr))
persistent = QPersistentModelIndex(addr_idx)
menu.addAction(_("Edit {}").format(addr_column_title), lambda p=persistent: self.edit(QModelIndex(p)))
menu.addAction(_("Request payment"), lambda: self.parent.receive_at(addr))
if self.wallet.can_export():
menu.addAction(_("Private key"), lambda: self.parent.show_private_key(addr))
if not is_multisig and not self.wallet.is_watching_only():
menu.addAction(_("Sign/verify message"), lambda: self.parent.sign_verify_message(addr))
menu.addAction(_("Encrypt/decrypt message"), lambda: self.parent.encrypt_message(addr))
if can_delete:
menu.addAction(_("Remove from wallet"), lambda: self.parent.remove_address(addr))
addr_URL = block_explorer_URL(self.config, 'addr', addr)
if addr_URL:
menu.addAction(_("View on block explorer"), lambda: webopen(addr_URL))
if not self.wallet.is_frozen_address(addr):
menu.addAction(_("Freeze"), lambda: self.parent.set_frozen_state_of_addresses([addr], True))
else:
menu.addAction(_("Unfreeze"), lambda: self.parent.set_frozen_state_of_addresses([addr], False))
coins = self.wallet.get_spendable_coins(addrs, config=self.config)
if coins:
menu.addAction(_("Spend from"), lambda: self.parent.spend_coins(coins))
run_hook('receive_menu', menu, addrs, self.wallet)
menu.exec_(self.viewport().mapToGlobal(position))
def place_text_on_clipboard(self, text):
if is_address(text):
try:
self.wallet.check_address(text)
except InternalAddressCorruption as e:
self.parent.show_error(str(e))
raise
        self.parent.app.clipboard().setText(text)
# /BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/widgets/visualize/owvenndiagram.py
import math
import unicodedata
from collections import namedtuple, defaultdict
from itertools import compress, count
from functools import reduce
from operator import attrgetter
from xml.sax.saxutils import escape
from typing import Dict, Any, List, Mapping, Optional
import numpy as np
from AnyQt.QtWidgets import (
QGraphicsScene, QGraphicsView, QGraphicsWidget,
QGraphicsPathItem, QGraphicsTextItem, QStyle, QSizePolicy
)
from AnyQt.QtGui import (
QPainterPath, QPainter, QTransform, QColor, QBrush, QPen, QPalette
)
from AnyQt.QtCore import Qt, QPointF, QRectF, QLineF
from AnyQt.QtCore import pyqtSignal as Signal
from Orange.data import Table, Domain, StringVariable, RowInstance
from Orange.data.util import get_unique_names_duplicates
from Orange.widgets import widget, gui
from Orange.widgets.settings import (
DomainContextHandler, ContextSetting, Setting)
from Orange.widgets.utils import itemmodels, colorpalettes
from Orange.widgets.utils.annotated_data import (create_annotated_table,
ANNOTATED_DATA_SIGNAL_NAME)
from Orange.widgets.utils.sql import check_sql_input_sequence
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.widget import MultiInput, Output, Msg
from Orange.i18n_config import *
def __(key):
return i18n.t("widget.visualize.visualize.owvenndiagram." + key)
_InputData = namedtuple("_InputData", ["key", "name", "table"])
_ItemSet = namedtuple("_ItemSet", ["key", "name", "title", "items"])
IDENTITY_STR = __("checkbox.instance_identity")
EQUALITY_STR = __("checkbox.instance_equality")
class VennVariableListModel(itemmodels.VariableListModel):
def __init__(self):
super().__init__([IDENTITY_STR, EQUALITY_STR])
self.same_domains = True
def set_variables(self, variables, same_domains):
self[2:] = variables
self.same_domains = same_domains
def flags(self, index):
if index.row() == 1 and not self.same_domains:
return Qt.NoItemFlags
return Qt.ItemIsSelectable | Qt.ItemIsEnabled
class OWVennDiagram(widget.OWWidget):
name = __("name")
description = __("desc")
icon = "icons/VennDiagram.svg"
priority = 280
keywords = []
settings_version = 2
class Inputs:
data = MultiInput("Data", Table, label=i18n.t("widget.visualize.visualize.common.data"))
class Outputs:
selected_data = Output("Selected Data", Table, default=True, label=i18n.t("widget.visualize.visualize.common.selected_data"))
annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Table, label=i18n.t("widget.visualize.visualize.common.data"))
class Error(widget.OWWidget.Error):
instances_mismatch = Msg(__("msg.instance_error"))
too_many_inputs = Msg(__("msg.venn_diagram_accept_most_dataset"))
class Warning(widget.OWWidget.Warning):
renamed_vars = Msg(__("msg.variable_rename"))
selection: list
settingsHandler = DomainContextHandler()
# Indices of selected disjoint areas
selection = Setting([], schema_only=True)
#: Output unique items (one output row for every unique instance `key`)
#: or preserve all duplicates in the output.
output_duplicates = Setting(False)
autocommit = Setting(True)
rowwise = Setting(True)
selected_feature = ContextSetting(IDENTITY_STR)
want_main_area = False
graph_name = "scene"
atr_types = ['attributes', 'metas', 'class_vars']
atr_vals = {'metas': 'metas', 'attributes': 'X', 'class_vars': 'Y'}
row_vals = {'attributes': 'x', 'class_vars': 'y', 'metas': 'metas'}
def __init__(self):
super().__init__()
# Diagram update is in progress
self._updating = False
self.__id_gen = count() # 'key' generator for _InputData
#: Connected input dataset signals.
self._data_inputs: List[_InputData] = []
# Input non-none datasets in the order they were 'connected'.
self.__data: Optional[Dict[Any, _InputData]] = None
# Extracted input item sets in the order they were 'connected'
self.itemsets = {}
# A list with 2 ** len(self.data) elements that store item sets
# belonging to each area
self.disjoint = []
# A list with 2 ** len(self.data) elements that store keys of tables
# intersected in each area
self.area_keys = []
# Main area view
self.scene = QGraphicsScene(self)
self.view = QGraphicsView(self.scene)
self.view.setRenderHint(QPainter.Antialiasing)
self.view.setFrameStyle(QGraphicsView.StyledPanel)
self.controlArea.layout().addWidget(self.view)
self.vennwidget = VennDiagram()
self._resize()
self.vennwidget.itemTextEdited.connect(self._on_itemTextEdited)
self.scene.selectionChanged.connect(self._on_selectionChanged)
self.scene.addItem(self.vennwidget)
box = gui.radioButtonsInBox(
self.buttonsArea, self, 'rowwise',
[__("btn.column_feature"), __("btn.row_match"), ],
callback=self._on_matching_changed
)
gui.rubber(self.buttonsArea)
gui.separator(self.buttonsArea, 10, 0)
gui.comboBox(
gui.indentedBox(box,
gui.checkButtonOffsetHint(box.buttons[0]),
Qt.Horizontal,
addSpaceBefore=False),
self, "selected_feature",
model=VennVariableListModel(),
callback=self._on_inputAttrActivated,
tooltip=__("msg.selected_feature_tip"))
box.layout().setSpacing(6)
box.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
self.outputs_box = box = gui.vBox(self.buttonsArea,
sizePolicy=(QSizePolicy.Preferred,
QSizePolicy.Preferred),
stretch=0)
gui.rubber(box)
self.output_duplicates_cb = gui.checkBox(
box, self, "output_duplicates", __("checkbox.output_duplicate"),
callback=lambda: self.commit(), # pylint: disable=unnecessary-lambda
stateWhenDisabled=False,
attribute=Qt.WA_LayoutUsesWidgetRect)
gui.auto_send(
box, self, "autocommit", box=False, contentsMargins=(0, 0, 0, 0))
gui.rubber(box)
self._update_duplicates_cb()
self._queue = []
def resizeEvent(self, event):
super().resizeEvent(event)
self._resize()
def showEvent(self, event):
super().showEvent(event)
self._resize()
def _resize(self):
# vennwidget draws so that the diagram fits into its geometry,
# while labels take further 120 pixels, hence -120 in below formula
size = max(200, min(self.view.width(), self.view.height()) - 120)
self.vennwidget.resize(size, size)
self.scene.setSceneRect(self.scene.itemsBoundingRect())
@property
def data(self) -> Mapping[Any, _InputData]:
if self.__data is None:
self.__data = {
item.key: item for item in self._data_inputs[:5]
if item.table is not None
}
return self.__data
@Inputs.data
@check_sql_input_sequence
def setData(self, index: int, data: Optional[Table]):
item = self._data_inputs[index]
item = item._replace(
name=data.name if data is not None else "",
table=data
)
self._data_inputs[index] = item
self.__data = None # invalidate self.data
self._setInterAttributes()
@Inputs.data.insert
@check_sql_input_sequence
def insertData(self, index: int, data: Optional[Table]):
key = next(self.__id_gen)
item = _InputData(
key, name=data.name if data is not None else "", table=data
)
self._data_inputs.insert(index, item)
self.__data = None # invalidate self.data
if len(self._data_inputs) > 5:
self.Error.too_many_inputs()
self._setInterAttributes()
@Inputs.data.remove
def removeData(self, index: int):
self.__data = None # invalidate self.data
self._data_inputs.pop(index)
if len(self._data_inputs) <= 5:
self.Error.too_many_inputs.clear()
# Clear possible warnings.
self.Warning.clear()
self._setInterAttributes()
def data_equality(self):
""" Checks if all input datasets have same ids. """
if not self.data.values():
return True
sets = []
for val in self.data.values():
sets.append(set(val.table.ids))
inter = reduce(set.intersection, sets)
return len(inter) == max(map(len, sets))
def settings_compatible(self):
self.Error.instances_mismatch.clear()
if not self.rowwise:
if not self.data_equality():
self.vennwidget.clear()
self.Error.instances_mismatch()
self.itemsets = {}
return False
return True
def handleNewSignals(self):
self.vennwidget.clear()
if not self.settings_compatible():
self.invalidateOutput()
return
self._createItemsets()
self._createDiagram()
# If autocommit is enabled, _createDiagram already outputs data
# If not, call commit from here
if not self.autocommit:
self.commit.now()
super().handleNewSignals()
def _intersection_string_attrs(self):
sets = [set(string_attributes(data_.table.domain))
for data_ in self.data.values()]
if sets:
return list(reduce(set.intersection, sets))
return []
def _all_domains_same(self):
domains = [data_.table.domain for data_ in self.data.values()]
# Domain.__hash__ is hacky, let's not use a set here, just for the case
return not domains or all(domain == domains[0] for domain in domains)
def _uses_feature(self):
return isinstance(self.selected_feature, StringVariable)
def _setInterAttributes(self):
model = self.controls.selected_feature.model()
same_domains = self._all_domains_same()
variables = self._intersection_string_attrs()
model.set_variables(variables, same_domains)
if self.selected_feature == EQUALITY_STR and not same_domains \
or self._uses_feature() and \
self.selected_feature.name not in (var.name for var in variables):
self.selected_feature = IDENTITY_STR
@staticmethod
def _hashes(table):
# In the interest of space, we compare hashes. If this is not OK,
# concatenate bytes instead of xoring hashes. Renaming a method
# brings bonus points.
return [hash(inst.x.data.tobytes())
^ hash(inst.y.data.tobytes())
^ hash(inst.metas.data.tobytes()) for inst in table]
def _itemsForInput(self, key):
"""
Calculates input for venn diagram, according to user's settings.
"""
table = self.data[key].table
if self.selected_feature == IDENTITY_STR:
return list(table.ids)
if self.selected_feature == EQUALITY_STR:
return self._hashes(table)
attr = self.selected_feature
return [str(inst[attr]) for inst in table
if not np.isnan(inst[attr])]
def _createItemsets(self):
"""
Create itemsets over rows or columns (domains) of input tables.
"""
olditemsets = dict(self.itemsets)
self.itemsets.clear()
for key, input_ in self.data.items():
if self.rowwise:
items = self._itemsForInput(key)
else:
items = [el.name for el in input_.table.domain.attributes]
name = input_.name
if key in olditemsets and olditemsets[key].name == name:
# Reuse the title (which might have been changed by the user)
title = olditemsets[key].title
else:
title = name
itemset = _ItemSet(key=key, name=name, title=title, items=items)
self.itemsets[key] = itemset
def _createDiagram(self):
self._updating = True
oldselection = list(self.selection)
n = len(self.itemsets)
self.disjoint, self.area_keys = \
self.get_disjoint(set(s.items) for s in self.itemsets.values())
vennitems = []
colors = colorpalettes.LimitedDiscretePalette(n, force_glasbey=True)
for i, item in enumerate(self.itemsets.values()):
cnt = len(set(item.items))
cnt_all = len(item.items)
if cnt != cnt_all:
fmt = '{} <i>(all: {})</i>'
else:
fmt = '{}'
counts = fmt.format(cnt, cnt_all)
gr = VennSetItem(text=item.title, informativeText=counts)
color = colors[i]
color.setAlpha(100)
gr.setBrush(QBrush(color))
gr.setPen(QPen(Qt.NoPen))
vennitems.append(gr)
self.vennwidget.setItems(vennitems)
for i, area in enumerate(self.vennwidget.vennareas()):
area_items = list(map(str, list(self.disjoint[i])))
if i:
area.setText("{0}".format(len(area_items)))
label = disjoint_set_label(i, n, simplify=False)
tooltip = "<h4>|{}| = {}</h4>".format(label, len(area_items))
if self._uses_feature() or not self.rowwise:
# Nothing readable to show when matching by identity or equality
tooltip += "<span>" + ", ".join(map(escape, area_items[:32]))
if len(area_items) > 32:
tooltip += f"</br>({len(area_items) - 32} items not shown)"
tooltip += "</span>"
area.setToolTip(tooltip)
area.setPen(QPen(QColor(10, 10, 10, 200), 1.5))
area.setFlag(QGraphicsPathItem.ItemIsSelectable, True)
area.setSelected(i in oldselection)
self._updating = False
self._on_selectionChanged()
def _on_selectionChanged(self):
if self._updating:
return
areas = self.vennwidget.vennareas()
self.selection = [i for i, area in enumerate(areas) if area.isSelected()]
self.invalidateOutput()
def _update_duplicates_cb(self):
self.output_duplicates_cb.setEnabled(
self.rowwise and self._uses_feature())
def _on_matching_changed(self):
self._update_duplicates_cb()
if not self.settings_compatible():
self.invalidateOutput()
return
self._createItemsets()
self._createDiagram()
def _on_inputAttrActivated(self):
self.rowwise = True
self._on_matching_changed()
def _on_itemTextEdited(self, index, text):
text = str(text)
key = list(self.itemsets)[index]
self.itemsets[key] = self.itemsets[key]._replace(title=text)
def invalidateOutput(self):
self.commit.deferred()
def merge_data(self, domain, values, ids=None):
X, metas, class_vars = None, None, None
renamed = []
names = [var.name for val in domain.values() for var in val]
unique_names = iter(get_unique_names_duplicates(names))
for val in domain.values():
for n, idx, var in zip(names, count(), val):
u = next(unique_names)
if n != u:
val[idx] = var.copy(name=u)
renamed.append(n)
if renamed:
self.Warning.renamed_vars(', '.join(renamed))
if 'attributes' in values:
X = np.hstack(values['attributes'])
if 'metas' in values:
metas = np.hstack(values['metas'])
n = len(metas)
if 'class_vars' in values:
class_vars = np.hstack(values['class_vars'])
n = len(class_vars)
if X is None:
X = np.empty((n, 0))
table = Table.from_numpy(Domain(**domain), X, class_vars, metas)
if ids is not None:
table.ids = ids
return table
def extract_columnwise(self, var_dict, columns=None):
domain = {type_ : [] for type_ in self.atr_types}
values = defaultdict(list)
renamed = []
for atr_type, vars_dict in var_dict.items():
for var_name, var_data in vars_dict.items():
is_selected = bool(columns) and var_name.name in columns
if var_data[0]:
#columns are different, copy all, rename them
for var, table_key in var_data[1]:
idx = list(self.data).index(table_key) + 1
new_atr = var.copy(name=f'{var_name.name} ({idx})')
if columns and atr_type == 'attributes':
new_atr.attributes['Selected'] = is_selected
domain[atr_type].append(new_atr)
renamed.append(var_name.name)
values[atr_type].append(getattr(self.data[table_key].table[:, var_name],
self.atr_vals[atr_type])
.reshape(-1, 1))
else:
new_atr = var_data[1][0][0].copy()
if columns and atr_type == 'attributes':
new_atr.attributes['Selected'] = is_selected
domain[atr_type].append(new_atr)
values[atr_type].append(getattr(self.data[var_data[1][0][1]].table[:, var_name],
self.atr_vals[atr_type])
.reshape(-1, 1))
if renamed:
self.Warning.renamed_vars(', '.join(renamed))
return self.merge_data(domain, values)
def curry_merge(self, table_key, atr_type, ids=None, selection=False):
if self.rowwise:
check_equality = self.arrays_equal_rows
else:
check_equality = self.arrays_equal_cols
def inner(new_atrs, atr):
"""
atr - the variable currently being merged into new_atrs
new_atrs - dictionary where the key is the old variable and the value
is [is_different: bool, [(var, table_key), ...]]; is_different is set
to True if we are outputting duplicates, but then the value is arbitrary
"""
if atr in new_atrs:
if not selection and self.output_duplicates:
#if output_duplicates, we just check if compute value is the same
new_atrs[atr][0] = True
elif not new_atrs[atr][0]:
for var, key in new_atrs[atr][1]:
if not check_equality(table_key,
key,
atr.name,
self.atr_vals[atr_type],
type(var), ids):
new_atrs[atr][0] = True
break
new_atrs[atr][1].append((atr, table_key))
else:
new_atrs[atr] = [False, [(atr, table_key)]]
return new_atrs
return inner
def arrays_equal_rows(self, key1, key2, name, data_type, type_, ids):
#gets masks, compares same as cols
t1 = self.data[key1].table
t2 = self.data[key2].table
inter_val = set(ids[key1]) & set(ids[key2])
t1_inter = [ids[key1][val] for val in inter_val]
t2_inter = [ids[key2][val] for val in inter_val]
return arrays_equal(
getattr(t1[t1_inter, name],
data_type).reshape(-1, 1),
getattr(t2[t2_inter, name],
data_type).reshape(-1, 1),
type_)
def arrays_equal_cols(self, key1, key2, name, data_type, type_, _ids=None):
return arrays_equal(
getattr(self.data[key1].table[:, name],
data_type),
getattr(self.data[key2].table[:, name],
data_type),
type_)
def create_from_columns(self, columns, relevant_keys, get_selected):
"""
Columns are duplicated only if values differ (even
if only in order of values), origin table name and input slot is added to column name.
"""
var_dict = {}
for atr_type in self.atr_types:
container = {}
for table_key in relevant_keys:
table = self.data[table_key].table
if atr_type == 'attributes':
if get_selected:
atrs = list(compress(table.domain.attributes,
[c.name in columns for c in table.domain.attributes]))
else:
atrs = getattr(table.domain, atr_type)
else:
atrs = getattr(table.domain, atr_type)
merge_vars = self.curry_merge(table_key, atr_type)
container = reduce(merge_vars, atrs, container)
var_dict[atr_type] = container
if get_selected:
annotated = self.extract_columnwise(var_dict, None)
else:
annotated = self.extract_columnwise(var_dict, columns)
return annotated
def extract_rowwise(self, var_dict, ids=None, selection=False):
"""
keys : ['attributes', 'metas', 'class_vars']
vals: new_atrs - dictionary where key is old name, val
is [is_different:bool, table_keys:list])
ids: dict with ids for each table
"""
all_ids = sorted(reduce(set.union, [set(val) for val in ids.values()], set()))
permutations = {}
for table_key, dict_ in ids.items():
permutations[table_key] = get_perm(list(dict_), all_ids)
domain = {type_ : [] for type_ in self.atr_types}
values = defaultdict(list)
renamed = []
for atr_type, vars_dict in var_dict.items():
for var_name, var_data in vars_dict.items():
different = var_data[0]
if different:
# Columns are different, copy and rename them.
# Renaming is done here to mark appropriately the source table.
# Additional strange clashes are checked later in merge_data
for var, table_key in var_data[1]:
temp = self.data[table_key].table
idx = list(self.data).index(table_key) + 1
domain[atr_type].append(var.copy(name='{} ({})'.format(var_name, idx)))
renamed.append(var_name.name)
v = getattr(temp[list(ids[table_key].values()), var_name],
self.atr_vals[atr_type])
perm = permutations[table_key]
if len(v) < len(all_ids):
values[atr_type].append(pad_columns(v, perm, len(all_ids)))
else:
values[atr_type].append(v[perm].reshape(-1, 1))
else:
value = np.full((len(all_ids), 1), np.nan)
domain[atr_type].append(var_data[1][0][0].copy())
for _, table_key in var_data[1]:
#different tables have different part of the same attribute vector
perm = permutations[table_key]
v = getattr(self.data[table_key].table[list(ids[table_key].values()),
var_name],
self.atr_vals[atr_type]).reshape(-1, 1)
value = value.astype(v.dtype, copy=False)
value[perm] = v
values[atr_type].append(value)
if renamed:
self.Warning.renamed_vars(', '.join(renamed))
ids = None if self._uses_feature() else np.array(all_ids)
table = self.merge_data(domain, values, ids)
if selection:
mask = [idx in self.selected_items for idx in all_ids]
return create_annotated_table(table, mask)
return table
def get_indices(self, table, selection):
"""Returns mappings of ids (be it row id or string) to indices in tables"""
if self.selected_feature == IDENTITY_STR:
items = table.ids
ids = range(len(table))
elif self.selected_feature == EQUALITY_STR:
items, ids = np.unique(self._hashes(table), return_index=True)
else:
items = getattr(table[:, self.selected_feature], 'metas')
if self.output_duplicates and selection:
items, inverse = np.unique(items, return_inverse=True)
ids = [np.nonzero(inverse == idx)[0] for idx in range(len(items))]
else:
items, ids = np.unique(items, return_index=True)
if selection:
return {item: idx for item, idx in zip(items, ids)
if item in self.selected_items}
return dict(zip(items, ids))
def get_indices_to_match_by(self, relevant_keys, selection=False):
dict_ = {}
for key in relevant_keys:
table = self.data[key].table
dict_[key] = self.get_indices(table, selection)
return dict_
def create_from_rows(self, relevant_ids, selection=False):
var_dict = {}
for atr_type in self.atr_types:
container = {}
for table_key in relevant_ids:
merge_vars = self.curry_merge(table_key, atr_type, relevant_ids, selection)
atrs = getattr(self.data[table_key].table.domain, atr_type)
container = reduce(merge_vars, atrs, container)
var_dict[atr_type] = container
if self.output_duplicates and not selection:
return self.extract_rowwise_duplicates(var_dict, relevant_ids)
return self.extract_rowwise(var_dict, relevant_ids, selection)
def expand_table(self, table, atrs, metas, cv):
exp = []
n = 1 if isinstance(table, RowInstance) else len(table)
if isinstance(table, RowInstance):
ids = table.id.reshape(-1, 1)
atr_vals = self.row_vals
else:
ids = table.ids.reshape(-1, 1)
atr_vals = self.atr_vals
for all_el, atr_type in zip([atrs, metas, cv], self.atr_types):
cur_el = getattr(table.domain, atr_type)
array = np.full((n, len(all_el)), np.nan)
if cur_el:
perm = get_perm(cur_el, all_el)
b = getattr(table, atr_vals[atr_type]).reshape(len(array), len(perm))
array = array.astype(b.dtype, copy=False)
array[:, perm] = b
exp.append(array)
return (*exp, ids)
def extract_rowwise_duplicates(self, var_dict, ids):
all_ids = sorted(reduce(set.union, [set(val) for val in ids.values()], set()))
sort_key = attrgetter("name")
all_atrs = sorted(var_dict['attributes'], key=sort_key)
all_metas = sorted(var_dict['metas'], key=sort_key)
all_cv = sorted(var_dict['class_vars'], key=sort_key)
all_x, all_y, all_m = [], [], []
new_table_ids = []
for idx in all_ids:
# iterate through tables with the same idx
for table_key, t_indices in ids.items():
if idx not in t_indices:
continue
map_ = t_indices[idx]
extracted = self.data[table_key].table[map_]
# pylint: disable=unbalanced-tuple-unpacking
x, m, y, t_ids = self.expand_table(extracted, all_atrs, all_metas, all_cv)
all_x.append(x)
all_y.append(y)
all_m.append(m)
new_table_ids.append(t_ids)
domain = {'attributes': all_atrs, 'metas': all_metas, 'class_vars': all_cv}
values = {'attributes': [np.vstack(all_x)],
'metas': [np.vstack(all_m)],
'class_vars': [np.vstack(all_y)]}
return self.merge_data(domain, values, np.vstack(new_table_ids))
@gui.deferred
def commit(self):
if not self.vennwidget.vennareas() or not self.data:
self.Outputs.selected_data.send(None)
self.Outputs.annotated_data.send(None)
return
self.selected_items = reduce(
set.union, [self.disjoint[index] for index in self.selection],
set()
)
selected_keys = reduce(
set.union, [set(self.area_keys[area]) for area in self.selection],
set())
selected = None
if self.rowwise:
if self.selected_items:
selected_ids = self.get_indices_to_match_by(
selected_keys, bool(self.selection))
selected = self.create_from_rows(selected_ids, False)
annotated_ids = self.get_indices_to_match_by(self.data)
annotated = self.create_from_rows(annotated_ids, True)
else:
annotated = self.create_from_columns(self.selected_items, self.data, False)
if self.selected_items:
selected = self.create_from_columns(self.selected_items, selected_keys, True)
self.Outputs.selected_data.send(selected)
self.Outputs.annotated_data.send(annotated)
def send_report(self):
self.report_plot()
def get_disjoint(self, sets):
"""
Return all disjoint subsets.
"""
sets = list(sets)
n = len(sets)
disjoint_sets = [None] * (2 ** n)
included_tables = [None] * (2 ** n)
for i in range(2 ** n):
key = setkey(i, n)
included = [s for s, inc in zip(sets, key) if inc]
if included:
excluded = [s for s, inc in zip(sets, key) if not inc]
s = reduce(set.intersection, included)
s = reduce(set.difference, excluded, s)
else:
s = set()
disjoint_sets[i] = s
included_tables[i] = [k for k, inc in zip(self.data, key) if inc]
return disjoint_sets, included_tables
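# Worked example (illustrative, not part of the original source): for two
# input sets A and B, areas are indexed by setkey(i, n), so
# disjoint_sets[0] -> empty set (no table included),
# disjoint_sets[1] -> A - B (key (True, False)),
# disjoint_sets[2] -> B - A (key (False, True)),
# disjoint_sets[3] -> A & B (key (True, True)),
# and included_tables[i] lists the keys of the tables taking part in area i.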
@classmethod
def migrate_settings(cls, settings, version):
if version < 3:
if settings.pop("selected_feature", None) is None:
settings["selected_feature"] = IDENTITY_STR
def string_attributes(domain):
"""
Return all string attributes from the domain.
"""
return [attr for attr in domain.variables + domain.metas if attr.is_string]
def disjoint_set_label(i, n, simplify=False):
"""
Return a html formated label for a disjoint set indexed by `i`.
"""
intersection = unicodedata.lookup("INTERSECTION")
# comp = unicodedata.lookup("COMPLEMENT") #
# This depends on the font but the unicode complement in
# general does not look nice in a super script so we use
# plain c instead.
comp = "c"
def label_for_index(i):
return chr(ord("A") + i)
if simplify:
return "".join(label_for_index(i) for i, b in enumerate(setkey(i, n))
if b)
else:
return intersection.join(label_for_index(i) +
("" if b else "<sup>" + comp + "</sup>")
for i, b in enumerate(setkey(i, n)))
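# Example (illustrative only, not in the original module): with n = 3 and
# i = 5, setkey(5, 3) == (True, False, True), so disjoint_set_label(5, 3)
# renders A ∩ B<sup>c</sup> ∩ C, i.e. the area inside A and C but outside B;
# with simplify=True it would collapse to just "AC".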
class VennSetItem(QGraphicsPathItem):
def __init__(self, parent=None, text="", informativeText=""):
super(VennSetItem, self).__init__(parent)
# Plain text title (editable by the VennDiagram)
self.text = text
# Extra informative text (possibly rich text)
self.informativeText = informativeText
# TODO: Use palette's selected/highligted text / background colors to
# indicate selection
class VennIntersectionArea(QGraphicsPathItem):
def __init__(self, parent=None, text=""):
super().__init__(parent)
self.setAcceptHoverEvents(True)
self.setPen(QPen(Qt.NoPen))
self.text = QGraphicsTextItem(self)
layout = self.text.document().documentLayout()
layout.documentSizeChanged.connect(self._onLayoutChanged)
self._text = text
self._anchor = QPointF()
def setText(self, text):
if self._text != text:
self._text = text
self.text.setPlainText(text)
def setTextAnchor(self, pos):
if self._anchor != pos:
self._anchor = pos
self._updateTextAnchor()
def hoverEnterEvent(self, event):
self.setZValue(self.zValue() + 1)
return QGraphicsPathItem.hoverEnterEvent(self, event)
def hoverLeaveEvent(self, event):
self.setZValue(self.zValue() - 1)
return QGraphicsPathItem.hoverLeaveEvent(self, event)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
if event.modifiers() & Qt.AltModifier:
self.setSelected(False)
elif event.modifiers() & Qt.ControlModifier:
self.setSelected(not self.isSelected())
elif event.modifiers() & Qt.ShiftModifier:
self.setSelected(True)
else:
for area in self.parentWidget().vennareas():
area.setSelected(False)
self.setSelected(True)
def mouseReleaseEvent(self, event):
pass
def paint(self, painter, option, _widget=None):
painter.save()
path = self.path()
brush = QBrush(self.brush())
pen = QPen(self.pen())
if option.state & QStyle.State_Selected:
pen.setColor(Qt.red)
brush.setStyle(Qt.DiagCrossPattern)
brush.setColor(QColor(40, 40, 40, 100))
elif option.state & QStyle.State_MouseOver:
pen.setColor(Qt.blue)
if option.state & QStyle.State_MouseOver:
brush.setColor(QColor(100, 100, 100, 100))
if brush.style() == Qt.NoBrush:
# Make sure the highlight is actually visible.
brush.setStyle(Qt.SolidPattern)
painter.setPen(pen)
painter.setBrush(brush)
painter.drawPath(path)
painter.restore()
def itemChange(self, change, value):
if change == QGraphicsPathItem.ItemSelectedHasChanged:
self.setZValue(self.zValue() + (1 if value else -1))
return QGraphicsPathItem.itemChange(self, change, value)
def _updateTextAnchor(self):
rect = self.text.boundingRect()
pos = anchor_rect(rect, self._anchor)
self.text.setPos(pos)
def _onLayoutChanged(self):
self._updateTextAnchor()
class GraphicsTextEdit(QGraphicsTextItem):
#: Edit triggers
NoEditTriggers, DoubleClicked = 0, 1
editingFinished = Signal()
editingStarted = Signal()
documentSizeChanged = Signal()
def __init__(self, *args, **kwargs):
super(GraphicsTextEdit, self).__init__(*args, **kwargs)
self.setCursor(Qt.IBeamCursor)
self.setTabChangesFocus(True)
self._edittrigger = GraphicsTextEdit.DoubleClicked
self._editing = False
self.document().documentLayout().documentSizeChanged.connect(
self.documentSizeChanged
)
def mouseDoubleClickEvent(self, event):
super(GraphicsTextEdit, self).mouseDoubleClickEvent(event)
if self._edittrigger == GraphicsTextEdit.DoubleClicked:
self._start()
def focusOutEvent(self, event):
super(GraphicsTextEdit, self).focusOutEvent(event)
if self._editing:
self._end()
def _start(self):
self._editing = True
self.setTextInteractionFlags(Qt.TextEditorInteraction)
self.setFocus(Qt.MouseFocusReason)
self.editingStarted.emit()
def _end(self):
self._editing = False
self.setTextInteractionFlags(Qt.NoTextInteraction)
self.editingFinished.emit()
class VennDiagram(QGraphicsWidget):
# rect and petal are for future work
Circle, Ellipse, Rect, Petal = 1, 2, 3, 4
TitleFormat = "<center><h4>{0}</h4>{1}</center>"
selectionChanged = Signal()
itemTextEdited = Signal(int, str)
def __init__(self, parent=None):
super(VennDiagram, self).__init__(parent)
self.shapeType = VennDiagram.Circle
self._items = []
self._vennareas = []
self._textitems = []
self._subsettextitems = []
self._textanchors = []
def item(self, index):
return self._items[index]
def items(self):
return list(self._items)
def count(self):
return len(self._items)
def setItems(self, items):
if self._items:
self.clear()
self._items = list(items)
for item in self._items:
item.setParentItem(self)
item.setVisible(True)
fmt = self.TitleFormat.format
font = self.font()
font.setPixelSize(14)
for item in items:
text = GraphicsTextEdit(self)
text.setFont(font)
text.setDefaultTextColor(QColor("#333"))
text.setHtml(fmt(escape(item.text), item.informativeText))
text.adjustSize()
text.editingStarted.connect(self._on_editingStarted)
text.editingFinished.connect(self._on_editingFinished)
text.documentSizeChanged.connect(
self._on_itemTextSizeChanged
)
self._textitems.append(text)
self._vennareas = [
VennIntersectionArea(parent=self)
for i in range(2 ** len(items))
]
self._subsettextitems = [
QGraphicsTextItem(parent=self)
for i in range(2 ** len(items))
]
self._updateLayout()
def clear(self):
scene = self.scene()
items = self.vennareas() + list(self.items()) + self._textitems
for item in self._textitems:
item.editingStarted.disconnect(self._on_editingStarted)
item.editingFinished.disconnect(self._on_editingFinished)
item.documentSizeChanged.disconnect(
self._on_itemTextSizeChanged
)
self._items = []
self._vennareas = []
self._textitems = []
self._subsettextitems = []
self._textanchors = []
for item in items:
item.setVisible(False)
item.setParentItem(None)
if scene is not None:
scene.removeItem(item)
def vennareas(self):
return list(self._vennareas)
def setFont(self, font):
if font != self.font():
self.prepareGeometryChange()
super().setFont(font)
for item in self.items():
item.setFont(font)
def _updateLayout(self):
rect = self.geometry()
n = len(self._items)
if not n:
return
regions = venn_diagram(n)
# The y axis in Qt points downward
transform = QTransform().scale(1, -1)
regions = list(map(transform.map, regions))
union_brect = reduce(QRectF.united,
(path.boundingRect() for path in regions))
scalex = rect.width() / union_brect.width()
scaley = rect.height() / union_brect.height()
scale = min(scalex, scaley)
transform = QTransform().scale(scale, scale)
regions = [transform.map(path) for path in regions]
center = rect.width() / 2, rect.height() / 2
for item, path in zip(self.items(), regions):
item.setPath(path)
item.setPos(*center)
intersections = venn_intersections(regions)
assert len(intersections) == 2 ** n
assert len(self.vennareas()) == 2 ** n
anchors = [(0, 0)] + subset_anchors(self._items)
anchor_transform = QTransform().scale(rect.width(), -rect.height())
for i, area in enumerate(self.vennareas()):
area.setPath(intersections[setkey(i, n)])
area.setPos(*center)
x, y = anchors[i]
anchor = anchor_transform.map(QPointF(x, y))
area.setTextAnchor(anchor)
area.setZValue(30)
self._updateTextAnchors()
def _updateTextAnchors(self):
n = len(self._items)
items = self._items
dist = 15
shape = reduce(QPainterPath.united, [item.path() for item in items])
brect = shape.boundingRect()
bradius = max(brect.width() / 2, brect.height() / 2)
center = self.boundingRect().center()
anchors = _category_anchors(items)
self._textanchors = []
for angle, anchor_h, anchor_v in anchors:
line = QLineF.fromPolar(bradius, angle)
ext = QLineF.fromPolar(dist, angle)
line = QLineF(line.p1(), line.p2() + ext.p2())
line = line.translated(center)
anchor_pos = line.p2()
self._textanchors.append((anchor_pos, anchor_h, anchor_v))
for i in range(n):
self._updateTextItemPos(i)
def _updateTextItemPos(self, i):
item = self._textitems[i]
anchor_pos, anchor_h, anchor_v = self._textanchors[i]
rect = item.boundingRect()
pos = anchor_rect(rect, anchor_pos, anchor_h, anchor_v)
item.setPos(pos)
def setGeometry(self, geometry):
super(VennDiagram, self).setGeometry(geometry)
self._updateLayout()
def _on_editingStarted(self):
item = self.sender()
index = self._textitems.index(item)
text = self._items[index].text
item.setTextWidth(-1)
item.setHtml(self.TitleFormat.format(escape(text), "<br/>"))
def _on_editingFinished(self):
item = self.sender()
index = self._textitems.index(item)
text = item.toPlainText()
if text != self._items[index].text:
self._items[index].text = text
self.itemTextEdited.emit(index, text)
item.setHtml(
self.TitleFormat.format(
escape(text), self._items[index].informativeText))
item.adjustSize()
def _on_itemTextSizeChanged(self):
item = self.sender()
index = self._textitems.index(item)
self._updateTextItemPos(index)
def anchor_rect(rect, anchor_pos,
anchor_h=Qt.AnchorHorizontalCenter,
anchor_v=Qt.AnchorVerticalCenter):
if anchor_h == Qt.AnchorLeft:
x = anchor_pos.x()
elif anchor_h == Qt.AnchorHorizontalCenter:
x = anchor_pos.x() - rect.width() / 2
elif anchor_h == Qt.AnchorRight:
x = anchor_pos.x() - rect.width()
else:
raise ValueError(anchor_h)
if anchor_v == Qt.AnchorTop:
y = anchor_pos.y()
elif anchor_v == Qt.AnchorVerticalCenter:
y = anchor_pos.y() - rect.height() / 2
elif anchor_v == Qt.AnchorBottom:
y = anchor_pos.y() - rect.height()
else:
raise ValueError(anchor_v)
return QPointF(x, y)
def radians(angle):
return 2 * math.pi * angle / 360
def unit_point(x, r=1.0):
x = radians(x)
return (r * math.cos(x), r * math.sin(x))
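# For instance (illustrative): unit_point(90) is approximately (0.0, 1.0) and
# unit_point(180, r=0.5) is approximately (-0.5, 0.0) -- a point on a circle
# of radius r at the given angle in degrees.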
def _category_anchors(shapes):
n = len(shapes)
return _CATEGORY_ANCHORS[n - 1]
# (angle, horizontal anchor, vertical anchor)
_CATEGORY_ANCHORS = (
# n == 1
((90, Qt.AnchorHorizontalCenter, Qt.AnchorBottom),),
# n == 2
((180, Qt.AnchorRight, Qt.AnchorVerticalCenter),
(0, Qt.AnchorLeft, Qt.AnchorVerticalCenter)),
# n == 3
((150, Qt.AnchorRight, Qt.AnchorBottom),
(30, Qt.AnchorLeft, Qt.AnchorBottom),
(270, Qt.AnchorHorizontalCenter, Qt.AnchorTop)),
# n == 4
((270 + 45, Qt.AnchorLeft, Qt.AnchorTop),
(270 - 45, Qt.AnchorRight, Qt.AnchorTop),
(90 - 15, Qt.AnchorLeft, Qt.AnchorBottom),
(90 + 15, Qt.AnchorRight, Qt.AnchorBottom)),
# n == 5
((90 - 5, Qt.AnchorHorizontalCenter, Qt.AnchorBottom),
(18 - 5, Qt.AnchorLeft, Qt.AnchorVerticalCenter),
(306 - 5, Qt.AnchorLeft, Qt.AnchorTop),
(234 - 5, Qt.AnchorRight, Qt.AnchorTop),
(162 - 5, Qt.AnchorRight, Qt.AnchorVerticalCenter),)
)
def subset_anchors(shapes):
n = len(shapes)
if n == 1:
return [(0, 0)]
elif n == 2:
return [unit_point(180, r=1/3),
unit_point(0, r=1/3),
(0, 0)]
elif n == 3:
return [unit_point(150, r=0.35), # A
unit_point(30, r=0.35), # B
unit_point(90, r=0.27), # AB
unit_point(270, r=0.35), # C
unit_point(210, r=0.27), # AC
unit_point(330, r=0.27), # BC
unit_point(0, r=0),] # ABC
elif n == 4:
anchors = [
(0.400, 0.110), # A
(-0.400, 0.110), # B
(0.000, -0.285), # AB
(0.180, 0.330), # C
(0.265, 0.205), # AC
(-0.240, -0.110), # BC
(-0.100, -0.190), # ABC
(-0.180, 0.330), # D
(0.240, -0.110), # AD
(-0.265, 0.205), # BD
(0.100, -0.190), # ABD
(0.000, 0.250), # CD
(0.153, 0.090), # ACD
(-0.153, 0.090), # BCD
(0.000, -0.060), # ABCD
]
return anchors
elif n == 5:
anchors = [None] * 32
# Base anchors
A = (0.033, 0.385)
AD = (0.095, 0.250)
AE = (-0.100, 0.265)
ACE = (-0.130, 0.220)
ADE = (0.010, 0.225)
ACDE = (-0.095, 0.175)
ABCDE = (0.0, 0.0)
anchors[-1] = ABCDE
bases = [(0b00001, A),
(0b01001, AD),
(0b10001, AE),
(0b10101, ACE),
(0b11001, ADE),
(0b11101, ACDE)]
for i in range(5):
for index, anchor in bases:
index = bit_rot_left(index, i, bits=5)
assert anchors[index] is None
anchors[index] = rotate_point(anchor, - 72 * i)
assert all(anchors[1:])
return anchors[1:]
return None
def bit_rot_left(x, y, bits=32):
mask = 2 ** bits - 1
x_masked = x & mask
return (x << y) & mask | (x_masked >> bits - y)
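# Example (illustrative): bit_rot_left(0b00001, 1, bits=5) == 0b00010 and
# bit_rot_left(0b10000, 1, bits=5) == 0b00001 -- a left rotation within a
# fixed-width word, used by subset_anchors above to generate the five-set
# anchors by symmetry.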
def rotate_point(p, angle):
r = radians(angle)
R = np.array([[math.cos(r), -math.sin(r)],
[math.sin(r), math.cos(r)]])
x, y = np.dot(R, p)
return (float(x), float(y))
def line_extended(line, distance):
"""
Return an QLineF extended by `distance` units in the positive direction.
"""
angle = line.angle() / 360 * 2 * math.pi
dx, dy = unit_point(angle, r=distance)
return QLineF(line.p1(), line.p2() + QPointF(dx, dy))
def circle_path(center, r=1.0):
return ellipse_path(center, r, r, rotation=0)
def ellipse_path(center, a, b, rotation=0):
if not isinstance(center, QPointF):
center = QPointF(*center)
brect = QRectF(-a, -b, 2 * a, 2 * b)
path = QPainterPath()
path.addEllipse(brect)
if rotation != 0:
transform = QTransform().rotate(rotation)
path = transform.map(path)
path.translate(center)
return path
# TODO: Should include anchors for text layout (both inside and outside).
# for each item {path: QPainterPath,
# text_anchors: [{center}] * (2 ** n)
# major_axis: QLineF,
# boundingRect QPolygonF (with 4 vertices)}
#
# Should be a new class with overloads for ellipse/circle, rect, and petal
# shapes, should store all constructor parameters, rotation, center,
# major/minor axis.
def venn_diagram(n):
if n < 1 or n > 5:
raise ValueError()
paths = []
if n == 1:
paths = [circle_path(center=(0, 0), r=0.5)]
elif n == 2:
angles = [180, 0]
paths = [circle_path(center=unit_point(x, r=1/6), r=1/3)
for x in angles]
elif n == 3:
angles = [150 - 120 * i for i in range(3)]
paths = [circle_path(center=unit_point(x, r=1/6), r=1/3)
for x in angles]
elif n == 4:
# Constants shamelessly stolen from VennDiagram R package
paths = [
ellipse_path((0.65 - 0.5, 0.47 - 0.5), 0.35, 0.20, 45),
ellipse_path((0.35 - 0.5, 0.47 - 0.5), 0.35, 0.20, 135),
ellipse_path((0.5 - 0.5, 0.57 - 0.5), 0.35, 0.20, 45),
ellipse_path((0.5 - 0.5, 0.57 - 0.5), 0.35, 0.20, 134),
]
elif n == 5:
# Constants shamelessly stolen from VennDiagram R package
d = 0.13
a, b = 0.24, 0.48
a, b = b, a
a, b = 0.48, 0.24
paths = [ellipse_path(unit_point((1 - i) * 72, r=d),
a, b, rotation=90 - (i * 72))
for i in range(5)]
return paths
def setkey(intval, n):
return tuple(bool(intval & (2 ** i)) for i in range(n))
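# Example (illustrative): setkey(6, 3) == (False, True, True) -- bit i of the
# integer says whether set i participates in the corresponding Venn area.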
def keyrange(n):
if n < 0:
raise ValueError()
for i in range(2 ** n):
yield setkey(i, n)
def venn_intersections(paths):
n = len(paths)
return {key: venn_intersection(paths, key) for key in keyrange(n)}
def venn_intersection(paths, key):
if not any(key):
return QPainterPath()
# first take the intersection of all included paths
path = reduce(QPainterPath.intersected,
(path for path, included in zip(paths, key) if included))
# subtract all the excluded sets (i.e. take the intersection
# with the excluded set complements)
path = reduce(QPainterPath.subtracted,
(path for path, included in zip(paths, key) if not included),
path)
return path
def append_column(data, where, variable, column):
X, Y, M = data.X, data.Y, data.metas
domain = data.domain
attr = domain.attributes
class_vars = domain.class_vars
metas = domain.metas
if where == "X":
attr = attr + (variable,)
X = np.hstack((X, column))
elif where == "Y":
class_vars = class_vars + (variable,)
Y = np.hstack((Y, column))
elif where == "M":
metas = metas + (variable,)
M = np.hstack((M, column))
else:
raise ValueError
domain = Domain(attr, class_vars, metas)
new_data = data.transform(domain)
new_data[:, variable] = column
return new_data
def arrays_equal(a, b, type_):
"""
checks if arrays have nans in same places and if not-nan elements
are equal
"""
if a is None and b is None:
return True
if a is None or b is None:
return False
if type_ is not StringVariable:
nana = np.isnan(a)
nanb = np.isnan(b)
return np.all(nana == nanb) and np.all(a[~nana] == b[~nanb])
else:
return np.all(a == b)
def pad_columns(values, mask, l):
#inflates columns with nans
a = np.full((l, 1), np.nan, dtype=values.dtype)
a[mask] = values.reshape(-1, 1)
return a
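# Example (illustrative): pad_columns(np.array([1.0, 2.0]), [0, 2], 3) yields
# a (3, 1) column [[1.0], [nan], [2.0]] -- rows listed in `mask` receive the
# values, the remaining rows stay NaN.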
def get_perm(ids, all_ids):
return [all_ids.index(el) for el in ids if el in all_ids]
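# Example (illustrative): get_perm(['b', 'a'], ['a', 'b', 'c']) == [1, 0] --
# the position of each id within all_ids, skipping ids that are not present.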
def main(): # pragma: no cover
# pylint: disable=import-outside-toplevel
from Orange.evaluation import ShuffleSplit
data = Table("brown-selected")
if not "test_rows": # change to `if not "test_rows" to test columns
data = append_column(data, "M", StringVariable("Test"),
(np.arange(len(data)).reshape(-1, 1) % 30).astype(str))
res = ShuffleSplit(n_resamples=5, test_size=0.7, stratified=False)
indices = iter(res.get_indices(data))
datasets = []
for i in range(5):
sample, _ = next(indices)
data1 = data[sample]
data1.name = chr(ord("A") + i)
datasets.append((i, data1))
else:
domain = data.domain
data1 = data.transform(Domain(domain.attributes[:15], domain.class_var))
data2 = data.transform(Domain(domain.attributes[10:], domain.class_var))
datasets = [(0, data1), (1, data2)]
WidgetPreview(OWVennDiagram).run(insertData=datasets)
if __name__ == "__main__": # pragma: no cover
main()
# /INDIpy-0.4.0.tar.gz/INDIpy-0.4.0/indi/device/properties/instance/elements.py
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Tuple, Type, Union
from indi.device import events, values
from indi.device.events import EventSource
from indi.message import checks, const, def_parts, one_parts
if TYPE_CHECKING:
from indi.device.driver import Device
from indi.device.properties.instance.vectors import Vector
logger = logging.getLogger(__name__)
class Element(EventSource):
def_message_class: Union[
Type[def_parts.DefSwitch],
Type[def_parts.DefNumber],
Type[def_parts.DefLight],
Type[def_parts.DefBLOB],
Type[def_parts.DefText],
]
set_message_class: Union[
Type[one_parts.OneSwitch],
Type[one_parts.OneNumber],
Type[one_parts.OneLight],
Type[one_parts.OneBLOB],
Type[one_parts.OneText],
]
allowed_value_types: Tuple[Type, ...] = (None.__class__,)
def __init__(self, vector: Vector, definition):
self._vector = vector
self._definition = definition
self._value = definition.default
self._enabled = definition.enabled
@property
def name(self) -> str:
return self._definition.name
@property
def device(self) -> Device:
return self._vector.device
@property
def vector(self) -> Vector:
return self._vector
@property
def enabled(self) -> bool:
return self._enabled
@enabled.setter
def enabled(self, value: bool):
self._enabled = value
@property
def value(self):
e = events.Read(element=self)
self.raise_event(e)
return self._value
@value.setter
def value(self, value):
logger.debug(
"Element: setting value of element %s to %s", self._definition.name, value
)
prev_value = self._value
self.check_value_type(value)
self._value = self.check_value(value)
self.device.send_message(self._vector.to_set_message())
if prev_value != self._value:
e = events.Change(element=self, old_value=prev_value, new_value=self._value)
self.raise_event(e)
def set_value(self, value):
e = events.Write(element=self, new_value=value)
self.raise_event(e)
if not e.prevent_default:
self.value = value
def set_value_from_message(self, msg):
self.set_value(msg.value)
def reset_value(self, value):
self._value = self.check_value(value)
def check_value(self, value):
return value
def check_value_type(self, value):
assert isinstance(
value, self.allowed_value_types
), f"Value of {self.name} should be of type {self.allowed_value_types}"
def to_def_message(self):
return self.def_message_class(
name=self._definition.name,
value=self.value,
label=self._definition.label,
)
def to_set_message(self):
return self.set_message_class(name=self._definition.name, value=self.value)
class Number(Element):
def_message_class = def_parts.DefNumber
set_message_class = one_parts.OneNumber
allowed_value_types = (
int,
float,
) + Element.allowed_value_types
def to_def_message(self):
return self.def_message_class(
name=self._definition.name,
value=values.num_to_str(self.value, self._definition.format),
label=self._definition.label,
format=self._definition.format,
min=self._definition.min,
max=self._definition.max,
step=self._definition.step,
)
def to_set_message(self):
return self.set_message_class(
name=self._definition.name,
value=values.num_to_str(self.value, self._definition.format),
)
def set_value_from_message(self, msg):
self.set_value(values.str_to_num(msg.value, self._definition.format))
class Text(Element):
def_message_class = def_parts.DefText
set_message_class = one_parts.OneText
allowed_value_types = (str,) + Element.allowed_value_types
class Switch(Element):
def_message_class = def_parts.DefSwitch
set_message_class = one_parts.OneSwitch
allowed_value_types = (str,) + Element.allowed_value_types
def check_value(self, value):
value = checks.dictionary(value, const.SwitchState)
return self._vector.apply_rule(self, value)
@property
def bool_value(self):
return self.value == const.SwitchState.ON
@bool_value.setter
def bool_value(self, value):
self.value = const.SwitchState.ON if value else const.SwitchState.OFF
def reset_bool_value(self, value):
self._value = const.SwitchState.ON if value else const.SwitchState.OFF
class Light(Element):
def_message_class = def_parts.DefLight
set_message_class = one_parts.OneLight
allowed_value_types = (str,) + Element.allowed_value_types
def check_value(self, value):
return checks.dictionary(value, const.State)
class BLOB(Element):
def_message_class = def_parts.DefBLOB
set_message_class = one_parts.OneBLOB
allowed_value_types = (values.BLOB,) + Element.allowed_value_types
def to_set_message(self):
if self.value is None:
return self.set_message_class(
name=self._definition.name, value=None, format=None, size=None
)
return self.set_message_class(
name=self._definition.name,
value=self.value.binary_base64,
format=self.value.format,
size=self.value.size,
)
def set_value_from_message(self, msg):
blob_value = values.BLOB.from_base64(msg.value, msg.format)
assert msg.size == blob_value.size
self.set_value(blob_value)
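# Usage sketch (illustrative, not part of the original module; assumes `sw`
# is a Switch element already attached to a vector):
# sw.bool_value = True   # same as sw.value = const.SwitchState.ON, which also
#                        # sends the vector's "set" message via the device
# sw.bool_value          # -> True while sw.value == const.SwitchState.ON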
// /Gnotty-0.2.7.tar.gz/Gnotty-0.2.7/gnotty/static/js/urlize.js
var urlize = (function () {
// From http://blog.stevenlevithan.com/archives/cross-browser-split
// modified to not add itself to String.prototype.
/*!
* Cross-Browser Split 1.1.1
* Copyright 2007-2012 Steven Levithan <stevenlevithan.com>
* Available under the MIT License
* ECMAScript compliant, uniform cross-browser split method
*/
/**
* Splits a string into an array of strings using a regex or string separator. Matches of the
* separator are not included in the result array. However, if `separator` is a regex that contains
* capturing groups, backreferences are spliced into the result each time `separator` is matched.
* Fixes browser bugs compared to the native `String.prototype.split` and can be used reliably
* cross-browser.
* @param {String} str String to split.
* @param {RegExp|String} separator Regex or string to use for separating the string.
* @param {Number} [limit] Maximum number of items to include in the result array.
* @returns {Array} Array of substrings.
* @example
*
* // Basic use
* split('a b c d', ' ');
* // -> ['a', 'b', 'c', 'd']
*
* // With limit
* split('a b c d', ' ', 2);
* // -> ['a', 'b']
*
* // Backreferences in result array
* split('..word1 word2..', /([a-z]+)(\d+)/i);
* // -> ['..', 'word', '1', ' ', 'word', '2', '..']
*/
var split;
// Avoid running twice; that would break the `nativeSplit` reference
split = split || function (undef) {
var nativeSplit = String.prototype.split,
compliantExecNpcg = /()??/.exec("")[1] === undef, // NPCG: nonparticipating capturing group
self;
self = function (str, separator, limit) {
// If `separator` is not a regex, use `nativeSplit`
if (Object.prototype.toString.call(separator) !== "[object RegExp]") {
return nativeSplit.call(str, separator, limit);
}
var output = [],
flags = (separator.ignoreCase ? "i" : "") +
(separator.multiline ? "m" : "") +
(separator.extended ? "x" : "") + // Proposed for ES6
(separator.sticky ? "y" : ""), // Firefox 3+
lastLastIndex = 0,
// Make `global` and avoid `lastIndex` issues by working with a copy
separator = new RegExp(separator.source, flags + "g"),
separator2, match, lastIndex, lastLength;
str += ""; // Type-convert
if (!compliantExecNpcg) {
// Doesn't need flags gy, but they don't hurt
separator2 = new RegExp("^" + separator.source + "$(?!\\s)", flags);
}
/* Values for `limit`, per the spec:
* If undefined: 4294967295 // Math.pow(2, 32) - 1
* If 0, Infinity, or NaN: 0
* If positive number: limit = Math.floor(limit); if (limit > 4294967295) limit -= 4294967296;
* If negative number: 4294967296 - Math.floor(Math.abs(limit))
* If other: Type-convert, then use the above rules
*/
limit = limit === undef ?
-1 >>> 0 : // Math.pow(2, 32) - 1
limit >>> 0; // ToUint32(limit)
while (match = separator.exec(str)) {
// `separator.lastIndex` is not reliable cross-browser
lastIndex = match.index + match[0].length;
if (lastIndex > lastLastIndex) {
output.push(str.slice(lastLastIndex, match.index));
// Fix browsers whose `exec` methods don't consistently return `undefined` for
// nonparticipating capturing groups
if (!compliantExecNpcg && match.length > 1) {
match[0].replace(separator2, function () {
for (var i = 1; i < arguments.length - 2; i++) {
if (arguments[i] === undef) {
match[i] = undef;
}
}
});
}
if (match.length > 1 && match.index < str.length) {
Array.prototype.push.apply(output, match.slice(1));
}
lastLength = match[0].length;
lastLastIndex = lastIndex;
if (output.length >= limit) {
break;
}
}
if (separator.lastIndex === match.index) {
separator.lastIndex++; // Avoid an infinite loop
}
}
if (lastLastIndex === str.length) {
if (lastLength || !separator.test("")) {
output.push("");
}
} else {
output.push(str.slice(lastLastIndex));
}
return output.length > limit ? output.slice(0, limit) : output;
};
return self;
}();
function startswith(string, prefix) {
return string.substr(0, prefix.length) == prefix;
}
function endswith(string, suffix) {
return string.substr(string.length - suffix.length, suffix.length) == suffix;
}
// http://stackoverflow.com/a/7924240/17498
function occurrences(string, substring) {
var n = 0;
var pos = 0;
while (true) {
pos = string.indexOf(substring, pos);
if (pos != -1) {
n++;
pos += substring.length;
} else{
break;
}
}
return n;
}
var unquoted_percents_re = /%(?![0-9A-Fa-f]{2})/;
// Quotes a URL if it isn't already quoted.
function smart_urlquote(url) {
// XXX: Not handling IDN.
//
// An URL is considered unquoted if it contains no % characters or
// contains a % not followed by two hexadecimal digits.
if (url.indexOf('%') == -1 || url.match(unquoted_percents_re)) {
return encodeURI(url);
} else {
return url;
}
}
var trailing_punctuation = ['.', ',', ':', ';'];
var wrapping_punctuation_django = [['(', ')'], ['<', '>'], ['&lt;', '&gt;']];
var wrapping_punctuation_improved = [['(', ')'], ['<', '>'], ['&lt;', '&gt;'],
['“', '”'], ['‘', '’']];
var word_split_re_django = /(\s+)/;
var word_split_re_improved = /([\s<>"]+)/;
var simple_url_re = /^https?:\/\/\w/;
var simple_url_2_re = /^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$/;
var simple_email_re = /^\S+@\S+\.\S+$/;
function htmlescape(html) {
return html.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;").replace(/"/g, "&quot;").replace(/'/g, "&#39;");
}
function convert_arguments(args) {
var options;
if (args.length == 2 && typeof(args[1]) == 'object') {
options = args[1];
} else {
options = {nofollow: args[1],
autoescape: args[2],
trim_url_limit: args[3],
target: args[4]};
}
if (!('django_compatible' in options))
options.django_compatible = true;
return options;
}
function urlize(text, options) {
options = convert_arguments(arguments);
function trim_url(x, limit) {
if (limit === undefined)
limit = options.trim_url_limit;
if (limit && x.length > limit)
return x.substr(0, limit - 3) + '...';
return x;
}
var safe_input = false;
var word_split_re = options.django_compatible ? word_split_re_django : word_split_re_improved;
var wrapping_punctuation = options.django_compatible ? wrapping_punctuation_django : wrapping_punctuation_improved;
var words = split(text, word_split_re);
for (var i = 0; i < words.length; i++) {
var word = words[i];
var match = undefined;
if (word.indexOf('.') != -1 ||
word.indexOf('@') != -1 ||
word.indexOf(':') != -1) {
// Deal with punctuation.
var lead = '';
var middle = word;
var trail = '';
for (var j = 0; j < trailing_punctuation.length; j++) {
var punctuation = trailing_punctuation[j];
if (endswith(middle, punctuation)) {
middle = middle.substr(0, middle.length - punctuation.length);
trail = punctuation + trail;
}
}
for (var j = 0; j < wrapping_punctuation.length; j++) {
var opening = wrapping_punctuation[j][0];
var closing = wrapping_punctuation[j][1];
if (startswith(middle, opening)) {
middle = middle.substr(opening.length);
lead = lead + opening;
}
// Keep parentheses at the end only if they're balanced.
if (endswith(middle, closing) &&
occurrences(middle, closing) == occurrences(middle, opening) + 1) {
middle = middle.substr(0, middle.length - closing.length);
trail = closing + trail;
}
}
// Make URL we want to point to.
var url = undefined;
var nofollow_attr = options.nofollow ? ' rel="nofollow"' : '';
var target_attr = options.target ? ' target="' + options.target + '"' : '';
if (middle.match(simple_url_re))
url = smart_urlquote(middle);
else if (middle.match(simple_url_2_re))
url = smart_urlquote('http://' + middle);
else if (middle.indexOf(':') == -1 && middle.match(simple_email_re)) {
// XXX: Not handling IDN.
url = 'mailto:' + middle;
nofollow_attr = '';
}
// Make link.
if (url) {
var trimmed = trim_url(middle);
if (options.autoescape) {
// XXX: Assuming autoscape == false
lead = htmlescape(lead);
trail = htmlescape(trail);
url = htmlescape(url);
trimmed = htmlescape(trimmed);
}
middle = '<a href="' + url + '"' + nofollow_attr + target_attr + '>' + trimmed + '</a>';
words[i] = lead + middle + trail;
} else {
if (safe_input) {
// Do nothing, as we have no mark_safe.
} else if (options.autoescape) {
words[i] = htmlescape(word);
}
}
} else if (safe_input) {
// Do nothing, as we have no mark_safe.
} else if (options.autoescape) {
words[i] = htmlescape(word);
}
}
return words.join('');
}
urlize.test = {};
urlize.test.split = split;
urlize.test.convert_arguments = convert_arguments;
return urlize;
})();
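// Usage sketch (illustrative, not part of the original file):
// urlize('Visit www.example.com or mail me@example.org',
//        {nofollow: true, target: '_blank', trim_url_limit: 30});
// returns the text with the URL and the e-mail address wrapped in <a> tags;
// the options object may also carry `autoescape` and `django_compatible`.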
# /MariaDB_SQLBuilder-1.0.0a5-py3-none-any.whl/mariadb_sqlbuilder/builder/insert_builder.py
from json import dumps
from typing import Union, Dict, List
from .base_builder import BaseBuilder, _transform_value_valid
class InsertBuilder(BaseBuilder):
"""
Build SQL INSERT statements: collect column/value pairs (optionally for
additional joined tables) and render them into one INSERT statement per
table via get_sql(), or run them directly with execute().
"""
def __init__(self, tb, **kwargs):
super().__init__(tb, **kwargs)
self.__ignore = False
self.__toSet = {}
self.__jsonBuildings = []
def set(self, column: str, value: Union[str, int, None]):
"""
Set the value for a column in the table.
:param column:
:param value:
:return:
"""
if not self.tb.table in self.__toSet:
self.__toSet[self.tb.table] = {}
self.__toSet[self.tb.table][column] = _transform_value_valid(value)
return self
def add_join_table(self, table: str):
"""
Add a join table to the set of tables to insert data into.
:param table:
:return:
"""
if table in self.__toSet:
return self
self.__toSet[table] = {}
return self
def table_set(self, table: str, column: str, value: Union[str, int, None]):
"""
Insert data into another table in one insert.
:param table:
:param column:
:param value:
:return:
"""
if not table in self.__toSet:
self.__toSet[table] = {}
self.__toSet[table][column] = _transform_value_valid(value)
return self
def ignore(self, _ignore: bool = True):
"""
Set whether to ignore errors during the insert.
:param _ignore:
:return:
"""
self.__ignore = _ignore
return self
def execute(self) -> bool:
"""
Execute the insert query.
:return:
"""
cursor = self.tb.connect.get_available_cursor()
result = cursor.execute(
self.get_sql()
)
cursor.connection.commit()
self.tb.connect.release_cursor(cursor)
return result
def get_sql(self) -> str:
"""
Get the SQL query string for the insert.
:return:
"""
for element in self.__jsonBuildings:
self.__set_json(element[0], element[1])
sql = ""
key: str
value: Dict[str, dict]
for key, value in self.__toSet.items():
if not value:
continue
sql += f"INSERT {'IGNORE ' if self.__ignore else ''}INTO " \
f"{key} ({', '.join(value.keys())}) VALUES ({', '.join(value.values())});"
return sql
def __set_json(self, json: Dict[str, any], pop: List[str] = None):
"""
Set values using a JSON object.
:param json:
:param pop:
:return:
"""
if pop is None:
pop = []
key: str
value: any
join_keys = list(self.__toSet)
for key, value in json.items():
if isinstance(value, dict):
if key in join_keys and not key in pop:
for sub_key, sub_value in value.items():
self.table_set(key, sub_key, sub_value)
else:
self.set(key, dumps(value))
else:
self.set(key, value)
return self
def set_json(self, json: Dict[str, any], pop: List[str] = None):
"""
Set values from a json dict (applied when the query is built).
:param json: dict with data, for example the result of a select
:param pop: pop keys from the json,
if you have keys inside that are not a table but a dict/list
:return:
"""
self.__jsonBuildings.append([json, pop])
return self
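# Usage sketch (illustrative, not part of the original module; assumes `tb`
# is a table object obtained from a mariadb_sqlbuilder connection):
# sql = InsertBuilder(tb).set("name", "Alice").set("age", 30).ignore().get_sql()
# # -> roughly: INSERT IGNORE INTO <table> (name, age) VALUES (...);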
# /Kr0nOs_Bot-3.3.11-py3-none-any.whl/redbot/cogs/audio/core/commands/llset.py
import logging
import discord
from redbot.core import commands
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass, _
log = logging.getLogger("red.cogs.Audio.cog.Commands.lavalink_setup")
class LavalinkSetupCommands(MixinMeta, metaclass=CompositeMetaClass):
@commands.group(name="llsetup", aliases=["llset"])
@commands.is_owner()
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def command_llsetup(self, ctx: commands.Context):
"""Lavalink server configuration options."""
@command_llsetup.command(name="external")
async def command_llsetup_external(self, ctx: commands.Context):
"""Toggle using external Lavalink servers."""
external = await self.config.use_external_lavalink()
await self.config.use_external_lavalink.set(not external)
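# `external` still holds the previous value; the config has just been set to its opposite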
if external:
embed = discord.Embed(
title=_("Setting Changed"),
description=_("External Lavalink server: {true_or_false}.").format(
true_or_false=_("Enabled") if not external else _("Disabled")
),
)
await self.send_embed_msg(ctx, embed=embed)
else:
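# now switching to an external server: stop the internally managed Lavalink process if one is running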
try:
if self.player_manager is not None:
await self.player_manager.shutdown()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_(
"External Lavalink server: {true_or_false}\n"
"For it to take effect please reload "
"Audio (`{prefix}reload audio`)."
).format(
true_or_false=_("Enabled") if not external else _("Disabled"),
prefix=ctx.prefix,
),
)
else:
await self.send_embed_msg(
ctx,
title=_("Setting Changed"),
description=_("External Lavalink server: {true_or_false}.").format(
true_or_false=_("Enabled") if not external else _("Disabled")
),
)
try:
self.lavalink_restart_connect()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_("Please reload Audio (`{prefix}reload audio`).").format(
prefix=ctx.prefix
),
)
@command_llsetup.command(name="host")
async def command_llsetup_host(self, ctx: commands.Context, host: str):
"""Set the Lavalink server host."""
await self.config.host.set(host)
footer = None
if await self.update_external_status():
footer = _("External Lavalink server set to True.")
await self.send_embed_msg(
ctx,
title=_("Setting Changed"),
description=_("Host set to {host}.").format(host=host),
footer=footer,
)
try:
self.lavalink_restart_connect()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_("Please reload Audio (`{prefix}reload audio`).").format(
prefix=ctx.prefix
),
)
@command_llsetup.command(name="password")
async def command_llsetup_password(self, ctx: commands.Context, password: str):
"""Set the Lavalink server password."""
await self.config.password.set(str(password))
footer = None
if await self.update_external_status():
footer = _("External Lavalink server set to True.")
await self.send_embed_msg(
ctx,
title=_("Setting Changed"),
description=_("Server password set to {password}.").format(password=password),
footer=footer,
)
try:
self.lavalink_restart_connect()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_("Please reload Audio (`{prefix}reload audio`).").format(
prefix=ctx.prefix
),
)
@command_llsetup.command(name="restport")
async def command_llsetup_restport(self, ctx: commands.Context, rest_port: int):
"""Set the Lavalink REST server port."""
await self.config.rest_port.set(rest_port)
footer = None
if await self.update_external_status():
footer = _("External Lavalink server set to True.")
await self.send_embed_msg(
ctx,
title=_("Setting Changed"),
description=_("REST port set to {port}.").format(port=rest_port),
footer=footer,
)
try:
self.lavalink_restart_connect()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_("Please reload Audio (`{prefix}reload audio`).").format(
prefix=ctx.prefix
),
)
@command_llsetup.command(name="wsport")
async def command_llsetup_wsport(self, ctx: commands.Context, ws_port: int):
"""Set the Lavalink websocket server port."""
await self.config.ws_port.set(ws_port)
footer = None
if await self.update_external_status():
footer = _("External Lavalink server set to True.")
await self.send_embed_msg(
ctx,
title=_("Setting Changed"),
description=_("Websocket port set to {port}.").format(port=ws_port),
footer=footer,
)
try:
self.lavalink_restart_connect()
except ProcessLookupError:
await self.send_embed_msg(
ctx,
title=_("Failed To Shutdown Lavalink"),
description=_("Please reload Audio (`{prefix}reload audio`).").format(
prefix=ctx.prefix
),
) | PypiClean |
/ApiLogicServer-9.2.18-py3-none-any.whl/api_logic_server_cli/create_from_model/safrs-react-admin-npm-build/static/js/2478.4e6b45fa.chunk.js | "use strict";(self.webpackChunkreact_admin_upgrade=self.webpackChunkreact_admin_upgrade||[]).push([[2478],{42478:function(e,t,i){i.r(t),i.d(t,{conf:function(){return f},language:function(){return w}});var n,r,o=i(37762),a=i(94389),l=Object.defineProperty,d=Object.getOwnPropertyDescriptor,s=Object.getOwnPropertyNames,u=Object.prototype.hasOwnProperty,c=function(e,t,i,n){if(t&&"object"===typeof t||"function"===typeof t){var r,a=(0,o.Z)(s(t));try{var c=function(){var o=r.value;u.call(e,o)||o===i||l(e,o,{get:function(){return t[o]},enumerable:!(n=d(t,o))||n.enumerable})};for(a.s();!(r=a.n()).done;)c()}catch(m){a.e(m)}finally{a.f()}}return e},m={};c(m,n=a,"default"),r&&c(r,n,"default");var p=["area","base","br","col","embed","hr","img","input","keygen","link","menuitem","meta","param","source","track","wbr"],f={wordPattern:/(-?\d*\.\d\w*)|([^\`\~\!\@\$\^\&\*\(\)\=\+\[\{\]\}\\\|\;\:\'\"\,\.\<\>\/\s]+)/g,brackets:[["\x3c!--","--\x3e"],["<",">"],["{{","}}"],["{%","%}"],["{","}"],["(",")"]],autoClosingPairs:[{open:"{",close:"}"},{open:"%",close:"%"},{open:"[",close:"]"},{open:"(",close:")"},{open:'"',close:'"'},{open:"'",close:"'"}],surroundingPairs:[{open:"<",close:">"},{open:'"',close:'"'},{open:"'",close:"'"}],onEnterRules:[{beforeText:new RegExp("<(?!(?:".concat(p.join("|"),"))(\\w[\\w\\d]*)([^/>]*(?!/)>)[^<]*$"),"i"),afterText:/^<\/(\w[\w\d]*)\s*>$/i,action:{indentAction:m.languages.IndentAction.IndentOutdent}},{beforeText:new RegExp("<(?!(?:".concat(p.join("|"),"))(\\w[\\w\\d]*)([^/>]*(?!/)>)[^<]*$"),"i"),action:{indentAction:m.languages.IndentAction.Indent}}]},w={defaultToken:"",tokenPostfix:"",builtinTags:["if","else","elseif","endif","render","assign","capture","endcapture","case","endcase","comment","endcomment","cycle","decrement","for","endfor","include","increment","layout","raw","endraw","render","tablerow","endtablerow","unless","endunless"],builtinFilters:["abs","append","at_least","at_most","capitalize","ceil","compact","date","default","divided_by","downcase","escape","escape_once","first","floor","join","json","last","lstrip","map","minus","modulo","newline_to_br","plus","prepend","remove","remove_first","replace","replace_first","reverse","round","rstrip","size","slice","sort","sort_natural","split","strip","strip_html","strip_newlines","times","truncate","truncatewords","uniq","upcase","url_decode","url_encode","where"],constants:["true","false"],operators:["==","!=",">","<",">=","<="],symbol:/[=><!]+/,identifier:/[a-zA-Z_][\w]*/,tokenizer:{root:[[/\{\%\s*comment\s*\%\}/,"comment.start.liquid","@comment"],[/\{\{/,{token:"@rematch",switchTo:"@liquidState.root"}],[/\{\%/,{token:"@rematch",switchTo:"@liquidState.root"}],[/(<)([\w\-]+)(\/>)/,["delimiter.html","tag.html","delimiter.html"]],[/(<)([:\w]+)/,["delimiter.html",{token:"tag.html",next:"@otherTag"}]],[/(<\/)([\w\-]+)/,["delimiter.html",{token:"tag.html",next:"@otherTag"}]],[/</,"delimiter.html"],[/\{/,"delimiter.html"],[/[^<{]+/]],comment:[[/\{\%\s*endcomment\s*\%\}/,"comment.end.liquid","@pop"],[/./,"comment.content.liquid"]],otherTag:[[/\{\{/,{token:"@rematch",switchTo:"@liquidState.otherTag"}],[/\{\%/,{token:"@rematch",switchTo:"@liquidState.otherTag"}],[/\/?>/,"delimiter.html","@pop"],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/[ 
\t\r\n]+/]],liquidState:[[/\{\{/,"delimiter.output.liquid"],[/\}\}/,{token:"delimiter.output.liquid",switchTo:"@$S2.$S3"}],[/\{\%/,"delimiter.tag.liquid"],[/raw\s*\%\}/,"delimiter.tag.liquid","@liquidRaw"],[/\%\}/,{token:"delimiter.tag.liquid",switchTo:"@$S2.$S3"}],{include:"liquidRoot"}],liquidRaw:[[/^(?!\{\%\s*endraw\s*\%\}).+/],[/\{\%/,"delimiter.tag.liquid"],[/@identifier/],[/\%\}/,{token:"delimiter.tag.liquid",next:"@root"}]],liquidRoot:[[/\d+(\.\d+)?/,"number.liquid"],[/"[^"]*"/,"string.liquid"],[/'[^']*'/,"string.liquid"],[/\s+/],[/@symbol/,{cases:{"@operators":"operator.liquid","@default":""}}],[/\./],[/@identifier/,{cases:{"@constants":"keyword.liquid","@builtinFilters":"predefined.liquid","@builtinTags":"predefined.liquid","@default":"variable.liquid"}}],[/[^}|%]/,"variable.liquid"]]}}}}]);
//# sourceMappingURL=2478.4e6b45fa.chunk.js.map | PypiClean |
/MRUs-0.0.7.tar.gz/MRUs-0.0.7/README.md | # MRUs: Matrix Reduction Utils
> Author @MingjunXu
## Use MRUs in your project
```python
from MRUs import *
```
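Note: the snippets below build matrices with NumPy. Whether `np` is available after the wildcard import depends on the package's `__init__` (not shown here); if it is not, import it explicitly:

```python
import numpy as np
```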
## LU Factorization
```python
A = np.array([[0, 1, 1],
[1, 1, 1],
[1, 1, 1]], dtype=float)
L, U = lu_factorization(A)
print('A matrix')
print(A)
print('L matrix')
print(L)
print('U matrix')
print(U)
print('Verification: LU')
print(np.dot(L, U))
```
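As a side note, once `L` and `U` are available they can be used to solve a linear system `Ax = b` by forward and back substitution. The sketch below is illustrative only: `solve_lu` is a hypothetical helper (not part of MRUs), and it assumes `lu_factorization` factors the chosen matrix without pivoting and that `U` has a non-zero diagonal.

```python
import numpy as np

def solve_lu(L, U, b):
    # forward substitution: solve L y = b
    n = len(b)
    y = np.zeros(n)
    for i in range(n):
        y[i] = (b[i] - L[i, :i] @ y[:i]) / L[i, i]
    # back substitution: solve U x = y
    x = np.zeros(n)
    for i in range(n - 1, -1, -1):
        x[i] = (y[i] - U[i, i + 1:] @ x[i + 1:]) / U[i, i]
    return x

A = np.array([[2, 1, 1],
              [4, 3, 3],
              [8, 7, 9]], dtype=float)
b = np.array([1, 2, 3], dtype=float)
L, U = lu_factorization(A)
print(solve_lu(L, U, b))  # should match np.linalg.solve(A, b)
```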
## QR Factorization
```python
A = np.array([[0, -20, -14],
[3, 27, -4],
[4, 11, -2]], dtype=float)
Q, R = qr_factorization(A)
print('A matrix')
print(A)
print('Q matrix')
print(Q)
print('R matrix')
print(R)
print('Verification: QR')
print(np.dot(Q, R))
```
## Orthogonal Reduction
### Householder Reduction
```python
A = np.array([[3, 2, 9],
[4, 5, 1],
[0, 0, 0]], dtype=float)
Q, R = orthogonal_reduction(A, core='householder')
print('A matrix')
print(A)
print('Q matrix')
print(Q)
print('R matrix')
print(R)
print('Verification: QR')
print(np.dot(Q, R))
```
### Givens Reduction
```python
A = np.array([[3, 2, 9],
[4, 5, 1],
[0, 0, 0]], dtype=float)
Q, R = orthogonal_reduction(A, core='givens')
print('A matrix')
print(A)
print('Q matrix')
print(Q)
print('R matrix')
print(R)
print('Verification: QR')
print(np.dot(Q, R))
```
## URV Factorization
```python
A = np.array([[-4, -2, 4, 2],
[2, -2, -2, -1],
[-4, 1, 4, 2]], dtype=float)
U, R, V = urv_factorization(A)
print('A matrix')
print(A)
print('U matrix')
print(U)
print('R matrix')
print(R)
print('V matrix')
print(V)
print('Verification: URV^T')
print(np.dot(np.dot(U, R), V))
``` | PypiClean |
/OASYS1-WONDER-1.0.45.tar.gz/OASYS1-WONDER-1.0.45/orangecontrib/wonder/widgets/wonder/ow_chebyshev_background.py |
import sys
from orangewidget.settings import Setting
from orangewidget import gui as orangegui
from orangecontrib.wonder.widgets.gui.ow_generic_parameter_widget import OWGenericWidget, OWGenericDiffractionPatternParametersWidget, ParameterBox
from orangecontrib.wonder.util.gui_utility import gui
from orangecontrib.wonder.fit.parameters.instrument.background_parameters import ChebyshevBackground
class OWChebyshevBackground(OWGenericDiffractionPatternParametersWidget):
name = "Chebyshev Background"
description = "Define Chebyshev background"
icon = "icons/chebyshev_background.png"
priority = 10
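# one list entry per diffraction pattern for each Chebyshev coefficient c0..c9: value, fixed flag, optional min/max bounds and optional function expression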
c0 = Setting([0.0])
c1 = Setting([0.0])
c2 = Setting([0.0])
c3 = Setting([0.0])
c4 = Setting([0.0])
c5 = Setting([0.0])
c6 = Setting([0.0])
c7 = Setting([0.0])
c8 = Setting([0.0])
c9 = Setting([0.0])
c0_fixed = Setting([0])
c1_fixed = Setting([0])
c2_fixed = Setting([0])
c3_fixed = Setting([0])
c4_fixed = Setting([0])
c5_fixed = Setting([0])
c6_fixed = Setting([1])
c7_fixed = Setting([1])
c8_fixed = Setting([1])
c9_fixed = Setting([1])
c0_has_min = Setting([0])
c1_has_min = Setting([0])
c2_has_min = Setting([0])
c3_has_min = Setting([0])
c4_has_min = Setting([0])
c5_has_min = Setting([0])
c6_has_min = Setting([0])
c7_has_min = Setting([0])
c8_has_min = Setting([0])
c9_has_min = Setting([0])
c0_min = Setting([0.0])
c1_min = Setting([0.0])
c2_min = Setting([0.0])
c3_min = Setting([0.0])
c4_min = Setting([0.0])
c5_min = Setting([0.0])
c6_min = Setting([0.0])
c7_min = Setting([0.0])
c8_min = Setting([0.0])
c9_min = Setting([0.0])
c0_has_max = Setting([0])
c1_has_max = Setting([0])
c2_has_max = Setting([0])
c3_has_max = Setting([0])
c4_has_max = Setting([0])
c5_has_max = Setting([0])
c6_has_max = Setting([0])
c7_has_max = Setting([0])
c8_has_max = Setting([0])
c9_has_max = Setting([0])
c0_max = Setting([0.0])
c1_max = Setting([0.0])
c2_max = Setting([0.0])
c3_max = Setting([0.0])
c4_max = Setting([0.0])
c5_max = Setting([0.0])
c6_max = Setting([0.0])
c7_max = Setting([0.0])
c8_max = Setting([0.0])
c9_max = Setting([0.0])
c0_function = Setting([0])
c1_function = Setting([0])
c2_function = Setting([0])
c3_function = Setting([0])
c4_function = Setting([0])
c5_function = Setting([0])
c6_function = Setting([0])
c7_function = Setting([0])
c8_function = Setting([0])
c9_function = Setting([0])
c0_function_value = Setting([""])
c1_function_value = Setting([""])
c2_function_value = Setting([""])
c3_function_value = Setting([""])
c4_function_value = Setting([""])
c5_function_value = Setting([""])
c6_function_value = Setting([""])
c7_function_value = Setting([""])
c8_function_value = Setting([""])
c9_function_value = Setting([""])
def __init__(self):
super().__init__()
def get_max_height(self):
return 600
def get_parameter_name(self):
return "Chebyshev Background"
def get_current_dimension(self):
return len(self.c0)
def get_parameter_box_instance(self, parameter_tab, index):
return ChebyshevBackgroundBox(widget=self,
parent=parameter_tab,
index = index,
c0 = self.c0[index],
c1 = self.c1[index],
c2 = self.c2[index],
c3 = self.c3[index],
c4 = self.c4[index],
c5 = self.c5[index],
c6 = self.c6[index],
c7 = self.c7[index],
c8 = self.c8[index],
c9 = self.c9[index],
c0_fixed = self.c0_fixed[index],
c1_fixed = self.c1_fixed[index],
c2_fixed = self.c2_fixed[index],
c3_fixed = self.c3_fixed[index],
c4_fixed = self.c4_fixed[index],
c5_fixed = self.c5_fixed[index],
c6_fixed = self.c6_fixed[index],
c7_fixed = self.c7_fixed[index],
c8_fixed = self.c8_fixed[index],
c9_fixed = self.c9_fixed[index],
c0_has_min = self.c0_has_min[index],
c1_has_min = self.c1_has_min[index],
c2_has_min = self.c2_has_min[index],
c3_has_min = self.c3_has_min[index],
c4_has_min = self.c4_has_min[index],
c5_has_min = self.c5_has_min[index],
c6_has_min = self.c6_has_min[index],
c7_has_min = self.c7_has_min[index],
c8_has_min = self.c8_has_min[index],
c9_has_min = self.c9_has_min[index],
c0_min = self.c0_min[index],
c1_min = self.c1_min[index],
c2_min = self.c2_min[index],
c3_min = self.c3_min[index],
c4_min = self.c4_min[index],
c5_min = self.c5_min[index],
c6_min = self.c6_min[index],
c7_min = self.c7_min[index],
c8_min = self.c8_min[index],
c9_min = self.c9_min[index],
c0_has_max = self.c0_has_max[index],
c1_has_max = self.c1_has_max[index],
c2_has_max = self.c2_has_max[index],
c3_has_max = self.c3_has_max[index],
c4_has_max = self.c4_has_max[index],
c5_has_max = self.c5_has_max[index],
c6_has_max = self.c6_has_max[index],
c7_has_max = self.c7_has_max[index],
c8_has_max = self.c8_has_max[index],
c9_has_max = self.c9_has_max[index],
c0_max = self.c0_max[index],
c1_max = self.c1_max[index],
c2_max = self.c2_max[index],
c3_max = self.c3_max[index],
c4_max = self.c4_max[index],
c5_max = self.c5_max[index],
c6_max = self.c6_max[index],
c7_max = self.c7_max[index],
c8_max = self.c8_max[index],
c9_max = self.c9_max[index],
c0_function = self.c0_function[index],
c1_function = self.c1_function[index],
c2_function = self.c2_function[index],
c3_function = self.c3_function[index],
c4_function = self.c4_function[index],
c5_function = self.c5_function[index],
c6_function = self.c6_function[index],
c7_function = self.c7_function[index],
c8_function = self.c8_function[index],
c9_function = self.c9_function[index],
c0_function_value = self.c0_function_value[index],
c1_function_value = self.c1_function_value[index],
c2_function_value = self.c2_function_value[index],
c3_function_value = self.c3_function_value[index],
c4_function_value = self.c4_function_value[index],
c5_function_value = self.c5_function_value[index],
c6_function_value = self.c6_function_value[index],
c7_function_value = self.c7_function_value[index],
c8_function_value = self.c8_function_value[index],
c9_function_value = self.c9_function_value[index])
def get_empty_parameter_box_instance(self, parameter_tab, index):
return ChebyshevBackgroundBox(widget=self, parent=parameter_tab, index=index)
def set_parameter_data(self):
self.fit_global_parameters.set_background_parameters([self.get_parameter_box(index).get_background() for index in range(self.get_current_dimension())])
def get_parameter_array(self):
return self.fit_global_parameters.get_background_parameters(ChebyshevBackground.__name__)
def get_parameter_item(self, diffraction_pattern_index):
return self.fit_global_parameters.get_background_parameters_item(ChebyshevBackground.__name__, diffraction_pattern_index)
def dumpSettings(self):
self.dump_c0()
self.dump_c1()
self.dump_c2()
self.dump_c3()
self.dump_c4()
self.dump_c5()
self.dump_c6()
self.dump_c7()
self.dump_c8()
self.dump_c9()
def dump_c0(self): self.dump_parameter("c0")
def dump_c1(self): self.dump_parameter("c1")
def dump_c2(self): self.dump_parameter("c2")
def dump_c3(self): self.dump_parameter("c3")
def dump_c4(self): self.dump_parameter("c4")
def dump_c5(self): self.dump_parameter("c5")
def dump_c6(self): self.dump_parameter("c6")
def dump_c7(self): self.dump_parameter("c7")
def dump_c8(self): self.dump_parameter("c8")
def dump_c9(self): self.dump_parameter("c9")
class ChebyshevBackgroundBox(ParameterBox):
def __init__(self,
widget=None,
parent=None,
index = 0,
c0 = 0.0,
c1 = 0.0,
c2 = 0.0,
c3 = 0.0,
c4 = 0.0,
c5 = 0.0,
c6 = 0.0,
c7 = 0.0,
c8 = 0.0,
c9 = 0.0,
c0_fixed = 0,
c1_fixed = 0,
c2_fixed = 0,
c3_fixed = 0,
c4_fixed = 0,
c5_fixed = 0,
c6_fixed = 1,
c7_fixed = 1,
c8_fixed = 1,
c9_fixed = 1,
c0_has_min = 0,
c1_has_min = 0,
c2_has_min = 0,
c3_has_min = 0,
c4_has_min = 0,
c5_has_min = 0,
c6_has_min = 0,
c7_has_min = 0,
c8_has_min = 0,
c9_has_min = 0,
c0_min = 0.0,
c1_min = 0.0,
c2_min = 0.0,
c3_min = 0.0,
c4_min = 0.0,
c5_min = 0.0,
c6_min = 0.0,
c7_min = 0.0,
c8_min = 0.0,
c9_min = 0.0,
c0_has_max = 0,
c1_has_max = 0,
c2_has_max = 0,
c3_has_max = 0,
c4_has_max = 0,
c5_has_max = 0,
c6_has_max = 0,
c7_has_max = 0,
c8_has_max = 0,
c9_has_max = 0,
c0_max = 0.0,
c1_max = 0.0,
c2_max = 0.0,
c3_max = 0.0,
c4_max = 0.0,
c5_max = 0.0,
c6_max = 0.0,
c7_max = 0.0,
c8_max = 0.0,
c9_max = 0.0,
c0_function = 0,
c1_function = 0,
c2_function = 0,
c3_function = 0,
c4_function = 0,
c5_function = 0,
c6_function = 0,
c7_function = 0,
c8_function = 0,
c9_function = 0,
c0_function_value = "",
c1_function_value = "",
c2_function_value = "",
c3_function_value = "",
c4_function_value = "",
c5_function_value = "",
c6_function_value = "",
c7_function_value = "",
c8_function_value = "",
c9_function_value = ""):
super(ChebyshevBackgroundBox, self).__init__(widget=widget,
parent=parent,
index=index,
c0=c0,
c1 = c1,
c2 = c2,
c3 = c3,
c4 = c4,
c5 = c5,
c6 = c6,
c7 = c7,
c8 = c8,
c9 = c9,
c0_fixed = c0_fixed,
c1_fixed = c1_fixed,
c2_fixed = c2_fixed,
c3_fixed = c3_fixed,
c4_fixed = c4_fixed,
c5_fixed = c5_fixed,
c6_fixed = c6_fixed,
c7_fixed = c7_fixed,
c8_fixed = c8_fixed,
c9_fixed = c9_fixed,
c0_has_min = c0_has_min,
c1_has_min = c1_has_min,
c2_has_min = c2_has_min,
c3_has_min = c3_has_min,
c4_has_min = c4_has_min,
c5_has_min = c5_has_min,
c6_has_min = c6_has_min,
c7_has_min = c7_has_min,
c8_has_min = c8_has_min,
c9_has_min = c9_has_min,
c0_min = c0_min,
c1_min = c1_min,
c2_min = c2_min,
c3_min = c3_min,
c4_min = c4_min,
c5_min = c5_min,
c6_min = c6_min,
c7_min = c7_min,
c8_min = c8_min,
c9_min = c9_min,
c0_has_max = c0_has_max,
c1_has_max = c1_has_max,
c2_has_max = c2_has_max,
c3_has_max = c3_has_max,
c4_has_max = c4_has_max,
c5_has_max = c5_has_max,
c6_has_max = c6_has_max,
c7_has_max = c7_has_max,
c8_has_max = c8_has_max,
c9_has_max = c9_has_max,
c0_max = c0_max,
c1_max = c1_max,
c2_max = c2_max,
c3_max = c3_max,
c4_max = c4_max,
c5_max = c5_max,
c6_max = c6_max,
c7_max = c7_max,
c8_max = c8_max,
c9_max = c9_max,
c0_function = c0_function,
c1_function = c1_function,
c2_function = c2_function,
c3_function = c3_function,
c4_function = c4_function,
c5_function = c5_function,
c6_function = c6_function,
c7_function = c7_function,
c8_function = c8_function,
c9_function = c9_function,
c0_function_value = c0_function_value,
c1_function_value = c1_function_value,
c2_function_value = c2_function_value,
c3_function_value = c3_function_value,
c4_function_value = c4_function_value,
c5_function_value = c5_function_value,
c6_function_value = c6_function_value,
c7_function_value = c7_function_value,
c8_function_value = c8_function_value,
c9_function_value = c9_function_value)
def get_height(self):
return 400
def init_fields(self, **kwargs):
self.c0 = kwargs["c0"]
self.c1 = kwargs["c1"]
self.c2 = kwargs["c2"]
self.c3 = kwargs["c3"]
self.c4 = kwargs["c4"]
self.c5 = kwargs["c5"]
self.c6 = kwargs["c6"]
self.c7 = kwargs["c7"]
self.c8 = kwargs["c8"]
self.c9 = kwargs["c9"]
self.c0_fixed = kwargs["c0_fixed"]
self.c1_fixed = kwargs["c1_fixed"]
self.c2_fixed = kwargs["c2_fixed"]
self.c3_fixed = kwargs["c3_fixed"]
self.c4_fixed = kwargs["c4_fixed"]
self.c5_fixed = kwargs["c5_fixed"]
self.c6_fixed = kwargs["c6_fixed"]
self.c7_fixed = kwargs["c7_fixed"]
self.c8_fixed = kwargs["c8_fixed"]
self.c9_fixed = kwargs["c9_fixed"]
self.c0_has_min = kwargs["c0_has_min"]
self.c1_has_min = kwargs["c1_has_min"]
self.c2_has_min = kwargs["c2_has_min"]
self.c3_has_min = kwargs["c3_has_min"]
self.c4_has_min = kwargs["c4_has_min"]
self.c5_has_min = kwargs["c5_has_min"]
self.c6_has_min = kwargs["c6_has_min"]
self.c7_has_min = kwargs["c7_has_min"]
self.c8_has_min = kwargs["c8_has_min"]
self.c9_has_min = kwargs["c9_has_min"]
self.c0_min = kwargs["c0_min"]
self.c1_min = kwargs["c1_min"]
self.c2_min = kwargs["c2_min"]
self.c3_min = kwargs["c3_min"]
self.c4_min = kwargs["c4_min"]
self.c5_min = kwargs["c5_min"]
self.c6_min = kwargs["c6_min"]
self.c7_min = kwargs["c7_min"]
self.c8_min = kwargs["c8_min"]
self.c9_min = kwargs["c9_min"]
self.c0_has_max = kwargs["c0_has_max"]
self.c1_has_max = kwargs["c1_has_max"]
self.c2_has_max = kwargs["c2_has_max"]
self.c3_has_max = kwargs["c3_has_max"]
self.c4_has_max = kwargs["c4_has_max"]
self.c5_has_max = kwargs["c5_has_max"]
self.c6_has_max = kwargs["c6_has_max"]
self.c7_has_max = kwargs["c7_has_max"]
self.c8_has_max = kwargs["c8_has_max"]
self.c9_has_max = kwargs["c9_has_max"]
self.c0_max = kwargs["c0_max"]
self.c1_max = kwargs["c1_max"]
self.c2_max = kwargs["c2_max"]
self.c3_max = kwargs["c3_max"]
self.c4_max = kwargs["c4_max"]
self.c5_max = kwargs["c5_max"]
self.c6_max = kwargs["c6_max"]
self.c7_max = kwargs["c7_max"]
self.c8_max = kwargs["c8_max"]
self.c9_max = kwargs["c9_max"]
self.c0_function = kwargs["c0_function"]
self.c1_function = kwargs["c1_function"]
self.c2_function = kwargs["c2_function"]
self.c3_function = kwargs["c3_function"]
self.c4_function = kwargs["c4_function"]
self.c5_function = kwargs["c5_function"]
self.c6_function = kwargs["c6_function"]
self.c7_function = kwargs["c7_function"]
self.c8_function = kwargs["c8_function"]
self.c9_function = kwargs["c9_function"]
self.c0_function_value = kwargs["c0_function_value"]
self.c1_function_value = kwargs["c1_function_value"]
self.c2_function_value = kwargs["c2_function_value"]
self.c3_function_value = kwargs["c3_function_value"]
self.c4_function_value = kwargs["c4_function_value"]
self.c5_function_value = kwargs["c5_function_value"]
self.c6_function_value = kwargs["c6_function_value"]
self.c7_function_value = kwargs["c7_function_value"]
self.c8_function_value = kwargs["c8_function_value"]
self.c9_function_value = kwargs["c9_function_value"]
def init_gui(self, container):
OWGenericWidget.create_box_in_widget(self, container, "c0", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c1", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c2", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c3", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c4", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c5", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c6", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c7", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c8", add_callback=True, trim=25)
OWGenericWidget.create_box_in_widget(self, container, "c9", add_callback=True, trim=25)
def callback_c0(self):
if not self.is_on_init: self.widget.dump_c0()
def callback_c1(self):
if not self.is_on_init: self.widget.dump_c1()
def callback_c2(self):
if not self.is_on_init: self.widget.dump_c2()
def callback_c3(self):
if not self.is_on_init: self.widget.dump_c3()
def callback_c4(self):
if not self.is_on_init: self.widget.dump_c4()
def callback_c5(self):
if not self.is_on_init: self.widget.dump_c5()
def callback_c6(self):
if not self.is_on_init: self.widget.dump_c6()
def callback_c7(self):
if not self.is_on_init: self.widget.dump_c7()
def callback_c8(self):
if not self.is_on_init: self.widget.dump_c8()
def callback_c9(self):
if not self.is_on_init: self.widget.dump_c9()
def get_basic_parameter_prefix(self):
return ChebyshevBackground.get_parameters_prefix()
def set_data(self, background_parameters):
OWGenericWidget.populate_fields_in_widget(self, "c0", background_parameters.c0, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c1", background_parameters.c1, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c2", background_parameters.c2, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c3", background_parameters.c3, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c4", background_parameters.c4, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c5", background_parameters.c5, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c6", background_parameters.c6, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c7", background_parameters.c7, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c8", background_parameters.c8, value_only=True)
OWGenericWidget.populate_fields_in_widget(self, "c9", background_parameters.c9, value_only=True)
def get_background(self):
return ChebyshevBackground(c0=OWGenericWidget.get_fit_parameter_from_widget(self, "c0", self.get_parameters_prefix()),
c1=OWGenericWidget.get_fit_parameter_from_widget(self, "c1", self.get_parameters_prefix()),
c2=OWGenericWidget.get_fit_parameter_from_widget(self, "c2", self.get_parameters_prefix()),
c3=OWGenericWidget.get_fit_parameter_from_widget(self, "c3", self.get_parameters_prefix()),
c4=OWGenericWidget.get_fit_parameter_from_widget(self, "c4", self.get_parameters_prefix()),
c5=OWGenericWidget.get_fit_parameter_from_widget(self, "c5", self.get_parameters_prefix()),
c6=OWGenericWidget.get_fit_parameter_from_widget(self, "c6", self.get_parameters_prefix()),
c7=OWGenericWidget.get_fit_parameter_from_widget(self, "c7", self.get_parameters_prefix()),
c8=OWGenericWidget.get_fit_parameter_from_widget(self, "c8", self.get_parameters_prefix()),
c9=OWGenericWidget.get_fit_parameter_from_widget(self, "c9", self.get_parameters_prefix()))
from PyQt5.QtWidgets import QApplication
if __name__ == "__main__":
a = QApplication(sys.argv)
ow = OWChebyshevBackground()
ow.show()
a.exec_()
ow.saveSettings() | PypiClean |
/EMPOL_GUI-2.2.8.tar.gz/EMPOL_GUI-2.2.8/EMPOL_GUI/empol_astrometry.py | import numpy as np
#import pyfitsf
import os, sys
import time
import math
import time, datetime
import string
import urllib.request, urllib.parse, urllib.error
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import sigma_clipped_stats
import glob
import subprocess
import warnings
import matplotlib.pyplot as plt
import photutils
import natsort
import pyregion
import wget
import fnmatch
import os.path
#filter = 'R'
#cluster = 'Teutsch1'
#match = 'W7'
proxies = {'http':'http://namita:[email protected]:3128',
'https':'https://namita:[email protected]:3128'}
sexcommand = 'sex'
#path = '/home/ubu123/Desktop/PRL/EMPOL/EMPOL_GUI/EMPOL_V2/V2_Data/23_oct'
#F = '/home/namita/Documents/Open_clusters/Mt_ABU/2021_Feb/EMPOL_Feb2021/Feb_6'
"""
Folder= path +'/'+cluster+'/frac_med_setcombine12_'+ filter
img_list = os.listdir(Folder)
img_list = natsort.natsorted(img_list)
imglist = []
for files in img_list:
if fnmatch.fnmatch(files, 'W7*'):
print(files)
imglist.append(files)
print(imglist)
"""
# CD1_1, CD2_1, CD1_2, CD2_2 are conversion matrix (rotational and translational) from CRPX1, CRPX2 (image center) to CRVAL1, CRVAL2 (cental ra, dec)
def edheader(Image, hdr, ra, dec, outpath, outfile): #add some quantities to the image headers
hdr.set('RA', ra) # central ra
hdr.set('DEC', dec) # central dec
hdr.set('CD1_1',0.00018) # fix for empol
hdr.set('CD1_2',0.000008) # fix for empol
hdr.set('CD2_1',0.000008) # fix for empol
hdr.set('CD2_2',-0.00018) # fix for empol
hdr.set('CRPIX1',128) #central x-pixel (for empol 256/2 == header('NAXIS1')/2)
hdr.set('CRPIX2',128) #central y- pixel (for empol 256/2 == header('NAXIS2')/2)
hdr.set('CRVAL1',ra)
hdr.set('CRVAL2',dec)
hdr.set('CTYPE1', 'RA---TAN') # fix
hdr.set('CTYPE2', 'DEC--TAN') # fix
hdr.set('EQUINOX',2000.0) #fix
F = os.path.join(outpath, outfile)
fits.writeto(F, Image, hdr, overwrite=True)
return(F)
def getcat(catalog, ra, dec, outpath, outfile):
boxsize= 1000#740#370 # (in arcsec) fix for empol
url = "http://tdc-www.harvard.edu/cgi-bin/scat?catalog=" + catalog + "&ra=" + str(ra) + "&dec=" + str(dec) + "&system=J2000&rad=" + str(boxsize) + "&sort=mag&epoch=2000.00000&nstar=6400" # catalog in string like 'ua2'(mag range 12-22) - for cezernik (for more catalogs visit imwcs site)
#out=os.path.join(outpath, outfile) #outfile and outpath must be a string
filename = wget.download(url, os.path.join(outpath, outfile))
#print(outfile)
return(outfile)
def Sextractor(code_path, NewImgName, border, corner, outpath, outfile, mag, flg): # NewImage having additional hedaer info (after edheader), border - x,y pixel to exclude
#print(outpath)
#print(NewImgName)
os.chdir(code_path)
os.system(sexcommand + ' -c default.sex'+ ' '+ NewImgName) # default.sex file and all the default files in the /usr/share/sextractor folder should be present in the same folder as the Image files. The extracted source file will be saved in the same folder with the name given in default.sex (source.cat)
xmin = ymin = border
xmax = ymax = 256-border
source = np.loadtxt(os.path.join(code_path,'source.cat'), comments='#')
M = source[:,7]
ind = np.where(M==99)
M = np.delete(M, ind[0])
#print(max(source[:,7]), min(source[:,7]))
#source[:,0] = source[:,0]-1 # the sourceextractor assume the origin as (1,1) while python use (0,0)
#source[:,1] = source[:,1]-1 # minus 1 is introduced to convert the Sextractor output to python
goodsource=[]
i=0
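# keep only detections away from the frame border and corners, with acceptable magnitude error, flag and magnitude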
while(i<len(source[:,0 ]-1)):
star = source[i,:] # one row all the columns (0=id, 1=mag, 2=x, 3=y)
i=i+1
if(star[0] < xmin): continue # exclude all the points with xpix < 15
if(star[0] > xmax): continue # exclude all the points with xpix > 240
if(star[1] < ymin): continue # exclude all the points with ypix < 15
if(star[1] > ymax): continue # # exclude all the points with ypix > 240
if(star[0]+star[1]< corner): continue
if(star[0] + (256-star[1])) < corner: continue
if((256-star[0]) < corner): continue
if((256-star[0]) + (256-star[1]) < corner): continue
if(star[8] > mag): continue # magnitude error constraint - change it if necessary
if(star[9] > flg): continue
if(star[7] >= max(M)-3.0):continue
goodsource.append(star)
#print(goodsource, len(goodsource))
np.savetxt(os.path.join(outpath, outfile), goodsource, header='1 X_IMAGE Object position along x [pixel] \n 2 Y_IMAGE Object position along y [pixel] \n 3 MAG_BEST Best of MAG_AUTO and MAG_ISOCOR [mag] \n 4 FLUX_APER Flux vector within fixed circular aperture(s) [count] \n 5 FLUXERR_APER RMS error vector for aperture flux(es) [count] \n 6 MAG_APER Fixed aperture magnitude vector [mag] \n 7 MAGERR_APER RMS error vector for fixed aperture mag. [mag] \n 8 FLAGS Extraction flags') # header should be same as that of the 'source.cat' file (source extracter file), so edit header if u are editing default.params
return(goodsource)
#Folder= path+'/'+cluster+'/frac_med_setcombine12_'+filter
###################################### for one file only ra = 15.78 & dec 62.787 (CZ3), ra = 111.533 & dec = -15.093(waterloo 7)#########################
def do_astrometry(code_path, path,cluster,filter, name, imglist, ra, dec):
out=[]
Folder= path+'/'+cluster+'/frac_med_setcombine12_'+filter
#img_list = os.listdir(Folder)
#img_list = natsort.natsorted(img_list)
#print(img_list)
#imglist = []
#for files in img_list:
# if fnmatch.fnmatch(files, name+'*'):
# imglist.append(files)
#print(imglist[0])
img = fits.getdata(os.path.join(Folder, imglist[0]))
hdul = fits.open(os.path.join(Folder,imglist[0]))
hdr = hdul[0].header
#print(hdr)
cat = ['ua2', 'ucac4','tmc']
con1 = np.array([10, 0.25, 0.2, 0.15, 0.1]) # e_mag constrain
con2 = np.array([0, 100]) # flag (only integer values are possible)
err = []
ID = []
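# grid search: try each reference catalog with each (magnitude-error, flag) constraint pair and keep the WCS solution with the smallest WCSSEP reported by imwcs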
for a in range(0,3):
cat_down = getcat(cat[a], ra, dec, Folder, 'cat_'+cat[a]+'.txt')
for b in range(0, len(con1)):
for c in range(0, len(con2)):
edimg = edheader(img, hdr, ra, dec, Folder, 'ed'+str(a)+str(b)+str(c)+'_'+imglist[0])
Source_ext = Sextractor(code_path, edimg, 10, 20, Folder, 'ed'+str(a)+str(b)+str(c)+'_source_ext_'+name+'_'+filter+'_0.txt', con1[b], con2[c])
os.chdir(Folder)
os.system('imwcs' + ' -d ' + os.path.join(Folder, 'ed'+str(a)+str(b)+str(c)+'_source_ext_'+name+'_'+filter+'_0.txt') + ' -c '+cat_down+' -q i9 -v -w ' + edimg+' > output.txt')
time.sleep(2)
I = 'ed'+str(a)+str(b)+str(c)+'_'+imglist[0]
initial = I[:-5]
print(initial)
file_exists = os.path.isfile(initial+'w.fits')
if file_exists:
print('exist')
else:
head = hdr
head.set('WCSSEP', 100)
fits.writeto(initial+'w.fits', img, head)
final = fits.open(initial+'w.fits')
sep = final[0].header['WCSSEP']
print(a,b,c,' = ', sep)
ID.append(str(a)+str(b)+str(c))
err.append(sep)
os.chdir(code_path)
min_err = np.argmin(err)
print(err[min_err], ID[min_err])
out.append(err[min_err])
out.append(ID[min_err])
return(out)
#code_path = '/home/ubu123/Desktop/PRL/EMPOL/EMPOL_GUI/EMPOL_V2/V2_Code'
#path = '/home/ubu123/Desktop/PRL/EMPOL/EMPOL_GUI/EMPOL_V2/V2_Data/23_oct'
#cluster = 'Teutsch1'
#filter = 'R'
#imgpath = path+'/'+cluster+'/frac_med_setcombine12_'+filter
#imglist = os.listdir(imgpath)
#imglist = natsort.natsorted(imglist)
#name = 'Teutsch1'
#ra = 84.87208 ##15.77875 #15.77875 #113.661 #111.532
#dec= 33.34611
#ast_id = astrometry(code_path, path,cluster,filter, name, imglist, ra, dec)
#print("This is ast", ast_id)
#Folder= path+'/'+cluster+'/frac_med_setcombine12_'+filter | PypiClean |
/GA_kit-0.2.1-py3-none-any.whl/GA_kit/hboa.py |
from sys import stderr
from math import log, lgamma
from .sga import PMBGA, log2
class Bayesian_Network (object) :
def __init__ \
( self, hboa, genes
, lgamma = lgamma
, do_debug = 0
, verbose = 0
, max_parent = 0
, min_split = 0 # do not split below that number
, s_penalty = 2.0 # Should be the tournament size
) :
self.hboa = hboa
self.nodecount = len (genes [0])
self.n = len (genes)
self.genes = genes
self.nodes = {}
self.roots = {}
self.cutoff = log2 (self.n) / 2.0 * s_penalty
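# model-complexity penalty: a split is only accepted if it improves the log score by more than log2(N)/2, scaled by the selection pressure s_penalty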
self.do_debug = do_debug
self.verbose = verbose
self.lgamma = lgamma
self.maxparent = max_parent
self.min_split = min_split
for bitidx in range (self.nodecount) :
node = BNode (self, bitidx)
self.nodes [node] = 1
self.roots [node] = 1
# The initial candidates already need the full list of nodes
for n in self.nodes :
n.add_initial_candidates ()
nsplit = 0
while 1 :
maxgain = -1
leave = None
cidx = -1
for node in self.nodes :
for l in node.leaves () :
for c in l.candidate_iter () :
if c.feasible () :
if c.gain > maxgain :
maxgain = c.gain
leave = l
cidx = c.idx
# Since candidates are returned best-first
# we can stop after the first found
break
else :
l.del_candidate (c)
if maxgain <= 0 :
break
if self.verbose :
print ("maxgain: %2.2f" % maxgain, end = ' ')
print \
( "%4.2f -> %4.2f %4.2f"
% ( leave.score
, leave.candidates [cidx].children [0].score
, leave.candidates [cidx].children [1].score
)
)
nsplit += 1
leave.split (cidx)
print ("nsplit: %s" % nsplit)
# end def __init__
def debug (self, *args, **kw) :
if self.do_debug :
print (*args, **kw, file = stderr)
# end def debug
def _sample_node (self, node, d) :
dn = node.dnode
while not isinstance (dn, DLeaf) :
dn = dn.children [d [dn.idx]]
assert dn.idx == node.idx
bit = self.hboa.random_flip (dn.p)
d [dn.idx] = bit
for c in node.children :
if c.idx in d :
continue
for parent in c.parents :
if parent.idx not in d :
break
else :
self._sample_node (c, d)
# end def _sample_node
def sample_model (self) :
d = {}
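# sample bits in topological order: start at the root nodes and only descend to a child once all of its parents have been sampled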
for r in self.roots :
self._sample_node (r, d)
return d
# end def sample_model
# end class Bayesian_Network
class BNode (object) :
def __init__ (self, net, idx) :
self.net = net
self.debug = net.debug
self.verbose = net.verbose
self.genes = net.genes
self.idx = idx
self.parents = {}
self.children = {}
self.lvl = 0
self.dnode = DLeaf (self, self, 0, self.genes)
self.rank = 0
# end def __init__
def add_initial_candidates (self) :
for node in self.net.nodes :
if node.idx == self.idx :
continue
self.dnode.try_add_candidate (node)
# end def add_initial_candidates
def append_parent (self, node) :
self.debug ("append_parent: %d %d" % (self.idx, node.idx))
assert node not in self.children
assert self not in node.parents
self.parents [node] = 1
node.children [self] = 1
if self in self.net.roots :
del self.net.roots [self]
# end def append_parent
def leaves (self, root = None) :
""" The root can also be a leaf
"""
if not root :
root = self.dnode
if isinstance (root, DLeaf) :
yield (root)
else :
for c in root.children :
if isinstance (c, DLeaf) :
yield (c)
else :
for cx in self.leaves (c) :
yield (cx)
# end def leaves
def is_transitive_parent (self, node, visited = None) :
if visited is None :
visited = {}
for c in self.children :
if c in visited :
continue
visited [c] = 1
if node is c :
return True
if c.is_transitive_parent (node, visited) :
return True
return False
# end def is_transitive_parent
def may_append_parent (self, node) :
if node is self :
return False
if (self.net.maxparent and len (self.parents) >= self.net.maxparent) :
return False
if self.is_transitive_parent (node) :
return False
return True
# end def may_append_parent
def __hash__ (self) :
return self.idx
# end def __hash__
def __repr__ (self) :
if self.parents :
self.rank = max (self.rank, max (p.rank for p in self.parents) + 1)
r = [ "%sNode: %s children: %s parents: %s"
% ( '-' * self.rank
, self.idx
, tuple (c.idx for c in self.children)
, tuple (p.idx for p in self.parents)
)
]
if self.verbose :
indent = ' ' * self.rank
dn = str (self.dnode)
dn = '\n'.join (indent + d for d in dn.split ('\n'))
r.append (dn)
#for c in self.children :
# r.append (str (c))
return '\n'.join (r)
# end def __repr__
__str__ = __repr__
# end class BNode
class DNode (object) :
""" Binary decision tree node
A Tree either has two children.
A child may be a leaf not (which contains a single probability).
or another DNode.
"""
def __init__ (self, bnode, parent, cidx = None) :
self.bnode = bnode
self.debug = self.bnode.debug
self.idx = bnode.idx
self.children = []
self.parent = parent
if cidx is None :
self.genes = parent.genes
else :
self.genes = parent.gsplit [cidx]
self.lvl = self.parent.lvl + 1
self.gsplit = [[], []]
self.n = 0
for g in self.genes :
self.gsplit [g [self.idx]].append (g)
self.n += 1
# end def __init__
def __repr__ (self) :
indent = ' ' * self.lvl
r = []
r.append ("%s%d" % (indent, self.idx))
for c in self.children :
r.append (str (c))
return '\n'.join (r)
# end def __repr__
__str__ = __repr__
def feasible (self) :
assert self.idx == self.bnode.idx
assert self.idx != self.parent.idx
f = self.children [0].bnode.may_append_parent (self.bnode)
self.debug \
( "feasible %s: split %s on %s"
% (f, self.children [0].bnode.idx, self.idx)
)
return f
# end def feasible
# end class DNode
class DLeaf (object) :
""" Binary decision tree leaf
"""
def __init__ (self, bnode, parent, cidx, genes) :
self.bnode = bnode
self.idx = bnode.idx
self.cidx = cidx
self.parent = parent
self.lvl = parent.lvl + 1
self.genes = genes
self.candidates = {}
self.by_gain = None
self.debug = self.bnode.debug
self.min_split = self.bnode.net.min_split
self.n = 0
self.n1 = 0
for g in self.genes :
if g [self.idx] :
self.n1 += 1
self.n += 1
if self.n == 0 :
self.p = 1.0
else :
self.p = 1.0 * self.n1 / self.n
lgamma = self.bnode.net.lgamma
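# leaf score: log B(n1+1, n0+1), the marginal likelihood of the bits in this leaf under a uniform prior (Bayesian-Dirichlet metric)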
self.score = 0.0
self.score += lgamma (1 + self.n1)
self.score += lgamma (1 + self.n - self.n1)
self.score -= lgamma (2 + self.n)
if not isinstance (self.parent, BNode) :
self.parent.children.append (self)
assert len (self.parent.children) <= 2
# end def __init__
def __repr__ (self) :
indent = ' ' * self.lvl
return ("%s%d: %1.4f" % (indent, self.idx, self.p))
# end def __repr__
__str__ = __repr__
def candidate_iter (self) :
""" Iterate over candidates in sorted (best first) order
We take care to check if the candidate is still existing
"""
if self.by_gain is None :
self.by_gain = list \
(sorted
( self.candidates.keys ()
, key = lambda x : -self.candidates [x].gain
)
)
for idx in self.by_gain :
if idx in self.candidates :
yield (self.candidates [idx])
# end def candidate_iter
def del_candidate (self, cand) :
del self.candidates [cand.idx]
# end def del_candidate
def try_add_candidate (self, bnode) :
""" Try split on bnode
"""
if self.n < self.min_split :
return 0.0
idx = bnode.idx
assert idx not in self.candidates
assert idx != self.idx
p = self.parent
# Do we already have a split on that idx?
while not isinstance (p, BNode) :
if p.idx == idx :
return
p = p.parent
cidx = self.cidx
if isinstance (self.parent, BNode) :
cidx = None
n = DNode (bnode, self.parent, cidx)
c1 = self.__class__ (self.bnode, n, 0, genes = n.gsplit [0])
c2 = self.__class__ (self.bnode, n, 1, genes = n.gsplit [1])
n.gain = c1.score + c2.score - self.score - self.bnode.net.cutoff
if n.gain > 0 :
self.candidates [n.idx] = n
return n.gain
# end def try_add_candidate
def split (self, idx) :
cand = self.candidates [idx]
if isinstance (self.parent, BNode) :
assert self.cidx == 0
self.parent.dnode = cand
else :
self.parent.children [self.cidx] = cand
self.debug ("split %s on %s" % (self.idx, cand.bnode.idx))
self.debug ("split (dnode):", cand.idx, self.parent.idx, end = ' ')
self.debug ("split (bnode):", cand.bnode.idx, self.bnode.idx, end = ' ')
self.debug ("cbnode:", cand.children [0].idx)
self.bnode.append_parent (cand.bnode)
for node in self.bnode.net.nodes :
if self.bnode.may_append_parent (node) :
for l in cand.children :
g = l.try_add_candidate (node)
# end def split
# end class DLeaf
class HBOA (PMBGA) :
""" hierarchical Bayesian Optimization Algorithm
"""
def build_model (self, p_pop) :
self.__super.build_model (p_pop)
self.net = Bayesian_Network \
( self, self.genes, lgamma = self.lgamma
, s_penalty = self.s_penalty
, min_split = self.min_split
, max_parent = self.max_parent
)
# end def build_model
def clear_cache (self) :
pass
# end def clear_cache
def lgamma (self, x) :
""" lgamma cache, about 3 times faster than calling lgamma
"""
return self.l_gamma [x]
# end def lgamma
def sample_individual (self, p, pop) :
d = self.net.sample_model ()
assert len (d) == len (self)
for k in sorted (d) :
self.set_allele (p, pop, k, d [k])
# end def sample_individual
def post_init (self) :
self.__super.post_init ()
self.do_debug = 0
self.verbose = 0
self.clear_cache ()
self.l_gamma = [0]
for k in range (self.pop_size + 2) :
self.l_gamma.append (lgamma (k + 1))
# end def post_init
def print_model (self) :
for r in self.net.nodes :
print (r, file = self.file)
# end def print_model
# end class HBOA | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components/prism-handlebars.js | (function(Prism) {
var handlebars_pattern = /\{\{\{[\s\S]+?\}\}\}|\{\{[\s\S]+?\}\}/;
Prism.languages.handlebars = Prism.languages.extend('markup', {
'handlebars': {
pattern: handlebars_pattern,
inside: {
'delimiter': {
pattern: /^\{\{\{?|\}\}\}?$/i,
alias: 'punctuation'
},
'string': /(["'])(?:\\.|(?!\1)[^\\\r\n])*\1/,
'number': /\b-?(?:0x[\dA-Fa-f]+|\d*\.?\d+(?:[Ee][+-]?\d+)?)\b/,
'boolean': /\b(?:true|false)\b/,
'block': {
pattern: /^(\s*~?\s*)[#\/]\S+?(?=\s*~?\s*$|\s)/i,
lookbehind: true,
alias: 'keyword'
},
'brackets': {
pattern: /\[[^\]]+\]/,
inside: {
punctuation: /\[|\]/,
variable: /[\s\S]+/
}
},
'punctuation': /[!"#%&'()*+,.\/;<=>@\[\\\]^`{|}~]/,
'variable': /[^!"#%&'()*+,.\/;<=>@\[\\\]^`{|}~\s]+/
}
}
});
// Comments are inserted at top so that they can
// surround markup
Prism.languages.insertBefore('handlebars', 'tag', {
'handlebars-comment': {
pattern: /\{\{![\s\S]*?\}\}/,
alias: ['handlebars','comment']
}
});
// Tokenize all inline Handlebars expressions that are wrapped in {{ }} or {{{ }}}
// This allows for easy Handlebars + markup highlighting
Prism.hooks.add('before-highlight', function(env) {
if (env.language !== 'handlebars') {
return;
}
env.tokenStack = [];
env.backupCode = env.code;
env.code = env.code.replace(handlebars_pattern, function(match) {
var i = env.tokenStack.length;
// Check for existing strings
while (env.backupCode.indexOf('___HANDLEBARS' + i + '___') !== -1)
++i;
// Create a sparse array
env.tokenStack[i] = match;
return '___HANDLEBARS' + i + '___';
});
});
// Restore env.code for other plugins (e.g. line-numbers)
Prism.hooks.add('before-insert', function(env) {
if (env.language === 'handlebars') {
env.code = env.backupCode;
delete env.backupCode;
}
});
// Re-insert the tokens after highlighting
// and highlight them with defined grammar
Prism.hooks.add('after-highlight', function(env) {
if (env.language !== 'handlebars') {
return;
}
for (var i = 0, keys = Object.keys(env.tokenStack); i < keys.length; ++i) {
var k = keys[i];
var t = env.tokenStack[k];
// The replace prevents $$, $&, $`, $', $n, $nn from being interpreted as special patterns
env.highlightedCode = env.highlightedCode.replace('___HANDLEBARS' + k + '___', Prism.highlight(t, env.grammar, 'handlebars').replace(/\$/g, '$$$$'));
}
env.element.innerHTML = env.highlightedCode;
});
}(Prism)); | PypiClean |
/Amipy-1.0.2.tar.gz/Amipy-1.0.2/amipy/util/http.py | import copy
import requests
import asyncio
from contextlib import closing
async def send_async_http(session,method,url,*,
retries=1,
interval=1,
wait_factor=2,
timeout=0,
path = None,
success_callback=None,
fail_callback=None,
**kwargs) -> dict:
"""
Send an async HTTP request with a retry mechanism.
When a request fails, the coroutine waits for an interval before the
next retry and multiplies that interval by wait_factor after every failure.
:param session: asynchronous request session
:param method: request method
:param url: request url
:param retries: how many times to retry (-1 means unlimited, 0 means no retry)
:param interval: initial delay between retries, in seconds
:param wait_factor: factor applied to the delay after each failed attempt, recommended 1 < wf < 2
:param timeout: request timeout
:param success_callback: callback invoked on success
:param fail_callback: callback invoked after all attempts failed
:param kwargs: other keyword arguments passed to the request
:param path: file save path; if given, the response is downloaded to this file
:return: result dict with resp, body, code, exception, tries (and size for downloads)
"""
ret = {'resp':None,'body':None,'code':-1,
'exception':None,'tries':-1}
wait_interval = interval
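# exponential back-off: the wait time is multiplied by wait_factor after every failed attempt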
if method.lower() not in ['get','head','post','put']:
return ret
if retries == -1: # -1 means retry unlimited times
attempt = -1
elif retries == 0: # 0 means no retry
attempt = 1
else:
attempt = retries + 1
while attempt != 0:
size = 0
try:
if path:
loop = asyncio.get_running_loop()
try:
resp = await loop.run_in_executor(None,download,url,path,timeout,kwargs)
size = resp
body = True
code = 200
except requests.exceptions.Timeout:
raise TimeoutError
else:
async with getattr(session,method.lower())(url,timeout=timeout,**kwargs) as response:
code = response.status
resp = response
body = await response.read()
ret.update({'resp': resp, 'body':body,'code': code,
'tries': retries - attempt+1,'size':size})
if success_callback:
success_callback(ret)
return ret
except Exception as e:
ret['exception'] = e.__class__()
ret['tries'] += 1
await asyncio.sleep(wait_interval)
wait_interval = wait_interval * wait_factor
attempt-=1
if fail_callback:
fail_callback(ret)
return ret
def download(url,filepath,timeout,kwargs):
if timeout==False:
timeout=None
_kw = copy.deepcopy(kwargs)
buffer = _kw.pop('buffer')
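# stream the response to disk in `buffer`-sized chunks and return the number of bytes written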
with closing(requests.get(url, stream=True,timeout=timeout,**_kw)) as response:
chunk_size = buffer
data_count = 0
with open(filepath,'wb') as f:
for data in response.iter_content(chunk_size=chunk_size):
f.write(data)
data_count = data_count + len(data)
return data_count | PypiClean |
/Kato-FlaskAppBuilder-1.1.14.tar.gz/Kato-FlaskAppBuilder-1.1.14/flask_appbuilder/models/generic/interface.py | from . import filters
from ..base import BaseInterface
def _include_filters(obj):
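# copy every public filter class from the filters module onto the interface instance so it can be referenced as an attribute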
for key in filters.__all__:
if not hasattr(obj, key):
setattr(obj, key, getattr(filters, key))
class GenericInterface(BaseInterface):
filter_converter_class = filters.GenericFilterConverter
def __init__(self, obj, session=None):
_include_filters(self)
self.session = session
super(GenericInterface, self).__init__(obj)
def query(self, filters=None, order_column='', order_direction='',
page=None, page_size=None):
query = self.session.query(self.obj)
if filters:
query = filters.apply_all(query)
if order_column != '':
query = query.order_by(order_column + ' ' + order_direction)
if page:
query = query.offset(page * page_size)
if page_size:
query = query.limit(page_size)
return query.all()
def is_string(self, col_name):
return self.obj.properties[col_name].col_type == str
def is_integer(self, col_name):
return self.obj.properties[col_name].col_type == int
def is_nullable(self, col_name):
return self.obj.properties[col_name].nullable
def is_unique(self, col_name):
return self.obj.properties[col_name].unique
def is_pk(self, col_name):
return self.obj.properties[col_name].primary_key
def get_columns_list(self):
return self.obj.columns
def get_search_columns_list(self):
return self.obj.columns
def get_order_columns_list(self, list_columns=None):
if list_columns:
return list_columns
return self.obj.columns
def get_keys(self, lst):
"""
return a list of pk values from object list
"""
pk_name = self.get_pk_name()
return [getattr(item, pk_name) for item in lst]
def get_pk_name(self):
for col_name in self.obj.columns:
if self.is_pk(col_name):
return col_name
def get(self, id, filters=None):
# TODO: need to implement filters!
return self.session.get(id) | PypiClean |
/EpyNN-1.2.11.tar.gz/EpyNN-1.2.11/epynnlive/dummy_string/prepare_dataset.py | import random
def features_string(N_FEATURES=12):
"""Generate dummy string features.
:param N_FEATURES: Number of features, defaults to 12.
:type N_FEATURES: int
:return: random string features of length N_FEATURES.
:rtype: list[str]
"""
# List of words
WORDS = ['A', 'T', 'G', 'C']
# Random choice of words for N_FEATURES iterations
features = [random.choice(WORDS) for j in range(N_FEATURES)]
return features
def label_features(features):
"""Prepare label associated with features.
The dummy law is:
First and last elements are equal = positive.
First and last elements are NOT equal = negative.
:param features: random string features of length N_FEATURES.
:type features: list[str]
:return: Single-digit label with respect to features.
:rtype: int
"""
# Single-digit positive and negative labels
p_label = 0
n_label = 1
# Pattern associated with positive label (0)
if features[0] == features[-1]:
label = p_label
# Other pattern associated with negative label (1)
elif features[0] != features[-1]:
label = n_label
return label
def prepare_dataset(N_SAMPLES=100):
"""Prepare a set of dummy string sample features and label.
:param N_SAMPLES: Number of samples to generate, defaults to 100.
:type N_SAMPLES: int
:return: Set of sample features.
:rtype: tuple[list[str]]
:return: Set of single-digit sample label.
:rtype: tuple[int]
"""
# Initialize X and Y datasets
X_features = []
Y_label = []
# Iterate over N_SAMPLES
for i in range(N_SAMPLES):
# Compute random string features
features = features_string()
# Retrieve label associated with features
label = label_features(features)
# Append sample features to X_features
X_features.append(features)
# Append sample label to Y_label
Y_label.append(label)
# Prepare X-Y pairwise dataset
dataset = list(zip(X_features, Y_label))
# Shuffle dataset
random.shuffle(dataset)
# Separate X-Y pairs
X_features, Y_label = zip(*dataset)
return X_features, Y_label | PypiClean |
/McStasScript-0.0.63.tar.gz/McStasScript-0.0.63/mcstasscript/instrument_diagram/generate_Union.py | from mcstasscript.instrument_diagram.connections import ConnectionList
from mcstasscript.instrument_diagram.arrow import Arrow
def generate_Union_arrows(components, component_box_dict, box_names, component_categories, color=None):
"""
Generate Arrow objects related to use of Union components
Covers processes, materials, geometries, loggers, absorption loggers,
conditionals and master components.
"""
connections = ConnectionList()
process_names = []
material_names = []
geometry_names = []
simulated_geometry_names = []
abs_loggers = []
loggers = []
conditionals = []
master_names = []
geometry_activation_counters = {}
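# tracks how many Union_master components each geometry should still be connected to (its remaining number_of_activations)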
for component in components:
category = component_categories[component.component_name]
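# note: the `or True` below disables the category filter, so every component is inspected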
if category == "union" or True:
if "_process" in component.component_name:
# Process component
process_names.append(component.name)
elif component.component_name == "Union_make_material":
# Make material component
material_names.append(component.name)
process_string = component.process_string
if not isinstance(process_string, str):
continue
processes = process_string.strip('"').split(",")
for process in processes:
if process not in process_names:
print("Didn't find process of name '" + process + "'")
print(process_names)
else:
origin = component_box_dict[process]
connections.add(origin, component_box_dict[component.name])
elif "material_string" in component.parameter_names:
# Geometry
geometry_names.append(component.name)
if component.number_of_activations is not None:
try:
# If a number is given, it can be used directly
number_of_activations = int(component.number_of_activations)
except:
# If a variable or parameter is used, it can't be known ahead of time, assume 1
number_of_activations = 1
else:
number_of_activations = component.parameter_defaults["number_of_activations"]
geometry_activation_counters[component.name] = number_of_activations
if component.material_string is not None:
simulated_geometry_names.append(component.name)
if isinstance(component.material_string, str):
material = component.material_string.strip('"')
if material not in material_names:
if material not in ["Vacuum", "vacuum", "Exit", "exit"]:
print("Didn't find material of name '" + material + "'")
print(material_names)
else:
origin = component_box_dict[material]
connections.add(origin, component_box_dict[component.name])
if isinstance(component.mask_string, str):
masks = component.mask_string.strip('"').split(",")
for mask in masks:
if mask not in geometry_names:
print("Didn't find geometry target of name '" + mask + "'")
print(geometry_names)
else:
target = component_box_dict[mask]
connections.add(component_box_dict[component.name], target)
elif "_logger" in component.component_name:
if "_abs_logger" in component.component_name:
# Absoption logger
abs_loggers.append(component.name)
abs_logger = True
else:
# Scattering logger
loggers.append(component.name)
abs_logger = False
target_geometry = component.target_geometry
if isinstance(target_geometry, str):
geometries = target_geometry.strip('"').split(",")
for geometry in geometries:
if geometry not in simulated_geometry_names:
print(component.name)
print("Didn't find geometry of name '" + geometry + "'")
print(simulated_geometry_names)
else:
origin = component_box_dict[geometry]
connections.add(origin, component_box_dict[component.name])
if abs_logger:
# Abs loggers do not have target_process, as they target absorption
continue
target_process = component.target_process
if isinstance(target_process, str):
processes = target_process.strip('"').split(",")
for process in processes:
if process not in process_names:
print(component.name)
print("Didn't find process of name '" + process + "'")
print(process_names)
else:
origin = component_box_dict[process]
connections.add(origin, component_box_dict[component.name])
elif "target_loggers" in component.parameter_names:
# Conditional
conditionals.append(component.name)
target_loggers = component.target_loggers
if isinstance(target_loggers, str):
loggers = target_loggers.strip('"').split(",")
for logger in loggers:
if logger not in loggers + abs_loggers:
print(component.name)
print("Didn't find logger with name '" + logger + "'")
print(loggers, abs_loggers)
else:
target = component_box_dict[logger]
connections.add(component_box_dict[component.name], target)
elif component.component_name == "Union_master":
# Master
master_names.append(component.name)
for geometry in simulated_geometry_names:
if geometry_activation_counters[geometry] > 0: # May need to account for floating point precision
# Only include if activation counter for this geometry is still positive
geometry_activation_counters[geometry] -= 1
origin = component_box_dict[geometry]
connections.add(origin, component_box_dict[component.name])
connections.distribute_lane_numbers(box_names=box_names)
arrows = []
for connection in connections.get_connections():
origin = connection.origin
target = connection.target
lane = connection.lane_number
arrow = Arrow(origin, target, lane=lane, kind="Union")
arrow.set_sub_lane(2)
if color is None:
arrow.color = "green"
else:
arrow.color = color
if target.name in master_names:
arrow.set_linestyle("--")
arrows.append(arrow)
    return arrows
/BenchExec-3.17.tar.gz/BenchExec-3.17/benchexec/baseexecutor.py
import errno
import logging
import os
import subprocess
import sys
import threading
from benchexec import __version__
from benchexec import util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def add_basic_executor_options(argument_parser):
"""Add some basic options for an executor to an argparse argument_parser."""
argument_parser.add_argument(
"args",
nargs="+",
metavar="ARG",
help='command line to run (prefix with "--" to ensure all arguments are treated correctly)',
)
argument_parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__
)
verbosity = argument_parser.add_mutually_exclusive_group()
verbosity.add_argument("--debug", action="store_true", help="show debug output")
verbosity.add_argument("--quiet", action="store_true", help="show only warnings")
def handle_basic_executor_options(options, parser):
"""Handle the options specified by add_basic_executor_options()."""
# setup logging
logLevel = logging.INFO
if options.debug:
logLevel = logging.DEBUG
elif options.quiet:
logLevel = logging.WARNING
util.setup_logging(level=logLevel)
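# Illustrative sketch (not part of BenchExec): one way the two argparse helpers above
# might be wired together in an executor's main(). The parser description and the sample
# command line in the comment are assumptions made purely for demonstration.
def _example_parse_and_setup(argv=None):
    import argparse
    parser = argparse.ArgumentParser(description="example executor")
    add_basic_executor_options(parser)
    # e.g. argv = ["--debug", "--", "echo", "hello"]
    options = parser.parse_args(argv)
    handle_basic_executor_options(options, parser)  # configures logging from --debug/--quiet
    return options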
class BaseExecutor(object):
"""Class for starting and handling processes."""
def __init__(self):
self.PROCESS_KILLED = False
# killing process is triggered asynchronously, need a lock for synchronization
self.SUB_PROCESS_PIDS_LOCK = threading.Lock()
self.SUB_PROCESS_PIDS = set()
def _get_result_files_base(self, temp_dir):
"""Given the temp directory that is created for each run, return the path to the directory
where files created by the tool are stored."""
return temp_dir
def _start_execution(
self,
args,
stdin,
stdout,
stderr,
env,
cwd,
temp_dir,
cgroups,
parent_setup_fn,
child_setup_fn,
parent_cleanup_fn,
):
"""Actually start the tool and the measurements.
@param parent_setup_fn a function without parameters that is called in the parent process
immediately before the tool is started
@param child_setup_fn a function without parameters that is called in the child process
before the tool is started
@param parent_cleanup_fn a function that is called in the parent process
immediately after the tool terminated, with three parameters:
the result of parent_setup_fn, the result of the executed process as ProcessExitCode,
and the base path for looking up files as parameter values
        @return: a tuple of the PID of the started process and a blocking function; the blocking
        function waits for the process and returns a triple of the exit code, the resource usage
        of the process, and the result of parent_cleanup_fn (do not use os.wait)
"""
def pre_subprocess():
# Do some other setup the caller wants.
child_setup_fn()
# put us into the cgroup(s)
pid = os.getpid()
cgroups.add_task(pid)
# Set HOME and TMPDIR to fresh directories.
tmp_dir = os.path.join(temp_dir, "tmp")
home_dir = os.path.join(temp_dir, "home")
os.mkdir(tmp_dir)
os.mkdir(home_dir)
env["HOME"] = home_dir
env["TMPDIR"] = tmp_dir
env["TMP"] = tmp_dir
env["TEMPDIR"] = tmp_dir
env["TEMP"] = tmp_dir
logging.debug("Executing run with $HOME and $TMPDIR below %s.", temp_dir)
parent_setup = parent_setup_fn()
p = subprocess.Popen(
args,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd,
close_fds=True,
preexec_fn=pre_subprocess,
)
def wait_and_get_result():
exitcode, ru_child = self._wait_for_process(p.pid, args[0])
parent_cleanup = parent_cleanup_fn(
parent_setup, util.ProcessExitCode.from_raw(exitcode), ""
)
return exitcode, ru_child, parent_cleanup
return p.pid, wait_and_get_result
def _wait_for_process(self, pid, name):
"""Wait for the given process to terminate.
@return tuple of exit code and resource usage
"""
try:
logging.debug("Waiting for process %s with pid %s", name, pid)
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError as e:
if self.PROCESS_KILLED and e.errno == errno.EINTR:
# Interrupted system call seems always to happen
# if we killed the process ourselves after Ctrl+C was pressed
# We can try again to get exitcode and resource usage.
logging.debug(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
try:
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError:
pass # original error will be handled and this ignored
logging.critical(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
return 0, None
def stop(self):
self.PROCESS_KILLED = True
with self.SUB_PROCESS_PIDS_LOCK:
for pid in self.SUB_PROCESS_PIDS:
logging.warning("Killing process %s forcefully.", pid)
try:
util.kill_process(pid)
except OSError as e:
# May fail due to race conditions
                    logging.debug(e)
/LUBEAT-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/sklearn_extensions/linear_model.py
import numbers
import numpy as np
import warnings
from collections.abc import Iterable
from scipy.stats import norm
from econml.sklearn_extensions.model_selection import WeightedKFold, WeightedStratifiedKFold
from econml.utilities import ndim, shape, reshape, _safe_norm_ppf, check_input_arrays
from sklearn import clone
from sklearn.linear_model import LinearRegression, LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso
from sklearn.linear_model._base import _preprocess_data
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold, StratifiedKFold
# TODO: consider working around relying on sklearn implementation details
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.multioutput import MultiOutputRegressor
from sklearn.utils import check_array, check_X_y
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator
from statsmodels.tools.tools import add_constant
from statsmodels.api import RLM
import statsmodels
from joblib import Parallel, delayed
# TODO: once we drop support for sklearn < 1.0, we can remove this
def _add_normalize(to_wrap):
"""
Add a fictitious "normalize" argument to linear model initializer signatures.
This is necessary for their get_params to play nicely with some other sklearn-internal methods.
Note that directly adding a **params argument to the ordinary initializer will not work,
because get_params explicitly looks only at the initializer signature arguments that are not
varargs or varkeywords, so we need to modify the signature of the initializer to include the
"normalize" argument.
"""
# if we're decorating a class, just update the __init__ method,
# so that the result is still a class instead of a wrapper method
if isinstance(to_wrap, type):
import sklearn
from packaging import version
if version.parse(sklearn.__version__) >= version.parse("1.0"):
# normalize was deprecated or removed; don't need to do anything
return to_wrap
else:
from inspect import Parameter, signature
from functools import wraps
old_init = to_wrap.__init__
@wraps(old_init)
def new_init(self, *args, normalize=False, **kwargs):
if normalize is not False:
warnings.warn("normalize is deprecated and will be ignored", stacklevel=2)
return old_init(self, *args, **kwargs)
sig = signature(old_init)
sig = sig.replace(parameters=[*sig.parameters.values(),
Parameter("normalize", kind=Parameter.KEYWORD_ONLY, default=False)])
new_init.__signature__ = sig
to_wrap.__init__ = new_init
return to_wrap
else:
raise ValueError("This decorator was applied to a method, but is intended to be applied only to types.")
def _weighted_check_cv(cv=5, y=None, classifier=False, random_state=None):
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return WeightedStratifiedKFold(cv, random_state=random_state)
else:
return WeightedKFold(cv, random_state=random_state)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _WeightedCVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
class _WeightedCVIterableWrapper(_CVIterableWrapper):
def __init__(self, cv):
super().__init__(cv)
def get_n_splits(self, X=None, y=None, groups=None, sample_weight=None):
if groups is not None and sample_weight is not None:
raise ValueError("Cannot simultaneously use grouping and weighting")
return super().get_n_splits(X, y, groups)
def split(self, X=None, y=None, groups=None, sample_weight=None):
if groups is not None and sample_weight is not None:
raise ValueError("Cannot simultaneously use grouping and weighting")
return super().split(X, y, groups)
class WeightedModelMixin:
"""Mixin class for weighted models.
For linear models, weights are applied as reweighting of the data matrix X and targets y.
"""
def _fit_weighted_linear_model(self, X, y, sample_weight, check_input=None):
# Convert X, y into numpy arrays
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
# Define fit parameters
fit_params = {'X': X, 'y': y}
# Some algorithms don't have a check_input option
if check_input is not None:
fit_params['check_input'] = check_input
if sample_weight is not None:
# Check weights array
if np.atleast_1d(sample_weight).ndim > 1:
# Check that weights are size-compatible
raise ValueError("Sample weights must be 1D array or scalar")
if np.ndim(sample_weight) == 0:
sample_weight = np.repeat(sample_weight, X.shape[0])
else:
sample_weight = check_array(sample_weight, ensure_2d=False, allow_nd=False)
if sample_weight.shape[0] != X.shape[0]:
raise ValueError(
"Found array with {0} sample(s) while {1} samples were expected.".format(
sample_weight.shape[0], X.shape[0])
)
# Normalize inputs
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=False,
copy=self.copy_X, check_input=check_input if check_input is not None else True,
sample_weight=sample_weight)
# Weight inputs
normalized_weights = X.shape[0] * sample_weight / np.sum(sample_weight)
sqrt_weights = np.sqrt(normalized_weights)
X_weighted = sqrt_weights.reshape(-1, 1) * X
y_weighted = sqrt_weights.reshape(-1, 1) * y if y.ndim > 1 else sqrt_weights * y
fit_params['X'] = X_weighted
fit_params['y'] = y_weighted
if self.fit_intercept:
# Fit base class without intercept
self.fit_intercept = False
# Fit Lasso
super().fit(**fit_params)
# Reset intercept
self.fit_intercept = True
                # The intercept is not calculated properly due to the sqrt(weights) factor
# so it must be recomputed
self._set_intercept(X_offset, y_offset, X_scale)
else:
super().fit(**fit_params)
else:
# Fit lasso without weights
super().fit(**fit_params)
@_add_normalize
class WeightedLasso(WeightedModelMixin, Lasso):
"""Version of sklearn Lasso that accepts weights.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with Lasso is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
"""
def __init__(self, alpha=1.0, fit_intercept=True,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
"""
self._fit_weighted_linear_model(X, y, sample_weight, check_input)
return self
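# Minimal usage sketch (illustrative, not part of the library): WeightedLasso accepts
# per-sample weights directly in fit(). The synthetic data below is an assumption made
# purely for demonstration.
def _example_weighted_lasso():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 5))
    y = X @ np.array([1.0, 0.5, 0.0, 0.0, -2.0]) + rng.normal(scale=0.1, size=200)
    sample_weight = rng.uniform(0.5, 2.0, size=200)
    est = WeightedLasso(alpha=0.01).fit(X, y, sample_weight=sample_weight)
    return est.coef_, est.intercept_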
@_add_normalize
class WeightedMultiTaskLasso(WeightedModelMixin, MultiTaskLasso):
"""Version of sklearn MultiTaskLasso that accepts weights.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
"""
def __init__(self, alpha=1.0, fit_intercept=True,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
copy_X=copy_X, max_iter=max_iter, tol=tol, warm_start=warm_start,
random_state=random_state, selection=selection)
def fit(self, X, y, sample_weight=None):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
"""
self._fit_weighted_linear_model(X, y, sample_weight)
return self
@_add_normalize
class WeightedLassoCV(WeightedModelMixin, LassoCV):
"""Version of sklearn LassoCV that accepts weights.
.. testsetup::
import numpy as np
from sklearn.linear_model import lasso_path
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, cross-validation generator or an iterable, optional (default=None)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
        - None, to use the default 5-fold weighted cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`WeightedKFold` is used.
If None then 5 folds are used.
verbose : bool or integer
Amount of verbosity.
n_jobs : int or None, optional (default=None)
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive : bool, optional
If positive, restrict regression coefficients to be positive
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
"""
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=None,
positive=False, random_state=None, selection='cyclic'):
super().__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
def fit(self, X, y, sample_weight=None):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
"""
# Make weighted splitter
cv_temp = self.cv
self.cv = _weighted_check_cv(self.cv, random_state=self.random_state).split(X, y, sample_weight=sample_weight)
# Fit weighted model
self._fit_weighted_linear_model(X, y, sample_weight)
self.cv = cv_temp
return self
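# Illustrative sketch (not part of the library): with WeightedLassoCV the sample weights
# are used both to weight the objective and to build the WeightedKFold splits, so the
# selected alpha_ reflects the weighted data. The synthetic data below is an assumption.
def _example_weighted_lasso_cv():
    rng = np.random.RandomState(1)
    X = rng.normal(size=(300, 10))
    y = X[:, 0] - 2 * X[:, 1] + rng.normal(scale=0.2, size=300)
    sample_weight = rng.uniform(0.1, 1.0, size=300)
    est = WeightedLassoCV(cv=5).fit(X, y, sample_weight=sample_weight)
    return est.alpha_, est.coef_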
@_add_normalize
class WeightedMultiTaskLassoCV(WeightedModelMixin, MultiTaskLassoCV):
"""Version of sklearn MultiTaskLassoCV that accepts weights.
.. testsetup::
import numpy as np
from sklearn.linear_model import lasso_path
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, cross-validation generator or an iterable, optional (default = None)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
        - None, to use the default 5-fold weighted cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`WeightedKFold` is used.
        If None then 5 folds are used.
verbose : bool or integer
Amount of verbosity.
n_jobs : int or None, optional (default=None)
Number of CPUs to use during the cross validation. Note that this is
used only if multiple values for l1_ratio are given.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
"""
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=None,
random_state=None, selection='cyclic'):
super().__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs,
random_state=random_state, selection=selection)
def fit(self, X, y, sample_weight=None):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
"""
# Make weighted splitter
cv_temp = self.cv
self.cv = _weighted_check_cv(self.cv, random_state=self.random_state).split(X, y, sample_weight=sample_weight)
# Fit weighted model
self._fit_weighted_linear_model(X, y, sample_weight)
self.cv = cv_temp
return self
def _get_theta_coefs_and_tau_sq(i, X, sample_weight, alpha_cov, n_alphas_cov, max_iter, tol, random_state):
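    """Nodewise (weighted) lasso helper for DebiasedLasso: regress feature i on the remaining
    features to estimate row i of the approximate inverse covariance matrix Theta, and return
    those coefficients together with tau^2, a weighted residual moment used to scale that row.
    """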
n_samples, n_features = X.shape
y = X[:, i]
X_reduced = X[:, list(range(i)) + list(range(i + 1, n_features))]
# Call weighted lasso on reduced design matrix
if alpha_cov == 'auto':
local_wlasso = WeightedLassoCV(cv=3, n_alphas=n_alphas_cov,
fit_intercept=False,
max_iter=max_iter,
tol=tol, n_jobs=1,
random_state=random_state)
else:
local_wlasso = WeightedLasso(alpha=alpha_cov,
fit_intercept=False,
max_iter=max_iter,
tol=tol,
random_state=random_state)
local_wlasso.fit(X_reduced, y, sample_weight=sample_weight)
coefs = local_wlasso.coef_
# Weighted tau
if sample_weight is not None:
y_weighted = y * sample_weight / np.sum(sample_weight)
else:
y_weighted = y / n_samples
tausq = np.dot(y - local_wlasso.predict(X_reduced), y_weighted)
return coefs, tausq
@_add_normalize
class DebiasedLasso(WeightedLasso):
"""Debiased Lasso model.
Implementation was derived from <https://arxiv.org/abs/1303.0518>.
Only implemented for single-dimensional output.
.. testsetup::
import numpy as np
from sklearn.linear_model import lasso_path
Parameters
----------
alpha : string | float, optional, default 'auto'.
Constant that multiplies the L1 term. Defaults to 'auto'.
        ``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`.LinearRegression` object.
n_alphas : int, optional, default 100
How many alphas to try if alpha='auto'
alpha_cov : string | float, optional, default 'auto'
The regularization alpha that is used when constructing the pseudo inverse of
        the covariance matrix Theta used for correcting the lasso coefficient. Each
such regression corresponds to the regression of one feature on the remainder
of the features.
n_alphas_cov : int, optional, default 10
How many alpha_cov to try if alpha_cov='auto'.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | array-like, default False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
n_jobs : int or None, default None
How many jobs to use whenever parallelism is invoked
Attributes
----------
coef_ : array, shape (n_features,)
Parameter vector (w in the cost function formula).
intercept_ : float
Independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
selected_alpha_ : float
Penalty chosen through cross-validation, if alpha='auto'.
coef_stderr_ : array, shape (n_features,)
Estimated standard errors for coefficients (see ``coef_`` attribute).
intercept_stderr_ : float
Estimated standard error intercept (see ``intercept_`` attribute).
"""
def __init__(self, alpha='auto', n_alphas=100, alpha_cov='auto', n_alphas_cov=10,
fit_intercept=True, precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False,
random_state=None, selection='cyclic', n_jobs=None):
self.n_jobs = n_jobs
self.n_alphas = n_alphas
self.alpha_cov = alpha_cov
self.n_alphas_cov = n_alphas_cov
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=False, random_state=random_state,
selection=selection)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit debiased lasso model.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Input data.
y : array, shape (n_samples,)
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
"""
self.selected_alpha_ = None
if self.alpha == 'auto':
# Select optimal penalty
self.alpha = self._get_optimal_alpha(X, y, sample_weight)
self.selected_alpha_ = self.alpha
else:
# Warn about consistency
warnings.warn("Setting a suboptimal alpha can lead to miscalibrated confidence intervals. "
"We recommend setting alpha='auto' for optimality.")
# Convert X, y into numpy arrays
X, y = check_X_y(X, y, y_numeric=True, multi_output=False)
# Fit weighted lasso with user input
super().fit(X, y, sample_weight, check_input)
# Center X, y
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=False,
copy=self.copy_X, check_input=check_input, sample_weight=sample_weight)
# Calculate quantities that will be used later on. Account for centered data
y_pred = self.predict(X) - self.intercept_
self._theta_hat = self._get_theta_hat(X, sample_weight)
self._X_offset = X_offset
# Calculate coefficient and error variance
num_nonzero_coefs = np.count_nonzero(self.coef_)
self._error_variance = np.average((y - y_pred)**2, weights=sample_weight) / \
(1 - num_nonzero_coefs / X.shape[0])
self._mean_error_variance = self._error_variance / X.shape[0]
self._coef_variance = self._get_unscaled_coef_var(
X, self._theta_hat, sample_weight) * self._error_variance
# Add coefficient correction
coef_correction = self._get_coef_correction(
X, y, y_pred, sample_weight, self._theta_hat)
self.coef_ += coef_correction
# Set coefficients and intercept standard errors
self.coef_stderr_ = np.sqrt(np.diag(self._coef_variance))
if self.fit_intercept:
self.intercept_stderr_ = np.sqrt(
self._X_offset @ self._coef_variance @ self._X_offset +
self._mean_error_variance
)
else:
self.intercept_stderr_ = 0
# Set intercept
self._set_intercept(X_offset, y_offset, X_scale)
# Return alpha to 'auto' state
if self.selected_alpha_ is not None:
self.alpha = 'auto'
return self
def prediction_stderr(self, X):
"""Get the standard error of the predictions using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
Returns
-------
prediction_stderr : array like, shape (n_samples, )
The standard error of each coordinate of the output at each point we predict
"""
# Note that in the case of no intercept, X_offset is 0
if self.fit_intercept:
X = X - self._X_offset
# Calculate the variance of the predictions
var_pred = np.sum(np.matmul(X, self._coef_variance) * X, axis=1)
if self.fit_intercept:
var_pred += self._mean_error_variance
pred_stderr = np.sqrt(var_pred)
return pred_stderr
def predict_interval(self, X, alpha=0.05):
"""Build prediction confidence intervals using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
alpha: optional float in [0, 1] (Default=0.05)
The overall level of confidence of the reported interval.
The alpha/2, 1-alpha/2 confidence interval is reported.
Returns
-------
(y_lower, y_upper) : tuple of arrays, shape (n_samples, )
Returns lower and upper interval endpoints.
"""
lower = alpha / 2
upper = 1 - alpha / 2
y_pred = self.predict(X)
# Calculate prediction confidence intervals
sd_pred = self.prediction_stderr(X)
y_lower = y_pred + \
np.apply_along_axis(lambda s: norm.ppf(
lower, scale=s), 0, sd_pred)
y_upper = y_pred + \
np.apply_along_axis(lambda s: norm.ppf(
upper, scale=s), 0, sd_pred)
return y_lower, y_upper
def coef__interval(self, alpha=0.05):
"""Get a confidence interval bounding the fitted coefficients.
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(coef_lower, coef_upper) : tuple of arrays, shape (n_coefs, )
Returns lower and upper interval endpoints for the coefficients.
"""
lower = alpha / 2
upper = 1 - alpha / 2
return self.coef_ + np.apply_along_axis(lambda s: norm.ppf(lower, scale=s), 0, self.coef_stderr_), \
self.coef_ + np.apply_along_axis(lambda s: norm.ppf(upper, scale=s), 0, self.coef_stderr_)
def intercept__interval(self, alpha=0.05):
"""Get a confidence interval bounding the fitted intercept.
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(intercept_lower, intercept_upper) : tuple floats
Returns lower and upper interval endpoints for the intercept.
"""
lower = alpha / 2
upper = 1 - alpha / 2
if self.fit_intercept:
return self.intercept_ + norm.ppf(lower, scale=self.intercept_stderr_), self.intercept_ + \
norm.ppf(upper, scale=self.intercept_stderr_),
else:
return 0.0, 0.0
def _get_coef_correction(self, X, y, y_pred, sample_weight, theta_hat):
# Assumes flattened y
n_samples, _ = X.shape
y_res = np.ndarray.flatten(y) - y_pred
# Compute weighted residuals
if sample_weight is not None:
y_res_scaled = y_res * sample_weight / np.sum(sample_weight)
else:
y_res_scaled = y_res / n_samples
delta_coef = np.matmul(
theta_hat, np.matmul(X.T, y_res_scaled))
return delta_coef
def _get_optimal_alpha(self, X, y, sample_weight):
# To be done once per target. Assumes y can be flattened.
cv_estimator = WeightedLassoCV(cv=5, n_alphas=self.n_alphas, fit_intercept=self.fit_intercept,
precompute=self.precompute, copy_X=True,
max_iter=self.max_iter, tol=self.tol,
random_state=self.random_state,
selection=self.selection,
n_jobs=self.n_jobs)
cv_estimator.fit(X, y.flatten(), sample_weight=sample_weight)
return cv_estimator.alpha_
def _get_theta_hat(self, X, sample_weight):
# Assumes that X has already been offset
n_samples, n_features = X.shape
# Special case: n_features=1
if n_features == 1:
C_hat = np.ones((1, 1))
tausq = (X.T @ X / n_samples).flatten()
return np.diag(1 / tausq) @ C_hat
# Compute Lasso coefficients for the columns of the design matrix
results = Parallel(n_jobs=self.n_jobs)(
delayed(_get_theta_coefs_and_tau_sq)(i, X, sample_weight,
self.alpha_cov, self.n_alphas_cov,
self.max_iter, self.tol, self.random_state)
for i in range(n_features))
coefs, tausq = zip(*results)
coefs = np.array(coefs)
tausq = np.array(tausq)
# Compute C_hat
C_hat = np.diag(np.ones(n_features))
C_hat[0][1:] = -coefs[0]
for i in range(1, n_features):
C_hat[i][:i] = -coefs[i][:i]
C_hat[i][i + 1:] = -coefs[i][i:]
# Compute theta_hat
theta_hat = np.diag(1 / tausq) @ C_hat
return theta_hat
def _get_unscaled_coef_var(self, X, theta_hat, sample_weight):
if sample_weight is not None:
norm_weights = sample_weight / np.sum(sample_weight)
sigma = X.T @ (norm_weights.reshape(-1, 1) * X)
else:
sigma = np.matmul(X.T, X) / X.shape[0]
_unscaled_coef_var = np.matmul(
np.matmul(theta_hat, sigma), theta_hat.T) / X.shape[0]
return _unscaled_coef_var
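# Illustrative sketch (not part of the library): after fitting, DebiasedLasso exposes
# normal-approximation inference through coef_stderr_, coef__interval and predict_interval.
# The synthetic data below is an assumption made purely for demonstration.
def _example_debiased_lasso():
    rng = np.random.RandomState(2)
    X = rng.normal(size=(500, 20))
    beta = np.zeros(20)
    beta[:3] = [1.0, -1.0, 0.5]
    y = X @ beta + rng.normal(scale=0.5, size=500)
    est = DebiasedLasso(alpha='auto').fit(X, y)
    coef_lower, coef_upper = est.coef__interval(alpha=0.05)  # 95% CIs per coefficient
    y_lower, y_upper = est.predict_interval(X[:5], alpha=0.05)  # 95% CIs for predicted means
    return coef_lower, coef_upper, y_lower, y_upper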
@_add_normalize
class MultiOutputDebiasedLasso(MultiOutputRegressor):
"""Debiased MultiOutputLasso model.
Implementation was derived from <https://arxiv.org/abs/1303.0518>.
Applies debiased lasso once per target. If only a flat target is passed in,
it reverts to the DebiasedLasso algorithm.
Parameters
----------
alpha : string | float, optional. Default='auto'.
Constant that multiplies the L1 term. Defaults to 'auto'.
        ``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
n_alphas : int, optional, default 100
How many alphas to try if alpha='auto'
alpha_cov : string | float, optional, default 'auto'
The regularization alpha that is used when constructing the pseudo inverse of
        the covariance matrix Theta used for correcting the lasso coefficient. Each
such regression corresponds to the regression of one feature on the remainder
of the features.
n_alphas_cov : int, optional, default 10
How many alpha_cov to try if alpha_cov='auto'.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
n_jobs : int or None, default None
How many jobs to use whenever parallelism is invoked
Attributes
----------
coef_ : array, shape (n_targets, n_features) or (n_features,)
Parameter vector (w in the cost function formula).
intercept_ : array, shape (n_targets, ) or float
Independent term in decision function.
selected_alpha_ : array, shape (n_targets, ) or float
Penalty chosen through cross-validation, if alpha='auto'.
coef_stderr_ : array, shape (n_targets, n_features) or (n_features, )
Estimated standard errors for coefficients (see ``coef_`` attribute).
intercept_stderr_ : array, shape (n_targets, ) or float
Estimated standard error intercept (see ``intercept_`` attribute).
"""
def __init__(self, alpha='auto', n_alphas=100, alpha_cov='auto', n_alphas_cov=10,
fit_intercept=True,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False,
random_state=None, selection='cyclic', n_jobs=None):
self.estimator = DebiasedLasso(alpha=alpha, n_alphas=n_alphas, alpha_cov=alpha_cov, n_alphas_cov=n_alphas_cov,
fit_intercept=fit_intercept,
precompute=precompute, copy_X=copy_X, max_iter=max_iter,
tol=tol, warm_start=warm_start,
random_state=random_state, selection=selection,
n_jobs=n_jobs)
super().__init__(estimator=self.estimator, n_jobs=n_jobs)
def fit(self, X, y, sample_weight=None):
"""Fit the multi-output debiased lasso model.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Input data.
y : array, shape (n_samples, n_targets) or (n_samples, )
Target. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample.
The weights will be normalized internally.
"""
# Allow for single output as well
# When only one output is passed in, the MultiOutputDebiasedLasso behaves like the DebiasedLasso
self.flat_target = False
if np.ndim(y) == 1:
self.flat_target = True
y = np.asarray(y).reshape(-1, 1)
super().fit(X, y, sample_weight)
# Set coef_ attribute
self._set_attribute("coef_")
# Set intercept_ attribute
self._set_attribute("intercept_",
condition=self.estimators_[0].fit_intercept,
default=0.0)
# Set selected_alpha_ attribute
self._set_attribute("selected_alpha_",
condition=(self.estimators_[0].alpha == 'auto'))
# Set coef_stderr_
self._set_attribute("coef_stderr_")
# intercept_stderr_
self._set_attribute("intercept_stderr_")
return self
def predict(self, X):
"""Get the prediction using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
Returns
-------
prediction : array like, shape (n_samples, ) or (n_samples, n_targets)
The prediction at each point.
"""
pred = super().predict(X)
if self.flat_target:
pred = pred.flatten()
return pred
def prediction_stderr(self, X):
"""Get the standard error of the predictions using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
Returns
-------
prediction_stderr : array like, shape (n_samples, ) or (n_samples, n_targets)
The standard error of each coordinate of the output at each point we predict
"""
n_estimators = len(self.estimators_)
X = check_array(X)
pred_stderr = np.empty((X.shape[0], n_estimators))
for i, estimator in enumerate(self.estimators_):
pred_stderr[:, i] = estimator.prediction_stderr(X)
if self.flat_target:
pred_stderr = pred_stderr.flatten()
return pred_stderr
def predict_interval(self, X, alpha=0.05):
"""Build prediction confidence intervals using the debiased lasso.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Samples.
alpha: optional float in [0, 1] (Default=0.05)
The overall level of confidence of the reported interval.
The alpha/2, 1-alpha/2 confidence interval is reported.
Returns
-------
(y_lower, y_upper) : tuple of arrays, shape (n_samples, n_targets) or (n_samples, )
Returns lower and upper interval endpoints.
"""
n_estimators = len(self.estimators_)
X = check_array(X)
y_lower = np.empty((X.shape[0], n_estimators))
y_upper = np.empty((X.shape[0], n_estimators))
for i, estimator in enumerate(self.estimators_):
y_lower[:, i], y_upper[:, i] = estimator.predict_interval(X, alpha=alpha)
if self.flat_target:
y_lower = y_lower.flatten()
y_upper = y_upper.flatten()
return y_lower, y_upper
def coef__interval(self, alpha=0.05):
"""Get a confidence interval bounding the fitted coefficients.
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(coef_lower, coef_upper) : tuple of arrays, shape (n_targets, n_coefs) or (n_coefs, )
Returns lower and upper interval endpoints for the coefficients.
"""
n_estimators = len(self.estimators_)
coef_lower = np.empty((n_estimators, self.estimators_[0].coef_.shape[0]))
coef_upper = np.empty((n_estimators, self.estimators_[0].coef_.shape[0]))
for i, estimator in enumerate(self.estimators_):
coef_lower[i], coef_upper[i] = estimator.coef__interval(alpha=alpha)
        if self.flat_target:
coef_lower = coef_lower.flatten()
coef_upper = coef_upper.flatten()
return coef_lower, coef_upper
def intercept__interval(self, alpha=0.05):
"""Get a confidence interval bounding the fitted intercept.
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
(intercept_lower, intercept_upper) : tuple of arrays of size (n_targets, ) or tuple of floats
Returns lower and upper interval endpoints for the intercept.
"""
if len(self.estimators_) == 1:
return self.estimators_[0].intercept__interval(alpha=alpha)
else:
intercepts = np.array([estimator.intercept__interval(alpha=alpha) for estimator in self.estimators_])
return intercepts[:, 0], intercepts[:, 1]
def get_params(self, deep=True):
"""Get parameters for this estimator."""
return self.estimator.get_params(deep=deep)
def set_params(self, **params):
"""Set parameters for this estimator."""
        self.estimator.set_params(**params)
        return self
def _set_attribute(self, attribute_name, condition=True, default=None):
if condition:
if not self.flat_target:
attribute_value = np.array([getattr(estimator, attribute_name) for estimator in self.estimators_])
else:
attribute_value = getattr(self.estimators_[0], attribute_name)
else:
attribute_value = default
setattr(self, attribute_name, attribute_value)
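# Illustrative sketch (not part of the library): MultiOutputDebiasedLasso fits one
# DebiasedLasso per column of y, so coef_ has shape (n_targets, n_features) and the
# interval methods return per-target bounds. The synthetic data below is an assumption.
def _example_multi_output_debiased_lasso():
    rng = np.random.RandomState(3)
    X = rng.normal(size=(400, 8))
    Y = np.column_stack([X[:, 0] + rng.normal(scale=0.3, size=400),
                         -X[:, 1] + rng.normal(scale=0.3, size=400)])
    est = MultiOutputDebiasedLasso().fit(X, Y)
    coef_lower, coef_upper = est.coef__interval(alpha=0.1)  # shape (2, 8) each
    return est.coef_, coef_lower, coef_upper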
class WeightedLassoCVWrapper:
"""Helper class to wrap either WeightedLassoCV or WeightedMultiTaskLassoCV depending on the shape of the target."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
# set model to WeightedLassoCV by default so there's always a model to get and set attributes on
self.model = WeightedLassoCV(*args, **kwargs)
# whitelist known params because full set is not necessarily identical between LassoCV and MultiTaskLassoCV
# (e.g. former has 'positive' and 'precompute' while latter does not)
known_params = set(['eps', 'n_alphas', 'alphas', 'fit_intercept', 'normalize', 'max_iter', 'tol', 'copy_X',
'cv', 'verbose', 'n_jobs', 'random_state', 'selection'])
def fit(self, X, y, sample_weight=None):
self.needs_unravel = False
params = {key: value
for (key, value) in self.get_params().items()
if key in self.known_params}
if ndim(y) == 2 and shape(y)[1] > 1:
self.model = WeightedMultiTaskLassoCV(**params)
else:
if ndim(y) == 2 and shape(y)[1] == 1:
y = np.ravel(y)
self.needs_unravel = True
self.model = WeightedLassoCV(**params)
self.model.fit(X, y, sample_weight)
# set intercept_ attribute
self.intercept_ = self.model.intercept_
# set coef_ attribute
self.coef_ = self.model.coef_
# set alpha_ attribute
self.alpha_ = self.model.alpha_
# set alphas_ attribute
self.alphas_ = self.model.alphas_
# set n_iter_ attribute
self.n_iter_ = self.model.n_iter_
return self
def predict(self, X):
predictions = self.model.predict(X)
return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions
def score(self, X, y, sample_weight=None):
return self.model.score(X, y, sample_weight)
def __getattr__(self, key):
if key in self.known_params:
return getattr(self.model, key)
else:
raise AttributeError("No attribute " + key)
def __setattr__(self, key, value):
if key in self.known_params:
setattr(self.model, key, value)
else:
super().__setattr__(key, value)
def get_params(self, deep=True):
"""Get parameters for this estimator."""
return self.model.get_params(deep=deep)
def set_params(self, **params):
"""Set parameters for this estimator."""
        self.model.set_params(**params)
        return self
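# Illustrative sketch (not part of the library): the wrapper picks WeightedLassoCV for a
# one-dimensional (or single-column) target and WeightedMultiTaskLassoCV for a genuinely
# multi-column target, exposing the same fit/predict surface either way. The data below
# is an assumption made purely for demonstration.
def _example_lasso_cv_wrapper():
    rng = np.random.RandomState(4)
    X = rng.normal(size=(150, 6))
    y_single = X[:, 0] + rng.normal(scale=0.1, size=150)
    y_multi = np.column_stack([y_single, -y_single])
    single_fit = WeightedLassoCVWrapper().fit(X, y_single)  # delegates to WeightedLassoCV
    multi_fit = WeightedLassoCVWrapper().fit(X, y_multi)    # delegates to WeightedMultiTaskLassoCV
    return single_fit.predict(X[:3]), multi_fit.predict(X[:3])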
class SelectiveRegularization:
"""
Estimator of a linear model where regularization is applied to only a subset of the coefficients.
Assume that our loss is
.. math::
\\ell(\\beta_1, \\beta_2) = \\lVert y - X_1 \\beta_1 - X_2 \\beta_2 \\rVert^2 + f(\\beta_2)
so that we're regularizing only the coefficients in :math:`\\beta_2`.
Then, since :math:`\\beta_1` doesn't appear in the penalty, the problem of finding :math:`\\beta_1` to minimize the
loss once :math:`\\beta_2` is known reduces to just a normal OLS regression, so that:
.. math::
\\beta_1 = (X_1^\\top X_1)^{-1}X_1^\\top(y - X_2 \\beta_2)
Plugging this into the loss, we obtain
.. math::
~& \\lVert y - X_1 (X_1^\\top X_1)^{-1}X_1^\\top(y - X_2 \\beta_2) - X_2 \\beta_2 \\rVert^2 + f(\\beta_2) \\\\
=~& \\lVert (I - X_1 (X_1^\\top X_1)^{-1}X_1^\\top)(y - X_2 \\beta_2) \\rVert^2 + f(\\beta_2)
But, letting :math:`M_{X_1} = I - X_1 (X_1^\\top X_1)^{-1}X_1^\\top`, we see that this is
.. math::
\\lVert (M_{X_1} y) - (M_{X_1} X_2) \\beta_2 \\rVert^2 + f(\\beta_2)
so finding the minimizing :math:`\\beta_2` can be done by regressing :math:`M_{X_1} y` on :math:`M_{X_1} X_2` using
the penalized regression method incorporating :math:`f`. Note that these are just the residual values of :math:`y`
and :math:`X_2` when regressed on :math:`X_1` using OLS.
Parameters
----------
unpenalized_inds : list of int, other 1-dimensional indexing expression, or callable
The indices that should not be penalized when the model is fit; all other indices will be penalized.
If this is a callable, it will be called with the arguments to `fit` and should return a corresponding
        indexing expression. For example, ``unpenalized_inds=lambda X, y: slice(1, -1)`` will result in only the first
and last indices being penalized.
penalized_model : :term:`regressor`
A penalized linear regression model
fit_intercept : bool, optional, default True
Whether to fit an intercept; the intercept will not be penalized if it is fit
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
intercept_ : float or array of shape (n_targets)
Independent term in the linear model.
penalized_model : :term:`regressor`
The penalized linear regression model, cloned from the one passed into the initializer
"""
def __init__(self, unpenalized_inds, penalized_model, fit_intercept=True):
self._unpenalized_inds_expr = unpenalized_inds
self.penalized_model = clone(penalized_model)
self._fit_intercept = fit_intercept
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : array-like, shape (n, d_x)
The features to regress against
y : array-like, shape (n,) or (n, d_y)
The regression target
sample_weight : array-like, shape (n,), optional, default None
Relative weights for each sample
"""
X, y = check_X_y(X, y, multi_output=True, estimator=self)
if callable(self._unpenalized_inds_expr):
if sample_weight is None:
self._unpenalized_inds = self._unpenalized_inds_expr(X, y)
else:
self._unpenalized_inds = self._unpenalized_inds_expr(X, y, sample_weight=sample_weight)
else:
self._unpenalized_inds = self._unpenalized_inds_expr
mask = np.ones(X.shape[1], dtype=bool)
mask[self._unpenalized_inds] = False
self._penalized_inds = np.arange(X.shape[1])[mask]
X1 = X[:, self._unpenalized_inds]
X2 = X[:, self._penalized_inds]
X2_res = X2 - LinearRegression(fit_intercept=self._fit_intercept).fit(X1, X2,
sample_weight=sample_weight).predict(X1)
y_res = y - LinearRegression(fit_intercept=self._fit_intercept).fit(X1, y,
sample_weight=sample_weight).predict(X1)
if sample_weight is not None:
self.penalized_model.fit(X2_res, y_res, sample_weight=sample_weight)
else:
self.penalized_model.fit(X2_res, y_res)
# The unpenalized model can't contain an intercept, because in the analysis above
# we rely on the fact that M(X beta) = (M X) beta, but M(X beta + c) is not the same
# as (M X) beta + c, so the learned coef and intercept will be wrong
intercept = self.penalized_model.predict(np.zeros_like(X2[0:1]))
if not np.allclose(intercept, 0):
raise AttributeError("The penalized model has a non-zero intercept; to fit an intercept "
"you should instead either set fit_intercept to True when initializing the "
"SelectiveRegression instance (for an unpenalized intercept) or "
"explicitly add a column of ones to the data being fit and include that "
"column in the penalized indices.")
# now regress X1 on y - X2 * beta2 to learn beta1
self._model_X1 = LinearRegression(fit_intercept=self._fit_intercept)
self._model_X1.fit(X1, y - self.penalized_model.predict(X2), sample_weight=sample_weight)
# set coef_ and intercept_ attributes
self.coef_ = np.empty(shape(y)[1:] + shape(X)[1:])
self.coef_[..., self._penalized_inds] = self.penalized_model.coef_
self.coef_[..., self._unpenalized_inds] = self._model_X1.coef_
# Note that the penalized model should *not* have an intercept
self.intercept_ = self._model_X1.intercept_
return self
def predict(self, X):
"""
Make a prediction for each sample.
Parameters
----------
X : array-like, shape (m, d_x)
The samples whose targets to predict
Output
------
arr : array-like, shape (m,) or (m, d_y)
The predicted targets
"""
check_is_fitted(self, "coef_")
X1 = X[:, self._unpenalized_inds]
X2 = X[:, self._penalized_inds]
return self._model_X1.predict(X1) + self.penalized_model.predict(X2)
def score(self, X, y):
"""
Score the predictions for a set of features to ground truth.
Parameters
----------
X : array-like, shape (m, d_x)
The samples to predict
y : array-like, shape (m,) or (m, d_y)
The ground truth targets
Output
------
score : float
The model's score
"""
check_is_fitted(self, "coef_")
X, y = check_X_y(X, y, multi_output=True, estimator=self)
return r2_score(y, self.predict(X))
known_params = {'known_params', 'coef_', 'intercept_', 'penalized_model',
'_unpenalized_inds_expr', '_fit_intercept', '_unpenalized_inds', '_penalized_inds', '_model_X1'}
def __getattr__(self, key):
# don't proxy special methods
if key.startswith('__'):
raise AttributeError(key)
# don't pass get_params through to model, because that will cause sklearn to clone this
# regressor incorrectly
if key != "get_params" and key not in self.known_params:
return getattr(self.penalized_model, key)
else:
# Note: for known attributes that have been set this method will not be called,
# so we should just throw here because this is an attribute belonging to this class
# but which hasn't yet been set on this instance
raise AttributeError("No attribute " + key)
def __setattr__(self, key, value):
if key not in self.known_params:
setattr(self.penalized_model, key, value)
else:
super().__setattr__(key, value)
class _StatsModelsWrapper(BaseEstimator):
""" Parent class for statsmodels linear models. At init time each children class should set the
boolean flag property fit_intercept. At fit time, each children class must calculate and set the
following properties:
_param: (m,) or (m, p) array
        Where m is the number of features and p is the number of outcomes; this corresponds to the
        coefficients of the linear model (including the intercept as the first entry if fit_intercept=True).
_param_var: (m, m) or (p, m, m) array
        Where m is the number of features and p is the number of outcomes; each (m, m) matrix corresponds
to the scaled covariance matrix of the parameters of the linear model.
_n_out: the second dimension of the training y, or 0 if y is a vector
"""
def predict(self, X):
"""
Predicts the output given an array of instances.
Parameters
----------
X : (n, d) array like
The covariates on which to predict
Returns
-------
predictions : {(n,) array, (n,p) array}
The predicted mean outcomes
"""
if X is None:
X = np.empty((1, 0))
if self.fit_intercept:
X = add_constant(X, has_constant='add')
return np.matmul(X, self._param)
@property
def coef_(self):
"""
Get the model's coefficients on the covariates.
Returns
-------
coef_ : {(d,), (p, d)} nd array like
The coefficients of the variables in the linear regression. If label y
            was p-dimensional, then the result is a matrix of coefficients, whose p-th
            row contains the coefficients corresponding to the p-th coordinate of the label.
"""
if self.fit_intercept:
if self._n_out == 0:
return self._param[1:]
else:
return self._param[1:].T
else:
if self._n_out == 0:
return self._param
else:
return self._param.T
@property
def intercept_(self):
"""
Get the intercept(s) (or 0 if no intercept was fit).
Returns
-------
intercept_ : float or (p,) nd array like
            The intercept of the linear regression. If label y was p-dimensional, then the result is a vector
            whose p-th entry contains the intercept corresponding to the p-th coordinate of the label.
"""
return self._param[0] if self.fit_intercept else (0 if self._n_out == 0 else np.zeros(self._n_out))
@property
def _param_stderr(self):
"""
The standard error of each parameter that was estimated.
Returns
-------
_param_stderr : {(d (+1),) (d (+1), p)} nd array like
The standard error of each parameter that was estimated.
"""
if self._n_out == 0:
return np.sqrt(np.clip(np.diag(self._param_var), 0, np.inf))
else:
return np.array([np.sqrt(np.clip(np.diag(v), 0, np.inf)) for v in self._param_var]).T
@property
def coef_stderr_(self):
"""
Gets the standard error of the fitted coefficients.
Returns
-------
coef_stderr_ : {(d,), (p, d)} nd array like
The standard error of the coefficients
"""
return self._param_stderr[1:].T if self.fit_intercept else self._param_stderr.T
@property
def intercept_stderr_(self):
"""
Gets the standard error of the intercept(s) (or 0 if no intercept was fit).
Returns
-------
intercept_stderr_ : float or (p,) nd array like
The standard error of the intercept(s)
"""
return self._param_stderr[0] if self.fit_intercept else (0 if self._n_out == 0 else np.zeros(self._n_out))
def prediction_stderr(self, X):
"""
Gets the standard error of the predictions.
Parameters
----------
X : (n, d) array like
The covariates at which to predict
Returns
-------
prediction_stderr : (n, p) array like
The standard error of each coordinate of the output at each point we predict
"""
if X is None:
X = np.empty((1, 0))
if self.fit_intercept:
X = add_constant(X, has_constant='add')
if self._n_out == 0:
return np.sqrt(np.clip(np.sum(np.matmul(X, self._param_var) * X, axis=1), 0, np.inf))
else:
return np.array([np.sqrt(np.clip(np.sum(np.matmul(X, v) * X, axis=1), 0, np.inf))
for v in self._param_var]).T
def coef__interval(self, alpha=0.05):
"""
Gets a confidence interval bounding the fitted coefficients.
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
coef__interval : {tuple ((p, d) array, (p,d) array), tuple ((d,) array, (d,) array)}
The lower and upper bounds of the confidence interval of the coefficients
"""
return np.array([_safe_norm_ppf(alpha / 2, loc=p, scale=err)
for p, err in zip(self.coef_, self.coef_stderr_)]),\
np.array([_safe_norm_ppf(1 - alpha / 2, loc=p, scale=err)
for p, err in zip(self.coef_, self.coef_stderr_)])
def intercept__interval(self, alpha=0.05):
"""
Gets a confidence interval bounding the intercept(s) (or 0 if no intercept was fit).
Parameters
----------
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
intercept__interval : {tuple ((p,) array, (p,) array), tuple (float, float)}
The lower and upper bounds of the confidence interval of the intercept(s)
"""
if not self.fit_intercept:
return (0 if self._n_out == 0 else np.zeros(self._n_out)),\
(0 if self._n_out == 0 else np.zeros(self._n_out))
if self._n_out == 0:
return _safe_norm_ppf(alpha / 2, loc=self.intercept_, scale=self.intercept_stderr_),\
_safe_norm_ppf(1 - alpha / 2, loc=self.intercept_, scale=self.intercept_stderr_)
else:
return np.array([_safe_norm_ppf(alpha / 2, loc=p, scale=err)
for p, err in zip(self.intercept_, self.intercept_stderr_)]),\
np.array([_safe_norm_ppf(1 - alpha / 2, loc=p, scale=err)
for p, err in zip(self.intercept_, self.intercept_stderr_)])
def predict_interval(self, X, alpha=0.05):
"""
Gets a confidence interval bounding the prediction.
Parameters
----------
X : (n, d) array like
The covariates on which to predict
alpha : float, default 0.05
The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
of the parameter distribution as confidence interval
Returns
-------
prediction_intervals : {tuple ((n,) array, (n,) array), tuple ((n,p) array, (n,p) array)}
The lower and upper bounds of the confidence intervals of the predicted mean outcomes
"""
return np.array([_safe_norm_ppf(alpha / 2, loc=p, scale=err)
for p, err in zip(self.predict(X), self.prediction_stderr(X))]),\
np.array([_safe_norm_ppf(1 - alpha / 2, loc=p, scale=err)
for p, err in zip(self.predict(X), self.prediction_stderr(X))])
class StatsModelsLinearRegression(_StatsModelsWrapper):
"""
Class which mimics weighted linear regression from the statsmodels package.
However, unlike statsmodels WLS, this class also supports sample variances in addition to sample weights,
which enables more accurate inference when working with summarized data.
Parameters
----------
fit_intercept : bool (optional, default=True)
Whether to fit an intercept in this model
    cov_type : string (optional, default='HC0')
        The covariance estimator to use; must be one of 'nonrobust', 'HC0', or 'HC1'
"""
def __init__(self, fit_intercept=True, cov_type="HC0"):
self.cov_type = cov_type
self.fit_intercept = fit_intercept
return
def _check_input(self, X, y, sample_weight, freq_weight, sample_var):
"""Check dimensions and other assertions."""
X, y, sample_weight, freq_weight, sample_var = check_input_arrays(
X, y, sample_weight, freq_weight, sample_var, dtype='numeric')
if X is None:
X = np.empty((y.shape[0], 0))
if self.fit_intercept:
X = add_constant(X, has_constant='add')
# set default values for None
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
if freq_weight is None:
freq_weight = np.ones(y.shape[0])
if sample_var is None:
sample_var = np.zeros(y.shape)
# check freq_weight should be integer and should be accompanied by sample_var
if np.any(np.not_equal(np.mod(freq_weight, 1), 0)):
raise AttributeError("Frequency weights must all be integers for inference to be valid!")
if sample_var.ndim < 2:
if np.any(np.equal(freq_weight, 1) & np.not_equal(sample_var, 0)):
warnings.warn(
"Variance was set to non-zero for an observation with freq_weight=1! "
"sample_var represents the variance of the original observations that are "
"summarized in this sample. Hence, cannot have a non-zero variance if only "
"one observations was summarized. Inference will be invalid!")
elif np.any(np.not_equal(freq_weight, 1) & np.equal(sample_var, 0)):
warnings.warn(
"Variance was set to zero for an observation with freq_weight>1! "
"sample_var represents the variance of the original observations that are "
"summarized in this sample. If it's zero, please use sample_wegiht instead "
"to reflect the weight for each individual sample!")
else:
if np.any(np.equal(freq_weight, 1) & np.not_equal(np.sum(sample_var, axis=1), 0)):
warnings.warn(
"Variance was set to non-zero for an observation with freq_weight=1! "
"sample_var represents the variance of the original observations that are "
"summarized in this sample. Hence, cannot have a non-zero variance if only "
"one observations was summarized. Inference will be invalid!")
elif np.any(np.not_equal(freq_weight, 1) & np.equal(np.sum(sample_var, axis=1), 0)):
warnings.warn(
"Variance was set to zero for an observation with freq_weight>1! "
"sample_var represents the variance of the original observations that are "
"summarized in this sample. If it's zero, please use sample_wegiht instead "
"to reflect the weight for each individual sample!")
# check array shape
assert (X.shape[0] == y.shape[0] == sample_weight.shape[0] ==
freq_weight.shape[0] == sample_var.shape[0]), "Input lengths not compatible!"
if y.ndim >= 2:
assert (y.ndim == sample_var.ndim and
y.shape[1] == sample_var.shape[1]), "Input shapes not compatible: {}, {}!".format(
y.shape, sample_var.shape)
# weight X and y and sample_var
weighted_X = X * np.sqrt(sample_weight).reshape(-1, 1)
if y.ndim < 2:
weighted_y = y * np.sqrt(sample_weight)
sample_var = sample_var * sample_weight
else:
weighted_y = y * np.sqrt(sample_weight).reshape(-1, 1)
sample_var = sample_var * (sample_weight.reshape(-1, 1))
return weighted_X, weighted_y, freq_weight, sample_var
def fit(self, X, y, sample_weight=None, freq_weight=None, sample_var=None):
"""
Fits the model.
Parameters
----------
X : (N, d) nd array like
            covariates
y : {(N,), (N, p)} nd array like
output variable(s)
sample_weight : (N,) array like or None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (N, ) array like of integers or None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(N,), (N, p)} nd array like or None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
Returns
-------
self : StatsModelsLinearRegression
"""
# TODO: Add other types of covariance estimation (e.g. Newey-West (HAC), HC2, HC3)
X, y, freq_weight, sample_var = self._check_input(X, y, sample_weight, freq_weight, sample_var)
WX = X * np.sqrt(freq_weight).reshape(-1, 1)
if y.ndim < 2:
self._n_out = 0
wy = y * np.sqrt(freq_weight)
else:
self._n_out = y.shape[1]
wy = y * np.sqrt(freq_weight).reshape(-1, 1)
param, _, rank, _ = np.linalg.lstsq(WX, wy, rcond=None)
if rank < param.shape[0]:
warnings.warn("Co-variance matrix is underdetermined. Inference will be invalid!")
sigma_inv = np.linalg.pinv(np.matmul(WX.T, WX))
self._param = param
var_i = sample_var + (y - np.matmul(X, param))**2
n_obs = np.sum(freq_weight)
df = len(param) if self._n_out == 0 else param.shape[0]
if n_obs <= df:
warnings.warn("Number of observations <= than number of parameters. Using biased variance calculation!")
correction = 1
else:
correction = (n_obs / (n_obs - df))
if (self.cov_type is None) or (self.cov_type == 'nonrobust'):
if y.ndim < 2:
self._var = correction * np.average(var_i, weights=freq_weight) * sigma_inv
else:
vars = correction * np.average(var_i, weights=freq_weight, axis=0)
self._var = [v * sigma_inv for v in vars]
elif (self.cov_type == 'HC0'):
if y.ndim < 2:
weighted_sigma = np.matmul(WX.T, WX * var_i.reshape(-1, 1))
self._var = np.matmul(sigma_inv, np.matmul(weighted_sigma, sigma_inv))
else:
self._var = []
for j in range(self._n_out):
weighted_sigma = np.matmul(WX.T, WX * var_i[:, [j]])
self._var.append(np.matmul(sigma_inv, np.matmul(weighted_sigma, sigma_inv)))
elif (self.cov_type == 'HC1'):
if y.ndim < 2:
weighted_sigma = np.matmul(WX.T, WX * var_i.reshape(-1, 1))
self._var = correction * np.matmul(sigma_inv, np.matmul(weighted_sigma, sigma_inv))
else:
self._var = []
for j in range(self._n_out):
weighted_sigma = np.matmul(WX.T, WX * var_i[:, [j]])
self._var.append(correction * np.matmul(sigma_inv, np.matmul(weighted_sigma, sigma_inv)))
else:
raise AttributeError("Unsupported cov_type. Must be one of nonrobust, HC0, HC1.")
self._param_var = np.array(self._var)
return self
class StatsModelsRLM(_StatsModelsWrapper):
"""
Class which mimics robust linear regression from the statsmodels package.
Parameters
----------
t : float (optional, default=1.345)
The tuning constant for Huber’s t function
maxiter : int (optional, default=50)
The maximum number of iterations to try
tol : float (optional, default=1e-08)
The convergence tolerance of the estimate
fit_intercept : bool (optional, default=True)
Whether to fit an intercept in this model
cov_type : one of {'H1', 'H2', or 'H3'} (optional, default='H1')
Indicates how the covariance matrix is estimated. See statsmodels.robust.robust_linear_model.RLMResults
for more information.
"""
def __init__(self, t=1.345,
maxiter=50,
tol=1e-08,
fit_intercept=True,
cov_type='H1'):
self.t = t
self.maxiter = maxiter
self.tol = tol
self.cov_type = cov_type
self.fit_intercept = fit_intercept
return
def _check_input(self, X, y):
"""Check dimensions and other assertions."""
if X is None:
X = np.empty((y.shape[0], 0))
assert (X.shape[0] == y.shape[0]), "Input lengths not compatible!"
return X, y
def fit(self, X, y):
"""
Fits the model.
Parameters
----------
X : (N, d) nd array like
            covariates
y : (N,) nd array like or (N, p) array like
output variable
Returns
-------
self : StatsModelsRLM
"""
X, y = self._check_input(X, y)
if self.fit_intercept:
X = add_constant(X, has_constant='add')
self._n_out = 0 if len(y.shape) == 1 else (y.shape[1],)
def model_gen(y):
return RLM(endog=y,
exog=X,
M=statsmodels.robust.norms.HuberT(t=self.t)).fit(cov=self.cov_type,
maxiter=self.maxiter,
tol=self.tol)
if y.ndim < 2:
self.model = model_gen(y)
self._param = self.model.params
self._param_var = self.model.cov_params()
else:
self.models = [model_gen(y[:, i]) for i in range(y.shape[1])]
self._param = np.array([mdl.params for mdl in self.models]).T
self._param_var = np.array([mdl.cov_params() for mdl in self.models])
return self
class StatsModels2SLS(_StatsModelsWrapper):
"""
Class that solves the moment equation E[(y-theta*T)*Z]=0
Parameters
----------
cov_type : one of {'HC0', 'HC1', 'nonrobust' or None} (optional, default='HC0')
Indicates how the covariance matrix is estimated.
"""
def __init__(self, cov_type="HC0"):
self.fit_intercept = False
self.cov_type = cov_type
return
def _check_input(self, Z, T, y, sample_weight):
"""Check dimensions and other assertions."""
# set default values for None
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# check array shape
assert (T.shape[0] == Z.shape[0] == y.shape[0] == sample_weight.shape[0]), "Input lengths not compatible!"
        # check that the number of instruments is at least the number of treatments
        if Z.shape[1] < T.shape[1]:
            raise AssertionError("The number of treatments cannot be larger than the number of instruments!")
        # weight Z, T and y
weighted_Z = Z * np.sqrt(sample_weight).reshape(-1, 1)
weighted_T = T * np.sqrt(sample_weight).reshape(-1, 1)
if y.ndim < 2:
weighted_y = y * np.sqrt(sample_weight)
else:
weighted_y = y * np.sqrt(sample_weight).reshape(-1, 1)
return weighted_Z, weighted_T, weighted_y
def fit(self, Z, T, y, sample_weight=None, freq_weight=None, sample_var=None):
"""
Fits the model.
Parameters
----------
Z : {(N, p)} nd array like
instrumental variables
T : {(N, p)} nd array like
treatment variables
y : {(N,), (N, p)} nd array like
output variables
sample_weight : (N,) array like or None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (N, ) array like of integers or None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(N,), (N, p)} nd array like or None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
Returns
-------
self : StatsModels2SLS
"""
assert freq_weight is None, "freq_weight is not supported yet for this class!"
assert sample_var is None, "sample_var is not supported yet for this class!"
Z, T, y = self._check_input(Z, T, y, sample_weight)
self._n_out = 0 if y.ndim < 2 else y.shape[1]
# learn point estimate
# solve first stage linear regression E[T|Z]
zT_z = np.dot(Z.T, Z)
zT_t = np.dot(Z.T, T)
# "that" means T̂
self._thatparams = np.linalg.solve(zT_z, zT_t)
that = np.dot(Z, self._thatparams)
# solve second stage linear regression E[Y|that]
# (T̂.T*T̂)^{-1}
thatT_that = np.dot(that.T, that)
thatT_y = np.dot(that.T, y)
param = np.linalg.solve(thatT_that, thatT_y)
self._param = param
n_obs = y.shape[0]
df = len(param) if self._n_out == 0 else param.shape[0]
if n_obs <= df:
warnings.warn("Number of observations <= than number of parameters. Using biased variance calculation!")
correction = 1
else:
correction = (n_obs / (n_obs - df))
# learn cov(theta)
# (T̂.T*T̂)^{-1}
thatT_that_inv = np.linalg.inv(thatT_that)
# sigma^2
var_i = (y - np.dot(T, param))**2
# reference: http://www.hec.unil.ch/documents/seminars/deep/361.pdf
if (self.cov_type is None) or (self.cov_type == 'nonrobust'):
if y.ndim < 2:
self._var = correction * np.average(var_i) * thatT_that_inv
else:
sigma2 = correction * np.average(var_i, axis=0)
self._var = [s * thatT_that_inv for s in sigma2]
elif (self.cov_type == 'HC0'):
if y.ndim < 2:
weighted_sigma = np.matmul(that.T, that * var_i.reshape(-1, 1))
self._var = np.matmul(thatT_that_inv, np.matmul(weighted_sigma, thatT_that_inv))
else:
self._var = []
for j in range(self._n_out):
weighted_sigma = np.matmul(that.T, that * var_i[:, [j]])
self._var.append(np.matmul(thatT_that_inv, np.matmul(weighted_sigma, thatT_that_inv)))
elif (self.cov_type == 'HC1'):
if y.ndim < 2:
weighted_sigma = np.matmul(that.T, that * var_i.reshape(-1, 1))
self._var = correction * np.matmul(thatT_that_inv, np.matmul(weighted_sigma, thatT_that_inv))
else:
self._var = []
for j in range(self._n_out):
weighted_sigma = np.matmul(that.T, that * var_i[:, [j]])
self._var.append(correction * np.matmul(thatT_that_inv,
np.matmul(weighted_sigma, thatT_that_inv)))
else:
raise AttributeError("Unsupported cov_type. Must be one of nonrobust, HC0, HC1.")
self._param_var = np.array(self._var)
return self | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/xinha/lang/ru.js |
// LANG: "ru", ENCODING: UTF-8
// Author: Yulya Shtyryakova, <[email protected]>
// Some additions by: Alexey Kirpichnikov, <[email protected]>
// I took French version as a source of English phrases because French version was the most comprehensive
// (fr.js was the largest file, actually) %)
// FOR TRANSLATORS:
//
// 1. PLEASE PUT YOUR CONTACT INFO IN THE ABOVE LINE
// (at least a valid email address)
//
// 2. PLEASE TRY TO USE UTF-8 FOR ENCODING;
// (if this is not possible, please include a comment
// that states what encoding is necessary.)
{
"Bold": "Полужирный",
"Italic": "Наклонный",
"Underline": "Подчеркнутый",
"Strikethrough": "Перечеркнутый",
"Subscript": "Нижний индекс",
"Superscript": "Верхний индекс",
"Justify Left": "По левому краю",
"Justify Center": "По центру",
"Justify Right": "По правому краю",
"Justify Full": "По ширине",
"Ordered List": "Нумерованный список",
"Bulleted List": "Маркированный список",
"Decrease Indent": "Уменьшить отступ",
"Increase Indent": "Увеличить отступ",
"Font Color": "Цвет шрифта",
"Background Color": "Цвет фона",
"Horizontal Rule": "Горизонтальный разделитель",
"Insert Web Link": "Вставить гиперссылку",
"Insert/Modify Image": "Вставить изображение",
"Insert Table": "Вставить таблицу",
"Toggle HTML Source": "Показать Html-код",
"Enlarge Editor": "Увеличить редактор",
"About this editor": "О редакторе",
"Help using editor": "Помощь",
"Current style": "Текущий стиль",
"Undoes your last action": "Отменить",
"Redoes your last action": "Повторить",
"Cut selection": "Вырезать",
"Copy selection": "Копировать",
"Paste from clipboard": "Вставить",
"Direction left to right": "Направление слева направо",
"Direction right to left": "Направление справа налево",
"Remove formatting": "Убрать форматирование",
"Select all": "Выделить все",
"Print document": "Печать",
"Clear MSOffice tags": "Удалить разметку MSOffice",
"Clear Inline Font Specifications": "Удалить непосредственное задание шрифтов",
"Would you like to clear font typefaces?": "Удалить типы шрифтов?",
"Would you like to clear font sizes?": "Удалить размеры шрифтов ?",
"Would you like to clear font colours?": "Удалить цвета шрифтов ?",
"Split Block": "Разделить блок",
"Toggle Borders": "Включить/выключить отображение границ",
"Save as": "Сохранить как",
"Insert/Overwrite": "Вставка/замена",
"— format —": "— форматирование —",
"Heading 1": "Заголовок 1",
"Heading 2": "Заголовок 2",
"Heading 3": "Заголовок 3",
"Heading 4": "Заголовок 4",
"Heading 5": "Заголовок 5",
"Heading 6": "Заголовок 6",
"Normal": "Обычный текст",
"Address": "Адрес",
"Formatted": "Отформатированный текст",
"— font —": "— шрифт —",
"— size —": "— размер —",
    // Dialogs
"OK": "OK",
"Cancel": "Отмена",
"Path": "Путь",
"You are in TEXT MODE. Use the [<>] button to switch back to WYSIWYG.": "Вы в режиме отображения Html-кода. нажмите кнопку [<>], чтобы переключиться в визуальный режим.",
"The Paste button does not work in Mozilla based web browsers (technical security reasons). Press CTRL-V on your keyboard to paste directly.": "Кнопка Вставить не работает в браузерах на основе Mozilla (по техническим причинам, связанным с безопасностью). Нажмите Ctrl-V на клавиатуре, чтобы вставить.",
"Your Document is not well formed. Check JavaScript console for details.": "Ваш документ неправильно сформирован. Посмотрите Консоль JavaScript, чтобы узнать подробности.",
"Alignment:": "Выравнивание",
"Not set": "Не установлено",
"Left": "По левому краю",
"Right": "По правому краю",
"Texttop": "По верхней границе текста",
"Absmiddle": "По середине текста",
"Baseline": "По нижней границе текста",
"Absbottom": "По нижней границе",
"Bottom": "По нижнему краю",
"Middle": "Посредине",
"Top": "По верхнему краю",
"Layout": "Расположение",
"Spacing": "Поля",
"Horizontal:": "По горизонтали",
"Horizontal padding": "Горизонтальные поля",
"Vertical:": "По вертикали",
"Vertical padding": "Вертикальные поля",
"Border thickness:": "Толщина рамки",
"Leave empty for no border": "Оставьте пустым, чтобы убрать рамку",
//Insert Link
"Insert/Modify Link": "Вставка/изменение ссылки",
"None (use implicit)": "По умолчанию",
"New window (_blank)": "Новое окно (_blank)",
"Same frame (_self)": "То же окно (_self)",
"Top frame (_top)": "Родительское окно (_top)",
"Other": "Другое",
"Target:": "Открывать в окне:",
"Title (tooltip):": "Всплывающая подсказка",
"URL:": "URL:",
"You must enter the URL where this link points to": "Вы должны указать URL, на который будет указывать ссылка",
"You need to select some text before creating a link": "Вы должны выделить текст, который будет преобразован в ссылку",
// Insert Table
"Insert Table": "Вставка таблицы",
"Rows:": "Строки",
"Number of rows": "Количество строк",
"Cols:": "Столбцы",
"Number of columns": "Количество столбцов",
"Width:": "Ширина",
"Width of the table": "Ширина таблицы",
"Percent": "проценты",
"Pixels": "пикселы",
"Em": "em",
"Width unit": "Единицы измерения",
"Fixed width columns": "Столбцы фиксированной ширины",
"Positioning of this table": "Расположение таблицы",
"Cell spacing:": "Расстояние между ячейками",
"Space between adjacent cells": "Расстояние между соседними ячейками",
"Cell padding:": "Поля в ячейках",
"Space between content and border in cell": "Расстояние между границей ячейки и текстом",
"You must enter a number of rows": "Вы должны ввести количество строк",
"You must enter a number of columns": "Вы должны ввести количество столбцов",
// Insert Image
"Insert Image": "Вставка изображения",
"Image URL:": "URL изображения",
"Enter the image URL here": "Вставьте адрес изображения",
"Preview": "Предварительный просмотр",
"Preview the image in a new window": "Предварительный просмотр в отдельном окне",
"Alternate text:": "Альтернативный текст",
"For browsers that don't support images": "Для браузеров, которые не отображают картинки",
"Positioning of this image": "Расположение изображения",
"Image Preview:": "Предварительный просмотр",
"You must enter the URL": "Вы должны ввести URL",
// Editor Help
"Xinha Help": "Помощь",
"Editor Help": "Помощь",
"Keyboard shortcuts": "Горячие клавиши",
"The editor provides the following key combinations:": "Редактор поддерживает следующие комбинации клавиш:",
"ENTER": "ENTER",
"new paragraph": "новый абзац",
"SHIFT-ENTER": "SHIFT+ENTER",
"insert linebreak": "перенос строки",
"Set format to paragraph": "Отформатировать абзац",
"Clean content pasted from Word": "Очистить текст, вставленный из Word",
"Headings": "Заголовки",
"Close": "Закрыть",
// Loading messages
"Loading in progress. Please wait !": "Загрузка... Пожалуйста, подождите.",
"Constructing main object": "Создание главного объекта",
"Constructing object": "Создание объекта",
"Register panel right": "Регистрация правой панели",
"Register panel left": "Регистрация левой панели",
"Register panel top": "Регистрация верхней панели",
"Register panel bottom": "Регистрация нижней панели",
"Create Toolbar": "Создание панели инструментов",
"Create StatusBar": "Создание панели состояния",
"Generate Xinha object": "Создание объекта Xinha",
"Init editor size": "Инициализация размера редактора",
"Init IFrame": "инициализация iframe",
"Register plugin $plugin": "Регистрация $plugin"
}; | PypiClean |
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/mode/r/r.js | CodeMirror.defineMode("r", function(config) {
function wordObj(str) {
var words = str.split(" "), res = {};
for (var i = 0; i < words.length; ++i) res[words[i]] = true;
return res;
}
var atoms = wordObj("NULL NA Inf NaN NA_integer_ NA_real_ NA_complex_ NA_character_");
var builtins = wordObj("list quote bquote eval return call parse deparse");
var keywords = wordObj("if else repeat while function for in next break");
var blockkeywords = wordObj("if else repeat while function for");
var opChars = /[+\-*\/^<>=!&|~$:]/;
var curPunc;
function tokenBase(stream, state) {
curPunc = null;
var ch = stream.next();
if (ch == "#") {
stream.skipToEnd();
return "comment";
} else if (ch == "0" && stream.eat("x")) {
stream.eatWhile(/[\da-f]/i);
return "number";
} else if (ch == "." && stream.eat(/\d/)) {
stream.match(/\d*(?:e[+\-]?\d+)?/);
return "number";
} else if (/\d/.test(ch)) {
stream.match(/\d*(?:\.\d+)?(?:e[+\-]\d+)?L?/);
return "number";
} else if (ch == "'" || ch == '"') {
state.tokenize = tokenString(ch);
return "string";
} else if (ch == "." && stream.match(/.[.\d]+/)) {
return "keyword";
} else if (/[\w\.]/.test(ch) && ch != "_") {
stream.eatWhile(/[\w\.]/);
var word = stream.current();
if (atoms.propertyIsEnumerable(word)) return "atom";
if (keywords.propertyIsEnumerable(word)) {
if (blockkeywords.propertyIsEnumerable(word)) curPunc = "block";
return "keyword";
}
if (builtins.propertyIsEnumerable(word)) return "builtin";
return "variable";
} else if (ch == "%") {
if (stream.skipTo("%")) stream.next();
return "variable-2";
} else if (ch == "<" && stream.eat("-")) {
return "arrow";
} else if (ch == "=" && state.ctx.argList) {
return "arg-is";
} else if (opChars.test(ch)) {
if (ch == "$") return "dollar";
stream.eatWhile(opChars);
return "operator";
} else if (/[\(\){}\[\];]/.test(ch)) {
curPunc = ch;
if (ch == ";") return "semi";
return null;
} else {
return null;
}
}
function tokenString(quote) {
return function(stream, state) {
if (stream.eat("\\")) {
var ch = stream.next();
if (ch == "x") stream.match(/^[a-f0-9]{2}/i);
else if ((ch == "u" || ch == "U") && stream.eat("{") && stream.skipTo("}")) stream.next();
else if (ch == "u") stream.match(/^[a-f0-9]{4}/i);
else if (ch == "U") stream.match(/^[a-f0-9]{8}/i);
else if (/[0-7]/.test(ch)) stream.match(/^[0-7]{1,2}/);
return "string-2";
} else {
var next;
while ((next = stream.next()) != null) {
if (next == quote) { state.tokenize = tokenBase; break; }
if (next == "\\") { stream.backUp(1); break; }
}
return "string";
}
};
}
function push(state, type, stream) {
state.ctx = {type: type,
indent: state.indent,
align: null,
column: stream.column(),
prev: state.ctx};
}
function pop(state) {
state.indent = state.ctx.indent;
state.ctx = state.ctx.prev;
}
return {
startState: function() {
return {tokenize: tokenBase,
ctx: {type: "top",
indent: -config.indentUnit,
align: false},
indent: 0,
afterIdent: false};
},
token: function(stream, state) {
if (stream.sol()) {
if (state.ctx.align == null) state.ctx.align = false;
state.indent = stream.indentation();
}
if (stream.eatSpace()) return null;
var style = state.tokenize(stream, state);
if (style != "comment" && state.ctx.align == null) state.ctx.align = true;
var ctype = state.ctx.type;
if ((curPunc == ";" || curPunc == "{" || curPunc == "}") && ctype == "block") pop(state);
if (curPunc == "{") push(state, "}", stream);
else if (curPunc == "(") {
push(state, ")", stream);
if (state.afterIdent) state.ctx.argList = true;
}
else if (curPunc == "[") push(state, "]", stream);
else if (curPunc == "block") push(state, "block", stream);
else if (curPunc == ctype) pop(state);
state.afterIdent = style == "variable" || style == "keyword";
return style;
},
indent: function(state, textAfter) {
if (state.tokenize != tokenBase) return 0;
var firstChar = textAfter && textAfter.charAt(0), ctx = state.ctx,
closing = firstChar == ctx.type;
if (ctx.type == "block") return ctx.indent + (firstChar == "{" ? 0 : config.indentUnit);
else if (ctx.align) return ctx.column + (closing ? 0 : 1);
else return ctx.indent + (closing ? 0 : config.indentUnit);
}
};
});
CodeMirror.defineMIME("text/x-rsrc", "r"); | PypiClean |
/Blackboard_LMS_CLI-1.0.9-py3-none-any.whl/bbcli/commands/contents.py | import json
from bbcli.utils.URL_builder import URL_builder
from bbcli.utils.utils import format_date
from bbcli.utils.error_handler import create_exception_handler, delete_exception_handler, list_exception_handler, update_exception_handler
import click
from bbcli.entities.content_builder_entitites import FileOptions, GradingOptions, StandardOptions, WeblinkOptions
from bbcli.services import contents_services
import concurrent.futures
from bbcli.entities.Node import Node
from bbcli.utils import content_utils
from bbcli.utils.content_handler import content_handler
from bbcli.views import contents_views
url_builder = URL_builder()
"""
GROUPS OF REUSABLE OPTIONS
"""
def standard_options(function):
function = click.option('-h', '--hide-content', is_flag=True,
help='Hide content for students')(function)
function = click.option(
'-r', '--reviewable', is_flag=True, help='Make content reviewable')(function)
function = click.option('--start-date', type=str,
help='When to make content available. Format: DD/MM/YY HH:MM:SS')(function)
function = click.option(
'--end-date', type=str, help='When to make content unavailable. Format: DD/MM/YY HH:MM:SS')(function)
return function
def grading_options(function):
function = click.option('-d', '--due-date', type=str,
help='Set a sumbission deadline for assignment. Format: DD/MM/YY HH:MM:SS')(function)
function = click.option('-a', '--max-attempts', type=int,
help='Set maximum amount of attempts')(function)
function = click.option('-u', '--unlimited-attempts',
is_flag=True, help='Enable unlimited attempts')(function)
function = click.option('-s', '--score', required=True,
type=int, help='Set assignment score reward')(function)
return function
def file_options(function):
function = click.option('-n', '--new-window',
'launch_in_new_window', is_flag=True)(function)
return function
def web_link_options(function):
function = click.option('-n', '--new-window',
'launch_in_new_window', is_flag=True)(function)
return function
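# The helpers above are plain decorators: stacking e.g. @standard_options onto a command attaches that
# group of click options to it. Illustrative sketch (hypothetical command, not part of the CLI):
#
#   @click.command(name='example')
#   @standard_options
#   def example(hide_content, reviewable, start_date, end_date):
#       ...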
@click.command(name='list', help='List contents\n\nFolders are blue and files are white')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'folder_id', required=False, type=str, help='FOLDER ID')
@click.option('-fo', '--folders-only', required=False, is_flag=True, help='List only folders')
@click.option('-ct', '--content-type', required=False, type=click.Choice(content_handler.keys(), case_sensitive=False))
@click.pass_context
@list_exception_handler
def list_contents(ctx: click.core.Context, course_id: str, folder_id: str, content_type: str, folders_only: bool) -> None:
if folder_id:
content_utils.check_content_handler(ctx, course_id, folder_id)
else:
ct = 'content' if content_type is None else content_type
click.echo(f'Listing the {ct}s...')
response = contents_services.list_contents(
ctx.obj['SESSION'], course_id)
data = response.json()['results']
folder_ids = []
node_ids = []
threads = []
with concurrent.futures.ThreadPoolExecutor() as executor:
for node in data:
root = Node(node)
worklist = [root]
folder_ids.append(node['id'])
args = [ctx, course_id, worklist, folder_ids,
node_ids, root, folders_only, content_type]
t = executor.submit(content_utils.list_contents_thread, *args)
threads.append(t)
for t in threads:
root_node = t.result()
if root_node is not None:
contents_views.list_tree(root_node, folder_ids, node_ids)
else:
click.ClickException(
'Cannot list folders only and a specific content type. Try either one.'
).show()
return
@click.command(name='get', help='Get content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-co', '--content', 'node_id', required=True, type=str, help='CONTENT ID')
@click.option('-p', '--path', required=False, type=click.Path(exists=True), help='Path to be downloaded to')
@click.pass_context
@list_exception_handler
def get_content(ctx: click.core.Context, course_id: str, node_id: str, path: str) -> None:
content_utils.check_content_handler(ctx, course_id, node_id, path)
@click.command(name='attachment', help='Add attachment to content\n\nOnly supports contents of type document and assignment')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID of the course where the content is located')
@click.option('-co', '--content', 'content_id', required=True, type=str, help='CONTENT ID of content to attach a file')
@click.argument('file_path', required=True, type=click.Path(exists=True))
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.pass_context
@create_exception_handler
def upload_attachment(ctx: click.core.Context, course_id: str, content_id: str, file_path: str, print_json: bool) -> None:
response = contents_services.upload_attachment(
ctx.obj['SESSION'], course_id, content_id, file_path)
contents_views.print_created_attachment_response(response, print_json)
@click.command(name='document', help='Create document content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=True, type=str, help='FOLDER ID')
@click.argument('title', required=True, type=str)
@click.argument('attachments', required=False, nargs=-1, type=click.Path())
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.option('-md', '--markdown', required=False, is_flag=True, help='Use this flag if you want to use markdown in body')
@standard_options
@click.pass_context
@create_exception_handler
def create_document(ctx: click.core.Context, course_id: str, parent_id: str, title: str,
hide_content: bool, reviewable: bool, start_date: str, end_date: str,
attachments: tuple, print_json: bool, markdown: bool) -> None:
standard_options = StandardOptions(
hide_content=hide_content, reviewable=reviewable)
set_dates(standard_options, start_date, end_date)
response = contents_services.create_document(
ctx.obj['SESSION'], course_id, parent_id, title, standard_options, attachments, markdown)
contents_views.print_created_content_response(response, print_json)
@click.command(name='file', help='Create file content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=True, type=str, help='FOLDER ID')
@click.argument('title', required=True, type=str)
@click.argument('file_path', required=True, type=click.Path(exists=True))
@file_options
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@standard_options
@click.pass_context
@create_exception_handler
def create_file(ctx: click.core.Context, course_id: str, parent_id: str, title: str, file_path: str,
launch_in_new_window: bool, hide_content: bool, reviewable: bool,
start_date: str, end_date: str, print_json: bool) -> None:
file_options = FileOptions(launch_in_new_window)
standard_options = StandardOptions(
hide_content=hide_content, reviewable=reviewable)
set_dates(standard_options, start_date, end_date)
response = contents_services.create_file(
ctx.obj['SESSION'], course_id, parent_id, title, file_path, file_options, standard_options)
contents_views.print_created_content_response(response, print_json)
@click.command(name='web-link', help='Create web link content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=True, type=str, help='FOLDER ID')
@click.argument('title', required=True, type=str)
@click.argument('url', required=True, type=str)
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@standard_options
@web_link_options
@click.pass_context
@create_exception_handler
def create_web_link(ctx: click.core.Context, course_id: str, parent_id: str, title: str, url: str,
launch_in_new_window: bool, hide_content: bool, reviewable: bool,
start_date: str, end_date: str, print_json: bool) -> None:
web_link_options = WeblinkOptions(launch_in_new_window)
standard_options = StandardOptions(hide_content, reviewable)
set_dates(standard_options, start_date, end_date)
response = contents_services.create_externallink(
ctx.obj['SESSION'], course_id, parent_id, title, url, web_link_options, standard_options)
contents_views.print_created_content_response(response, print_json)
@click.command(name='folder', help='Create folder')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=False, type=str, help='FOLDER ID of the parent folder')
@click.argument('title', required=True, type=str)
@click.option('--is-bb-page', is_flag=True, help='Make folder a blackboard page')
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.option('-md', '--markdown', required=False, is_flag=True, help='Use this flag if you want to use markdown in body')
@standard_options
@click.pass_context
@create_exception_handler
def create_folder(ctx: click.core.Context, course_id: str, parent_id: str, title: str,
hide_content: bool, reviewable: bool, is_bb_page: bool,
start_date: str, end_date: str, print_json: bool, markdown: bool) -> None:
standard_options = StandardOptions(hide_content, reviewable)
set_dates(standard_options, start_date, end_date)
response = contents_services.create_folder(
ctx.obj['SESSION'], course_id, parent_id, title, is_bb_page, standard_options, markdown)
contents_views.print_created_content_response(response, print_json)
@click.command(name='course-link', help='Create course link content\n\nRedirects user to the target content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=True, type=str, help='FOLDER ID')
@click.option('-t', '--target', 'target_id', required=True, type=str, help='TARGET ID')
@click.argument('title', required=True, type=str)
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.option('-md', '--markdown', required=False, is_flag=True, help='Use this flag if you want to use markdown in body')
@standard_options
@click.pass_context
@create_exception_handler
def create_courselink(ctx: click.core.Context, course_id: str, parent_id: str, title: str, target_id: str,
hide_content: bool, reviewable: bool,
start_date: str, end_date: str, print_json: bool, markdown: bool) -> None:
standard_options = StandardOptions(hide_content, reviewable)
set_dates(standard_options, start_date, end_date)
response = contents_services.create_courselink(
ctx.obj['SESSION'], course_id, parent_id, title, target_id, standard_options, markdown)
contents_views.print_created_content_response(response, print_json)
@click.command(name='assignment', help='Create assignment')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-f', '--folder', 'parent_id', required=True, type=str, help='FOLDER ID')
@click.argument('title', required=True, type=str)
@click.argument('attachments', required=False, nargs=-1, type=click.Path())
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.option('-md', '--markdown', required=False, is_flag=True, help='Use this flag if you want to use markdown in body')
@standard_options
@grading_options
@click.pass_context
@create_exception_handler
def create_assignment_from_contents(ctx: click.core.Context, course_id: str, parent_id: str, title: str,
hide_content: bool, reviewable: bool,
start_date: str, end_date: str,
due_date: str, max_attempts: int, unlimited_attempts: bool, score: int,
attachments: tuple, print_json: bool, markdown: bool) -> None:
standard_options = StandardOptions(hide_content, reviewable)
grading_options = GradingOptions(
attempts_allowed=max_attempts, is_unlimited_attemps_allowed=unlimited_attempts, score_possible=score)
set_dates(standard_options, start_date, end_date)
grading_options.due = format_date(due_date)
response = contents_services.create_assignment(
ctx.obj['SESSION'], course_id, parent_id, title, standard_options, grading_options, attachments, markdown)
contents_views.print_created_content_response(response, print_json)
@click.command(name='delete', help='Delete content')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID')
@click.option('-co', '--content', 'content_id', required=True, type=str, help='CONTENT ID')
@click.option('--delete-grades', is_flag=True, help='Delete grades if a grade column is associated with the content')
@click.pass_context
@delete_exception_handler
def delete_content(ctx: click.core.Context, course_id: str, content_id: str, delete_grades: bool) -> None:
contents_services.delete_content(
ctx.obj['SESSION'], course_id, content_id, delete_grades)
contents_views.print_deleted_content_response()
@click.command(name='update', help='Update content\n\nEditable content types: document, files, assignments, externallinks, courselinks')
@click.option('-c', '--course', 'course_id', required=True, type=str, help='COURSE ID.')
@click.option('-co', '--content', 'content_id', required=True, type=str, help='CONTENT ID')
@click.option('-j', '--json', 'print_json', required=False, is_flag=True, help='Print the data in json format')
@click.option('-md', '--markdown', required=False, is_flag=True, help='Use this flag if you want to use markdown in body')
@click.option('--advanced', required=False, is_flag=True, help='Use this flag if you also want to update the advanced settings of the content')
@click.pass_context
@update_exception_handler
def update_content(ctx: click.core.Context, course_id: str, content_id: str, print_json: bool, markdown: bool, advanced: bool) -> None:
if advanced:
response = contents_services.update_content_advanced(ctx.obj['SESSION'], course_id, content_id, markdown)
else:
response = contents_services.update_content(
ctx.obj['SESSION'], course_id, content_id, markdown)
contents_views.print_updated_content_response(response, print_json)
"""
HELPER FUNCTIONS
"""
def set_dates(standard_options: StandardOptions, start_date: str, end_date: str) -> None:
if start_date:
standard_options.date_interval.start_date = format_date(start_date)
if end_date:
standard_options.date_interval.end_date = format_date(end_date) | PypiClean |
/DESlib-0.3.5-py3-none-any.whl/deslib/util/dfp.py |
# Author: Rafael Menelau Oliveira e Cruz <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
def frienemy_pruning(X_query, X_dsel, y_dsel, ensemble, k):
"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
A classifier crosses the region of competence if it correctly
    classifies at least one sample for each different class in the region.
Parameters
----------
X_query : array-like of shape (n_samples, n_features)
Test set.
X_dsel : array-like of shape (n_samples, n_features)
Dynamic selection set.
y_dsel : array-like of shape (n_samples,)
The target values (Dynamic selection set).
ensemble : list of shape = [n_classifiers]
The ensemble of classifiers to be pruned.
k : int
Number of neighbors used to compute the regions of competence.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""
predictions = np.zeros((X_dsel.shape[0], len(ensemble)),
dtype=np.intp)
for index, clf in enumerate(ensemble):
predictions[:, index] = clf.predict(X_dsel)
hit_miss = predictions == y_dsel[:, np.newaxis]
competence_region = KNeighborsClassifier(n_neighbors=k).fit(X_dsel, y_dsel)
neighbors = competence_region.kneighbors(X_query, return_distance=False)
return frienemy_pruning_preprocessed(neighbors, y_dsel, hit_miss)
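# Illustrative usage sketch (hypothetical pool of classifiers, not part of the library):
#
#   from sklearn.ensemble import BaggingClassifier
#   pool = BaggingClassifier(n_estimators=10).fit(X_train, y_train).estimators_
#   dfp_mask = frienemy_pruning(X_query, X_dsel, y_dsel, pool, k=7)
#   # dfp_mask[i, j] == 1 -> base classifier j is kept for query sample i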
def frienemy_pruning_preprocessed(neighbors, y_val, hit_miss):
"""Implements the Online Pruning method (frienemy) which prunes base
classifiers that do not cross the region of competence of a given instance.
A classifier crosses the region of competence if it correctly
    classifies at least one sample for each different class in the region.
Notes
-----
This implementation assumes the regions of competence of each query example
(neighbors) and the predictions for the dynamic selection data (hit_miss)
were already pre-computed.
Parameters
----------
neighbors : array-like of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors.
y_val : array-like of shape (n_samples,)
The target values (class labels).
hit_miss : array-like of shape (n_samples, n_classifiers)
Matrix containing 1 when the base classifier made the correct
prediction, 0 otherwise.
Returns
-------
DFP_mask : array-like of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0
otherwise.
"""
if neighbors.ndim < 2:
neighbors = neighbors.reshape(1, -1)
n_samples = neighbors.shape[0]
n_classifiers = hit_miss.shape[1]
dfp_mask = np.zeros((n_samples, n_classifiers))
# TODO: vectorize this code?
for sample_idx in range(n_samples):
curr_neighbors = neighbors[sample_idx]
neighbors_y = y_val[curr_neighbors]
if len(set(neighbors_y)) > 1:
# Indecision region. Check if the base classifier predict the
# correct label for a sample belonging to each class.
for clf_index in range(n_classifiers):
[mask] = np.where(hit_miss[curr_neighbors, clf_index])
if len(set(neighbors_y[mask])) > 1:
dfp_mask[sample_idx, clf_index] = 1.0
else:
# Safe region.
dfp_mask[sample_idx, :] = 1.0
# rows that all classifiers were pruned are set to 1.0
dfp_mask[np.all(dfp_mask == 0, axis=1)] = 1.0
return dfp_mask | PypiClean |
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/server/clients/protocol/StateS21.py |
# Copyright (c) 2015-2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
State S21 : Login
"""
import logging
from ...util.funcutils import singleton
from .StateSCC import StateSCC
from ..DBHandler import DBHandler
@singleton
class StateS21(StateSCC):
"""State S21 : Login"""
def do(self, client, data):
"""Action of the state S21: control client login and id"""
try:
# Control challenge
if self.control_challenge(client, data, b'S21.7'):
# Test for S21 command
is_cd_S21 = data[170:175] == b"LOGIN"
if not is_cd_S21:
raise Exception('S21 protocol error')
eid = data[176:345] # id encrypted
elogin = data[346:] # Login encrypted
# Compute client id
login = client.ephecc.decrypt(elogin)
id = self.compute_client_id(client.ms, login)
# Get id from client
id_from_client = client.ephecc.decrypt(eid)
# If ids are not equal
if id != id_from_client:
msg = b'ERROR;application protocol error'
client.loop.call_soon_threadsafe(client.transport.write, msg)
raise Exception('S21: incorrect id')
# Test if login exists
filename = self.compute_client_filename(id, client.ms, login)
exist = DBHandler.exist(client.dbpath, filename)
# If login is OK and ids are equal
if id == id_from_client and exist:
client.dbH = DBHandler(client.dbpath, filename)
client.loop.call_soon_threadsafe(
client.transport.write, b'OK')
client.state = client.states['31']
# If login is unknown
elif id == id_from_client and not exist:
ip, port = client.peername
client.shield.add_suspect_ip(ip) # Suspect client ?
msg = b'ERROR;application protocol error'
client.loop.call_soon_threadsafe(client.transport.write, msg)
raise Exception('S21: user account does not exist')
logging.info('Login from {}'.format(client.peername))
except Exception as exc:
# Schedule a callback to client exception handler
client.loop.call_soon_threadsafe(client.exception_handler, exc) | PypiClean |
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/sharepoint/files/file.py | from office365.runtime.client_result import ClientResult
from office365.runtime.compat import is_string_type
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.queries.function import FunctionQuery
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.runtime.queries.update_entity import UpdateEntityQuery
from office365.sharepoint.activities.capabilities import ActivityCapabilities
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.files.versions.event import FileVersionEvent
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.folders.folder import Folder
from office365.sharepoint.permissions.irm.effective_settings import EffectiveInformationRightsManagementSettings
from office365.sharepoint.permissions.irm.settings import InformationRightsManagementSettings
from office365.sharepoint.principal.users.user import User
from office365.sharepoint.files.versions.collection import FileVersionCollection
from office365.sharepoint.listitems.listitem import ListItem
from office365.sharepoint.utilities.move_copy_options import MoveCopyOptions
from office365.sharepoint.utilities.move_copy_util import MoveCopyUtil
from office365.sharepoint.utilities.upload_status import UploadStatus
from office365.sharepoint.utilities.wopi_frame_action import SPWOPIFrameAction
from office365.sharepoint.webparts.limited_manager import LimitedWebPartManager
from office365.sharepoint.types.resource_path import ResourcePath as SPResPath
from office365.sharepoint.webparts.personalization_scope import PersonalizationScope
class AbstractFile(BaseEntity):
def read(self):
"""Immediately read content of file"""
if not self.is_property_available("ServerRelativeUrl"):
raise ValueError
response = File.open_binary(
self.context, self.properties["ServerRelativeUrl"])
return response.content
def write(self, content):
"""Immediately writes content of file"""
if not self.is_property_available("ServerRelativeUrl"):
raise ValueError
response = File.save_binary(
self.context, self.properties["ServerRelativeUrl"], content)
return response
class File(AbstractFile):
"""Represents a file in a SharePoint Web site that can be a Web Part Page, an item in a document library,
or a file in a folder."""
@staticmethod
def from_url(abs_url):
"""
        Retrieves a File from an absolute url
:type abs_url: str
"""
from office365.sharepoint.client_context import ClientContext
ctx = ClientContext.from_url(abs_url)
file_relative_url = abs_url.replace(ctx.base_url, "")
return_type = ctx.web.get_file_by_server_relative_url(file_relative_url)
return return_type
def create_anonymous_link(self, is_edit_link=False):
"""Create an anonymous link which can be used to access a document without needing to authenticate.
:param bool is_edit_link: If true, the link will allow the guest user edit privileges on the item.
"""
return_type = ClientResult(self.context, str())
def _file_loaded():
from office365.sharepoint.webs.web import Web
Web.create_anonymous_link(self.context, self.serverRelativeUrl, is_edit_link, return_type)
self.ensure_property("ServerRelativeUrl", _file_loaded)
return return_type
def create_anonymous_link_with_expiration(self, expiration, is_edit_link=False):
"""Creates and returns an anonymous link that can be used to access a document without needing to authenticate.
:param bool is_edit_link: If true, the link will allow the guest user edit privileges on the item.
:param datetime.datetime expiration: A date/time string for which the format conforms to the ISO 8601:2004(E) complete
representation for calendar date and time of day, and which represents the time and date of expiry for the
anonymous link. Both the minutes and hour value MUST be specified for the difference between the local and
UTC time. Midnight is represented as 00:00:00.
"""
return_type = ClientResult(self.context, str())
def _file_loaded():
from office365.sharepoint.webs.web import Web
Web.create_anonymous_link_with_expiration(self.context, self.serverRelativeUrl, is_edit_link,
expiration.isoformat(timespec='seconds'), return_type)
self.ensure_property("ServerRelativeUrl", _file_loaded)
return return_type
def get_content(self):
"""Downloads a file content"""
return_type = ClientResult(self.context, bytes())
qry = FunctionQuery(self, "$value", return_type=return_type)
self.context.add_query(qry)
return return_type
def get_absolute_url(self):
"""Gets absolute url of a File"""
return_type = ClientResult(self.context, str())
def _loaded():
return_type.set_property("__value", self.listItemAllFields.properties.get("EncodedAbsUrl"))
self.listItemAllFields.ensure_property("EncodedAbsUrl", _loaded)
return return_type
def get_sharing_information(self):
"""Gets the sharing information for a file."""
return self.listItemAllFields.get_sharing_information()
def get_wopi_frame_url(self, action=SPWOPIFrameAction.View):
"""
Returns the full URL to the SharePoint frame page that will initiate the specified WOPI frame action with the
file's associated WOPI application. If there is no associated WOPI application or associated action,
the return value is an empty string.
:param str action: Specifies the WOPI frame action to initiate (for example, view or edit).
"""
return_type = ClientResult(self.context, str())
params = {
"action": action
}
qry = ServiceOperationQuery(self, "GetWOPIFrameUrl", params, None, None, return_type)
self.context.add_query(qry)
return return_type
def share_link(self, link_kind, expiration=None):
"""Creates a tokenized sharing link for a file based on the specified parameters and optionally
sends an email to the people that are listed in the specified parameters.
:param int link_kind: The kind of the tokenized sharing link to be created/updated or retrieved.
:param datetime or None expiration: A date/time string for which the format conforms to the ISO 8601:2004(E)
complete representation for calendar date and time of day and which represents the time and date of expiry
for the tokenized sharing link. Both the minutes and hour value MUST be specified for the difference
between the local and UTC time. Midnight is represented as 00:00:00. A null value indicates no expiry.
This value is only applicable to tokenized sharing links that are anonymous access links.
"""
return self.listItemAllFields.share_link(link_kind, expiration)
def unshare_link(self, link_kind, share_id=None):
"""
Removes the specified tokenized sharing link of the file.
:param int link_kind: The kind of tokenized sharing link that is intended to be removed.
:param str or None share_id: This optional value specifies the globally unique identifier (GUID) of the tokenized
sharing link that is intended to be removed.
"""
return self.listItemAllFields.unshare_link(link_kind, share_id)
def get_image_preview_uri(self, width, height, client_type=None):
"""
Returns the uri where the thumbnail with the closest size to the desired can be found.
The actual resolution of the thumbnail might not be the same as the desired values.
:param int width: The desired width of the resolution.
:param int height: The desired height of the resolution.
:param str client_type: The client type. Used for logging.
"""
return_type = ClientResult(self.context, str())
payload = {
"width": width,
"height": height,
"clientType": client_type
}
qry = ServiceOperationQuery(self, "GetImagePreviewUri", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
def get_image_preview_url(self, width, height, client_type=None):
"""
Returns the url where the thumbnail with the closest size to the desired can be found.
The actual resolution of the thumbnail might not be the same as the desired values.
:param int width: The desired width of the resolution.
:param int height: The desired height of the resolution.
:param str client_type: The client type. Used for logging.
"""
return_type = ClientResult(self.context, str())
payload = {
"width": width,
"height": height,
"clientType": client_type
}
qry = ServiceOperationQuery(self, "GetImagePreviewUrl", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
def recycle(self):
"""Moves the file to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
return_type = ClientResult(self.context, str())
qry = ServiceOperationQuery(self, "Recycle", None, None, None, return_type)
self.context.add_query(qry)
return return_type
def approve(self, comment):
"""
Approves the file submitted for content approval with the specified comment.
:param str comment: A string containing the comment.
"""
qry = ServiceOperationQuery(self, "Approve", {"comment": comment})
self.context.add_query(qry)
return self
def deny(self, comment):
"""Denies approval for a file that was submitted for content approval.
:param str comment: A string containing the comment.
"""
qry = ServiceOperationQuery(self, "Deny", {"comment": comment})
self.context.add_query(qry)
return self
def copyto(self, destination, overwrite=False):
"""Copies the file to the destination URL.
:param office365.sharepoint.folders.folder.Folder or str destination: Specifies the destination folder or
folder server relative url where to copy a file.
:param bool overwrite: Specifies whether a file with the same name is overwritten.
"""
return_type = File(self.context)
self.parent_collection.add_child(return_type)
def _copyto(destination_folder):
"""
:type destination_folder: Folder
"""
file_path = "/".join([str(destination_folder.serverRelativeUrl), self.name])
return_type.set_property("ServerRelativeUrl", file_path)
params = {
"strNewUrl": file_path,
"boverwrite": overwrite
}
qry = ServiceOperationQuery(self, "CopyTo", params)
self.context.add_query(qry)
def _source_file_resolved():
if isinstance(destination, Folder):
destination.ensure_property("ServerRelativeUrl", _copyto, destination)
else:
self.context.web.ensure_folder_path(destination).get().after_execute(_copyto)
self.ensure_property("ServerRelativeUrl", _source_file_resolved)
return return_type
def copyto_using_path(self, destination, overwrite=False):
"""
Copies the file to the destination path. Server MUST overwrite an existing file of the same name
if overwrite is true.
:param bool overwrite: Specifies whether a file with the same name is overwritten.
:param office365.sharepoint.folders.folder.Folder or str destination: Specifies the destination folder or
folder server relative url where to copy a file.
"""
return_type = File(self.context)
self.parent_collection.add_child(return_type)
def _copyto_using_path(destination_folder):
"""
:type destination_folder: Folder
"""
file_path = "/".join([str(destination_folder.server_relative_path), self.name])
return_type.set_property("ServerRelativePath", file_path)
params = {
"DecodedUrl": file_path,
"bOverWrite": overwrite
}
qry = ServiceOperationQuery(self, "CopyToUsingPath", params)
self.context.add_query(qry)
def _source_file_resolved():
if isinstance(destination, Folder):
destination.ensure_property("ServerRelativePath", _copyto_using_path, destination)
else:
self.context.web.ensure_folder_path(destination).get().select(["ServerRelativePath"])\
.after_execute(_copyto_using_path)
self.ensure_properties(["ServerRelativePath", "Name"], _source_file_resolved)
return return_type
def moveto(self, destination, flag):
"""Moves the file to the specified destination url.
:param str or office365.sharepoint.folders.folder.Folder destination: Specifies the existing folder or folder
site relative url.
:param int flag: Specifies the kind of move operation.
"""
def _update_file(return_type, new_file_url):
return_type.set_property("ServerRelativeUrl", new_file_url)
def _moveto(destination_folder):
"""
:type destination_folder: Folder
"""
file_path = "/".join([str(destination_folder.serverRelativeUrl), self.name])
params = {
"newurl": file_path,
"flags": flag
}
qry = ServiceOperationQuery(self, "moveto", params)
self.context.add_query(qry)
self.context.after_query_execute(_update_file, self, file_path)
def _source_file_resolved():
if isinstance(destination, Folder):
destination.ensure_property("ServerRelativeUrl", _moveto, destination)
else:
self.context.web.ensure_folder_path(destination).get().after_execute(_moveto)
self.ensure_properties(["ServerRelativeUrl", "Name"], _source_file_resolved)
return self
def move_to_using_path(self, destination, flag):
"""
Moves the file to the specified destination path.
:param str or office365.sharepoint.folders.folder.Folder destination: Specifies the destination folder path or
existing folder object
:param int flag: Specifies the kind of move operation.
"""
def _update_file(return_type, new_file_url):
return_type.set_property("ServerRelativePath", new_file_url)
def _move_to_using_path(destination_folder):
file_path = "/".join([str(destination_folder.server_relative_path), self.name])
params = {
"DecodedUrl": file_path,
"moveOperations": flag
}
qry = ServiceOperationQuery(self, "MoveToUsingPath", params)
self.context.add_query(qry)
self.context.after_query_execute(_update_file, self, file_path)
def _source_file_resolved():
if isinstance(destination, Folder):
destination.ensure_property("ServerRelativePath", _move_to_using_path, destination)
else:
self.context.web.ensure_folder_path(destination).get().select(["ServerRelativePath"])\
.after_execute(_move_to_using_path)
self.ensure_properties(["ServerRelativePath", "Name"], _source_file_resolved)
return self
def publish(self, comment):
"""Submits the file for content approval with the specified comment.
:param str comment: Specifies the comment.
"""
qry = ServiceOperationQuery(self, "Publish", {"comment": comment})
self.context.add_query(qry)
return self
def unpublish(self, comment):
"""Removes the file from content approval or unpublishes a major version.
:param str comment: Specifies the comment for UnPublish. Its length MUST be equal to or less than 1023.
"""
qry = ServiceOperationQuery(self, "unpublish", {"comment": comment})
self.context.add_query(qry)
return self
def check_access_and_post_view_audit_event(self):
""""""
return_type = ClientResult(self.context, bool())
qry = ServiceOperationQuery(self, "CheckAccessAndPostViewAuditEvent", return_type=return_type)
self.context.add_query(qry)
return return_type
def checkout(self):
"""Checks out the file from a document library based on the check-out type."""
qry = ServiceOperationQuery(self, "checkout")
self.context.add_query(qry)
return self
def checkin(self, comment, checkin_type):
"""
Checks the file in to a document library based on the check-in type.
:param str comment: A comment for the new version of the file.
:param int checkin_type: Specifies the type of check-in: 0 (minor), 1 (major) or 2 (overwrite).
For more information on check-in types, please see
https://docs.microsoft.com/en-us/previous-versions/office/sharepoint-csom/ee542953(v%3Doffice.15)
"""
params = {
"comment": comment,
"checkInType": checkin_type
}
qry = ServiceOperationQuery(self, "checkin", params)
self.context.add_query(qry)
return self
def undocheckout(self):
"""Reverts an existing checkout for the file."""
qry = ServiceOperationQuery(self, "UndoCheckout")
self.context.add_query(qry)
return self
def get_limited_webpart_manager(self, scope=PersonalizationScope.User):
"""Specifies the control set used to access, modify, or add Web Parts associated with this Web Part Page and
view.
:param int scope: Specifies the personalization scope value that depicts how Web Parts are viewed on the
Web Part Page.
"""
return LimitedWebPartManager(self.context,
ServiceOperationPath("GetLimitedWebPartManager", [scope], self.resource_path))
def open_binary_stream(self):
"""Opens the file as a stream."""
return_type = ClientResult(self.context, bytes())
qry = ServiceOperationQuery(self, "OpenBinaryStream", None, None, None, return_type)
self.context.add_query(qry)
return return_type
def save_binary_stream(self, stream):
"""Saves the file in binary format.
:param str or bytes stream: A stream containing the contents of the specified file.
"""
qry = ServiceOperationQuery(self, "SaveBinaryStream", None, stream)
self.context.add_query(qry)
return self
def get_upload_status(self, upload_id):
"""Gets the status of a chunk upload session.
:param str upload_id: The upload session ID.
"""
payload = {
"uploadId": upload_id,
}
return_type = UploadStatus(self.context)
qry = ServiceOperationQuery(self, "GetUploadStatus", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
def upload_with_checksum(self, upload_id, checksum, stream):
"""
:param str upload_id: The upload session ID.
:param str checksum:
:param bytes stream:
"""
return_type = File(self.context)
payload = {
"uploadId": upload_id,
"checksum": checksum,
"stream": stream
}
qry = ServiceOperationQuery(self, "UploadWithChecksum", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
def cancel_upload(self, upload_id):
"""
Aborts the chunk upload session without saving the uploaded data. If StartUpload (section 3.2.5.64.2.1.22)
created the file, the file will be deleted.
:param str upload_id: The upload session ID.
"""
payload = {
"uploadId": upload_id,
}
qry = ServiceOperationQuery(self, "CancelUpload", None, payload)
self.context.add_query(qry)
return self
def start_upload(self, upload_id, content):
"""Starts a new chunk upload session and uploads the first fragment.
:param bytes content: File content
:param str upload_id: Upload session id
"""
return_type = ClientResult(self.context, int())
params = {"uploadID": upload_id}
qry = ServiceOperationQuery(self, "startUpload", params, content, None, return_type)
self.context.add_query(qry)
return return_type
def continue_upload(self, upload_id, file_offset, content):
"""
Continues the chunk upload session with an additional fragment. The current file content is not changed.
:param str upload_id: Upload session id
:param int file_offset: File offset
:param bytes content: File content
"""
return_type = ClientResult(self.context, int())
qry = ServiceOperationQuery(self,
"continueUpload",
{
"uploadID": upload_id,
"fileOffset": file_offset,
},
content,
None,
return_type
)
self.context.add_query(qry)
return return_type
def finish_upload(self, upload_id, file_offset, content):
"""Uploads the last file fragment and commits the file. The current file content is changed when this method
completes.
:param str upload_id: Upload session id
:param int file_offset: File offset
:param bytes content: File content
"""
params = {
"uploadID": upload_id,
"fileOffset": file_offset
}
qry = ServiceOperationQuery(self, "finishUpload", params, content, None, self)
self.context.add_query(qry)
return self
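# Chunked upload sketch (illustrative only; upload_id is an arbitrary GUID string chosen by the
# caller, and each queued query still has to be executed through the shared client context):
#   offset = target_file.start_upload(upload_id, first_chunk)                   # opens the session
#   offset = target_file.continue_upload(upload_id, offset.value, next_chunk)   # repeat per chunk
#   target_file.finish_upload(upload_id, offset.value, last_chunk)              # commits the file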
@staticmethod
def save_binary(context, server_relative_url, content):
"""Uploads a file
:type context: office365.sharepoint.client_context.ClientContext
:type server_relative_url: str
:type content: str
"""
url = r"{0}/web/getFileByServerRelativePath(DecodedUrl='{1}')/\$value".format(context.service_root_url(),
server_relative_url)
request = RequestOptions(url)
request.method = HttpMethod.Post
request.set_header('X-HTTP-Method', 'PUT')
request.data = content
response = context.pending_request().execute_request_direct(request)
return response
@staticmethod
def open_binary(context, server_relative_url):
"""
Returns the file object located at the specified server-relative URL.
:type context: office365.sharepoint.client_context.ClientContext
:type server_relative_url: str
:return Response
"""
url = r"{0}/web/getFileByServerRelativePath(DecodedUrl='{1}')/\$value".format(context.service_root_url(),
server_relative_url)
request = RequestOptions(url)
request.method = HttpMethod.Get
response = context.pending_request().execute_request_direct(request)
return response
def download(self, file_object):
"""
Downloads the file content. Use this method to download content of a small size
:type file_object: typing.IO
"""
def _save_content(return_type):
file_object.write(return_type.value)
def _download_inner():
return_type = self.get_content().after_execute(_save_content)
self.ensure_property("ServerRelativePath", _download_inner)
return self
def download_session(self, file_object, chunk_downloaded=None, chunk_size=1024 * 1024, use_path=True):
"""
Downloads the file content. Use this method to download content of a large size
:type file_object: typing.IO
:type chunk_downloaded: (int)->None or None
:type chunk_size: int
:param bool use_path: File addressing by path flag
"""
def _download_as_stream():
qry = ServiceOperationQuery(self, "$value")
def _construct_download_request(request):
"""
:type request: office365.runtime.http.request_options.RequestOptions
"""
request.stream = True
request.method = HttpMethod.Get
self.context.before_execute(_construct_download_request)
def _process_download_response(response):
"""
:type response: requests.Response
"""
response.raise_for_status()
bytes_read = 0
for chunk in response.iter_content(chunk_size=chunk_size):
bytes_read += len(chunk)
if callable(chunk_downloaded):
chunk_downloaded(bytes_read)
file_object.write(chunk)
self.context.after_execute(_process_download_response)
self.context.add_query(qry)
if use_path:
self.ensure_property("ServerRelativePath", _download_as_stream)
else:
self.ensure_property("ServerRelativeUrl", _download_as_stream)
return self
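# Illustrative use of download_session (file names are placeholders; the queued query is executed
# later via the shared client context):
#   with open("large_file.bin", "wb") as local_file:
#       source_file.download_session(local_file, chunk_downloaded=lambda bytes_read: print(bytes_read))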
def rename(self, new_file_name):
"""
Rename a file
:param str new_file_name: A new file name
"""
item = self.listItemAllFields
item.set_property('FileLeafRef', new_file_name)
qry = UpdateEntityQuery(item)
self.context.add_query(qry)
return self
@property
def activity_capabilities(self):
return self.properties.get("ActivityCapabilities", ActivityCapabilities())
@property
def author(self):
"""Specifies the user who added the file."""
return self.properties.get('Author',
User(self.context, ResourcePath("Author", self.resource_path)))
@property
def checked_out_by_user(self):
"""Gets an object that represents the user who has checked out the file."""
return self.properties.get('CheckedOutByUser',
User(self.context, ResourcePath("CheckedOutByUser", self.resource_path)))
@property
def version_events(self):
"""Gets the history of events on this version object."""
return self.properties.get("VersionEvents",
BaseEntityCollection(self.context,
FileVersionEvent,
ResourcePath("VersionEvents", self.resource_path)))
@property
def effective_information_rights_management_settings(self):
"""
Returns the effective Information Rights Management (IRM) settings for the file.
A file can be IRM-protected based on the IRM settings for the file itself, based on the IRM settings for the
list which contains the file, or based on a rule. From greatest to least, IRM settings take precedence in the
following order: rule, list, then file.
"""
path = ResourcePath("EffectiveInformationRightsManagementSettings", self.resource_path)
return self.properties.get('EffectiveInformationRightsManagementSettings',
EffectiveInformationRightsManagementSettings(self.context, path))
@property
def information_rights_management_settings(self):
"""
Returns the Information Rights Management (IRM) settings for the file.
"""
return self.properties.get('InformationRightsManagementSettings',
InformationRightsManagementSettings(self.context,
ResourcePath(
"InformationRightsManagementSettings",
self.resource_path)))
@property
def listItemAllFields(self):
"""Gets a value that specifies the list item fields values for the list item corresponding to the file."""
return self.properties.setdefault('ListItemAllFields',
ListItem(self.context, ResourcePath("listItemAllFields", self.resource_path)))
@property
def versions(self):
"""Gets a value that returns a collection of file version objects that represent the versions of the file."""
return self.properties.get('Versions',
FileVersionCollection(self.context, ResourcePath("versions", self.resource_path)))
@property
def modified_by(self):
"""
Gets a value that returns the user who last modified the file.
"""
return self.properties.get("ModifiedBy", User(self.context, ResourcePath("ModifiedBy", self.resource_path)))
@property
def locked_by_user(self):
"""
Gets a value that returns the user that owns the current lock on the file.
"""
return self.properties.get("LockedByUser", User(self.context, ResourcePath("LockedByUser", self.resource_path)))
@property
def serverRelativeUrl(self):
"""Gets the relative URL of the file based on the URL for the server.
:rtype: str or None
"""
return self.properties.get("ServerRelativeUrl", None)
@property
def server_relative_path(self):
Gets the server-relative path of the file.
:rtype: SPResPath or None
"""
return self.properties.get("ServerRelativePath", SPResPath())
@property
def length(self):
"""Gets the file size.
:rtype: int or None
"""
return int(self.properties.get("Length", -1))
@property
def exists(self):
"""Specifies whether the file exists.
:rtype: bool or None
"""
return self.properties.get("Exists", None)
@property
def name(self):
"""Specifies the file name including the extension.
It MUST NOT be NULL. Its length MUST be equal to or less than 260.
:rtype: str or None
"""
return self.properties.get("Name", None)
@property
def list_id(self):
"""Gets the GUID that identifies the List containing the file.
:rtype: str or None
"""
return self.properties.get("ListId", None)
@property
def site_id(self):
"""Gets the GUID that identifies the site collection containing the file.
:rtype: str or None
"""
return self.properties.get("SiteId", None)
@property
def web_id(self):
"""Gets the GUID for the site containing the file.
:rtype: str or None
"""
return self.properties.get("WebId", None)
@property
def time_created(self):
"""Gets a value that specifies when the file was created.
:rtype: str or None
"""
return self.properties.get("TimeCreated", None)
@property
def time_last_modified(self):
"""Specifies when the file was last modified.
:rtype: str or None
"""
return self.properties.get("TimeLastModified", None)
@property
def minor_version(self):
"""
Gets a value that specifies the minor version of the file.
"""
return int(self.properties.get("MinorVersion", -1))
@property
def major_version(self):
"""
Gets a value that specifies the major version of the file.
:rtype: int or None
"""
return int(self.properties.get("MajorVersion", -1))
@property
def unique_id(self):
"""
Gets a value that specifies the file's unique identifier
:rtype: str or None
"""
return self.properties.get("UniqueId", None)
@property
def customized_page_status(self):
"""Specifies the customization status of the file.
:rtype: int or None
"""
return self.properties.get("CustomizedPageStatus", None)
@property
def parent_folder(self):
"""
:rtype: office365.sharepoint.folders.folder.Folder
"""
if self.parent_collection is None:
return None
return self.parent_collection.parent
def get_property(self, name, default_value=None):
if default_value is None:
property_mapping = {
"CheckedOutByUser": self.checked_out_by_user,
"VersionEvents": self.version_events,
"EffectiveInformationRightsManagementSettings": self.effective_information_rights_management_settings,
"InformationRightsManagementSettings": self.information_rights_management_settings,
"LockedByUser": self.locked_by_user,
"ModifiedBy": self.modified_by,
"ServerRelativePath": self.server_relative_path
}
default_value = property_mapping.get(name, None)
return super(File, self).get_property(name, default_value)
def set_property(self, name, value, persist_changes=True):
super(File, self).set_property(name, value, persist_changes)
# prioritize using UniqueId
if name == "UniqueId":
self._resource_path = self.context.web.get_file_by_id(value).resource_path
# fallback: create a new resource path
if self._resource_path is None:
if name == "ServerRelativeUrl":
self._resource_path = self.context.web.get_file_by_server_relative_url(value).resource_path
elif name == "ServerRelativePath":
self._resource_path = self.context.web.get_file_by_server_relative_path(value).resource_path
return self | PypiClean |
/DataTig-0.5.0.tar.gz/DataTig-0.5.0/datatig/sqlite.py | import json
import sqlite3
from contextlib import closing
from datatig.models.siteconfig import SiteConfigModel
from .exceptions import DuplicateRecordIdException
from .models.error import ErrorModel
from .models.record import RecordModel
from .models.record_error import RecordErrorModel
class DataStoreSQLite:
def __init__(self, site_config: SiteConfigModel, out_filename: str):
self._site_config: SiteConfigModel = site_config
self._out_filename: str = out_filename
self._connection = sqlite3.connect(out_filename)
self._connection.row_factory = sqlite3.Row
# Create tables
with closing(self._connection.cursor()) as cur:
cur.execute(
"""CREATE TABLE error (
filename TEXT,
message TEXT
)"""
)
cur.execute(
"""CREATE TABLE type (
id TEXT PRIMARY KEY,
fields TEXT
)"""
)
cur.execute(
"""CREATE TABLE type_field (
type_id TEXT ,
id TEXT,
key TEXT,
type TEXT,
title TEXT,
PRIMARY KEY(type_id, id),
FOREIGN KEY(type_id) REFERENCES type(id)
)"""
)
for type in site_config.get_types().values():
cur.execute(
"""INSERT INTO type (
id
) VALUES (?)""",
[type.get_id()],
)
cur.execute(
"""CREATE TABLE record_{type} (
id TEXT PRIMARY KEY,
data TEXT,
git_filename TEXT,
format TEXT
)""".format(
type=type.get_id()
),
[],
)
cur.execute(
"""CREATE TABLE record_error_{type} (
record_id TEXT,
message TEXT,
data_path TEXT,
schema_path TEXT,
generator TEXT,
FOREIGN KEY(record_id) REFERENCES record_{type}(id)
)""".format(
type=type.get_id()
),
[],
)
for type_field_id, type_field in type.get_fields().items():
cur.execute(
"""INSERT INTO type_field (
type_id , id, key, type, title
) VALUES (?, ?, ?, ?, ?)""",
[
type.get_id(),
type_field_id,
type_field.get_key(),
type_field.get_type(),
type_field.get_title(),
],
)
if type_field.get_type() in [
"url",
"string",
"list-strings",
]:
cur.execute(
"""ALTER TABLE record_"""
+ type.get_id()
+ """ ADD field_"""
+ type_field_id
+ """ TEXT """,
[],
)
elif type_field.get_type() in [
"datetime",
"date",
]:
cur.execute(
"""ALTER TABLE record_"""
+ type.get_id()
+ """ ADD field_"""
+ type_field_id
+ """ TEXT """,
[],
)
cur.execute(
"""ALTER TABLE record_"""
+ type.get_id()
+ """ ADD field_"""
+ type_field_id
+ """___timestamp INTEGER """,
[],
)
elif type_field.get_type() in ["boolean", "integer"]:
cur.execute(
"""ALTER TABLE record_"""
+ type.get_id()
+ """ ADD field_"""
+ type_field_id
+ """ INTEGER """,
[],
)
if type_field.get_type() in ["list-strings"]:
cur.execute(
"""CREATE TABLE record_{type}_field_{field} (
record_id TEXT,
value TEXT,
FOREIGN KEY(record_id) REFERENCES record_{type}(id)
)
""".format(
type=type.get_id(), field=type_field_id
),
[],
)
self._connection.commit()
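# Example of the generated schema (hypothetical site config): a type "dataset" with a string field
# "title" and a list-strings field "tags" produces the tables
# record_dataset(id, data, git_filename, format, field_title TEXT, field_tags TEXT),
# record_error_dataset(record_id, message, data_path, schema_path, generator) and
# record_dataset_field_tags(record_id, value).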
def store(self, record: RecordModel) -> None:
with closing(self._connection.cursor()) as cur:
# Check
cur.execute(
"SELECT * FROM record_" + record.get_type().get_id() + " WHERE id=?",
[record.get_id()],
)
data = cur.fetchone()
if data:
raise DuplicateRecordIdException(
"The id "
+ record.get_id()
+ " is duplicated in "
+ record.get_git_filename()
+ " and "
+ data["git_filename"]
)
# Store
insert_data = [
record.get_id(),
json.dumps(record.get_data(), default=str),
record.get_git_filename(),
record.get_format(),
]
cur.execute(
"""INSERT INTO record_"""
+ record.get_type().get_id()
+ """ (
id, data, git_filename, format
) VALUES (?, ?, ?, ?)""",
insert_data,
)
for field in record.get_type().get_fields().values():
value_object = record.get_field_value(field.get_id())
value = value_object.get_value()
if field.get_type() in [
"url",
"string",
] and isinstance(value, str):
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """ = ? WHERE id=?""",
[value, record.get_id()],
)
elif field.get_type() in [
"datetime",
"date",
] and isinstance(value, str):
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """ = ? WHERE id=?""",
[value, record.get_id()],
)
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """___timestamp = ? WHERE id=?""",
[value_object.get_value_timestamp(), record.get_id()],
)
if (
field.get_type() in ["list-strings"]
and isinstance(value, list)
and len(value) > 0
):
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """ = ? WHERE id=?""",
[", ".join([str(v) for v in value]), record.get_id()],
)
for v in value:
cur.execute(
"""INSERT INTO record_"""
+ record.get_type().get_id()
+ """_field_"""
+ field.get_id()
+ """ (record_id, value) VALUES (?, ?) """,
[record.get_id(), str(v)],
)
if field.get_type() == "boolean" and isinstance(value, bool):
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """ = ? WHERE id=?""",
[1 if value else 0, record.get_id()],
)
if field.get_type() == "integer" and isinstance(value, int):
cur.execute(
"""UPDATE record_"""
+ record.get_type().get_id()
+ """ SET field_"""
+ field.get_id()
+ """ = ? WHERE id=?""",
[value, record.get_id()],
)
self._connection.commit()
def store_json_schema_validation_errors(self, type_id, item_id, errors) -> None:
with closing(self._connection.cursor()) as cur:
for error in errors:
insert_data = [
item_id,
error["message"],
error["path_str"],
error["schema_path_str"],
"jsonschema",
]
cur.execute(
"""INSERT INTO record_error_"""
+ type_id
+ """ (
record_id, message, data_path, schema_path, generator
) VALUES (?, ?, ?, ?, ?)""",
insert_data,
)
self._connection.commit()
def get_all_record_errors_generator_in_type(self, type_id):
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT * FROM record_error_" + type_id, [])
for data in cur.fetchall():
m = RecordErrorModel()
m.load_from_database(data)
yield m
def get_ids_in_type(self, type_id: str) -> list:
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT id FROM record_" + type_id, [])
return [i["id"] for i in cur.fetchall()]
def get_ids_in_type_with_record_error(self, type_id) -> list:
with closing(self._connection.cursor()) as cur:
cur.execute(
"SELECT r.id FROM record_"
+ type_id
+ " AS r JOIN record_error_"
+ type_id
+ " AS re ON r.id = re.record_id GROUP BY r.id ",
[],
)
return [i["id"] for i in cur.fetchall()]
def get_item(self, type_id: str, item_id: str):
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT * FROM record_" + type_id + " WHERE id=?", [item_id])
data = cur.fetchone()
if data:
cur.execute(
"SELECT * FROM record_error_" + type_id + " WHERE record_id=?",
[item_id],
)
errors_data = cur.fetchall()
record = RecordModel(
type=self._site_config.get_type(type_id), id=item_id
)
record.load_from_database(
data,
errors_data=errors_data,
)
return record
def store_error(self, error) -> None:
with closing(self._connection.cursor()) as cur:
insert_data = [
error.get_filename(),
error.get_message(),
]
cur.execute(
"""INSERT INTO error (
filename, message
) VALUES (?, ?)""",
insert_data,
)
self._connection.commit()
def get_all_errors_generator(self):
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT * FROM error", [])
for data in cur.fetchall():
m = ErrorModel()
m.load_from_database(data)
yield m
def get_count_site_errors(self) -> int:
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT count(*) AS c FROM error", [])
return cur.fetchone()["c"]
def get_count_record_errors_for_type(self, type_id) -> int:
# must check type_id passed is valid, or this could be an SQL injection issue
if type_id not in self._site_config.get_types():
raise Exception("That type_id is not known!")
with closing(self._connection.cursor()) as cur:
cur.execute("SELECT count(*) AS c FROM record_error_" + type_id, [])
return cur.fetchone()["c"]
def get_count_record_errors(self) -> int:
count = 0
for type in self._site_config.get_types().values():
count += self.get_count_record_errors_for_type(type.get_id())
return count
def get_file_name(self) -> str:
return self._out_filename | PypiClean |
/FastGets-0.3.5.tar.gz/FastGets-0.3.5/fastgets/web/static/dist/plugins/save/plugin.js | (function () {
var defs = {}; // id -> {dependencies, definition, instance (possibly undefined)}
// Used when there is no 'main' module.
// The name is probably (hopefully) unique so minification removes for releases.
var register_3795 = function (id) {
var module = dem(id);
var fragments = id.split('.');
var target = Function('return this;')();
for (var i = 0; i < fragments.length - 1; ++i) {
if (target[fragments[i]] === undefined) { target[fragments[i]] = {}; }
target = target[fragments[i]];
}
target[fragments[fragments.length - 1]] = module;
};
var instantiate = function (id) {
var actual = defs[id];
var dependencies = actual.deps;
var definition = actual.defn;
var len = dependencies.length;
var instances = new Array(len);
for (var i = 0; i < len; ++i) { instances[i] = dem(dependencies[i]); }
var defResult = definition.apply(null, instances);
if (defResult === undefined) { throw 'module [' + id + '] returned undefined'; }
actual.instance = defResult;
};
var def = function (id, dependencies, definition) {
if (typeof id !== 'string') { throw 'module id must be a string'; } else if (dependencies === undefined) { throw 'no dependencies for ' + id; } else if (definition === undefined) { throw 'no definition function for ' + id; }
defs[id] = {
deps: dependencies,
defn: definition,
instance: undefined
};
};
var dem = function (id) {
var actual = defs[id];
if (actual === undefined) { throw 'module [' + id + '] was undefined'; } else if (actual.instance === undefined) { instantiate(id); }
return actual.instance;
};
var req = function (ids, callback) {
var len = ids.length;
var instances = new Array(len);
for (var i = 0; i < len; ++i) { instances[i] = dem(ids[i]); }
callback.apply(null, instances);
};
var ephox = {};
ephox.bolt = {
module: {
api: {
define: def,
require: req,
demand: dem
}
}
};
var define = def;
var require = req;
var demand = dem;
// this helps with minification when using a lot of global references
var defineGlobal = function (id, ref) {
define(id, [], function () { return ref; });
};
/* jsc
["tinymce.plugins.save.Plugin","tinymce.core.PluginManager","tinymce.plugins.save.api.Commands","tinymce.plugins.save.ui.Buttons","global!tinymce.util.Tools.resolve","tinymce.plugins.save.core.Actions","tinymce.plugins.save.api.Settings","tinymce.core.dom.DOMUtils","tinymce.core.util.Tools"]
jsc */
defineGlobal('global!tinymce.util.Tools.resolve', tinymce.util.Tools.resolve);
/**
* ResolveGlobal.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.core.PluginManager',
[
'global!tinymce.util.Tools.resolve'
],
function (resolve) {
return resolve('tinymce.PluginManager');
}
);
/**
* ResolveGlobal.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.core.dom.DOMUtils',
[
'global!tinymce.util.Tools.resolve'
],
function (resolve) {
return resolve('tinymce.dom.DOMUtils');
}
);
/**
* ResolveGlobal.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.core.util.Tools',
[
'global!tinymce.util.Tools.resolve'
],
function (resolve) {
return resolve('tinymce.util.Tools');
}
);
/**
* Settings.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.save.api.Settings',
[
],
function () {
var enableWhenDirty = function (editor) {
return editor.getParam('save_enablewhendirty', true);
};
var hasOnSaveCallback = function (editor) {
return !!editor.getParam('save_onsavecallback');
};
var hasOnCancelCallback = function (editor) {
return !!editor.getParam('save_oncancelcallback');
};
return {
enableWhenDirty: enableWhenDirty,
hasOnSaveCallback: hasOnSaveCallback,
hasOnCancelCallback: hasOnCancelCallback
};
}
);
/**
* Actions.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.save.core.Actions',
[
'tinymce.core.dom.DOMUtils',
'tinymce.core.util.Tools',
'tinymce.plugins.save.api.Settings'
],
function (DOMUtils, Tools, Settings) {
var displayErrorMessage = function (editor, message) {
editor.notificationManager.open({
text: editor.translate(message),
type: 'error'
});
};
var save = function (editor) {
var formObj;
formObj = DOMUtils.DOM.getParent(editor.id, 'form');
if (Settings.enableWhenDirty(editor) && !editor.isDirty()) {
return;
}
editor.save();
// Use callback instead
if (Settings.hasOnSaveCallback(editor)) {
editor.execCallback('save_onsavecallback', editor);
editor.nodeChanged();
return;
}
if (formObj) {
editor.setDirty(false);
if (!formObj.onsubmit || formObj.onsubmit()) {
if (typeof formObj.submit === 'function') {
formObj.submit();
} else {
displayErrorMessage(editor, 'Error: Form submit field collision.');
}
}
editor.nodeChanged();
} else {
displayErrorMessage(editor, 'Error: No form element found.');
}
};
var cancel = function (editor) {
var h = Tools.trim(editor.startContent);
// Use callback instead
if (Settings.hasOnCancelCallback(editor)) {
editor.execCallback('save_oncancelcallback', editor);
return;
}
editor.setContent(h);
editor.undoManager.clear();
editor.nodeChanged();
};
return {
save: save,
cancel: cancel
};
}
);
/**
* Commands.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.save.api.Commands',
[
'tinymce.plugins.save.core.Actions'
],
function (Actions) {
var register = function (editor) {
editor.addCommand('mceSave', function () {
Actions.save(editor);
});
editor.addCommand('mceCancel', function () {
Actions.cancel(editor);
});
};
return {
register: register
};
}
);
/**
* Buttons.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.save.ui.Buttons',
[
'tinymce.plugins.save.api.Settings'
],
function (Settings) {
var stateToggle = function (editor) {
return function (e) {
var ctrl = e.control;
editor.on('nodeChange dirty', function () {
ctrl.disabled(Settings.enableWhenDirty(editor) && !editor.isDirty());
});
};
};
var register = function (editor) {
editor.addButton('save', {
icon: 'save',
text: 'Save',
cmd: 'mceSave',
disabled: true,
onPostRender: stateToggle(editor)
});
editor.addButton('cancel', {
text: 'Cancel',
icon: false,
cmd: 'mceCancel',
disabled: true,
onPostRender: stateToggle(editor)
});
editor.addShortcut('Meta+S', '', 'mceSave');
};
return {
register: register
};
}
);
/**
* Plugin.js
*
* Released under LGPL License.
* Copyright (c) 1999-2017 Ephox Corp. All rights reserved
*
* License: http://www.tinymce.com/license
* Contributing: http://www.tinymce.com/contributing
*/
define(
'tinymce.plugins.save.Plugin',
[
'tinymce.core.PluginManager',
'tinymce.plugins.save.api.Commands',
'tinymce.plugins.save.ui.Buttons'
],
function (PluginManager, Commands, Buttons) {
PluginManager.add('save', function (editor) {
Buttons.register(editor);
Commands.register(editor);
});
return function () { };
}
);
dem('tinymce.plugins.save.Plugin')();
})(); | PypiClean |
/Fanery-0.2.5.tar.gz/Fanery-0.2.5/docs/index.rst | .. Fanery documentation master file, created by
sphinx-quickstart on Sun Aug 31 13:49:54 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Fanery's documentation!
==================================
Contents:
.. toctree::
:maxdepth: 2
fanery
intro
hardened-system
backend-web-farm
working-with-fanery
tutorial
security-protocol
storage
.. Indices and tables
.. ==================
.. * :ref:`genindex`
.. * :ref:`modindex`
.. * :ref:`search`
| PypiClean |
/CartiMorph_nnUNet-1.7.14.tar.gz/CartiMorph_nnUNet-1.7.14/CartiMorph_nnUNet/postprocessing/connected_components.py |
import ast
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from CartiMorph_nnUNet.configuration import default_num_threads
from CartiMorph_nnUNet.evaluation.evaluator import aggregate_scores
from scipy.ndimage import label
import SimpleITK as sitk
from CartiMorph_nnUNet.utilities.sitk_stuff import copy_geometry
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
def load_remove_save(input_file: str, output_file: str, for_which_classes: list,
minimum_valid_object_size: dict = None):
# Only objects larger than minimum_valid_object_size will be removed. Keys in minimum_valid_object_size must
# match entries in for_which_classes
img_in = sitk.ReadImage(input_file)
img_npy = sitk.GetArrayFromImage(img_in)
volume_per_voxel = float(np.prod(img_in.GetSpacing(), dtype=np.float64))
image, largest_removed, kept_size = remove_all_but_the_largest_connected_component(img_npy, for_which_classes,
volume_per_voxel,
minimum_valid_object_size)
# print(input_file, "kept:", kept_size)
img_out_itk = sitk.GetImageFromArray(image)
img_out_itk = copy_geometry(img_out_itk, img_in)
sitk.WriteImage(img_out_itk, output_file)
return largest_removed, kept_size
def remove_all_but_the_largest_connected_component(image: np.ndarray, for_which_classes: list, volume_per_voxel: float,
minimum_valid_object_size: dict = None):
"""
removes all but the largest connected component, individually for each class
:param image:
:param for_which_classes: can be None. Should be list of int. Can also be something like [(1, 2), 2, 4].
Here (1, 2) will be treated as a joint region, not individual classes (for example, in LiTS we can use (1, 2)
to treat all foreground classes together)
:param minimum_valid_object_size: Only objects larger than minimum_valid_object_size will be removed. Keys in
minimum_valid_object_size must match entries in for_which_classes
:return:
"""
if for_which_classes is None:
for_which_classes = np.unique(image)
for_which_classes = for_which_classes[for_which_classes > 0]
assert 0 not in for_which_classes, "cannot remove background"
largest_removed = {}
kept_size = {}
for c in for_which_classes:
if isinstance(c, (list, tuple)):
c = tuple(c) # otherwise it cant be used as key in the dict
mask = np.zeros_like(image, dtype=bool)
for cl in c:
mask[image == cl] = True
else:
mask = image == c
# get labelmap and number of objects
lmap, num_objects = label(mask.astype(int))
# collect object sizes
object_sizes = {}
for object_id in range(1, num_objects + 1):
object_sizes[object_id] = (lmap == object_id).sum() * volume_per_voxel
largest_removed[c] = None
kept_size[c] = None
if num_objects > 0:
# we always keep the largest object. We could also consider removing the largest object if it is smaller
# than minimum_valid_object_size in the future but we don't do that now.
maximum_size = max(object_sizes.values())
kept_size[c] = maximum_size
for object_id in range(1, num_objects + 1):
# we only remove objects that are not the largest
if object_sizes[object_id] != maximum_size:
# we only remove objects that are smaller than minimum_valid_object_size
remove = True
if minimum_valid_object_size is not None:
remove = object_sizes[object_id] < minimum_valid_object_size[c]
if remove:
image[(lmap == object_id) & mask] = 0
if largest_removed[c] is None:
largest_removed[c] = object_sizes[object_id]
else:
largest_removed[c] = max(largest_removed[c], object_sizes[object_id])
return image, largest_removed, kept_size
def load_postprocessing(json_file):
'''
loads the relevant part of the pkl file that is needed for applying postprocessing
:param pkl_file:
:return:
'''
a = load_json(json_file)
if 'min_valid_object_sizes' in a.keys():
min_valid_object_sizes = ast.literal_eval(a['min_valid_object_sizes'])
else:
min_valid_object_sizes = None
return a['for_which_classes'], min_valid_object_sizes
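# Typical usage sketch (paths are placeholders): load the postprocessing decided on the validation
# set and apply it to a folder of predicted niftis.
#   for_which_classes, min_valid_object_size = load_postprocessing("postprocessing.json")
#   apply_postprocessing_to_folder("predictions", "predictions_pp", for_which_classes, min_valid_object_size)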
def determine_postprocessing(base, gt_labels_folder, raw_subfolder_name="validation_raw",
temp_folder="temp",
final_subf_name="validation_final", processes=default_num_threads,
dice_threshold=0, debug=False,
advanced_postprocessing=False,
pp_filename="postprocessing.json"):
"""
:param base:
:param gt_labels_folder: subfolder of base with niftis of ground truth labels
:param raw_subfolder_name: subfolder of base with niftis of predicted (non-postprocessed) segmentations
:param temp_folder: used to store temporary data, will be deleted after we are done here unless debug=True
:param final_subf_name: final results will be stored here (subfolder of base)
:param processes:
:param dice_threshold: only apply postprocessing if the result is better than old_result + dice_threshold (can be used as eps)
:param debug: if True then the temporary files will not be deleted
:return:
"""
# let's see what classes are in the dataset
classes = [int(i) for i in load_json(join(base, raw_subfolder_name, "summary.json"))['results']['mean'].keys() if
int(i) != 0]
folder_all_classes_as_fg = join(base, temp_folder + "_allClasses")
folder_per_class = join(base, temp_folder + "_perClass")
if isdir(folder_all_classes_as_fg):
shutil.rmtree(folder_all_classes_as_fg)
if isdir(folder_per_class):
shutil.rmtree(folder_per_class)
# multiprocessing rules
p = Pool(processes)
assert isfile(join(base, raw_subfolder_name, "summary.json")), "join(base, raw_subfolder_name) does not " \
"contain a summary.json"
# these are all the files we will be dealing with
fnames = subfiles(join(base, raw_subfolder_name), suffix=".nii.gz", join=False)
# make output and temp dir
maybe_mkdir_p(folder_all_classes_as_fg)
maybe_mkdir_p(folder_per_class)
maybe_mkdir_p(join(base, final_subf_name))
pp_results = {}
pp_results['dc_per_class_raw'] = {}
pp_results['dc_per_class_pp_all'] = {} # dice scores after treating all foreground classes as one
pp_results['dc_per_class_pp_per_class'] = {} # dice scores after removing everything except larges cc
# independently for each class after we already did dc_per_class_pp_all
pp_results['for_which_classes'] = []
pp_results['min_valid_object_sizes'] = {}
validation_result_raw = load_json(join(base, raw_subfolder_name, "summary.json"))['results']
pp_results['num_samples'] = len(validation_result_raw['all'])
validation_result_raw = validation_result_raw['mean']
if advanced_postprocessing:
# first treat all foreground classes as one and remove all but the largest foreground connected component
results = []
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(folder_all_classes_as_fg, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,)),)))
results = [i.get() for i in results]
# aggregate max_size_removed and min_size_kept
max_size_removed = {}
min_size_kept = {}
for tmp in results:
mx_rem, min_kept = tmp[0]
for k in mx_rem:
if mx_rem[k] is not None:
if max_size_removed.get(k) is None:
max_size_removed[k] = mx_rem[k]
else:
max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
for k in min_kept:
if min_kept[k] is not None:
if min_size_kept.get(k) is None:
min_size_kept[k] = min_kept[k]
else:
min_size_kept[k] = min(min_size_kept[k], min_kept[k])
print("foreground vs background, smallest valid object size was", min_size_kept[tuple(classes)])
print("removing only objects smaller than that...")
else:
min_size_kept = None
# we need to rerun the step from above, now with the size constraint
pred_gt_tuples = []
results = []
# first treat all foreground classes as one and remove all but the largest foreground connected component
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(folder_all_classes_as_fg, f)
results.append(
p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,), min_size_kept),)))
pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(folder_all_classes_as_fg, "summary.json"),
json_author="Fabian", num_threads=processes)
# now we need to figure out if doing this improved the dice scores. We will implement that defensively insofar
# as we won't do this if a single class got worse as a result. We can change this in the future but right now I
# prefer to do it this way
validation_result_PP_test = load_json(join(folder_all_classes_as_fg, "summary.json"))['results']['mean']
for c in classes:
dc_raw = validation_result_raw[str(c)]['Dice']
dc_pp = validation_result_PP_test[str(c)]['Dice']
pp_results['dc_per_class_raw'][str(c)] = dc_raw
pp_results['dc_per_class_pp_all'][str(c)] = dc_pp
# true if new is better
do_fg_cc = False
comp = [pp_results['dc_per_class_pp_all'][str(cl)] > (pp_results['dc_per_class_raw'][str(cl)] + dice_threshold) for
cl in classes]
before = np.mean([pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
after = np.mean([pp_results['dc_per_class_pp_all'][str(cl)] for cl in classes])
print("Foreground vs background")
print("before:", before)
print("after: ", after)
if any(comp):
# at least one class improved - yay!
# now check if another got worse
# true if new is worse
any_worse = any(
[pp_results['dc_per_class_pp_all'][str(cl)] < pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
if not any_worse:
pp_results['for_which_classes'].append(classes)
if min_size_kept is not None:
pp_results['min_valid_object_sizes'].update(deepcopy(min_size_kept))
do_fg_cc = True
print("Removing all but the largest foreground region improved results!")
print('for_which_classes', classes)
print('min_valid_object_sizes', min_size_kept)
else:
# did not improve things - don't do it
pass
if len(classes) > 1:
# now depending on whether we do remove all but the largest foreground connected component we define the source dir
# for the next one to be the raw or the temp dir
if do_fg_cc:
source = folder_all_classes_as_fg
else:
source = join(base, raw_subfolder_name)
if advanced_postprocessing:
# now run this for each class separately
results = []
for f in fnames:
predicted_segmentation = join(source, f)
output_file = join(folder_per_class, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes),)))
results = [i.get() for i in results]
# aggregate max_size_removed and min_size_kept
max_size_removed = {}
min_size_kept = {}
for tmp in results:
mx_rem, min_kept = tmp[0]
for k in mx_rem:
if mx_rem[k] is not None:
if max_size_removed.get(k) is None:
max_size_removed[k] = mx_rem[k]
else:
max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
for k in min_kept:
if min_kept[k] is not None:
if min_size_kept.get(k) is None:
min_size_kept[k] = min_kept[k]
else:
min_size_kept[k] = min(min_size_kept[k], min_kept[k])
print("classes treated separately, smallest valid object sizes are")
print(min_size_kept)
print("removing only objects smaller than that")
else:
min_size_kept = None
# rerun with the size thresholds from above
pred_gt_tuples = []
results = []
for f in fnames:
predicted_segmentation = join(source, f)
output_file = join(folder_per_class, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes, min_size_kept),)))
pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(folder_per_class, "summary.json"),
json_author="Fabian", num_threads=processes)
if do_fg_cc:
old_res = deepcopy(validation_result_PP_test)
else:
old_res = validation_result_raw
# these are the new dice scores
validation_result_PP_test = load_json(join(folder_per_class, "summary.json"))['results']['mean']
for c in classes:
dc_raw = old_res[str(c)]['Dice']
dc_pp = validation_result_PP_test[str(c)]['Dice']
pp_results['dc_per_class_pp_per_class'][str(c)] = dc_pp
print(c)
print("before:", dc_raw)
print("after: ", dc_pp)
if dc_pp > (dc_raw + dice_threshold):
pp_results['for_which_classes'].append(int(c))
if min_size_kept is not None:
pp_results['min_valid_object_sizes'].update({c: min_size_kept[c]})
print("Removing all but the largest region for class %d improved results!" % c)
print('min_valid_object_sizes', min_size_kept)
else:
print("Only one class present, no need to do each class separately as this is covered in fg vs bg")
if not advanced_postprocessing:
pp_results['min_valid_object_sizes'] = None
print("done")
print("for which classes:")
print(pp_results['for_which_classes'])
print("min_object_sizes")
print(pp_results['min_valid_object_sizes'])
pp_results['validation_raw'] = raw_subfolder_name
pp_results['validation_final'] = final_subf_name
# now that we have a proper for_which_classes, apply that
pred_gt_tuples = []
results = []
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(base, final_subf_name, f)
results.append(p.starmap_async(load_remove_save, (
(predicted_segmentation, output_file, pp_results['for_which_classes'],
pp_results['min_valid_object_sizes']),)))
pred_gt_tuples.append([output_file,
join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(base, final_subf_name, "summary.json"),
json_author="Fabian", num_threads=processes)
pp_results['min_valid_object_sizes'] = str(pp_results['min_valid_object_sizes'])
save_json(pp_results, join(base, pp_filename))
# delete temp
if not debug:
shutil.rmtree(folder_per_class)
shutil.rmtree(folder_all_classes_as_fg)
p.close()
p.join()
print("done")
def apply_postprocessing_to_folder(input_folder: str, output_folder: str, for_which_classes: list,
min_valid_object_size:dict=None, num_processes=8):
"""
applies removing of all but the largest connected component to all niftis in a folder
:param min_valid_object_size:
:param min_valid_object_size:
:param input_folder:
:param output_folder:
:param for_which_classes:
:param num_processes:
:return:
"""
maybe_mkdir_p(output_folder)
p = Pool(num_processes)
nii_files = subfiles(input_folder, suffix=".nii.gz", join=False)
input_files = [join(input_folder, i) for i in nii_files]
out_files = [join(output_folder, i) for i in nii_files]
results = p.starmap_async(load_remove_save, zip(input_files, out_files, [for_which_classes] * len(input_files),
[min_valid_object_size] * len(input_files)))
res = results.get()
p.close()
p.join()
if __name__ == "__main__":
input_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor"
output_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor_postprocessed"
for_which_classes = [(1, 2), ]
apply_postprocessing_to_folder(input_folder, output_folder, for_which_classes) | PypiClean |
/3d-converter-0.9.0.tar.gz/3d-converter-0.9.0/models_converter/formats/gltf/parser.py | import json
from models_converter.formats import universal
from models_converter.formats.gltf.chunk import GlTFChunk
from models_converter.formats.gltf.gltf import GlTF
from models_converter.formats.gltf.node import Node
from models_converter.formats.universal import Scene, Geometry
from models_converter.interfaces import ParserInterface
from models_converter.utilities.reader import Reader
class Parser(ParserInterface):
def __init__(self, data: bytes):
self.file_data = data
self.scene = Scene()
self.version = None
self.length = None
self.json_chunk = None
self.bin_chunk = None
self.buffer_views = []
self.accessors = []
self.buffers = []
self.gltf = GlTF()
def parse_bin(self):
reader = Reader(self.bin_chunk.data, 'little')
for buffer in self.gltf.buffers:
parsed_buffer = reader.read(buffer.byte_length)
self.buffers.append(parsed_buffer)
for buffer_view in self.gltf.buffer_views:
reader.__init__(self.buffers[buffer_view.buffer], 'little')
reader.read(buffer_view.byte_offset)
length = buffer_view.byte_length
data = reader.read(length)
self.buffer_views.append(data)
for accessor in self.gltf.accessors:
reader.__init__(self.buffer_views[accessor.buffer_view], 'little')
reader.read(accessor.byte_offset)
types = {
5120: (reader.readByte, 1),
5121: (reader.readUByte, 1),
5122: (reader.readShort, 2),
5123: (reader.readUShort, 2),
5125: (reader.readUInt32, 4),
5126: (reader.readFloat, 4)
}
if accessor.normalized:
types = {
5120: (lambda: max(reader.readByte() / 127, -1.0), 1),
5121: (lambda: reader.readUByte() / 255, 1),
5122: (lambda: max(reader.readShort() / 32767, -1.0), 2),
5123: (lambda: reader.readUShort() / 65535, 2),
5125: (reader.readUInt32, 4),
5126: (reader.readFloat, 4)
}
items_count = {
'SCALAR': 1,
'VEC2': 2,
'VEC3': 3,
'VEC4': 4,
'MAT2': 4,
'MAT3': 9,
'MAT4': 16
}
components_count = items_count[accessor.type]
read_type, bytes_per_element = types[accessor.component_type]
default_stride = bytes_per_element * components_count
stride = self.gltf.buffer_views[accessor.buffer_view].byte_stride or default_stride
elements_per_stride = stride // bytes_per_element
elements_count = accessor.count * elements_per_stride
temp_list = []
for i in range(elements_count):
temp_list.append(read_type())
self.accessors.append([
temp_list[i:i + components_count]
for i in range(0, elements_count, elements_per_stride)
])
def parse(self):
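        # GLB container layout (as read below): a 12-byte header -- the magic bytes
        # b'glTF', a uint32 version and a uint32 total length -- followed by two
        # chunks, each prefixed with a uint32 byte length and a 4-byte type tag
        # (the JSON chunk first, then the binary buffer chunk).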
reader = Reader(self.file_data, 'little')
magic = reader.read(4)
if magic != b'glTF':
            raise TypeError('Wrong file magic! Expected "676c5446" (glTF), but got ' + magic.hex())
self.version = reader.readUInt32()
self.length = reader.readUInt32()
self.json_chunk = GlTFChunk()
self.bin_chunk = GlTFChunk()
self.json_chunk.chunk_length = reader.readUInt32()
self.json_chunk.chunk_name = reader.read(4)
self.json_chunk.data = reader.read(self.json_chunk.chunk_length)
self.bin_chunk.chunk_length = reader.readUInt32()
self.bin_chunk.chunk_name = reader.read(4)
self.bin_chunk.data = reader.read(self.bin_chunk.chunk_length)
self.gltf.from_dict(json.loads(self.json_chunk.data))
self.parse_bin()
scene_id = self.gltf.scene
scene = self.gltf.scenes[scene_id]
for node_id in scene.nodes:
node = self.gltf.nodes[node_id]
self.parse_node(node)
# TODO: animations
# for animation in self.gltf.animations:
# for channel in animation.channels:
# sampler: Animation.AnimationSampler = animation.samplers[channel.sampler]
# input_accessor = self.accessors[sampler.input]
def parse_node(self, gltf_node: Node, parent: str = None):
node_name = gltf_node.name.split('|')[-1]
node = universal.Node(
name=node_name,
parent=parent
)
instance = None
if gltf_node.mesh is not None and type(self.gltf.meshes) is list:
mesh = self.gltf.meshes[gltf_node.mesh]
mesh_name = mesh.name.split('|')
group = 'GEO'
name = mesh_name[0]
if len(mesh_name) > 1:
group = mesh_name[0]
name = mesh_name[1]
geometry = Geometry(name=name, group=group)
if gltf_node.skin is not None:
instance = universal.Node.Instance(name=geometry.get_name(), instance_type='CONT')
geometry.set_controller_bind_matrix([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1])
skin_id = gltf_node.skin
skin = self.gltf.skins[skin_id]
bind_matrices = self.accessors[skin.inverse_bind_matrices]
bind_matrices = [[m[0::4], m[1::4], m[2::4], m[3::4]] for m in bind_matrices]
for matrix_index in range(len(bind_matrices)):
m = bind_matrices[matrix_index]
matrix = m[0]
matrix.extend(m[1])
matrix.extend(m[2])
matrix.extend(m[3])
bind_matrices[matrix_index] = matrix
for joint in skin.joints:
                    joint_index = skin.joints.index(joint)
joint_node = self.gltf.nodes[joint]
                    joint_name = joint_node.name
matrix = bind_matrices[joint_index]
geometry.add_joint(Geometry.Joint(joint_name, matrix))
else:
instance = universal.Node.Instance(name=geometry.get_name(), instance_type='GEOM')
position_offset = 0
normal_offset = 0
texcoord_offset = 0
for primitive in mesh.primitives:
if primitive.to_dict() != {}:
primitive_index = mesh.primitives.index(primitive)
attributes = primitive.attributes
material_id = primitive.material
polygons_id = primitive.indices
triangles = self.accessors[polygons_id]
material = self.gltf.materials[material_id]
material_name = material.extensions['SC_shader']['name']
instance.add_bind(material_name, material_name)
position = []
normal = []
texcoord = []
joint_ids = 0
for attribute_id in attributes:
attribute = attributes[attribute_id]
points = None
if attribute_id == 'POSITION':
position = self.accessors[attribute]
points = list(map(
lambda point: (
point[0] * gltf_node.scale.x + gltf_node.translation.x,
point[1] * gltf_node.scale.y + gltf_node.translation.y,
point[2] * gltf_node.scale.z + gltf_node.translation.z
),
position
))
elif attribute_id == 'NORMAL':
normal = self.accessors[attribute]
points = list(map(
lambda point: (
point[0] * gltf_node.scale.x,
point[1] * gltf_node.scale.y,
point[2] * gltf_node.scale.z
),
normal
))
elif attribute_id.startswith('TEXCOORD'):
texcoord = self.accessors[attribute]
texcoord = [[item[0], 1 - item[1]] for item in texcoord]
attribute_id = 'TEXCOORD'
points = texcoord
elif attribute_id.startswith('JOINTS'):
joint_ids = self.accessors[attribute]
elif attribute_id.startswith('WEIGHTS'):
weights = self.accessors[attribute]
for x in range(len(joint_ids)):
geometry.add_weight(Geometry.Weight(joint_ids[x][0], weights[x][0] / 255))
geometry.add_weight(Geometry.Weight(joint_ids[x][1], weights[x][1] / 255))
geometry.add_weight(Geometry.Weight(joint_ids[x][2], weights[x][2] / 255))
geometry.add_weight(Geometry.Weight(joint_ids[x][3], weights[x][3] / 255))
if points:
geometry.add_vertex(Geometry.Vertex(
name=f'{attribute_id.lower()}_{primitive_index}',
vertex_type=attribute_id,
vertex_index=len(geometry.get_vertices()),
vertex_scale=1,
points=points
))
triangles = [
[
[
point[0] + normal_offset,
point[0] + position_offset,
point[0] + texcoord_offset
] for point in triangles[x:x + 3]
] for x in range(0, len(triangles), 3)
]
geometry.add_material(Geometry.Material(material_name, triangles))
for attribute_id in attributes:
if attribute_id == 'POSITION':
position_offset += len(position)
elif attribute_id == 'NORMAL':
normal_offset += len(normal)
elif attribute_id.startswith('TEXCOORD'):
texcoord_offset += len(texcoord)
self.scene.add_geometry(geometry)
if instance is not None:
node.add_instance(instance)
self.scene.add_node(node)
node.add_frame(universal.Node.Frame(
0,
gltf_node.translation,
gltf_node.scale,
gltf_node.rotation
))
if gltf_node.children:
for child_id in gltf_node.children:
child = self.gltf.nodes[child_id]
                self.parse_node(child, node_name)
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_en-150.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"am",
"pm"
],
"DAY": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"MONTH": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
],
"SHORTDAY": [
"Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat"
],
"SHORTMONTH": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMM y",
"medium": "dd MMM y HH:mm:ss",
"mediumDate": "dd MMM y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/yy HH:mm",
"shortDate": "dd/MM/yy",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "$",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "en-150",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/MufSim-1.2.2.tar.gz/MufSim-1.2.2/mufsim/insts/directives.py
import mufsim.utils as util
import mufsim.gamedb as db
from mufsim.logger import log
from mufsim.errors import MufCompileError, ReloadAsMuvException
from mufsim.insts.base import Instruction, instr
@instr("$abort")
class InstDollarAbort(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
raise MufCompileError(val)
@instr("$echo")
class InstDollarEcho(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
log("$ECHO: %s" % val)
return (False, src)
@instr("$pragma")
class InstDollarPragma(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
return (False, src)
@instr("$language")
class InstDollarLanguage(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
if val.strip().lower() == '"muv"':
raise ReloadAsMuvException()
return (False, src)
@instr("$author")
class InstDollarAuthor(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
comp = cmplr.compiled
db.getobj(comp.program).setprop("_author", val)
return (False, src)
@instr("$note")
class InstDollarNote(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_to_eol(src)
comp = cmplr.compiled
db.getobj(comp.program).setprop("_note", val)
return (False, src)
@instr("$version")
class InstDollarVersion(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_word(src)
comp = cmplr.compiled
db.getobj(comp.program).setprop("_version", val)
return (False, src)
@instr("$lib-version")
class InstDollarLibVersion(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_word(src)
comp = cmplr.compiled
db.getobj(comp.program).setprop("_lib-version", val)
return (False, src)
@instr("$def")
class InstDollarDef(Instruction):
def compile(self, cmplr, code, src):
nam, src = cmplr.get_word(src)
val, src = cmplr.get_to_eol(src)
cmplr.defines[nam] = val
return (False, src)
@instr("$define")
class InstDollarDefine(Instruction):
def compile(self, cmplr, code, src):
nam, src = cmplr.get_word(src)
if "$enddef" not in src:
raise MufCompileError("Incomplete $define for %s" % nam)
val, src = src.split("$enddef", 1)
cmplr.defines[nam] = val
return (False, src)
@instr("$undef")
class InstDollarUnDef(Instruction):
def compile(self, cmplr, code, src):
nam, src = cmplr.get_word(src)
if nam in cmplr.defines:
del cmplr.defines[nam]
return (False, src)
@instr("$include")
class InstDollarInclude(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
targ, src = cmplr.get_word(src)
if targ == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, targ)
cmplr.include_defs_from(obj)
return (False, src)
@instr("$pubdef")
class InstDollarPubDef(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
nam, src = cmplr.get_word(src)
val, src = cmplr.get_to_eol(src)
if nam == ":":
db.getobj(comp.program).delprop("_defs")
elif not val.strip():
db.getobj(comp.program).delprop("_defs/%s" % nam)
else:
if nam[0] == '\\':
nam = nam[1:]
if db.getobj(comp.program).getprop("_defs/%s" % nam):
return (False, src)
db.getobj(comp.program).setprop("_defs/%s" % nam, val)
return (False, src)
@instr("$libdef")
class InstDollarLibDef(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
nam, src = cmplr.get_word(src)
if nam.startswith('\\'):
nam = nam[1:]
if db.getobj(comp.program).getprop("_defs/%s" % nam):
return (False, src)
prog = db.getobj(comp.program)
val = "#%d %s call" % (prog.dbref, util.escape_str(nam))
prog.setprop("_defs/%s" % nam, val)
return (False, src)
@instr("$cleardefs")
class InstDollarClearDefs(Instruction):
def compile(self, cmplr, code, src):
val, src = cmplr.get_word(src)
cmplr.defines = dict(cmplr.builtin_defines)
if val.strip().upper() != "ALL":
cmplr.include_defs_from(0, suppress=True)
return (False, src)
@instr("$ifdef")
class InstDollarIfDef(Instruction):
def compile(self, cmplr, code, src):
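        # Condition forms handled below: a bare name ("$ifdef FOO"), or
        # name=value / name>value / name<value, which compare the stored
        # $define value for that name against the given literal.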
cond, src = cmplr.get_word(src, expand=False)
istrue = True
if '=' in cond:
nam, val = cond.split('=', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] == val
elif '>' in cond:
nam, val = cond.split('>', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] > val
elif '<' in cond:
nam, val = cond.split('<', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] < val
else:
istrue = cond in cmplr.defines
if not istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifndef")
class InstDollarIfNDef(Instruction):
def compile(self, cmplr, code, src):
cond, src = cmplr.get_word(src, expand=False)
istrue = True
if '=' in cond:
nam, val = cond.split('=', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] == val
elif '>' in cond:
nam, val = cond.split('>', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] > val
elif '<' in cond:
nam, val = cond.split('<', 1)
istrue = nam in cmplr.defines and cmplr.defines[nam] < val
else:
istrue = cond in cmplr.defines
if istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifver")
class InstDollarIfVer(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
ver, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = True
if not db.validobj(obj):
istrue = False
else:
val = db.getobj(obj).getprop("_version")
if not val:
istrue = False
else:
istrue = val >= ver
if not istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifnver")
class InstDollarIfNVer(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
ver, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = True
if not db.validobj(obj):
istrue = False
else:
val = db.getobj(obj).getprop("_version")
if not val:
istrue = False
else:
istrue = val >= ver
if istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$iflibver")
class InstDollarIfLibVer(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
ver, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = True
if not db.validobj(obj):
istrue = False
else:
val = db.getobj(obj).getprop("_lib-version")
if not val:
istrue = False
else:
istrue = val >= ver
if not istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifnlibver")
class InstDollarIfNLibVer(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
ver, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = True
if not db.validobj(obj):
istrue = False
else:
val = db.getobj(obj).getprop("_lib-version")
if not val:
istrue = False
else:
istrue = val >= ver
if istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$iflib")
class InstDollarIfLib(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = db.validobj(obj) and db.getobj(obj).objtype == "program"
if not istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifnlib")
class InstDollarIfNLib(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
istrue = db.validobj(obj) and db.getobj(obj).objtype == "program"
if istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifcancall")
class InstDollarIfCanCall(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
pub, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
obj = db.getobj(obj)
istrue = (
obj.objtype == "program" and
obj.compiled and
obj.compiled.publics and
pub in obj.compiled.publics
)
if not istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$ifncancall")
class InstDollarIfNCanCall(Instruction):
def compile(self, cmplr, code, src):
comp = cmplr.compiled
obj, src = cmplr.get_word(src)
pub, src = cmplr.get_word(src)
if obj == "this":
obj = comp.program
else:
who = db.getobj(comp.program).owner
obj = db.match_from(who, obj)
obj = db.getobj(obj)
istrue = (
obj.objtype == "program" and
obj.compiled and
obj.compiled.publics and
pub in obj.compiled.publics
)
if istrue:
src = cmplr.skip_directive_if_block(src)
return (False, src)
@instr("$else")
class InstDollarElse(Instruction):
def compile(self, cmplr, code, src):
level = 0
while True:
if not src:
raise MufCompileError("Incomplete $else directive block.")
word, src = cmplr.get_word(src, expand=False)
if word.startswith("$if"):
cond, src = cmplr.get_word(src, expand=False)
level += 1
elif word == "$endif":
if not level:
break
level -= 1
elif word == "$else":
if not level:
raise MufCompileError("Multiple $else clauses.")
return (False, src)
@instr("$endif")
class InstDollarEndif(Instruction):
def compile(self, cmplr, code, src):
return (False, src)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
/MagPy_TMS-1.4.tar.gz/MagPy_TMS-1.4/magpy/magstim.py
from __future__ import division
import serial
from sys import version_info, platform
from os.path import realpath, join, dirname
from os import getcwd
from queue import Empty
from time import sleep
from multiprocessing import Queue, Process
from functools import partial
from yaml import safe_load
from ast import literal_eval
# Switch timer based on python version and platform
if version_info >= (3,3):
# In python 3.3+
from time import perf_counter
defaultTimer = perf_counter
else:
if platform == 'win32':
# On Windows, use time.clock
from time import clock
defaultTimer = clock
else:
# On other platforms use time.time
from time import time
defaultTimer = time
# Calculate checksum for command
if version_info >= (3,):
def calcCRC(command):
"""Return the CRC checksum for the command string."""
# Convert command string to sum of ASCII/byte values
commandSum = sum(command)
# Convert command sum to binary, then invert and return 8-bit character value
return bytearray(chr(~commandSum & 0xff),encoding='latin_1')
else:
def calcCRC(command):
"""Return the CRC checksum for the command string."""
# Convert command string to sum of ASCII/byte values
commandSum = sum(command)
# Convert command sum to binary, then invert and return 8-bit character value
return chr(~commandSum & 0xff)
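# Worked example (for illustration): for the command b'Q@' the byte sum is
# ord('Q') + ord('@') = 81 + 64 = 145, and ~145 & 0xff = 110, which is ord('n').
# That is why the standard "enable remote control" packet used later in this
# module is b'Q@n' -- the command b'Q@' followed by its CRC byte b'n'.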
class MagstimError(Exception):
pass
class serialPortController(Process):
"""
The class creates a Python process which has direct control of the serial port. Commands for relaying via the serial port are received from separate Python processes via Queues.
N.B. To start the process you must call start() from the parent Python process.
Args:
serialWriteQueue (multiprocessing.Queue): a Queue for receiving commands to be written to the Magstim unit via the serial port
serialReadQueue (multiprocessing.Queue): a Queue for returning automated replies from the Magstim unit when requested
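
    Example of the write-queue message format (an illustrative sketch, inferred from run() below):
        serialWriteQueue.put((b'Q@n', 'instr', 3))   # send a command and parse the 3-byte reply as an instrument status
        serialWriteQueue.put((1, None, 0))           # raise the RTS pin for a quick fire
        serialWriteQueue.put((-1, None, 0))          # lower the RTS pin again
        serialWriteQueue.put((None, None, None))     # close the port and stop the process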
"""
# Error codes
SERIAL_WRITE_ERR = (1, 'SERIAL_WRITE_ERR: Could not send the command.')
SERIAL_READ_ERR = (2, 'SERIAL_READ_ERR: Could not read the magstim response.')
def __init__(self, serialConnection, serialWriteQueue, serialReadQueue, connectionCommand, debugSerialConnection=None):
Process.__init__(self)
self._serialWriteQueue = serialWriteQueue
self._serialReadQueue = serialReadQueue
self._address = serialConnection
self._maintainCommunicationPaused = True
self._connectionCommand = connectionCommand
self._debugAddress = debugSerialConnection
def run(self):
"""
Continuously monitor the serialWriteQueue for commands from other Python processes to be sent to the Magstim.
When requested, will return the automated reply from the Magstim unit to the calling process via the serialReadQueue.
N.B. This should be called via start() from the parent Python process.
"""
# N.B. most of these settings are actually the default in PySerial, but just being careful.
self._port = serial.Serial(port=self._address,
baudrate=9600,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
xonxoff=False)
# Make sure the RTS pin is set to off
self._port.setRTS(False)
# Set up version compatibility
if int(serial.VERSION.split('.')[0]) >= 3:
self._port.write_timeout = 0.3
self._port.portFlush = self._port.reset_input_buffer
self._port.anyWaiting = lambda:self._port.in_waiting
else:
self._port.writeTimeout = 0.3
self._port.portFlush = self._port.flushInput
self._port.anyWaiting = self._port.inWaiting
if self._debugAddress is not None:
self._debugPort = serial.Serial(port=self._debugAddress,
baudrate=9600,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
xonxoff=False)
if int(serial.VERSION.split('.')[0]) >= 3:
self._debugPort.write_timeout = 0.3
self._debugPort.portFlush = self._debugPort.reset_input_buffer
self._debugPort.anyWaiting = lambda:self._port.in_waiting
else:
self._debugPort.writeTimeout = 0.3
self._debugPort.portFlush = self._debugPort.flushInput
self._debugPort.anyWaiting = self._debugPort.inWaiting
        # This sends an "enable remote control" command to the magstim every 500 ms (if armed) or 5000 ms (if disarmed); it only runs once remote control has been established
pokeLatency = 5
# Set time of last command
lastCommand = defaultTimer()
# This continually monitors the serialWriteQueue for write requests
while True:
timeout = pokeLatency - (defaultTimer() - lastCommand)
try:
message, reply, readBytes = self._serialWriteQueue.get(timeout=None if self._maintainCommunicationPaused else timeout)
# If Empty raised, there was no command sent in the required time so send the standard connection command instead
except Empty:
message, reply, readBytes = self._connectionCommand
try:
# If the first part of the message is None this signals the process to close the port and stop
if message is None:
break
# If the first part of the message is a 1 this signals the process to trigger a quick fire using the RTS pin
elif message == 1:
self._port.setRTS(True)
# If the first part of the message is a -1 this signals the process to reset the RTS pin
elif message == -1:
self._port.setRTS(False)
# If the first part of the message is a 0 this signals the process to reset the regular polling of the Magstim
elif message == 0:
lastCommand = defaultTimer()
# Otherwise, the message is a command string
else:
# There shouldn't be any rubbish in the input buffer, but check and clear it just in case
if self._port.anyWaiting():
self._port.portFlush()
try:
# Try writing to the port
self._port.write(message)
# Set time of last command
lastCommand = defaultTimer()
# Mirror the message to the debug port (if defined); prepend with 1 to signal it as outgoing
if self._debugAddress is not None:
self._debugPort.write(b'\x01' + message)
# Read response (this gets a little confusing, as I don't want to rely on timeout to know if there's an error)
try:
# Read the first byte
response = bytearray(self._port.read(1))
                            # If the first returned byte is an 'N', we need to read the version number one byte at a time to catch the string terminator.
if response == b'N':
while response[-1] > 0:
response += self._port.read(1)
# After the end of the version number, read one more byte to grab the CRC
response += self._port.read(1)
# If the first byte is not '?', then the message was understood so carry on reading in the response (if it was a '?', then this will be the only returned byte).
elif response != b'?':
# Read the second byte
response += self._port.read(1)
# If the second returned byte is a '?' or 'S', then the data value supplied either wasn't acceptable ('?') or the command conflicted with the current settings ('S'),
# In these cases, just grab the CRC
if response[-1] in {ord(b'?'), ord(b'S')}:
response += self._port.read(1)
# Otherwise, everything is ok so carry on reading the rest of the message
else:
response += self._port.read(readBytes - 2)
                            # If we are enabling/disabling remote control, update whether we keep maintaining the connection accordingly
if response[0] == ord(b'Q'):
self._maintainCommunicationPaused = False
elif response[0] == ord(b'R'):
self._maintainCommunicationPaused = True
# Otherwise, if we're arming or disarming, then update the poke latency as appropriate
elif response[0] == ord(b'E'):
                                # Check the original message to see whether we were arming or disarming
if message[1] == ord(b'B'):
pokeLatency = 0.5
elif message[1] == ord(b'A'):
pokeLatency = 5
# Return the reply if we want it
if reply:
self._serialReadQueue.put([0, response])
# Mirror the reply to the debug port (if defined); prepend with 2 to signal it as incoming
if self._debugAddress is not None:
self._debugPort.write(b'\x02' + response)
except Exception: #serial.SerialException:
self._serialReadQueue.put(serialPortController.SERIAL_READ_ERR)
except Exception: #serial.SerialException:
self._serialReadQueue.put(serialPortController.SERIAL_WRITE_ERR)
except IOError:
break
        # If we get here, it's time to shut down the serial port controller
self._port.close()
return
class Magstim(object):
"""
The base Magstim class. This is used for controlling 200^2 Magstim units, and acts as a parent class for the BiStim^2 and Rapid^2 sub-classes.
It also creates two additional Python processes; one for the purposes of directly controlling the serial port and another for maintaining constant contact with the Magstim.
N.B. This class can effect limited control over BiStim^2 and Rapid^2 units, however some functionality will not be able to be accessed and return values (including confirmation of commands) may be invalid.
To begin sending commands to the Magstim, and start the additional Python processes, you must first call connect().
Args:
serialConnection (str): The address of the serial port. On Windows this is typically 'COM1' or similar. To create a virtual magstim, set the address to 'virtual'
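
    Example (an illustrative sketch; the serial port name is an assumption):
        myMagstim = Magstim(serialConnection='COM1')
        myMagstim.connect()
        errorCode, parameterInfo = myMagstim.getParameters()
        myMagstim.arm(delay=True)
        if myMagstim.isReadyToFire():
            myMagstim.fire()
        myMagstim.disconnect()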
"""
# Hardware error codes (for all types of stimulators)
INVALID_COMMAND_ERR = (3, 'INVALID_COMMAND_ERR: Invalid command sent.')
INVALID_DATA_ERR = (4, 'INVALID_DATA_ERR: Invalid data provided.')
COMMAND_CONFLICT_ERR = (5, 'COMMAND_CONFLICT_ERR: Command conflicts with current system configuration.')
INVALID_CONFIRMATION_ERR = (6, 'INVALID_CONFIRMATION_ERR: Unexpected command confirmation received.')
CRC_MISMATCH_ERR = (7, 'CRC_MISMATCH_ERR: Message contents and CRC value do not match.')
NO_REMOTE_CONTROL_ERR = (8, 'NO_REMOTE_CONTROL_ERR: You have not established control of the Magstim unit.')
PARAMETER_ACQUISTION_ERR = (9, 'PARAMETER_ACQUISTION_ERR: Could not obtain prior parameter settings.')
PARAMETER_UPDATE_ERR = (10, 'PARAMETER_UPDATE_ERR: Could not update secondary parameter to accommodate primary parameter change.')
PARAMETER_FLOAT_ERR = (11, 'PARAMETER_FLOAT_ERR: A float value is not allowed for this parameter.')
PARAMETER_PRECISION_ERR = (12, 'PARAMETER_PRECISION_ERR: Only one decimal placed allowed for this parameter.')
PARAMETER_RANGE_ERR = (13, 'PARAMETER_RANGE_ERR: Parameter value is outside the allowed range.')
GET_SYSTEM_STATUS_ERR = (14, 'GET_SYSTEM_STATUS_ERR: Cannot call getSystemStatus() until software version has been established.')
SYSTEM_STATUS_VERSION_ERR = (15, 'SYSTEM_STATUS_VERSION_ERR: Method getSystemStatus() is not compatible with your software version.')
SEQUENCE_VALIDATION_ERR = (16, 'SEQUENCE_VALIDATION_ERR: You must call validateSequence() before you can run a rTMS train.')
MIN_WAIT_TIME_ERR = (17, 'MIN_WAIT_TIME_ERR: Minimum wait time between trains violated. Call isReadyToFire() to check.')
MAX_ON_TIME_ERR = (18, 'MAX_ON_TIME_ERR: Maximum on time exceeded for current train.')
@staticmethod
def formatMagstimResponse(response):
"""Formats already parsed responses from the Magstim unit into strings."""
outString = ''
if 'instr' in response.keys():
outString += ('Instrument Status:\n' +
' > Standby: ' + str(response['instr']['standby']) + '\n' +
' > Armed: ' + str(response['instr']['armed']) + '\n' +
' > Ready: ' + str(response['instr']['ready']) + '\n' +
' > Coil Present: ' + str(response['instr']['coilPresent']) + '\n' +
' > Replace Coil: ' + str(response['instr']['replaceCoil']) + '\n' +
' > Error Present: ' + str(response['instr']['errorPresent']) + '\n' +
' > Error Type: ' + str(response['instr']['errorType']) + '\n' +
' > Remote Status: ' + str(response['instr']['remoteStatus']) + '\n\n')
if 'extInstr' in response.keys():
outString += ('Extended Instrument Status:\n' +
' > Plus 1 Module Detected: ' + str(response['extInstr']['plus1ModuleDetected']) + '\n' +
' > Special Trigger Mode Active: ' + str(response['extInstr']['specialTriggerModeActive']) + '\n' +
' > Charge Delay Set: ' + str(response['extInstr']['chargeDelaySet']) + '\n\n')
if 'rapid' in response.keys():
outString += ('Rapid Status:\n' +
' > Enhanced Power Mode: ' + str(response['rapid']['enhancedPowerMode']) + '\n' +
' > Train: ' + str(response['rapid']['train']) + '\n' +
' > Wait: ' + str(response['rapid']['wait']) + '\n' +
' > Single-Pulse Mode: ' + str(response['rapid']['singlePulseMode']) + '\n' +
' > HV-PSU Connected: ' + str(response['rapid']['hvpsuConnected']) + '\n' +
' > Coil Ready: ' + str(response['rapid']['coilReady']) + '\n' +
' > Theta PSU Detected: ' + str(response['rapid']['thetaPSUDetected']) + '\n' +
' > Modified Coil Algorithm: ' + str(response['rapid']['modifiedCoilAlgorithm']) + '\n\n')
if 'magstimParam' in response.keys():
outString += ('Magstim Parameters:\n' +
' > Power: ' + str(response['magstimParam']['power']) + '%\n\n')
if 'bistimParam' in response.keys():
outString += ('BiStim Parameters:\n' +
' > Power A: ' + str(response['bistimParam']['powerA']) + '%\n' +
' > Power B: ' + str(response['bistimParam']['powerB']) + '%\n' +
' > Paired-Pulse Offset: ' + str(response['bistimParam']['ppOffset']) + '\n\n')
if 'rapidParam' in response.keys():
outString += ('Rapid Parameters:\n' +
' > Power: ' + str(response['rapidParam']['power']) + '%\n' +
' > Frequency: ' + str(response['rapidParam']['frequency']) + 'Hz\n' +
' > Number of Pulses: ' + str(response['rapidParam']['nPulses']) + '\n' +
' > Duration: ' + str(response['rapidParam']['duration']) + 's\n' +
' > Wait: ' + str(response['rapidParam']['wait']) + 's\n\n')
if 'chargeDelay' in response.keys():
            outString += ('Charge Delay: ' + str(response['chargeDelay']) + 's\n\n')
if 'magstimTemp' in response.keys():
outString += ('Coil Temperatures:\n' +
' > Coil 1 Temperature: ' + str(response['magstimTemp']['coil1Temp']) + ' Degrees Celsius\n' +
' > Coil 2 Temperature: ' + str(response['magstimTemp']['coil2Temp']) + ' Degrees Celsius\n\n')
if 'currentErrorCode' in response.keys():
            outString += ('Error Code: ' + str(response['currentErrorCode']) + '\n\n')
return outString
@staticmethod
def parseMagstimResponse(responseString, responseType):
"""Interprets responses sent from the Magstim unit."""
if responseType == 'version':
magstimResponse = tuple(int(x) for x in ''.join([chr(x) for x in responseString[1:-1]]).strip().split('.') if x.isdigit())
else:
# Get ASCII code of first data character
temp = responseString.pop(0)
# Interpret bits
magstimResponse = {'instr':{'standby': temp & 1,
'armed': (temp >> 1) & 1,
'ready': (temp >> 2) & 1,
'coilPresent': (temp >> 3) & 1,
'replaceCoil': (temp >> 4) & 1,
'errorPresent': (temp >> 5) & 1,
'errorType': (temp >> 6) & 1,
'remoteStatus': (temp >> 7) & 1}}
# If a Rapid system and response includes rTMS status
if responseType in {'instrRapid','rapidParam','systemRapid'}:
# Get ASCII code of second data character
temp = responseString.pop(0)
# Interpret bits
magstimResponse['rapid'] = {'enhancedPowerMode': temp & 1,
'train': (temp >> 1) & 1,
'wait': (temp >> 2) & 1,
'singlePulseMode': (temp >> 3) & 1,
'hvpsuConnected': (temp >> 4) & 1,
'coilReady': (temp >> 5) & 1,
'thetaPSUDetected': (temp >> 6) & 1,
'modifiedCoilAlgorithm': (temp >> 7) & 1}
# If requesting parameter settings or coil temperature
if responseType == 'bistimParam':
magstimResponse['bistimParam'] = {'powerA': int(''.join(chr(x) for x in responseString[0:3])),
'powerB': int(''.join(chr(x) for x in responseString[3:6])),
'ppOffset': int(''.join(chr(x) for x in responseString[6:9]))}
elif responseType == 'magstimParam':
magstimResponse['magstimParam'] = {'power': int(''.join(chr(x) for x in responseString[:3]))}
elif responseType in 'rapidParam':
# This is a bit of a hack to determine which software version we're dealing with
if len(responseString) == 20:
magstimResponse['rapidParam'] = {'power': int(''.join(chr(x) for x in responseString[0:3])),
'frequency': int(''.join(chr(x) for x in responseString[3:7])) / 10.0,
'nPulses': int(''.join(chr(x) for x in responseString[7:12])),
'duration': int(''.join(chr(x) for x in responseString[12:16])) / 10.0,
'wait': int(''.join(chr(x) for x in responseString[16:])) / 10.0}
else:
magstimResponse['rapidParam'] = {'power': int(''.join(chr(x) for x in responseString[0:3])),
'frequency': int(''.join(chr(x) for x in responseString[3:7])) / 10.0,
'nPulses': int(''.join(chr(x) for x in responseString[7:11])),
'duration': int(''.join(chr(x) for x in responseString[11:14])) / 10.0,
'wait': int(''.join(chr(x) for x in responseString[14:])) / 10.0}
elif responseType == 'magstimTemp':
magstimResponse['magstimTemp'] = {'coil1Temp': int(''.join(chr(x) for x in responseString[0:3])) / 10.0,
'coil2Temp': int(''.join(chr(x) for x in responseString[3:6])) / 10.0}
elif responseType == 'systemRapid':
temp = responseString.pop(0)
magstimResponse['extInstr'] = {'plus1ModuleDetected': temp & 1,
'specialTriggerModeActive': (temp >> 1) & 1,
'chargeDelaySet': (temp >> 2) & 1}
elif responseType == 'error':
magstimResponse['currentErrorCode'] = ''.join(chr(x) for x in responseString[:-1])
elif responseType == 'instrCharge':
magstimResponse['chargeDelay'] = int(''.join(chr(x) for x in responseString))
return magstimResponse
def __init__(self, serialConnection, debugSerialConnection=None):
self._sendQueue = Queue()
self._receiveQueue = Queue()
self._parameterReturnBytes = None
self._connectionCommand = (b'Q@n', None, 3)
self._queryCommand = partial(self.remoteControl, enable=True, receipt=True)
self._setupSerialPort(serialConnection, debugSerialConnection)
self._connection.daemon = True
self._connected = False
def _setupSerialPort(self, serialConnection, debugSerialConnection=None):
if serialConnection.lower() == 'virtual':
from _virtual import virtualPortController
self._connection = virtualPortController(self.__class__.__name__,self._sendQueue,self._receiveQueue)
else:
self._connection = serialPortController(serialConnection, self._sendQueue, self._receiveQueue, self._connectionCommand, debugSerialConnection)
def connect(self):
"""
Connect to the Magstim.
This starts the serial port controller, as well as a process that constantly keeps in contact with the Magstim so as not to lose control.
"""
if not self._connected:
self._connection.start()
#success,message = self.remoteControl(enable=True, receipt=True)
#if success:
if not self.remoteControl(enable=True, receipt=True)[0]:
self._connected = True
else:
self._sendQueue.put((None, None, None))
if self._connection.is_alive():
self._connection.join()
raise MagstimError('Could not establish remote control over the Magstim.')
def disconnect(self):
"""
Disconnect from the Magstim.
This stops maintaining contact with the Magstim and turns the serial port controller off.
"""
if self._connected:
self.disarm()
self.remoteControl(enable=False, receipt=True)
self._sendQueue.put((None, None, None))
if self._connection.is_alive():
self._connection.join(timeout=2.0)
self._connected = False
def _processCommand(self, commandString, receiptType, readBytes):
"""
Process Magstim command.
Args:
commandString (str): command and data characters making up the command string (N.B. do not include CRC character)
reciptType (bool): whether to return the occurrence of any error when executing the command and the automated response from the Magstim unit
readBytes (int): number of bytes in the response
Returns:
If receiptType argument is not None:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing one or more Magstim parameter dicts, otherwise returns an error string
If receiptType argument is None:
None
"""
response = (0, None)
# Unify Python 2 and 3 strings
commandString = bytearray(commandString)
# Only process command if toggling remote control, querying parameters, or disarming, or otherwise only if connected to the Magstim
# N.B. For Rapid stimulators, we first need to have established what version number we are (which sets _parameterReturnBytes) before we can query parameters
if self._connected or (commandString[0] in {ord(b'Q'), ord(b'R'), ord(b'J'), ord(b'F')}) or commandString == b'EA' or (commandString[0] == b'\\' and self._parameterReturnBytes is not None):
# Put command in the send queue to the serial port controller along with what kind of reply is requested and how many bytes to read back from the Magstim
self._sendQueue.put((bytes(commandString + calcCRC(commandString)), receiptType, readBytes))
# If expecting a response, start inspecting the receive queue back from the serial port controller
if receiptType is not None:
error, reply = self._receiveQueue.get()
# If error is true, that means we either couldn't send the command or didn't get anything back from the Magstim
if error:
response = (error, reply)
# If we did get something back from the Magstim, parse the message and the return it
else:
# Check for error messages
if reply[0] == ord(b'?'):
response = Magstim.INVALID_COMMAND_ERR
elif reply[1] == ord(b'?'):
response = Magstim.INVALID_DATA_ERR
elif reply[1] == ord(b'S'):
response = Magstim.COMMAND_CONFLICT_ERR
elif reply[0] != commandString[0]:
response = Magstim.INVALID_CONFIRMATION_ERR
elif ord(calcCRC(reply[:-1])) != reply[-1]:
response = Magstim.CRC_MISMATCH_ERR
else:
# Then return the parsed response if requested
response = (0, Magstim.parseMagstimResponse(list(reply[1:-1]), receiptType))
else:
response = Magstim.NO_REMOTE_CONTROL_ERR
return response
def remoteControl(self, enable, receipt=False):
"""
Enable/Disable remote control of stimulator. Disabling remote control will first disarm the Magstim unit.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'Q@' if enable else b'R@', 'instr' if receipt else None, 3)
def getParameters(self):
"""
Request current parameter settings from the Magstim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Magstim instrument status ['instr'] and parameter setting ['magstimParam'] dicts, otherwise returns an error string
"""
return self._processCommand(b'J@', 'magstimParam', 12)
def setPower(self, newPower, receipt=False, delay=False, _commandByte=b'@'):
"""
Set power level for Magstim.
N.B. Allow 100 ms per unit drop in power, or 10 ms per unit increase in power.
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
delay (bool): enforce delay to allow Magstim time to change Power (defaults to False)
_commandByte should not be changed by the user
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
# Make sure we have a valid power value
if newPower % 1:
return Magstim.PARAMETER_FLOAT_ERR
elif not 0 <= newPower <= 100:
return Magstim.PARAMETER_RANGE_ERR
#If enforcing power change delay, grab current parameters
if delay:
error, priorPower = self.getParameters()
if error:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
# Switch keys depending on whether we're returning for a BiStim
if type(self).__name__ == 'BiStim':
priorPower = priorPower['bistimParam']['powerA'] if _commandByte == b'@' else priorPower['bistimParam']['powerB']
elif type(self).__name__ == 'Rapid':
priorPower = priorPower['rapidParam']['power']
else:
priorPower = priorPower['magstimParam']['power']
error, message = self._processCommand(_commandByte + bytearray(str(int(newPower)).zfill(3),encoding='ascii'), 'instr' if (receipt or delay) else None, 3)
# If we're meant to delay (and we were able to change the power), then enforce if prior power settings are available
if delay and not error:
if not error:
if newPower > priorPower:
sleep((newPower - priorPower) * 0.01)
else:
sleep((priorPower - newPower) * 0.1)
else:
return Magstim.PARAMETER_UPDATE_ERR
return (error, message) if receipt else None
def getTemperature(self):
"""
Request current coil temperature from the Magstim.
N.B. Coil1 and Coil2 refer to the separate windings in a single figure-8 coil connected to the Magstim.
Magstim units will automatically disarm (and cannot be armed) if the coil temperature exceeds 40 degrees celsius.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Magstim instrument status ['instr'] and coil temperature ['magstimTemp'] dicts, otherwise returns an error string
"""
return self._processCommand(b'F@', 'magstimTemp', 9)
def poke(self, silent=False):
"""
'Poke' the stimulator with an enable remote control command (only if currently connected).
        This should be used prior to any time-sensitive commands, such as triggering the magstim to coincide with stimulus presentation. Conservatively, around 40-50ms should
be enough time to allow for (~20ms if 'silently' poking). This needs to be done to ensure that the ongoing communication with the magstim to maintain remote control
does not interfere with the sent command. Note that this simply resets the timer controlling this ongoing communication (i.e., incrementing it a further 500 ms).
Args:
            silent (bool): whether to reset the polling timer without sending an enable remote control command (defaults to False)
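
        Example (an illustrative sketch):
            myMagstim.poke(silent=True)
            # ...wait roughly 20-50 ms...
            myMagstim.fire()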
"""
if silent and self._connected:
self._sendQueue.put((0, None, 0))
else:
self._processCommand(*self._connectionCommand)
def arm(self, receipt=False, delay=False):
"""
Arm the stimulator.
        N.B. You must allow around 1 s for the stimulator to arm.
If you send an arm() command when the Magstim is already armed, you will receive an non-fatal error reply from the Magstim that the command conflicts with the current settings.
        If the unit is not fired for more than 1 min while armed, it will disarm.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
delay (bool): enforce delay to allow Magstim time to arm (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
error, message = self._processCommand(b'EB', 'instr' if receipt else None, 3)
#Enforcing arming delay if requested
if delay:
sleep(1.1)
return (error, message)
def disarm(self, receipt=False):
"""
Disarm the stimulator.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'EA', 'instr' if receipt else None, 3)
def isArmed(self):
"""
Helper function that returns True if the Magstim is armed or ready, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return (bool(parameters['instr']['armed']) or bool(parameters['instr']['remoteStatus'])) if not error else False
def isUnderControl(self):
"""
Helper function that returns True if the Magstim is under remote control, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['instr']['remoteStatus']) if not error else False
def isReadyToFire(self):
"""
Helper function that returns True if the Magstim is ready to fire, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['instr']['ready']) if not error else False
def fire(self, receipt=False):
"""
Fire the stimulator.
N.B. Will only succeed if previously armed.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'EH', 'instr' if receipt else None, 3)
def resetQuickFire(self):
"""
Reset the RTS pin used for quick firing.
        N.B. There must be a few ms between triggering QuickFire and resetting the pin.
"""
self._sendQueue.put((-1, None, 0))
def quickFire(self):
"""
Trigger the stimulator to fire with very low latency using the RTS pin and a custom serial connection.
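
        Example (an illustrative sketch):
            myMagstim.quickFire()
            sleep(0.005)             # allow a few ms before releasing the pin
            myMagstim.resetQuickFire()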
"""
self._sendQueue.put((1, None, 0))
class BiStim(Magstim):
"""
This is a sub-class of the parent Magstim class used for controlling BiStim^2 Magstim units. It allows firing in either BiStim mode or Simultaneous Discharge mode.
To enable Simultaneous Discharge mode, you must change the pulseInterval parameter to 0 s (i.e., by calling: setPulseInterval(0)).
    N.B. In BiStim mode, the maximum firing frequency is 0.25 Hz. In Simultaneous Discharge mode, the maximum frequency depends on the power level (0.25 - 0.5 Hz)
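
    Example (an illustrative sketch; the serial port name is an assumption):
        bistim = BiStim(serialConnection='COM1')
        bistim.connect()
        bistim.setPowerA(50, delay=True)
        bistim.setPowerB(30, delay=True)
        bistim.setPulseInterval(10)     # 10 ms between the two pulses
        bistim.arm(delay=True)
        bistim.fire()
        bistim.disconnect()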
"""
def __init__(self, serialConnection):
super(BiStim, self).__init__(serialConnection)
self._highResolutionMode = False
def highResolutionMode(self, enable, receipt=False):
"""
Enable/Disable high resolution timing of interpulse interval.
When enabling high-resolution mode, the system will default to a 1ms interval.
When disabling high-resolution mode, the system will default to a 10ms interval.
N.B. This cannot be changed while the system is armed.
Args:
enable (bool): whether to enable (True) or disable (False) high-resolution mode
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                    message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
error,message = self._processCommand(b'Y@' if enable else b'Z@', 'instr' if receipt else None, 3)
if not error:
self._highResolutionMode = enable
return (error,message)
def getParameters(self):
"""
        Request current parameter settings from the BiStim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing BiStim instrument status ['instr'] and parameter setting ['bistimParam'] dicts, otherwise returns an error string
"""
(error,message) = self._processCommand(b'J@', 'bistimParam', 12)
if not error and self._highResolutionMode:
message['bistimParam']['ppOffset'] /= 10.0
return (error,message)
def setPowerA(self, newPower, receipt=False, delay=False):
"""
Set power level for BiStim A.
N.B. Allow 100ms per unit drop in power, or 10ms per unit increase in power.
        In BiStim mode, power output is actually 90% of a 200^2 unit's power output. In Simultaneous Discharge mode (pulseInterval = 0), power output is actually 113% of a 200^2 unit's power output
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
delay (bool): enforce delay to allow BiStim time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
#This is just an alias for the base magstim class method setPower
return super(BiStim, self).setPower(newPower, receipt=receipt, delay=delay, _commandByte=b'@')
def setPowerB(self, newPower, receipt=False, delay=False):
"""
Set power level for BiStim B.
N.B. Allow 100ms per unit drop in power, or 10ms per unit increase in power.
Power output is actually 90% of a 200^2 unit's power output.
Args:
newPower (int): new power level (0-100)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
delay (bool): enforce delay to allow BiStim time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
#This is just an alias for the base magstim class method setPower
return super(BiStim, self).setPower(newPower, receipt=receipt, delay=delay, _commandByte=b'A')
def setPulseInterval(self, newInterval, receipt=False):
"""
Set interpulse interval.
Args:
newInterval (int/float): new interpulse interval in milliseconds (Range low-resolution mode: 0-999; Range high-resolution mode: 0-99.9)
receipt (bool): whether to return occurrence of an error and the automated response from the BiStim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a BiStim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
# If we're in high resolution mode, then convert to tenths of a millisecond
if self._highResolutionMode:
newInterval = newInterval * 10
# Make sure we have a valid ipi value
if newInterval % 1:
return Magstim.PARAMETER_PRECISION_ERR if self._highResolutionMode else Magstim.PARAMETER_FLOAT_ERR
elif not (0 <= newInterval <= 999):
return Magstim.PARAMETER_RANGE_ERR
return self._processCommand(b'C' + bytearray(str(int(newInterval)).zfill(3),encoding='ascii'), 'instr' if receipt else None, 3)
class Rapid(Magstim):
"""
This is a sub-class of the parent Magstim class used for controlling Rapid^2 Magstim units. It allows firing in either single-pulse mode or rTMS mode.
In single-pulse mode, the maximum firing frequency is 1 Hz (0.5 Hz if enhanced-power mode is enabled and power is 100 - 110%).
To enable rTMS mode, you must first call rTMSMode(True). To disable rTMS mode, call rTMSMode(False).
N.B. In rTMS mode the maximum frequency allowed is dependent on the power level. Also, there is a dependent relationship between the Duration, NPulses, and Frequency parameter settings.
Therefore it is recommended either to seek confirmation of any change in settings or to evaluate allowable changes beforehand.
In addition, after each rTMS train there is an enforced delay (minimum 500 ms) before any subsequent train can be initiated or before any rTMS parameter settings can be altered.
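
    Example (an illustrative sketch; the serial port name and stimulator type are assumptions):
        rapid = Rapid(serialConnection='COM1', superRapid=1)
        rapid.connect()
        rapid.setPower(50, delay=True)
        rapid.arm(delay=True)
        rapid.fire()                 # single pulse; single-pulse mode is the default
        rapid.disconnect()
    For rTMS trains, first call rTMSMode(True) and validate the train settings (see SEQUENCE_VALIDATION_ERR above) before firing.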
"""
# Load settings file (resort to default values if not found)
__location__ = realpath(join(getcwd(), dirname(__file__)))
try:
with open(join(__location__, 'rapid_config.yaml')) as yaml_file:
config_data = safe_load(yaml_file)
except:
DEFAULT_RAPID_TYPE = 0
DEFAULT_VOLTAGE = 240
DEFAULT_UNLOCK_CODE = ''
ENFORCE_ENERGY_SAFETY = True
DEFAULT_VIRTUAL_VERSION = (5,0,0)
else:
DEFAULT_RAPID_TYPE = config_data['defaultRapidType']
DEFAULT_VOLTAGE = config_data['defaultVoltage']
DEFAULT_UNLOCK_CODE = config_data['unlockCode']
ENFORCE_ENERGY_SAFETY = config_data['enforceEnergySafety']
DEFAULT_VIRTUAL_VERSION = literal_eval(config_data['virtualVersionNumber'])
# Load system info file
with open(join(__location__, 'rapid_system_info.yaml')) as yaml_file:
system_info = safe_load(yaml_file)
# Maximum allowed rTMS frequency based on voltage and current power setting
MAX_FREQUENCY = system_info['maxFrequency']
# Minimum wait time (s) required for rTMS train. Power:Joules per pulse
JOULES = system_info['joules']
def getRapidMinWaitTime(power, nPulses, frequency):
""" Calculate minimum wait time between trains for given power, frequency, and number of pulses."""
return max(0.5, (nPulses * ((frequency * Rapid.JOULES[power]) - 1050.0)) / (1050.0 * frequency))
def getRapidMaxOnTime(power, frequency):
""" Calculate maximum train duration per minute for given power and frequency. If greater than 60 seconds, will allow for continuous operation for up to 6000 pulses."""
return 63000.0 / (frequency * Rapid.JOULES[power])
def getRapidMaxContinuousOperationFrequency(power):
""" Calculate maximum frequency that will allow for continuous operation (up to 6000 pulses)."""
return 1050.0 / Rapid.JOULES[power]
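    # Illustrative note on the three helpers above (an interpretation, not a
    # manufacturer figure): 1050.0 behaves as the sustainable energy budget in
    # joules per second, and 63000.0 is that budget over a 60 s window. As a
    # hypothetical worked example, if JOULES[power] were 105 J per pulse,
    # continuous operation would be possible up to 1050.0 / 105 = 10 Hz, and a
    # 20 Hz train at that power could run for at most 63000.0 / (20 * 105) = 30 s per minute.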
def __init__(self, serialConnection, superRapid=DEFAULT_RAPID_TYPE, unlockCode=DEFAULT_UNLOCK_CODE, voltage=DEFAULT_VOLTAGE, version=DEFAULT_VIRTUAL_VERSION, debugSerialConnection=None):
self._super = superRapid
self._unlockCode = unlockCode
self._voltage = voltage
self._version = version if serialConnection.lower() == 'virtual' else (0,0,0)
self._sendQueue = Queue()
self._receiveQueue = Queue()
self._parameterReturnBytes = None
# If an unlock code has been supplied, then the Rapid requires a different command to stay in contact with it.
if self._unlockCode:
self._connectionCommand = (b'x@G', None, 6)
self._queryCommand = self.getSystemStatus
else:
self._connectionCommand = (b'Q@n', None, 3)
self._queryCommand = partial(self.remoteControl, enable=True, receipt=True)
self._setupSerialPort(serialConnection, debugSerialConnection)
self._connection.daemon = True
self._connected = False
self._sequenceValidated = False
self._repetitiveMode = False
def _setupSerialPort(self, serialConnection, debugSerialConnection=None):
if serialConnection.lower() == 'virtual':
from _virtual import virtualPortController
self._connection = virtualPortController(self.__class__.__name__,self._sendQueue,self._receiveQueue,superRapid=self._super,unlockCode=self._unlockCode,voltage=self._voltage,version=self._version)
else:
self._connection = serialPortController(serialConnection, self._sendQueue, self._receiveQueue, self._connectionCommand, debugSerialConnection)
def getVersion(self):
"""
Get Magstim software version number. This is needed when obtaining parameters from the Magstim.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (tuple): if error is 0 (False) returns a tuple containing the version number (in (Major,Minor,Patch) format), otherwise returns an error string
"""
error, message = self._processCommand(b'ND', 'version', None)
#If we didn't receive an error, update the version number and the number of bytes that will be returned by a getParameters() command
if not error:
self._version = message
if self._version >= (9,):
self._parameterReturnBytes = 24
elif self._version >= (7,):
self._parameterReturnBytes = 22
else:
self._parameterReturnBytes = 21
return (error,message)
def getErrorCode(self):
"""
Get current error code from Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
                message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and current error code ['currentErrorCode'] dicts, otherwise returns an error string
"""
return self._processCommand(b'I@', 'error', 6)
def connect(self, receipt=False):
"""
Connect to the Rapid.
This starts the serial port controller, as well as a process that constantly keeps in contact with the Rapid so as not to lose control.
It also collects the software version number of the Rapid in order to send the correct command for obtaining parameter settings.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (str): if error is 0 (False) returns a string containing the version number (in (X,X,X) format), otherwise returns an error string
"""
super(Rapid,self).connect()
# We have to be able to determine the software version of the Rapid, otherwise we won't be able to communicate properly
error, message = self.getVersion()
if error:
self.disconnect()
raise MagstimError('Could not determine software version of Rapid. Disconnecting.')
def disconnect(self):
"""
Disconnect from the Magstim.
This stops maintaining contact with the Magstim and turns the serial port controller off.
"""
#Just some housekeeping before we call the base magstim class method disconnect
self._sequenceValidated = False
self._repetitiveMode = False
return super(Rapid, self).disconnect()
def rTMSMode(self, enable, receipt=False):
"""
This is a helper function to enable/disable rTMS mode.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Get current parameters
updateError,currentParameters = self.getParameters()
if updateError:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
# See if Rapid already in rTMS mode (if enabling) or already in single-pulse mode (if disabling)
if (not currentParameters['rapid']['singlePulseMode'] and enable) or (currentParameters['rapid']['singlePulseMode'] and not enable):
del currentParameters['rapidParam']
return (0,currentParameters) if receipt else None
# Durations of 1 or 0 are used to toggle repetitive mode on and off
if self._version >= (9,):
commandString = b'[0010' if enable else b'[0000'
else:
commandString = b'[010' if enable else b'[000'
error,message = self._processCommand(commandString, 'instrRapid', 4)
if not error:
if enable:
self._repetitiveMode = True
updateError,currentParameters = self.getParameters()
if not updateError:
if currentParameters['rapidParam']['frequency'] == 0:
updateError,currentParameters = self._processCommand(b'B0010', 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
self._repetitiveMode = False
return (error,message) if receipt else None
def ignoreCoilSafetySwitch(self, receipt=False):
"""
This allows the stimulator to ignore the state of coil safety interlock switch.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'b@', 'instr' if receipt else None, 3)
def remoteControl(self, enable, receipt=False):
"""
Enable/Disable remote control of stimulator. Disabling remote control will first disarm the Magstim unit.
Args:
enable (bool): whether to enable (True) or disable (False) control
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
if self._unlockCode:
return self._processCommand(b'Q' + bytearray(self._unlockCode,encoding='latin_1') if enable else b'R@', 'instr' if receipt else None, 3)
else:
return self._processCommand(b'Q@' if enable else b'R@', 'instr' if receipt else None, 3)
def enhancedPowerMode(self, enable, receipt=False):
"""
Enable/Disable enhanced power mode; allowing intensity to be set to 110%.
N.B. This can only be enabled in single-pulse mode, and lowers the maximum firing frequency to 0.5 Hz.
Disabling will automatically reduce the intensity to 100% if it is currently set above that.
Args:
enable (bool): whether to enable (True) or disable (False) enhanced-power mode
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
return self._processCommand(b'^@' if enable else b'_@', 'instrRapid' if receipt else None, 4)
def isEnhanced(self):
"""
Helper function that returns True if the Rapid is in enhanced power mode, False if not or if it could not be determined.
"""
error,parameters = self._queryCommand()
return bool(parameters['rapid']['enhancedPowerMode']) if not error else False
def setFrequency(self, newFrequency, receipt=False):
"""
Set frequency of rTMS pulse train.
N.B. Changing the Frequency will automatically update the NPulses parameter based on the current Duration parameter setting.
The maximum frequency allowed depends on the current Power level and the regional power settings (i.e., 115V vs. 240V)
Args:
newFrequency (int/float): new frequency of pulse train in Hertz (0-100 for 240V systems, 0-60 for 115V systems); decimal values are allowed for frequencies up to 30Hz
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Convert to tenths of a Hz
newFrequency = newFrequency * 10
# Make sure we have a valid frequency value
if newFrequency % 1:
return Magstim.PARAMETER_PRECISION_ERR
updateError,currentParameters = self.getParameters()
if updateError:
return Magstim.PARAMETER_ACQUISTION_ERR
else:
maxFrequency = Rapid.MAX_FREQUENCY[self._voltage][self._super][currentParameters['rapidParam']['power']] * 10
if not (0 <= newFrequency <= maxFrequency):
return Magstim.PARAMETER_RANGE_ERR
#Send command
error, message = self._processCommand(b'B' + bytearray(str(int(newFrequency)).zfill(4),encoding='ascii'), 'instrRapid', 4)
#If we didn't get an error, update the other parameters accordingly
if not error:
updateError,currentParameters = self.getParameters()
if not updateError:
updateError,currentParameters = self._processCommand(b'D' + bytearray(str(int(currentParameters['rapidParam']['duration'] * currentParameters['rapidParam']['frequency'])).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def setNPulses(self, newNPulses, receipt=False):
"""
Set number of pulses in rTMS pulse train.
N.B. Changing the NPulses parameter will automatically update the Duration parameter (this cannot exceed 10 s) based on the current Frequency parameter setting.
Args:
newNPulses (int): new number of pulses (Version 9+: 1-6000; Version 7+: ?; Version 5+: 1-1000?)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Make sure we have a valid number of pulses value
if newNPulses % 1:
return Magstim.PARAMETER_FLOAT_ERR
if not (0 <= newNPulses <= 6000):
return Magstim.PARAMETER_RANGE_ERR
#Send command
error, message = self._processCommand(b'D' + bytearray(str(int(newNPulses)).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
#If we didn't get an error, update the other parameters accordingly
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
updateError, currentParameters = self._processCommand(b'[' + bytearray(str(int(currentParameters['rapidParam']['nPulses'] / currentParameters['rapidParam']['frequency'])).zfill(4 if self._version >= (9,) else 3),encoding='ascii'), 'instrRapid' if receipt else None, 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def setDuration(self, newDuration, receipt=False):
"""
Set duration of rTMS pulse train.
N.B. Changing the Duration parameter will automatically update the NPulses parameter based on the current Frequency parameter setting.
Args:
newDuration (int/float): new duration of pulse train in seconds (Version 9+: 1-600; Version 7+: ?; Version 5+: 1-10?); decimal values are allowed for durations up to 30s
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'] and rTMS setting ['rapid'] dicts, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Convert to tenths of a second
newDuration = newDuration * 10
# Make sure we have a valid duration value
if newDuration % 1:
return Magstim.PARAMETER_PRECISION_ERR
elif not (0 <= newDuration <= (999 if self._version < (9,) else 9999)):
return Magstim.PARAMETER_RANGE_ERR
error, message = self._processCommand(b'[' + bytearray(str(int(newDuration)).zfill(4 if self._version >= (9,) else 3),encoding='ascii'), 'instrRapid', 4)
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
updateError, currentParameters = self._processCommand(b'D' + bytearray(str(int(currentParameters['rapidParam']['duration'] * currentParameters['rapidParam']['frequency'])).zfill(5 if self._version >= (9,) else 4),encoding='ascii'), 'instrRapid', 4)
if updateError:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error, message) if receipt else None
def getParameters(self):
"""
Request current parameter settings from the Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'], rTMS setting ['rapid'], and parameter setting ['rapidParam'] dicts, otherwise returns an error string
"""
return self._processCommand(b'\\@', 'rapidParam', self._parameterReturnBytes)
def setPower(self, newPower, receipt=False, delay=False):
"""
Set power level for the Rapid.
N.B. Allow 100 ms per unit drop in power, or 10 ms per unit increase in power.
Changing the power level can result in automatic updating of the Frequency parameter (if in rTMS mode)
Args:
newPower (int): new power level (0-100; or 0-110 if enhanced-power mode is enabled)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
delay (bool): enforce delay to allow Rapid time to change Power (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
self._sequenceValidated = False
# Check current enhanced power status
if self.isEnhanced():
maxPower = 110
else:
maxPower = 100
# Make sure we have a valid power value
if newPower % 1:
return Magstim.PARAMETER_FLOAT_ERR
elif not 0 <= newPower <= maxPower:
return Magstim.PARAMETER_RANGE_ERR
error, message = super(Rapid,self).setPower(newPower,True,delay,b'@')
if not error:
updateError, currentParameters = self.getParameters()
if not updateError:
if not currentParameters['rapid']['singlePulseMode']:
maxFrequency = Rapid.MAX_FREQUENCY[self._voltage][self._super][currentParameters['rapidParam']['power']]
if currentParameters['rapidParam']['frequency'] > maxFrequency:
if not self.setFrequency(maxFrequency)[0]:
return Magstim.PARAMETER_UPDATE_ERR
else:
return Magstim.PARAMETER_ACQUISTION_ERR
return (error,message) if receipt else None
def setChargeDelay(self, newDelay, receipt=False):
"""
Set charge delay duration for the Rapid.
Args:
newDelay (int): new delay duration in seconds (Version 10+: 1-10000; Version 9: 1-2000)
receipt (bool): whether to return occurrence of an error and the automated response from the Rapid unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version < (9,):
return Magstim.SYSTEM_STATUS_VERSION_ERR
self._sequenceValidated = False
#Make sure we have a valid delay duration value
if newDelay % 1:
return Magstim.PARAMETER_FLOAT_ERR
error, message = self._processCommand(b'n' + bytearray(str(int(newDelay)).zfill(5 if self._version >= (10,) else 4),encoding='ascii'), 'systemRapid' if self._version >= (10,) else 'instrRapid', 6 if self._version >= (10,) else 4)
return (error,message) if receipt else None
def getChargeDelay(self):
"""
Get current charge delay duration for the Rapid.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Rapid instrument status ['instr'] dict and charge delay duration ['chargeDelay'] value, otherwise returns an error string
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version < (9,):
return Magstim.SYSTEM_STATUS_VERSION_ERR
return self._processCommand(b'o@', 'instrCharge', 8 if self._version > (9,) else 7)
def fire(self, receipt=False):
"""
Fire the stimulator. This overrides the base Magstim method in order to check whether rTMS mode is active, and if so whether the sequence has been validated and the min wait time between trains has elapsed
N.B. Will only succeed if previously armed.
Args:
receipt (bool): whether to return occurrence of an error and the automated response from the Magstim unit (defaults to False)
Returns:
If receipt argument is True:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing a Magstim instrument status ['instr'] dict, otherwise returns an error string
If receipt argument is False:
None
"""
if self._repetitiveMode and Rapid.ENFORCE_ENERGY_SAFETY and not self._sequenceValidated:
return Magstim.SEQUENCE_VALIDATION_ERR
else:
return super(Rapid,self).fire(receipt)
def quickFire(self):
"""
Trigger the stimulator to fire with very low latency using the RTS pin and a custom serial connection.
"""
if self._repetitiveMode and Rapid.ENFORCE_ENERGY_SAFETY and not self._sequenceValidated:
return Magstim.SEQUENCE_VALIDATION_ERR
else:
super(Rapid,self).quickFire()
def validateSequence(self):
"""
Validate the energy consumption for the current rTMS parameters for the Rapid.
This must be performed before running any new sequence, otherwise calling fire() will return an error.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns current Rapid parameters, otherwise returns an error string
"""
self._sequenceValidated = False
error,parameters = self.getParameters()
if error:
return Magstim.PARAMETER_ACQUISTION_ERR
elif min(parameters['rapidParam']['duration'], 60) > Rapid.getRapidMaxOnTime(parameters['rapidParam']['power'], parameters['rapidParam']['frequency']):
return Magstim.MAX_ON_TIME_ERR
else:
self._sequenceValidated = True
return (0, parameters)
def getSystemStatus(self):
"""
Get system status from the Rapid. Available only on software version of 9 or later.
Returns:
:tuple:(error,message):
error (int): error code (0 = no error; 1+ = error)
message (dict,str): if error is 0 (False) returns a dict containing Rapid instrument status ['instr'], rTMS setting ['rapid'], and extended instrument status ['extInstr'] dicts, otherwise returns an error string
"""
if self._version is None:
return Magstim.GET_SYSTEM_STATUS_ERR
elif self._version >= (9,):
return self._processCommand(b'x@', 'systemRapid', 6)
else:
return Magstim.SYSTEM_STATUS_VERSION_ERR
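

# The sketch below is a hypothetical usage example added for illustration; it is not
# part of the original module. It assumes a Rapid stimulator reachable on 'COM1' and
# only uses methods and helpers defined on the class above; the power/frequency values
# are illustrative.
if __name__ == "__main__":  # pragma: no cover
    # Duty-cycle helpers: maximum train duration and maximum continuous-operation
    # frequency for a given power level.
    print(Rapid.getRapidMaxOnTime(60, 10))
    print(Rapid.getRapidMaxContinuousOperationFrequency(60))

    # A minimal hardware session: connect, configure an rTMS train, validate the
    # energy consumption of the sequence, then fire (the unit must also be armed,
    # which is handled by the base Magstim class) and disconnect.
    rapid = Rapid('COM1')
    rapid.connect()
    rapid.setPower(60, delay=True)
    rapid.rTMSMode(True)
    rapid.setFrequency(10)
    rapid.setDuration(2)
    rapid.validateSequence()
    rapid.fire()
    rapid.disconnect()
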
# /Demomgr-1.10.1-py3-none-any.whl/demomgr/dialogues/play.py
from itertools import chain, cycle, repeat
import os
import queue
import re
import subprocess
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tk_msg
from multiframe_list import MultiframeList
from multiframe_list.multiframe_list import SELECTION_TYPE
import vdf
from demomgr import constants as CNST
from demomgr.dialogues._base import BaseDialog
from demomgr.dialogues._diagresult import DIAGSIG
from demomgr.helpers import frmd_label, tk_secure_str, int_validator
from demomgr.platforming import get_steam_exe
from demomgr.threadgroup import ThreadGroup, THREADGROUPSIG
from demomgr.threads import THREADSIG, RCONThread
from demomgr.tk_widgets import PasswordButton, TtkText
from demomgr.tk_widgets.misc import DynamicLabel
def follow_vdf_keys(vdf_data, keys, key_case_sensitive = True):
"""
Resolves a sequence of vdf keys through a given dict representing
a vdf file and returns the last value resulted from following the
keys.
Keys can be processed case insensitively where they will be compared
to the vdf data keys one-by-one case-agnostically and the first
matching one returned. Since dict order is stable post-3.6 but icky,
this is really ugly if a vdf file should contain two keys that
-ignoring case- are equal.
Returns None if no dict was present yet there were keys to be
resolved or when a key was not found in a dictionary.
"""
for key in keys:
if not isinstance(vdf_data, dict):
return None
elif key in vdf_data:
vdf_data = vdf_data[key]
elif not key_case_sensitive:
lwr = key.lower()
for test_key in vdf_data:
if test_key.lower() == lwr:
vdf_data = vdf_data[test_key]
break
else:
return None
else:
return None
return vdf_data
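# Illustration (not part of the original module): given hypothetical VDF data such as
#
#     data = {"UserLocalConfigStore": {"Software": {"Valve": {"Steam": {}}}}}
#
# the resolver walks one key per nesting level:
#
#     follow_vdf_keys(data, ("userlocalconfigstore", "software"), key_case_sensitive=False)
#     # -> {"Valve": {"Steam": {}}}
#     follow_vdf_keys(data, ("UserLocalConfigStore", "Missing"))
#     # -> None (key not found)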
RE_DOES_NOT_REQUIRE_ESCAPING = re.compile(r"^[!#$%&*+,\-./0-9<=>?@A-Z[\\\]^_a-z|]+$")
def _demo_name_needs_escaping(n):
return not RE_DOES_NOT_REQUIRE_ESCAPING.match(n)
class User():
__slots__ = ("dir_name", "name", "launch_opt")
def __init__(self, dir_name = None, name = None, launch_opt = None):
self.dir_name = dir_name
self.name = name
self.launch_opt = launch_opt
def is_fake(self):
return self.dir_name is None
def get_display_str(self):
if self.is_fake():
return "No one"
return self.dir_name + (f" - {self.name}" if self.name is not None else '')
class ErrorLabel():
__slots__ = ("label", "grid_options", "is_set")
def __init__(self):
self.label = None
self.grid_options = {}
self.is_set = False
def set(self, val):
if val == self.is_set or self.label is None:
return
self.is_set = val
if val:
self.label.grid(**self.grid_options)
else:
self.label.grid_forget()
def set_grid_options(self, **kw):
self.grid_options = kw
class ERR_IDX:
STEAMDIR = 0
STEAMDIR_DRIVE = 1
DEMO_OUTSIDE_GAME = 2
LAUNCHOPT = 3
class Play(BaseDialog):
"""
Dialog that reads and displays TF2 launch arguments and steam
profile information, offers ability to change those and launch TF2
with an additional command that plays the demo on the game's
startup, or directly hook HLAE into the game.
After the dialog is closed:
`self.result.state` will be SUCCESS if user hit launch, else
FAILURE.
Widget state remembering:
0: HLAE launch checkbox state (bool)
1: Gototick in play commands checkbox state (bool)
2: Selected userprofile's directory name (str)
(ignored when not existing)
"""
def __init__(self, parent, demo_dir, info, cfg, style, remember):
"""
parent: Parent widget, should be a `Tk` or `Toplevel` instance.
demo_dir: Demo directory the demo is located in (str)
info: Information about the demo. (DemoInfo)
cfg: The program configuration. (dict)
style: ttk.Style object
remember: List of arbitrary values. See class docstring for details.
"""
super().__init__(parent, "Play demo / Launch TF2...")
self.demopath = os.path.join(demo_dir, info.demo_name)
self.info = info
self._style = style
self.cfg = cfg
self.remember = remember
self.rcon_password_var = tk.StringVar()
self.user_select_var = tk.StringVar()
self.gototick_launchcmd_var = tk.BooleanVar()
self.usehlae_var = tk.BooleanVar()
self.user_launch_options_var = tk.StringVar()
self.custom_launch_options_var = tk.StringVar()
self.play_commands_var = tk.StringVar()
self.tick_var = tk.StringVar()
self.tick_offset_var = tk.StringVar()
self.true_tick = 0
self.demo_play_cmd = None
self.demo_gototick_cmd = None
self.users = [User()]
self.spinner = cycle(
chain(*(
repeat(sign, max(100 // CNST.GUI_UPDATE_WAIT, 1))
for sign in ("|", "/", "-", "\\")
))
)
self.rcon_threadgroup = ThreadGroup(RCONThread, self)
self.rcon_threadgroup.register_run_always_method_pre(self._rcon_run_always)
self.rcon_threadgroup.build_cb_method(self._rcon_after_callback)
self.animate_spinner = False
self.rcon_in_queue = queue.Queue()
self._tf2_head_path = vdf_data = None
if self.cfg.steam_path is not None:
try:
with open(
os.path.join(self.cfg.steam_path, CNST.LIBRARYFOLDER_VDF),
"r",
encoding = "utf-8"
) as f:
vdf_data = vdf.load(f)
libfolders = follow_vdf_keys(vdf_data, ("libraryfolders",), False)
if libfolders is not None and isinstance(libfolders, dict):
for v in libfolders.values():
if isinstance(v, dict) and CNST.TF2_GAME_ID in v["apps"]:
self._tf2_head_path = os.path.join(v["path"], CNST.TF2_HEAD_PATH)
break
except (OSError, SyntaxError, TypeError, KeyError):
# Might as well catch everything tbh
pass
self.error_steamdir_invalid = ErrorLabel()
self.warning_not_in_tf_dir = ErrorLabel()
self.info_launch_options_not_found = ErrorLabel()
# I am not sure whether this error is correct.
# It's possible I'm too incompetent to plug in the correct launch arg into
# TF2, but any kind of escaping I've tried causes TF2 to always cut the
# file name up and treat stuff as separate commands.
self.warning_bad_chars_in_demo_name = ErrorLabel()
# DONE make the tick/tick offset elements work nicely as outlined in that throwaway note file
# DONE fix the launch argument/rcon playdemo weirdness
# DONE push 1.10.0 cause new play dialog and interface is just too nice
# TODO make cool frag video and link back to demomgr
# TODO step 5: 2 unique visitors
# TODO step 6: ???
# TODO step 7: profit
self.rcon_password_var.set(self.cfg.rcon_pwd or "")
def body(self, master):
"""UI"""
self.protocol("WM_DELETE_WINDOW", self.done)
rcon_labelframe = ttk.LabelFrame(
master, padding = (10, 0, 10, 10), labelwidget = frmd_label(master, "RCON")
)
rcon_connect_frame = ttk.Frame(rcon_labelframe, style = "Contained.TFrame")
rcon_password_label = ttk.Label(
rcon_connect_frame, text = "Password", style = "Contained.TLabel"
)
rcon_password_entry = ttk.Entry(
rcon_connect_frame, style = "Contained.TEntry", textvariable = self.rcon_password_var,
show = "\u25A0"
)
pwd_entry_show_toggle = PasswordButton(rcon_connect_frame, text = "Show")
self.rcon_connect_button = ttk.Button(
rcon_connect_frame, style = "Contained.TButton", text = "Connect",
command = self._rcon_start
)
rcon_text_frame = ttk.Frame(rcon_labelframe, style = "Contained.TFrame")
self.rcon_text = TtkText(rcon_text_frame, self._style, height = 4, width = 48)
self.rcon_text.configure(wrap = tk.NONE)
rcon_text_scrollbar = ttk.Scrollbar(
rcon_text_frame, orient = tk.HORIZONTAL, command = self.rcon_text.xview
)
play_labelframe = ttk.LabelFrame(
master, padding = (10, 0, 10, 10), labelwidget = frmd_label(master, "Play")
)
launch_config_frame = ttk.Frame(play_labelframe, style = "Contained.TFrame")
user_select_label = ttk.Label(
launch_config_frame, text = "Use launch options of:",
style = "Contained.TLabel"
)
self.error_steamdir_invalid.label = ttk.Label(
launch_config_frame, style = "Error.Contained.TLabel",
text = (
"Failed listing users, Steam directory is malformed.\n"
"Make sure you selected the root directory ending in \"Steam\" in the "
"Settings > Paths section."
)
)
self.info_launch_options_not_found.label = ttk.Label(
launch_config_frame, style = "Info.Contained.TLabel",
text = "(No launch options found for this user)"
)
self.user_select_combobox = ttk.Combobox(
launch_config_frame, textvariable = self.user_select_var, state = "readonly"
)
use_hlae_checkbox = ttk.Checkbutton(
launch_config_frame, variable = self.usehlae_var, text = "Use HLAE",
style = "Contained.TCheckbutton"
)
arg_region = ttk.Frame(play_labelframe, style = "Contained.TFrame")
user_launch_options_entry = ttk.Entry(
arg_region, style = "Contained.TEntry", textvariable = self.user_launch_options_var,
state = "readonly"
)
user_launch_options_ignored_label = DynamicLabel(
250, 400, arg_region,
style = "Contained.TLabel",
text = (
"Note: The options immediatedly below are ignored when launching via steam.\n"
"Steam will always force the currently logged in user's launch options in "
"addition to the ones in the last two fields."
),
)
custom_launch_options_entry = ttk.Entry(
arg_region, style = "Contained.TEntry", textvariable = self.custom_launch_options_var
)
play_commands_entry = ttk.Entry(
arg_region, style = "Contained.TEntry", textvariable = self.play_commands_var,
state = "readonly"
)
self.rcon_send_commands_button = ttk.Button(
arg_region, style = "Contained.TButton", text = "[RCON] Send play commands",
state = tk.DISABLED, command = self._rcon_send_commands
)
self.warning_bad_chars_in_demo_name.label = ttk.Label(
arg_region, style = "Warning.Contained.TLabel", text = (
"playdemo can't be set as launch argument due to special characters in demo name."
)
)
bookmark_region = ttk.Frame(play_labelframe, style = "Contained.TFrame")
self.tick_mfl = MultiframeList(
bookmark_region,
[
{"name": "Type", "col_id": "col_type"},
{"name": "Tick", "col_id": "col_tick"},
{"name": "Value", "col_id": "col_val"},
],
selection_type = SELECTION_TYPE.SINGLE,
resizable = True,
)
tick_frame = ttk.Frame(bookmark_region, style = "Contained.TFrame")
tick_options_frame = ttk.Frame(tick_frame, style = "Contained.TFrame")
self.gototick_launchcmd_checkbox = ttk.Checkbutton(
tick_options_frame, style = "Contained.TCheckbutton",
text = "Go to tick in play commands?", variable = self.gototick_launchcmd_var
)
int_val_id = master.register(int_validator)
self.tick_entry = ttk.Entry(
tick_options_frame, style = "Contained.TEntry", textvariable = self.tick_var,
validate = "key", validatecommand = (int_val_id, "%S", "%P")
)
self.rcon_send_gototick_button = ttk.Button(
tick_options_frame, style = "Contained.TButton", text = "[RCON] Go to tick",
state = tk.DISABLED, command = self._rcon_send_gototick
)
tick_offset_frame = ttk.Frame(tick_frame, style = "Contained.TFrame")
tick_offset_spinner = ttk.Spinbox(
tick_offset_frame, width = 10, textvariable = self.tick_offset_var,
from_ = 0, increment = 20, to = 5000, wrap = True,
validate = "key", validatecommand = (int_val_id, "%S", "%P")
)
tick_offset_label = ttk.Label(
tick_offset_frame, style = "Contained.TLabel", text = "Tick offset:"
)
self.warning_not_in_tf_dir.label = ttk.Label(
bookmark_region, style = "Warning.Contained.TLabel",
text = "Demo can not be played as it is\nnot in TF2's filesystem.",
justify = tk.CENTER, anchor = tk.CENTER,
)
launch_button = ttk.Button(
bookmark_region, style = "Contained.TButton", text = "Launch TF2",
command = self._launch
)
# the griddening
master.grid_rowconfigure(1, weight = 1)
master.grid_columnconfigure(0, weight = 1)
# = RCON frame
rcon_labelframe.grid_columnconfigure(0, weight = 1)
rcon_connect_frame.grid_columnconfigure(1, weight = 1)
rcon_password_label.grid(row = 0, column = 0, pady = (0, 0), padx = (0, 5), sticky = "w")
rcon_password_entry.grid(row = 0, column = 1, padx = (0, 5), pady = (0, 5), sticky = "ew")
pwd_entry_show_toggle.grid(row = 0, column = 2, pady = (0, 5), sticky = "e")
self.rcon_connect_button.grid(row = 1, column = 0, columnspan = 3, ipadx = 20)
rcon_connect_frame.grid(row = 0, column = 0, sticky = "ew")
self.rcon_text.grid(row = 0, column = 0, sticky = "nesw")
rcon_text_scrollbar.grid(row = 1, column = 0, sticky = "ew")
rcon_text_frame.grid(row = 0, column = 1, rowspan = 2, padx = (5, 0))
rcon_labelframe.grid(row = 0, column = 0, pady = (0, 5), sticky = "nesw")
# = Play frame
play_labelframe.grid_columnconfigure(0, weight = 1)
play_labelframe.grid_rowconfigure(2, weight = 1)
# Launch config region
launch_config_frame.grid_columnconfigure(1, weight = 1)
user_select_label.grid(row = 0, column = 0, pady = (0, 5), sticky = "e")
self.user_select_combobox.grid(row = 0, column = 1, pady = (0, 5), sticky = "ew")
self.error_steamdir_invalid.set_grid_options(
row = 1, column = 0, columnspan = 2, pady = (0, 5), sticky = "ew"
)
self.info_launch_options_not_found.set_grid_options(
row = 2, column = 0, columnspan = 2, pady = (0, 5), sticky = "ew"
)
use_hlae_checkbox.grid(row = 3, column = 0, ipadx = 2, sticky = "w")
launch_config_frame.grid(row = 0, column = 0, pady = (0, 5), sticky = "nesw")
# Launch options region
arg_region.grid_columnconfigure(0, weight = 1)
user_launch_options_ignored_label.grid(row = 1, column = 0, columnspan = 2, sticky = "ew")
user_launch_options_entry.grid(
row = 2, column = 0, columnspan = 2, pady = (0, 5), sticky = "ew"
)
custom_launch_options_entry.grid(
row = 3, column = 0, columnspan = 2, pady = (0, 5), sticky = "ew"
)
play_commands_entry.grid(row = 4, column = 0, sticky = "ew")
self.warning_bad_chars_in_demo_name.set_grid_options(
row = 5, column = 0, columnspan = 2, sticky = "ew"
)
self.rcon_send_commands_button.grid(row = 4, column = 1, padx = (5, 0), sticky = "e")
arg_region.grid(row = 1, column = 0, sticky = "nesw")
# Event tick entry
tick_options_frame.grid_columnconfigure(0, weight = 1)
self.gototick_launchcmd_checkbox.grid(
row = 0, column = 0, columnspan = 2, pady = (0, 5), ipadx = 2, sticky = "w"
)
self.tick_entry.grid(row = 1, column = 0, padx = (0, 5), sticky = "ew")
self.rcon_send_gototick_button.grid(row = 1, column = 1, sticky = "ew")
tick_options_frame.grid(row = 0, sticky = "ew", pady = (0, 5))
# Event tick offset
tick_offset_frame.grid_columnconfigure(1, weight = 1)
tick_offset_label.grid(row = 0, column = 0)
tick_offset_spinner.grid(row = 0, column = 1, sticky = "ew")
tick_offset_frame.grid(row = 1, sticky = "ew")
# Higher tick frame
tick_frame.grid_columnconfigure(0, weight = 1)
tick_frame.grid(row = 0, column = 1, sticky = "ew")
bookmark_region.grid_columnconfigure(0, weight = 1)
bookmark_region.grid_rowconfigure(0, weight = 1)
self.tick_mfl.grid(row = 0, column = 0, rowspan = 3, padx = (0, 5), pady = (5, 0), sticky = "nesw")
self.warning_not_in_tf_dir.set_grid_options(row = 1, column = 1, sticky = "ew")
launch_button.grid(row = 2, column = 1, ipadx = 40)
bookmark_region.grid(row = 2, column = 0, pady = (0, 5), sticky = "nesw")
play_labelframe.grid(row = 1, column = 0, sticky = "nesw")
pwd_entry_show_toggle.bind_to_entry(rcon_password_entry)
self.tick_mfl.bind("<<MultiframeSelect>>", lambda _: self._on_mfl_selection())
self.rcon_text.insert(tk.END, "Status: Disconnected [.]\n\n\n")
self.rcon_text.mark_set("status0", "1.8")
self.rcon_text.mark_set("status1", "1.20")
self.rcon_text.mark_set("spinner", "1.22")
self.rcon_text.mark_gravity("status0", tk.LEFT)
self.rcon_text.mark_gravity("status1", tk.RIGHT)
self.rcon_text.mark_gravity("spinner", tk.LEFT)
self.rcon_text.configure(xscrollcommand = rcon_text_scrollbar.set, state = tk.DISABLED)
# Populate the event mfl here
events = []
events += [("Killstreak", t, v) for v, t, _ in self.info.killstreak_peaks]
events += [("Bookmark", t, v) for v, t, _ in self.info.bookmarks]
events.sort(key = lambda x: x[1])
data = {"col_type": [], "col_tick": [], "col_val": []}
for type, tick, val in events:
data["col_type"].append(type)
data["col_tick"].append(tick)
data["col_val"].append(val)
self.tick_mfl.set_data(data)
self.tick_mfl.format()
self.usehlae_var.set(self.remember[0])
self.gototick_launchcmd_var.trace("w", self._update_gototick_command_and_demo_commands_var)
self.gototick_launchcmd_var.set(self.remember[1])
self._ini_load_users(self.remember[2])
self.user_select_var.trace("w", self.on_user_select)
self.custom_launch_options_var.set(self.remember[3])
self.tick_offset_var.set(max(0, self.remember[4]))
self.tick_offset_var.trace("w", self._on_tick_offset_change)
self.tick_var.trace("w", self._on_tick_entry_change)
del self.remember
self._set_play_command()
self._set_gototick_command()
self._update_demo_commands_var()
def get_user_data(self, user_dir):
"""
Retrieves a user's information.
Returns a two-element tuple where [0] is the user's name, safe for
display in tkinter and [1] is the user's TF2 launch configuration.
If an error occurs while reading the user's data, either or both
values may be None. Does not set any error states.
"""
cnf_file_path = os.path.join(
self.cfg.steam_path, CNST.STEAM_CFG_PATH0, user_dir, CNST.STEAM_CFG_PATH1
)
try:
with open(cnf_file_path, encoding = "utf-8") as h:
vdf_data = vdf.load(h)
launch_options = follow_vdf_keys(vdf_data, CNST.STEAM_CFG_LAUNCH_OPTIONS, False)
username = follow_vdf_keys(vdf_data, CNST.STEAM_CFG_USER_NAME)
return (
None if username is None else tk_secure_str(username),
launch_options,
)
except (OSError, KeyError, SyntaxError):
return (None, None)
def _ini_load_users(self, set_if_present = None):
"""
Lists the users based on the passed config's steam directory,
saves them in `self.users` and configures the `user_select_var`
with either the user associated with the passed in
`set_if_present` directory or, if it is `None`, the first user
present in `self.users`, or `""` if `self.users` is empty.
Sets errstates.
"""
try:
raw_list = os.listdir(os.path.join(self.cfg.steam_path, CNST.STEAM_CFG_PATH0))
except (TypeError, OSError):
# TypeError for when steam_path is None.
self.error_steamdir_invalid.set(True)
else:
self.error_steamdir_invalid.set(False)
self.users = [User(x, *self.get_user_data(x)) for x in raw_list]
self.users.append(User())
self.user_select_combobox.config(values = [user.get_display_str() for user in self.users])
tgt = self.users[-1].get_display_str()
if set_if_present is not None:
for user in self.users:
if set_if_present == user.dir_name:
tgt = user.get_display_str()
break
# Needs to be done manually here as the trace on user_select_var is
# not set up yet. If I did that, for some reason the first call always
# fails, even when the values are configured beforehand.
self.user_select_var.set(tgt)
self.on_user_select()
def on_user_select(self, *_):
"""
Callback to retrieve launch options and update error labels.
"""
user = self.users[self.user_select_combobox.current()]
self.user_launch_options_var.set(user.launch_opt or "")
self.info_launch_options_not_found.set((user.launch_opt is None) and (not user.is_fake()))
def _on_tick_offset_change(self, *_):
self._update_tick_entry()
self._update_gototick_command_and_demo_commands_var()
def _on_tick_entry_change(self, *_):
self.true_tick = int(self.tick_var.get() or 0) + int(self.tick_offset_var.get() or 0)
self._update_gototick_command_and_demo_commands_var()
def _on_mfl_selection(self):
tt = 0
if self.tick_mfl.selection:
tt = self.tick_mfl.get_cell("col_tick", self.tick_mfl.get_selection())
self.true_tick = tt
self._update_tick_entry()
self._update_demo_commands_var()
def _update_gototick_command_and_demo_commands_var(self, *_):
self._set_gototick_command()
self._update_demo_commands_var()
def _update_tick_entry(self):
"""
Sets the tick entry to `max(0, true_tick - self.tick_offset_var.get())`
"""
self.tick_var.set(str(max(
0,
self.true_tick - int(self.tick_offset_var.get() or 0)),
))
def get_demo_commands(self, escape_play = False):
"""
Gets the commands necessary to play the demo as tuples of
command name and arguments.
If escape_play is given, the play command will be escaped so
it can be processed by the TF2 console, if necessary.
If escaping is necessary and this parameter is False, no play
command will be returned.
"""
play_cmd = self.demo_play_cmd
if play_cmd is not None:
# The placement of "" here may seem insanely careless, but TF2's console in
# general behaves weirdly with quotes and seems to treat everything between the
# first and last quote as a complete string?
# Even more weirdly, the string escaping behavior seems to depend on whatever
# command you're running and changes between e.g. `say`, `echo` and `playdemo`.
# Demos with `"` in their filename are unplayable even from within TF2, so don't
# name them that lol
# Without this, demos with a space in their filename are not playable via RCON.
demo_name = play_cmd[1]
if _demo_name_needs_escaping(demo_name):
if escape_play:
play_cmd = ("playdemo", '"' + demo_name + '"')
else:
play_cmd = None
else:
play_cmd = ("playdemo", demo_name)
return tuple(c for c in (play_cmd, self.demo_gototick_cmd) if c is not None)
def _set_play_command(self):
self.demo_play_cmd = None
try:
shortdemopath = os.path.relpath(
self.demopath,
os.path.join(self._tf2_head_path, CNST.TF2_FS_ROOT_TAIL_PATH),
)
if ".." in os.path.normpath(shortdemopath).split(os.sep):
raise ValueError("Can't exit game directory")
# Should I handle other space characters here? Who knows!
# Though if someone somehow puts \n into their demo's filename, they're asking for it.
if _demo_name_needs_escaping(shortdemopath):
self.warning_bad_chars_in_demo_name.set(True)
self.demo_play_cmd = ("playdemo", shortdemopath)
except (TypeError, ValueError):
# TypeError occurs when steam_path is None.
self.warning_not_in_tf_dir.set(True)
def _set_gototick_command(self):
self.demo_gototick_cmd = None
if self.gototick_launchcmd_var.get():
self.demo_gototick_cmd = ("demo_gototick", self.tick_var.get() or "0")
def _update_demo_commands_var(self):
"""Updates the contents of the third launch args entry."""
commands = self.get_demo_commands(escape_play = True)
self.play_commands_var.set("; ".join(" ".join(c) for c in commands))
def _rcon_txt_set_line(self, n, content):
"""
Sets line n (0-2) of rcon txt widget to content.
"""
self.rcon_text.replace(f"{n + 2}.0", f"{n + 2}.{tk.END}", content)
def _rcon_start(self):
self.animate_spinner = True
self.rcon_connect_button.configure(text = "Cancel", command = self._rcon_cancel)
with self.rcon_text:
for i in range(3):
self._rcon_txt_set_line(i, "")
self.rcon_threadgroup.start_thread(
queue_in = self.rcon_in_queue,
password = self.rcon_password_var.get(),
port = self.cfg.rcon_port,
)
def _rcon_cancel(self):
self.animate_spinner = True
self.rcon_send_commands_button.config(state = tk.DISABLED)
self.rcon_send_gototick_button.config(state = tk.DISABLED)
self.rcon_threadgroup.join_thread()
def _rcon_after_callback(self, sig, *args):
if sig.is_finish_signal():
self._rcon_on_finish(sig)
return THREADGROUPSIG.FINISHED
elif sig is THREADSIG.CONNECTED:
self._rcon_on_connect()
elif sig is THREADSIG.INFO_IDX_PARAM:
with self.rcon_text:
self._rcon_txt_set_line(args[0], args[1])
return THREADGROUPSIG.CONTINUE
def _rcon_run_always(self):
if not self.animate_spinner:
return
with self.rcon_text:
self.rcon_text.delete("spinner", "spinner + 1 chars")
self.rcon_text.insert("spinner", next(self.spinner))
def _rcon_on_finish(self, sig):
self.animate_spinner = False
self.rcon_connect_button.configure(text = "Connect", command = self._rcon_start)
self.rcon_send_commands_button.config(state = tk.DISABLED)
self.rcon_send_gototick_button.config(state = tk.DISABLED)
with self.rcon_text:
self.rcon_text.replace("status0", "status1", "Disconnected")
self.rcon_text.replace("spinner", "spinner + 1 chars", ".")
if sig is not THREADSIG.FAILURE:
for i in range(3):
self._rcon_txt_set_line(i, "")
def _rcon_on_connect(self):
self.animate_spinner = False
self.rcon_connect_button.configure(text = "Disconnect")
self.rcon_send_commands_button.config(state = tk.NORMAL)
self.rcon_send_gototick_button.config(state = tk.NORMAL)
with self.rcon_text:
self.rcon_text.replace("status0", "status1", "Connected")
self.rcon_text.replace("spinner", "spinner + 1 chars", ".")
def _rcon_send_commands(self):
for cmd in self.get_demo_commands(escape_play = True):
self.rcon_in_queue.put(" ".join(cmd).encode("utf-8"))
def _rcon_send_gototick(self):
entry_contents = self.tick_var.get()
if entry_contents == "":
return
# Otherwise, entry_contents must be a number (if the validation function didn't fail)
self.rcon_in_queue.put(f"demo_gototick {entry_contents}".encode("utf-8"))
def _launch(self):
for cond, name in (
(self.cfg.steam_path is None, "Steam"),
(self.usehlae_var.get() and self.cfg.hlae_path is None, "HLAE")
):
if cond:
tk_msg.showerror(
"Demomgr - Error",
f"{name} path not specified. Please do so in Settings > Paths.",
parent = self,
)
return
if self._tf2_head_path is None:
tk_msg.showerror("Demomgr - Error", "Could not locate TF2.", parent = self)
return
steam_user_args = self.user_launch_options_var.get().split()
custom_args = self.custom_launch_options_var.get().split()
demo_args = []
for cmd_name, *cmd_args in self.get_demo_commands():
demo_args.append('+' + cmd_name)
demo_args.extend(cmd_args)
if self.usehlae_var.get():
launch_args = CNST.HLAE_LAUNCHARGS0.copy() # hookdll required
launch_args.append(os.path.join(self.cfg.hlae_path, CNST.HLAE_HOOK_DLL))
# hl2 exe path required
launch_args.extend(CNST.HLAE_LAUNCHARGS1)
launch_args.append(os.path.join(self._tf2_head_path, CNST.TF2_EXE_TAIL_PATH))
launch_args.extend(CNST.HLAE_LAUNCHARGS2)
# Supply all of the hl2.exe launch args as a string.
# This thing needs to be escaped properly or else bad things will likely happen.
launch_args.append(" ".join(
CNST.TF2_LAUNCHARGS +
CNST.HLAE_ADD_TF2_ARGS +
steam_user_args +
custom_args +
demo_args
))
final_launchoptions = [os.path.join(self.cfg.hlae_path, CNST.HLAE_EXE)] + launch_args
else:
final_launchoptions = (
[os.path.join(self.cfg.steam_path, get_steam_exe())] +
CNST.APPLAUNCH_ARGS +
custom_args +
demo_args
)
try:
subprocess.Popen(final_launchoptions)
except FileNotFoundError:
tk_msg.showerror("Demomgr - Error", "Executable not found.", parent = self)
except OSError as error:
tk_msg.showerror(
"Demomgr - Error",
f"Could not access executable :\n{error}",
parent = self
)
def done(self):
self._rcon_cancel()
self.result.state = DIAGSIG.SUCCESS
self.result.remember = [
self.usehlae_var.get(),
self.gototick_launchcmd_var.get(),
self.users[self.user_select_combobox.current()].dir_name,
self.custom_launch_options_var.get(),
int(self.tick_offset_var.get() or 0),
]
self.destroy()
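

# Illustration (not part of the original module): for a demo stored under TF2's game
# directory, the commands produced by Play.get_demo_commands() look roughly like
#
#     (("playdemo", "demos/2023-01-01_match"), ("demo_gototick", "5000"))
#
# and are joined into "playdemo demos/2023-01-01_match; demo_gototick 5000" for the
# play-commands entry. If the relative demo path contains characters outside
# RE_DOES_NOT_REQUIRE_ESCAPING (e.g. a space), the playdemo argument is wrapped in
# quotes when escape_play is True and dropped from the launch arguments otherwise.
# The demo file name used here is hypothetical.
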
# /CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/localflavor/uy/forms.py
from __future__ import absolute_import, unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms.fields import Select, RegexField
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.uy.util import get_validation_digit
class UYDepartamentSelect(Select):
"""
A Select widget that uses a list of Uruguayan departaments as its choices.
"""
def __init__(self, attrs=None):
from django.contrib.localflavor.uy.uy_departaments import DEPARTAMENT_CHOICES
super(UYDepartamentSelect, self).__init__(attrs, choices=DEPARTAMENT_CHOICES)
class UYCIField(RegexField):
"""
A field that validates Uruguayan 'Cedula de identidad' (CI) numbers.
"""
default_error_messages = {
'invalid': _("Enter a valid CI number in X.XXX.XXX-X,"
"XXXXXXX-X or XXXXXXXX format."),
'invalid_validation_digit': _("Enter a valid CI number."),
}
def __init__(self, *args, **kwargs):
super(UYCIField, self).__init__(r'(?P<num>(\d{6,7}|(\d\.)?\d{3}\.\d{3}))-?(?P<val>\d)',
*args, **kwargs)
def clean(self, value):
"""
Validates format and validation digit.
The official format is [X.]XXX.XXX-X but usually dots and/or the dash are
omitted so, when validating, those characters are ignored if found in
the correct place. The three typically used formats are supported:
[X]XXXXXXX, [X]XXXXXX-X and [X.]XXX.XXX-X.
"""
value = super(UYCIField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = self.regex.match(value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = int(match.group('num').replace('.', ''))
validation_digit = int(match.group('val'))
if not validation_digit == get_validation_digit(number):
raise ValidationError(self.error_messages['invalid_validation_digit'])
return value
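

# Hypothetical usage sketch added for illustration (not part of the original module).
# It assumes a configured Django environment; which CI numbers actually pass depends
# on get_validation_digit(), so the candidates below are only format examples.
if __name__ == "__main__":  # pragma: no cover
    field = UYCIField()
    for candidate in ("1.234.567-8", "1234567-8", "12345678"):
        try:
            print("%s -> %s" % (candidate, field.clean(candidate)))
        except ValidationError:
            print("%s -> rejected" % candidate)
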
.. LowCostSmartFarmHub-0.1.9.tar.gz/LowCostSmartFarmHub-0.1.9/docs/installation.rst

.. highlight:: shell
============
Installation
============
Stable release
--------------
To install Smart Farm Hub, run this command in your terminal:
.. code-block:: console
$ pip install LowCostSmartFarmHub
This is the preferred method to install Smart Farm Hub, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for Smart Farm Hub can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/itumeleng96/LowCostSmartFarmHub
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/itumeleng96/LowCostSmartFarmHub/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/itumeleng96/LowCostSmartFarmHub
.. _tarball: https://github.com/itumeleng96/LowCostSmartFarmHub/tarball/master
# /CaMo-0.0.5-py3-none-any.whl/camo/structure/structural_causal_model.py
from typing import Any, Dict, Iterable, Tuple
import numpy as np
import pandas as pd
from sympy import solve, stats
from sympy.parsing.sympy_parser import parse_expr
from .causal_model import CausalModel
from ..backend import DirectedGraph, topological_sort
from ..utils import _as_set
class StructuralCausalModel(CausalModel):
_F: Dict[str, Any]
_P: Dict[str, Any]
def __init__(
self,
V: Iterable[str] = None,
U: Iterable[str] = None,
F: Dict[str, Any] = None,
P: Dict[str, Any] = None,
):
self._V, self._U = _as_set(V), _as_set(U)
self._F = dict(F) if F else {v: None for v in self._V}
self._P = dict(P) if P else {u: None for u in self._U}
E = [
(u.name, v)
for (v, f) in self._parse_expr(
dict(self._P, **self._F)
).items()
if v in self._V
for u in f.rhs.atoms()
if u.is_Symbol
]
super().__init__(self._V, self._U, E)
def _parse_expr(self, expr: Dict[str, str]) -> Dict[str, Any]:
out = {}
symbols = {}
global_symbols = {}
# Load global symbols adding stats
exec('from sympy import *; from sympy.stats import *', global_symbols)
# Begin parsing
for (k, v) in expr.items():
# Parse argument
out[k] = parse_expr(
v,
symbols,
global_dict=global_symbols,
evaluate=False
)
# Add discovered symbols
for atom in out[k].atoms():
if atom.is_Symbol:
symbols[atom.name] = atom
return out
def copy(self):
return StructuralCausalModel(
self._V, self._U, self._F, self._P
)
@property
def F(self) -> Dict[str, Any]:
return self._F.copy()
@property
def P(self) -> Dict[str, Any]:
return self._P.copy()
def do(self, **kwargs):
# Check if v is endogenous
if not (kwargs.keys() & self._V):
raise ValueError()
# Copy model
out = self.copy()
# Set intervened variables
for (v, k) in kwargs.items():
# Fix v variable to constant k
out._F[v] = f"Eq(Symbol('{v}'), {k})"
# Remove incoming edges
for u in out.parents(v):
out.del_edge(u, v)
return out
def sample(self, size: int, seed: int = None) -> pd.DataFrame:
# Set random seed
np.random.seed(seed)
# Parse the symbolic expression of the system
system = self._parse_expr(dict(self._P, **self._F))
# Pre-compute solving order
order = [v for v in topological_sort(self) if v in self._V]
# Pre-sample from exogenous distribution
P = {u: stats.sample_iter(system[u]) for u in self._U}
# Sample from equation system
samples = []
for _ in range(size):
sample = {u: next(s) for u, s in P.items()}
for v in order:
sample[v] = float(solve(system[v].subs(sample), v)[0])
samples.append(sample)
return pd.DataFrame(samples)
@classmethod
def from_structure(
cls,
V: Iterable[str],
E: Iterable[Tuple[str, str]]
):
V, U = set(V), set()
# Check if both vertices are in a vertex set
# else, add to exogenous variables
for (u, v) in E:
if u not in V:
U.add(u)
if v not in V:
U.add(v)
# Build the functional graph
G = DirectedGraph(V | U, E)
# Build the function set given the graph
F = {
v: f"""Eq(Symbol('{v}'), Function(Symbol('f_{v}'))({
', '.join([f"Symbol('{p}')" for p in G.parents(v)])
}))"""
for v in topological_sort(G)
if v in V
}
# Build the probability set given the variables
P = {u: f"Normal(Symbol('{u}'), 0, 1)" for u in U}
return cls(V, U, F, P)
def __repr__(self):
return f"{self.__class__.__name__}(V={self._V}, U={self._U}, F={self._F}, P={self._P})" | PypiClean |
# /Editra-0.7.20.tar.gz/Editra-0.7.20/src/ed_event.py
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ed_event.py 63789 2010-03-30 02:25:17Z CJP $"
__revision__ = "$Revision: 63789 $"
#-----------------------------------------------------------------------------#
# Dependencies
import wx
#-----------------------------------------------------------------------------#
edEVT_UPDATE_TEXT = wx.NewEventType()
EVT_UPDATE_TEXT = wx.PyEventBinder(edEVT_UPDATE_TEXT, 1)
class UpdateTextEvent(wx.PyCommandEvent):
"""Event to signal that text needs updating"""
def __init__(self, etype, eid, value=None):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = value
def GetValue(self):
"""Returns the value from the event.
@return: the value of this event
"""
return self._value
#--------------------------------------------------------------------------#
edEVT_NOTIFY = wx.NewEventType()
EVT_NOTIFY = wx.PyEventBinder(edEVT_NOTIFY, 1)
class NotificationEvent(UpdateTextEvent):
"""General notification event"""
def __init__(self, etype, eid, value=None, obj=None):
UpdateTextEvent.__init__(self, etype, eid, value)
self.SetEventObject(obj)
#--------------------------------------------------------------------------#
edEVT_MAINWINDOW_EXIT = wx.NewEventType()
EVT_MAINWINDOW_EXIT = wx.PyEventBinder(edEVT_MAINWINDOW_EXIT, 1)
class MainWindowExitEvent(wx.PyCommandEvent):
"""Event to signal that the main window is exiting"""
pass
#--------------------------------------------------------------------------#
edEVT_STATUS = wx.NewEventType()
EVT_STATUS = wx.PyEventBinder(edEVT_STATUS, 1)
class StatusEvent(wx.PyCommandEvent):
"""Event for posting status events"""
def __init__(self, etype, eid, msg=None, sec=0):
"""Create an event that can be used to post status messages
to the main windows status bar.
@param etype: The type of event to create
@param eid: The event id
@keyword msg: The status message to post with the event
@keyword sec: The section of the status bar to post message to
"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._msg = msg
self._sec = sec
def GetMessage(self):
"""Returns the value from the event.
@return: the value of this event
"""
return self._msg
def GetSection(self):
"""Returns the messages posting section
@return: int zero based index of where to post to statusbar
"""
return self._sec
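

# Hypothetical usage sketch added for illustration (not part of the original module).
# It shows how one of the custom events defined above can be bound and delivered.
if __name__ == "__main__":  # pragma: no cover
    def on_update_text(evt):
        print("update text: %s" % evt.GetValue())

    app = wx.App(False)
    frame = wx.Frame(None)
    frame.Bind(EVT_UPDATE_TEXT, on_update_text)
    # Deliver the event synchronously through the frame's event handler.
    frame.GetEventHandler().ProcessEvent(
        UpdateTextEvent(edEVT_UPDATE_TEXT, frame.GetId(), value="hello")
    )
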
# /DjangoKit-0.13.tar.gz/DjangoKit-0.13/djangokit/db/mixins.py
from djangokit.utils.deep import to_dict, from_dict
def _save(instance, save):
if save is not False and hasattr(instance, 'save'):
fields = set(['details'])
if save is not True:
fields = fields.union(save)
instance.save(update_fields=fields)
class DetailsMixin:
"""Methods for model with JSON field `details`."""
details = None
def to_details(self, field, value, append_to_list=False, save=False):
"""
Set or update a nested part of the details.
By default the change is NOT saved to the database. To save it,
pass a `save` value that is not False.
"""
self.details = to_dict(
self.details, field=field, value=value,
append_to_list=append_to_list,
)
_save(self, save)
return self.details
def from_details(self, field, default=None, update=False, delete=False,
save=False):
"""
Get (and optionally update or remove) a portion of the details.
When getting, the specified default value can be returned.
By default the change is NOT saved to the database. To save it,
pass a `save` value that is not False.
"""
value = from_dict(
self.details, field=field, default=default, update=update,
delete=delete,
)
_save(self, save)
return value
def set_details(self, details=None, save=False, **kwargs):
"""Simple installation details, or upgrade part details."""
if details:
self.details = details
else:
d = self.details or dict()
d.update(kwargs)
self.details = d
_save(self, save)
return self.details
class StateMixin:
"""
This mixin is needed primarily for checking in templates whether an object is new.
"""
@property
def is_new_instance(self):
return self._state.adding
@property
def model_state(self):
return self._state
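

# Hypothetical usage sketch added for illustration (not part of the original module).
# DetailsMixin only assumes a `details` attribute; on a real Django model the mixin
# additionally calls save(update_fields=...) when `save` is not False.
if __name__ == "__main__":  # pragma: no cover
    class Note(DetailsMixin):
        details = None

    note = Note()
    note.set_details(author="alice", tags=["demo"])
    print(note.details)             # {'author': 'alice', 'tags': ['demo']}
    note.set_details(author="bob")  # updates only the given keys
    print(note.details)
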
# /Flask-RRBAC-0.3.0.tar.gz/Flask-RRBAC-0.3.0/flask_rrbac/models/acl_role_route_map_mixin.py
from datetime import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import func, case
class ACLRoleRouteMapMixin(object):
def __init__(self):
if not hasattr(self.__class__, 'role'):
self.role = None
if not hasattr(self.__class__, 'route'):
self.route = None
@property
def get_role(self):
"""
Get the role attached to this entry. This has to be implemented.
"""
try:
return self.role
except AttributeError:
raise NotImplementedError('No `role` attribute is present')
@hybrid_property
def get_id(self):
"""
Get the unique identifier for this entry. This has to be implemented.
"""
try:
return self.id
except AttributeError:
raise NotImplementedError('No `id` attribute is present')
@property
def get_route(self):
"""
Get the route attached to this entry. This has to be implemented.
"""
try:
return self.route
except AttributeError:
raise NotImplementedError('No `route` attribute is present')
@hybrid_property
def is_deleted(self):
"""
Check whether this entry has been deleted (i.e. is no longer active).
An entry is active when the following conditions are met:
1. deleted_at is empty (None). This means that this entry will not
expire
2. deleted_at is in the future. This means that the entry
has not already expired
"""
try:
if self.deleted_at is None:
return False
elif self.deleted_at > datetime.utcnow():
return False
else:
return True
except AttributeError:
return False
@is_deleted.expression
def is_deleted(cls):
try:
return case([
(cls.deleted_at == None, False),
(cls.deleted_at > func.now(), False)
], else_=True)
except AttributeError:
return False
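

# Hypothetical usage sketch added for illustration (not part of the original module).
# It exercises the instance-level branch of the `is_deleted` hybrid property on a
# plain (unmapped) subclass; in a real application the mixin is combined with a
# SQLAlchemy declarative model providing `id`, `role`, `route` and `deleted_at`.
if __name__ == "__main__":  # pragma: no cover
    from datetime import timedelta

    class Entry(ACLRoleRouteMapMixin):
        pass

    entry = Entry()
    entry.deleted_at = None
    print(entry.is_deleted)  # False: no expiry recorded
    entry.deleted_at = datetime.utcnow() + timedelta(days=1)
    print(entry.is_deleted)  # False: expires in the future
    entry.deleted_at = datetime.utcnow() - timedelta(days=1)
    print(entry.is_deleted)  # True: already expired
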
# /APL_Brochu-0.0.1-py3-none-any.whl/apl/apl.py
from typing import Tuple
import numpy as np
from numpy.random import default_rng
from sklearn.gaussian_process.kernels import Kernel
from .posterior_approximation import LogLikelihood, laplace_approximation
from .acquisitions import Acquisition
from .gaussian_process import gaussian_process_conditional
from .utils import transfer_id_from_query_to_explored
class ActivePreferenceLearning:
def __init__(
self,
kernel: Kernel,
loglikelihood: LogLikelihood,
acquisition: Acquisition,
random_state: int = 0,
):
self.kernel = kernel
self.loglikelihood = loglikelihood
self.acquisition = acquisition
self.rng = default_rng(random_state)
def query(
self,
X: np.ndarray,
explored_item_idx: np.ndarray,
query_item_idx: np.ndarray,
mu=None,
pair_selections=None,
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
if (
len(explored_item_idx) == 0
): # first query, just pick two randomly from the query set
return self.first_query(
query_item_idx=query_item_idx,
explored_item_idx=explored_item_idx,
)
else:
return self.subsequent_query(
X, explored_item_idx, query_item_idx, mu, pair_selections
)
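    # A minimal usage sketch (the variable names here are hypothetical, not
    # part of the package): `X` is an (n_items, n_features) array and
    # `pair_selections` accumulates index pairs of user choices between rounds.
    #
    #   apl = ActivePreferenceLearning(kernel, loglikelihood, acquisition)
    #   i, j, explored, queryable, mu = apl.query(
    #       X,
    #       explored_item_idx=np.array([], dtype=int),
    #       query_item_idx=np.arange(len(X)),
    #   )
    #
    # Subsequent rounds pass back the returned `explored`/`queryable` arrays,
    # the returned `mu`, and the accumulated `pair_selections`.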
def first_query(
self, query_item_idx: np.ndarray, explored_item_idx: np.ndarray
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
        opt1_idx, opt2_idx = self.rng.choice(query_item_idx, size=2, replace=False)  # two distinct options
for idx in (opt1_idx, opt2_idx):
(
query_item_idx,
explored_item_idx,
) = transfer_id_from_query_to_explored(
idx, query_item_idx, explored_item_idx
)
return (
opt1_idx,
opt2_idx,
explored_item_idx,
query_item_idx,
np.zeros(len(explored_item_idx)),
)
def subsequent_query(
self,
X: np.ndarray,
explored_item_idx: np.ndarray,
query_item_idx: np.ndarray,
mu=None,
pair_selections=None,
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
X_train = X[explored_item_idx]
cov = self.kernel(X_train)
self.loglikelihood.register_data(pair_selections)
mu_map, _ = laplace_approximation(mu, cov, self.loglikelihood)
mu_query, s2_query = gaussian_process_conditional(
X_train, mu_map, X[query_item_idx], self.kernel
)
acquisitions_on_query_set = self.acquisition(
mu_query, s2_query, **{"mu_max": mu_map.max()}
)
opt1_idx = explored_item_idx[pair_selections[-1, 0].item()]
opt2_idx = query_item_idx[np.argmax(acquisitions_on_query_set).item()]
(
query_item_idx,
explored_item_idx,
) = transfer_id_from_query_to_explored(
opt2_idx, query_item_idx, explored_item_idx
)
x = X[opt2_idx].reshape(1, -1)
k_star = self.kernel(X_train, x)
mu_x = k_star.T @ np.linalg.solve(cov, mu_map)
return (
opt1_idx,
opt2_idx,
explored_item_idx,
query_item_idx,
np.append(mu_map, mu_x),
) | PypiClean |
/MLapp1-0.0.37.tar.gz/MLapp1-0.0.37/src/MLapp/generateScript.py | from .UI.componentIDs import classification_Com_IDS,classification_models,\
undersampling_Com_IDS,underSamp_models,\
overrsampling_Com_IDS, overSamp_models, \
modelEval_Com_IDS,\
scaling_Com_IDS,scaling_models, \
featSel_Com_IDS, featSel_models,featSel_est
from .helperFunctions import getAlgoNames,removeModelId,getActiveAlgo,getMoedlEvalActive,saveUserInputData,getActiveAlgoFeatSel
import pickle
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# importing the module
import ast
with open('myfile.txt') as f:
data = f.read()
userInputData=ast.literal_eval(data)
#a=saveUserInputData(userInputData)
# get random state
if("random_seed" in userInputData.keys()):
rs=userInputData["random_seed"]
else:
rs=12345
#set numpy random seed
import numpy as np
np.random.seed(rs)
scaling_tab_active=getActiveAlgo(userInputData,"scaling_tab_data",
scaling_models,rs,scaling_Com_IDS)
underSamp_tab_active=getActiveAlgo(userInputData,"underSamp_tab_para",
underSamp_models,rs,undersampling_Com_IDS)
overSamp_tab_active=getActiveAlgo(userInputData,"overSamp_tab_para",
overSamp_models,rs,overrsampling_Com_IDS)
featSel_tab_active=getActiveAlgoFeatSel(userInputData,"featSel_tab_para",
featSel_models,rs,featSel_Com_IDS,featSel_est)
classification_tab_active=getActiveAlgo(userInputData,"classification_tab_para",
classification_models,rs,classification_Com_IDS)
modelEval_tab_active=getMoedlEvalActive(userInputData,"modelEval_tab_para",
modelEval_Com_IDS,rs)
userInputData={"random_state":rs,"n_jobs":userInputData["n_jobs"],\
"scaling_tab_active":scaling_tab_active,"underSamp_tab_active":underSamp_tab_active,\
"overSamp_tab_active":overSamp_tab_active,"classification_tab_active":classification_tab_active,
"featSel_tab_active":userInputData["featSel_tab_para"],\
"modelEval_tab_active":modelEval_tab_active,\
"modelEval_metrices":userInputData["modelEval_metrices_tab_para"][0]
}
#Save user input data as pkl object
with open('userInputData.pkl', 'wb') as handle:
pickle.dump(userInputData, handle)
with open('userInputData_1.pkl', 'rb') as handle:
userInputData=pickle.load(handle) | PypiClean |
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/rename.sh |
usage(){
echo "
Written by Brian Bushnell
Last modified April 1, 2020
Description: Renames reads to <prefix>_<number> where you specify the prefix
and the numbers are ordered. There are other renaming modes too.
If reads are paired, pairs should be processed together; if reads are
interleaved, the interleaved flag should be set. This ensures that if a
read number (such as 1: or 2:) is added, it will be added correctly.
Usage: rename.sh in=<file> in2=<file2> out=<outfile> out2=<outfile2> prefix=<>
in2 and out2 are for paired reads and are optional.
If input is paired and there is only one output file, it will be written interleaved.
Parameters:
prefix= The string to prepend to existing read names.
ow=f (overwrite) Overwrites files that already exist.
zl=4 (ziplevel) Set compression level, 1 (low) to 9 (max).
int=f (interleaved) Determines whether INPUT file is considered interleaved.
fastawrap=70 Length of lines in fasta output.
minscaf=1 Ignore fasta reads shorter than this.
qin=auto ASCII offset for input quality. May be 33 (Sanger), 64 (Illumina), or auto.
qout=auto ASCII offset for output quality. May be 33 (Sanger), 64 (Illumina), or auto (same as input).
ignorebadquality=f (ibq) Fix out-of-range quality values instead of crashing with a warning.
Renaming modes (if not default):
renamebyinsert=f Rename the read to indicate its correct insert size.
renamebymapping=f Rename the read to indicate its correct mapping coordinates.
renamebytrim=f Rename the read to indicate its correct post-trimming length.
addprefix=f Rename the read by prepending the prefix to the existing name.
prefixonly=f Only use the prefix; don't add _<number>
addunderscore=t Add an underscore after the prefix (if there is a prefix).
addpairnum=t Add a pairnum (e.g. ' 1:') to paired reads in some modes.
fixsra=f Fixes headers of SRA reads renamed from Illumina.
Specifically, it converts something like this:
SRR17611.11 HWI-ST79:17:D091UACXX:4:1101:210:824 length=75
...into this:
HWI-ST79:17:D091UACXX:4:1101:210:824 1:
Sampling parameters:
reads=-1 Set to a positive number to only process this many INPUT reads (or pairs), then quit.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
z="-Xmx1g"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
}
calcXmx "$@"
function rename() {
local CMD="java $EA $EOOM $z -cp $CP jgi.RenameReads $@"
echo $CMD >&2
eval $CMD
}
rename "$@" | PypiClean |
/CryptoLyzer-0.9.1-py3-none-any.whl/cryptolyzer/ssh/dhparams.py |
import attr
import six
from cryptodatahub.common.algorithm import KeyExchange
from cryptodatahub.ssh.algorithm import SshKexAlgorithm
from cryptoparser.common.base import Serializable
from cryptoparser.ssh.subprotocol import (
SshDHGroupExchangeGroup,
SshDHGroupExchangeReply,
SshMessageCode,
)
from cryptoparser.ssh.version import SshProtocolVersion, SshVersion
from cryptolyzer.common.analyzer import AnalyzerSshBase
from cryptolyzer.common.dhparam import get_dh_public_key_from_bytes
from cryptolyzer.common.exception import NetworkError, NetworkErrorType
from cryptolyzer.common.result import AnalyzerResultSsh, AnalyzerTargetSsh
from cryptolyzer.common.utils import LogSingleton
from cryptolyzer.ssh.client import L7ServerSshGexParams, SshKeyExchangeInitAnyAlgorithm
from cryptolyzer.ssh.ciphers import AnalyzerCiphers
@attr.s
class AnalyzerResultGroupExchange(object):
    gex_algorithms = attr.ib(
        validator=attr.validators.deep_iterable(attr.validators.instance_of(SshKexAlgorithm)),
        metadata={'human_readable_name': 'Group Exchange Algorithms'}
    )
    key_sizes = attr.ib(validator=attr.validators.deep_iterable(attr.validators.instance_of(six.integer_types)))
    bounds_tolerated = attr.ib(validator=attr.validators.instance_of(bool))
@attr.s
class AnalyzerResultKeyExchange(Serializable):
    kex_algorithms = attr.ib(validator=attr.validators.deep_iterable(attr.validators.instance_of(SshKexAlgorithm)))
def _as_markdown(self, level):
return self._markdown_result([
'{} ({})'.format(kex_algorithm.value.code, kex_algorithm.value.key_size)
for kex_algorithm in self.kex_algorithms
], level)
@attr.s
class AnalyzerResultDHParams(AnalyzerResultSsh):
key_exchange = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(AnalyzerResultKeyExchange))
)
group_exchange = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(AnalyzerResultGroupExchange))
)
class AnalyzerDHParams(AnalyzerSshBase):
@classmethod
def get_name(cls):
return 'dhparams'
@classmethod
def get_help(cls):
return 'Check DH parameters offered by the server(s)'
@classmethod
def _get_negotiable_key_sizes(cls, analyzable, gex_algorithms):
gex_min_size = 1
gex_max_size = 8192
gex_tolerates_bounds = True
gex_key_sizes = set()
while True:
try:
key_exchange_init_message = SshKeyExchangeInitAnyAlgorithm(kex_algorithms=[gex_algorithms[0], ])
server_messages = analyzable.do_handshake(
key_exchange_init_message=key_exchange_init_message,
gex_params=L7ServerSshGexParams(
gex_min=gex_min_size, gex_max=gex_max_size, gex_number=gex_min_size
),
last_message_type=SshMessageCode.DH_GEX_REPLY,
)
except NetworkError as e:
if e.error == NetworkErrorType.NO_RESPONSE:
break
raise # pragma: no cover
dh_group_exchange_group = server_messages[SshDHGroupExchangeGroup]
dh_group_exchange_reply = server_messages[SshDHGroupExchangeReply]
dh_public_key = get_dh_public_key_from_bytes(
dh_group_exchange_group.p, dh_group_exchange_group.g, dh_group_exchange_reply.ephemeral_public_key
)
if gex_min_size > gex_max_size:
break
if dh_public_key.key_size in gex_key_sizes:
if gex_min_size > dh_public_key.key_size:
gex_tolerates_bounds = False
gex_min_size = ((gex_min_size // 1024) + 1) * 1024
else:
gex_min_size = dh_public_key.key_size + 1
if dh_public_key.key_size not in gex_key_sizes:
gex_key_sizes.add(dh_public_key.key_size)
LogSingleton().log(level=60, msg=six.u(
'Server offers custom DH public parameter with size %d-bit (%s)') % (
dh_public_key.key_size, SshProtocolVersion(SshVersion.SSH2),
)
)
return AnalyzerResultGroupExchange(gex_algorithms, list(sorted(gex_key_sizes)), gex_tolerates_bounds)
def analyze(self, analyzable):
LogSingleton().disabled = True
analyzer_result = AnalyzerCiphers().analyze(analyzable)
LogSingleton().disabled = False
gex_algorithms = []
kex_algorithms = []
algorithms = filter(
lambda kex_algorithm: (
isinstance(kex_algorithm, SshKexAlgorithm) and
kex_algorithm.value.kex == KeyExchange.DHE
),
analyzer_result.kex_algorithms
)
for kex_algorithm in algorithms:
if kex_algorithm.value.key_size is None:
gex_algorithms.append(kex_algorithm)
else:
kex_algorithms.append(kex_algorithm)
for algorithm in kex_algorithms:
LogSingleton().log(level=60, msg=six.u(
'Server offers well-known DH public parameter with size %s-bit (%s)') % (
'unknown' if isinstance(algorithm, six.string_types) else str(algorithm.value.key_size),
algorithm if isinstance(algorithm, six.string_types) else algorithm.value.code,
)
)
return AnalyzerResultDHParams(
AnalyzerTargetSsh.from_l7_client(analyzable),
AnalyzerResultKeyExchange(kex_algorithms) if kex_algorithms else None,
self._get_negotiable_key_sizes(analyzable, gex_algorithms) if gex_algorithms else None
) | PypiClean |
/NICpolpy-0.1.5-py3-none-any.whl/nicpolpy/ysfitsutilpy4nicpolpy/hduutil.py | import glob
import re
from copy import deepcopy
from pathlib import Path, PosixPath, WindowsPath
from warnings import warn
import bottleneck as bn
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import CCDData, Cutout2D
from astropy.stats import mad_std
from astropy.table import Table
from astropy.time import Time
from astropy.visualization import ImageNormalize, ZScaleInterval
from astropy.wcs import WCS, Wcsprm
# from scipy.interpolate import griddata
from scipy.ndimage import label as ndlabel
from .misc import (_image_shape, bezel2slice, binning, change_to_quantity,
cmt2hdr, is_list_like, listify, slicefy, update_process,
update_tlm)
try:
import fitsio
HAS_FITSIO = True
except ImportError:
HAS_FITSIO = False
try:
import numexpr as ne
HAS_NE = True
NEVAL = ne.evaluate # "n"umerical "eval"uator
NPSTR = ""
except ImportError:
HAS_NE = False
NEVAL = eval # "n"umerical "eval"uator
NPSTR = "np."
__all__ = [
"ASTROPY_CCD_TYPES",
# ! file io related:
"write2fits",
# ! parsers:
"_parse_data_header", "_parse_image", "_has_header", "_parse_extension",
# ! loaders:
"load_ccd", "inputs2list", "load_ccds",
# ! setters:
"CCDData_astype", "set_ccd_attribute", "set_ccd_gain_rdnoise",
"propagate_ccdmask",
# ! ccd processes
"imslice", "trim_overlap", "cut_ccd", "bin_ccd",
"fixpix",
# "make_errormap",
"errormap",
"find_extpix", "find_satpix",
# ! header update:
"hedit", "key_remover", "key_mapper", "chk_keyval",
# ! header accessor:
"valinhdr", "get_from_header", "get_if_none",
# ! WCS related:
"wcs_crota", "midtime_obs", "center_radec",
"calc_offset_wcs", "calc_offset_physical",
"wcsremove", "fov_radius",
# ! math:
"give_stats"
]
ASTROPY_CCD_TYPES = (CCDData, fits.PrimaryHDU, fits.ImageHDU) # fits.CompImageHDU ?
def write2fits(data, header, output, return_ccd=False, **kwargs):
""" A convenience function to write proper FITS file.
Parameters
----------
data : ndarray
The data
header : `~astropy.io.fits.Header`
The header
output : path-like
The output file path
return_ccd : bool, optional.
Whether to return the generated CCDData.
**kwargs :
        The keyword arguments used when writing the FITS file by
        `~astropy.nddata.fits_ccddata_writer`, such as ``output_verify='fix'``,
        ``overwrite=True``.
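    Example
    -------
    A minimal sketch of typical usage (the file name is a placeholder):

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> ccd = write2fits(np.zeros((10, 10)), fits.Header(), "zeros.fits",
    ...                  return_ccd=True, overwrite=True)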
"""
ccd = CCDData(data=data, header=header, unit=header.get("BUNIT", "adu"))
try:
ccd.write(output, **kwargs)
except fits.VerifyError:
print("Try using output_verify='fix' to avoid this error.")
if return_ccd:
return ccd
# **************************************************************************************** #
#* PARSERS * #
# **************************************************************************************** #
def _parse_data_header(
ccdlike,
extension=None,
parse_data=True,
parse_header=True,
copy=True
):
"""Parses data and header and return them separately after copy.
    Parameters
    ----------
ccdlike : CCDData, PrimaryHDU, ImageHDU, HDUList, Header, ndarray, number-like, path-like, None
The object to be parsed into data and header.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
parse_data, parse_header : bool, optional.
Because this function uses ``.copy()`` for safety, it may take a bit of
time if this function is used iteratively. One then can turn off one of
these to ignore either data or header part.
Returns
-------
data : ndarray, None
The data part of the input `ccdlike`. If `ccdlike` is ``''`` or `None`,
`None` is returned.
hdr : Header, None
The header if header exists; otherwise, `None` is returned.
Notes
-----
_parse_data_header and _parse_image have different purposes:
_parse_data_header is to get a quick copy of the data and/or header,
especially to CHECK if it has header, while _parse_image is to deal mainly
with the data (and has options to return as CCDData).
"""
if ccdlike is None or (isinstance(ccdlike, str) and ccdlike == ""):
data = None
hdr = None
elif isinstance(ccdlike, ASTROPY_CCD_TYPES):
if parse_data:
data = ccdlike.data.copy() if copy else ccdlike.data
else:
data = None
if parse_header:
hdr = ccdlike.header.copy() if copy else ccdlike.header
else:
hdr = None
elif isinstance(ccdlike, fits.HDUList):
extension = _parse_extension(extension) if (parse_data or parse_header) else 0
# ^ don't even do _parse_extension if both are False
if parse_data:
data = ccdlike[extension].data.copy() if copy else ccdlike[extension].data
else:
data = None
if parse_header:
hdr = ccdlike[extension].header.copy() if copy else ccdlike[extension].header
else:
hdr = None
elif isinstance(ccdlike, (np.ndarray, list, tuple)):
if parse_data:
data = np.array(ccdlike, copy=copy)
else:
data = None
hdr = None # regardless of parse_header
elif isinstance(ccdlike, fits.Header):
data = None # regardless of parse_data
if parse_header:
hdr = ccdlike.copy() if copy else ccdlike
else:
hdr = None
    elif HAS_FITSIO and isinstance(ccdlike, fitsio.FITSHDR):
        data = None  # regardless of parse_data
        if parse_header:
            # Use the module-level `deepcopy`; a local `import copy` here
            # would shadow the boolean `copy` parameter.
            hdr = deepcopy(ccdlike) if copy else ccdlike
        else:
            hdr = None
else:
try:
data = float(ccdlike) if (parse_data or parse_header) else None
hdr = None
except (ValueError, TypeError): # Path-like
# NOTE: This try-except cannot be swapped cuz ``Path("2321.3")``
# can be PosixPath without error...
extension = _parse_extension(extension) if parse_data or parse_header else 0
# fits.getheader is ~ 10-20 times faster than load_ccd.
# 2020-11-09 16:06:41 (KST: GMT+09:00) ysBach
try:
if parse_header:
hdu = fits.open(Path(ccdlike), memmap=False)[extension]
# No need to copy because they've been read (loaded) for
# the first time here.
data = hdu.data if parse_data else None
hdr = hdu.header if parse_header else None
else:
if isinstance(extension, tuple):
if HAS_FITSIO:
data = fitsio.read(Path(ccdlike), ext=extension[0],
extver=extension[1])
else:
data = fits.getdata(Path(ccdlike), *extension)
else:
if HAS_FITSIO:
data = fitsio.read(Path(ccdlike), ext=extension)
else:
data = fits.getdata(Path(ccdlike), extension)
hdr = None
except TypeError:
raise TypeError(f"ccdlike type ({type(ccdlike)}) is not acceptable "
+ "to find header and data.")
return data, hdr
def _parse_image(
ccdlike,
extension=None,
name=None,
force_ccddata=False,
prefer_ccddata=False,
copy=True,
):
"""Parse and return input image as desired format (ndarray or CCDData)
Parameters
----------
ccdlike : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like, or number-like
The "image" that will be parsed. A string that can be converted to
float (``float(im)``) will be interpreted as numbers; if not, it will
be interpreted as a path to the FITS file.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
force_ccddata: bool, optional.
To force the retun im as `~astropy.nddata.CCDData` object. This is
useful when error calculation is turned on.
Default is `False`.
prefer_ccddata: bool, optional.
Mildly use `~astropy.nddata.CCDData`, i.e., return
`~astropy.nddata.CCDData` only if `im` was `~astropy.nddata.CCDData`,
HDU object, or Path-like to a FITS file, but **not** if it was ndarray
or numbers.
Default is `False`.
Returns
-------
new_im : ndarray or CCDData
Depending on the options `force_ccddata` and `prefer_ccddata`.
imname : str
The name of the image.
imtype : str
The type of the image.
Notes
-----
_parse_data_header and _parse_image have different purposes:
_parse_data_header is to get a quick copy of the data and/or header,
especially to CHECK if it has header, while _parse_image is to deal mainly
with the data (and has options to return as CCDData).
Timing on MBP 14" [2021, macOS 12.2.1, M1Pro(6P+2E/G16c/N16c/32G)]:
>>> np.random.RandomState(123)
>>> data = np.random.normal(size=(100,100))
>>> ccd = CCDData(data, unit='adu')
>>> fpath = "img/0001.fits" # doctest: +SKIP
>>> %timeit yfu._parse_image(data, name="test", force_ccddata=True)
>>> %timeit yfu._parse_image(ccd, name="test", force_ccddata=True)
>>> %timeit yfu._parse_image(fpath, name="test", force_ccddata=True) # doctest: +SKIP
>>> %timeit yfu._parse_image(fpath, name="test", force_ccddata=False)[0]*1.0 # doctest: +SKIP
# 14.2 µs +- 208 ns per loop (mean +- std. dev. of 7 runs, 100000 loops each)
# 16.6 µs +- 298 ns per loop (mean +- std. dev. of 7 runs, 100000 loops each)
# 20.8 ms +- 133 µs per loop (mean +- std. dev. of 7 runs, 10000 loops each)
# 156 µs +- 3.3 µs per loop (mean +- std. dev. of 7 runs, 10000 loops each)
`fpath` contains a FITS file of 276KB. Note that path with `force_ccddata =
True` consumes tremendous amount of time, because of astropy's header
parsing scheme.
"""
def __extract_extension(ext):
extension = _parse_extension(ext)
if extension is None:
extstr = ""
else:
if isinstance(extension, (tuple, list)):
extstr = f"[{extension[0]}, {extension[1]}]"
else:
extstr = f"[{extension}]"
return extension, extstr
def __extract_from_hdu(hdu, force_ccddata, prefer_ccddata):
if force_ccddata or prefer_ccddata:
unit = ccdlike.header.get("BUNIT", default=u.adu)
if isinstance(unit, str):
unit = unit.lower()
if copy:
return CCDData(data=hdu.data.copy(), header=hdu.header.copy(), unit=unit)
else:
return CCDData(data=hdu.data, header=hdu.header, unit=unit)
# The two lines above took ~ 5 us and 10-30 us for the simplest
# header and 1x1 pixel data case (regardless of BUNIT exists), on
# MBP 15" [2018, macOS 10.14.6, i7-8850H (2.6 GHz; 6-core), RAM 16
# GB (2400MHz DDR4), Radeon Pro 560X (4GB)]
else:
return hdu.data.copy() if copy else hdu.data
ccd_kw = dict(force_ccddata=force_ccddata, prefer_ccddata=prefer_ccddata)
has_no_name = name is None
extension, extstr = __extract_extension(extension)
imname = (
f"User-provided {ccdlike.__class__.__name__}{extstr}" if has_no_name else name
)
if isinstance(ccdlike, CCDData):
# force_ccddata: CCDData // prefer_ccddata: CCDData // else: ndarray
if force_ccddata or prefer_ccddata:
new_im = ccdlike.copy() if copy else ccdlike
else:
new_im = ccdlike.data.copy() if copy else ccdlike.data
imtype = "CCDData"
imname = imname.replace("[0]", "")
elif isinstance(ccdlike, (fits.PrimaryHDU, fits.ImageHDU)):
# force_ccddata: CCDData // prefer_ccddata: CCDData // else: ndarray
new_im = __extract_from_hdu(ccdlike, **ccd_kw)
imtype = "hdu"
imname = imname.replace("[0]", "")
elif isinstance(ccdlike, fits.HDUList):
# force_ccddata: CCDData // prefer_ccddata: CCDData // else: ndarray
new_im = __extract_from_hdu(ccdlike[extension], **ccd_kw)
imtype = "HDUList"
elif isinstance(ccdlike, np.ndarray):
# force_ccddata: CCDData // prefer_ccddata: ndarray // else: ndarray
if copy:
new_im = (
CCDData(data=ccdlike.copy(), unit="adu")
if force_ccddata
else ccdlike.copy()
)
else:
new_im = CCDData(data=ccdlike, unit="adu") if force_ccddata else ccdlike
imtype = "ndarray"
else:
try: # IF number (ex: im = 1.3)
# force_ccddata: CCDData // prefer_ccddata: array // else: array
imname = f"{imname} {ccdlike}" if has_no_name else name
_im = float(ccdlike)
new_im = CCDData(data=_im, unit="adu") if force_ccddata else np.asarray(_im)
imtype = "num"
# imname can be "int", "float", "str", etc, so imtype might be useful.
except (ValueError, TypeError):
try: # IF path-like
# force_ccddata: CCDData // prefer_ccddata: CCDData // else: ndarray
fpath = Path(ccdlike)
imname = f"{str(fpath)}{extstr}" if has_no_name else name
# set redundant extensions to None so that only the part
# specified by `extension` be loaded:
new_im = load_ccd(
fpath,
extension,
ccddata=prefer_ccddata or force_ccddata,
extension_uncertainty=None,
extension_mask=None,
)
imtype = "path"
except TypeError:
raise TypeError(
"input must be CCDData-like, ndarray, path-like (to FITS), or a number."
)
return new_im, imname, imtype
def _has_header(ccdlike, extension=None, open_if_file=True):
"""Checks if the object has header; similar to _parse_data_header.
    Parameters
    ----------
ccdlike : CCDData, PrimaryHDU, ImageHDU, HDUList, ndarray, number-like, path-like
The object to be parsed into data and header.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used. Used only if `ccdlike` is HDUList or
path-like.
open_if_file : bool, optional.
Whether to open the file to check if it has a header when `ccdlike` is
path-like. Any FITS file has a header, so this means it will check the
existence and validity of the file. If set to `False`, all path-like
input will return `False` because the path itself has no header.
Notes
-----
It first checks if the input is one of ``(CCDData, fits.PrimaryHDU,
fits.ImageHDU)``, then if `fits.HDUList`, then if `np.ndarray`, then if
number-like, and then finally if path-like. Although this has a bit of
disadvantage considering we may use file-path for most of the time, the
overhead is only ~ 1 us, tested on MBP 15" [2018, macOS 10.14.6, i7-8850H
(2.6 GHz; 6-core), RAM 16 GB (2400MHz DDR4), Radeon Pro 560X (4GB)].
"""
hashdr = True
if isinstance(ccdlike, ASTROPY_CCD_TYPES): # extension not used
try:
hashdr = ccdlike.header is not None
except AttributeError:
hashdr = False
elif isinstance(ccdlike, fits.HDUList):
extension = _parse_extension(extension)
try:
hashdr = ccdlike[extension].header is not None
except AttributeError:
hashdr = False
elif is_list_like(ccdlike):
hashdr = False
else:
try: # if number-like
_ = float(ccdlike)
hashdr = False
except (ValueError, TypeError): # if path-like
# NOTE: This try-except cannot be swapped cuz ``Path("2321.3")``
# can be PosixPath without error...
if open_if_file:
try:
# fits.getheader is ~ 10-20 times faster than load_ccd.
# 2020-11-09 16:06:41 (KST: GMT+09:00) ysBach
_ = fits.getheader(Path(ccdlike), extension)
except (AttributeError, FileNotFoundError):
hashdr = False
else:
hashdr = False
return hashdr
def _parse_extension(*args, ext=None, extname=None, extver=None):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
Direct copy from astropy, but removing "opening HDUList" part
https://github.com/astropy/astropy/blob/master/astropy/io/fits/convenience.py#L988
This is essential for fits_ccddata_reader, because it only has `hdu`, not
all three of ext, extname, and extver.
Notes
-----
extension parser itself is not a time-consuming process:
%timeit yfu._parse_extension()
# 1.52 µs +- 69.3 ns per loop (mean +- std. dev. of 7 runs, 1000000 loops each)
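    A quick sketch of the accepted forms (results follow the logic below):

    >>> _parse_extension()          # default
    0
    >>> _parse_extension(1)         # extension number
    1
    >>> _parse_extension("SCI")     # EXTNAME only -> EXTVER defaults to 1
    ('SCI', 1)
    >>> _parse_extension("SCI", 2)  # (EXTNAME, EXTVER)
    ('SCI', 2)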
"""
err_msg = "Redundant/conflicting extension arguments(s): {}".format(
{"args": args, "ext": ext, "extname": extname, "extver": extver}
)
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if (isinstance(args[0], (int, np.integer))
or (isinstance(ext, tuple) and len(ext) == 2)):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid to
# provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError("Too many positional arguments.")
if ext is not None and not (
isinstance(ext, (int, np.integer))
or (
isinstance(ext, tuple)
and len(ext) == 2
and isinstance(ext[0], str)
and isinstance(ext[1], (int, np.integer))
)
):
raise ValueError(
"The ext keyword must be either an extension number (zero-indexed) "
+ "or a (extname, extver) tuple."
)
if extname is not None and not isinstance(extname, str):
raise ValueError("The extname argument must be a string.")
if extver is not None and not isinstance(extver, (int, np.integer)):
raise ValueError("The extver argument must be an integer.")
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError("extver alone cannot specify an extension.")
return ext
# **************************************************************************************** #
# * FILE IO * #
# **************************************************************************************** #
def load_ccd(
path,
extension=None,
trimsec=None,
ccddata=True,
as_ccd=True,
use_wcs=True,
unit=None,
extension_uncertainty="UNCERT",
extension_mask="MASK",
extension_flags=None,
full=False,
key_uncertainty_type="UTYPE",
memmap=False,
**kwd
):
""" Loads FITS file of CCD image data (not table, etc).
    Parameters
    ----------
path : path-like
The path to the FITS file to load.
trimsec : str, optional.
Region of `~astropy.nddata.CCDData` from which the data is extracted.
Default: `None`.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
ccddata : bool, optional.
Whether to return `~astropy.nddata.CCDData`. Default is `True`. If it
is `False`, **all the arguments below are ignored**, except for the
keyword arguments that will be passed to `fitsio.read`, and an ndarray
will be returned without astropy unit.
as_ccd : bool, optional.
Deprecated. (identical to `ccddata`)
use_wcs : bool, optional.
Whether to load WCS by `fits.getheader`, **not** by
`~astropy.nddata.fits_ccdddata_reader`. This is necessary as of now
because TPV WCS is not properly understood by the latter.
Default : `True`.
Used only if ``ccddata=True``.
        .. warning::
Use ``ccd.wcs``, but not ``WCS(ccd.header)``. astropy often parses
WCS erroneously for some non-standard ones.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used as
the unit, if present), this argument is used for the unit.
Default: `None`.
Used only if ``ccddata=True``.
.. note::
The behavior differs from astropy's original fits_ccddata_reader:
If no ``BUNIT`` is found and `unit` is `None`, ADU is assumed.
full : bool, optional.
Whether to return full `(data, unc, mask, flag)` when using
`fitsio` (i.e., when `ccddata=False`). If `False`(default), only `data`
will be returned.
Default: `False`.
extension_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty is `None`. Name is changed
from `hdu_uncertainty` in ccdproc to `extension_uncertainty` here. See
explanation of `extension`.
Default: ``'UNCERT'``.
extension_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask is `None`. Name is changed from
`hdu_mask` in ccdproc to `extension_mask` here. See explanation of
`extension`.
Default: ``'MASK'``.
hdu_flags : str or None, optional
        Currently not implemented. Name is changed from `hdu_flags` in ccdproc
to `extension_flags` here.
Default: `None`.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default: ``UTYPE``.
Used only if ``ccddata=True``.
        .. warning::
            If ``ccddata=False``, the uncertainty type given by
            `key_uncertainty_type` is completely ignored.
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item `astropy.io.fits.Conf.use_memmap`.
Default: `False` (**opposite of astropy**).
Used only if ``ccddata=True``.
kwd :
Any additional keyword parameters that will be used in
`~astropy.nddata.fits_ccddata_reader` (if ``ccddata=True``) or
`fitsio.read()` (if ``ccddata=False``).
Returns
-------
    CCDData (``ccddata=True``) or ndarray (``ccddata=False``). For the latter
    case, if ``full=True``, the uncertainty and mask extensions, as well as
    flags (not supported, so just `None`) will be returned together with the
    data of the extension specified by `extension`.
    If ``ccddata=False``, the returned object is an ndarray (``full=False``)
    or a tuple of arrays ``(data, unc, mask, flag)`` (``full=True``).
Notes
-----
Many of the parameter explanations adopted from astropy
(https://github.com/astropy/astropy/blob/master/astropy/nddata/ccddata.py#L527
and
https://github.com/astropy/astropy/blob/master/astropy/io/fits/convenience.py#L120).
CCDData.read cannot read TPV WCS:
https://github.com/astropy/astropy/issues/7650
Also memory map must be set False to avoid memory problem
https://github.com/astropy/astropy/issues/9096
Plus, WCS info from astrometry.net solve-field sometimes not understood by
CCDData.read.... 2020-05-31 16:39:51 (KST: GMT+09:00) ysBach
Why the name of the argument is different (`hdu`) in
fits_ccddata_reader...;;
Using fitsio, we get ~ 6-100 times faster loading time for FITS files on
MBP 15" [2018, macOS 10.14.6, i7-8850H (2.6 GHz; 6-core), RAM 16 GB
(2400MHz DDR4), Radeon Pro 560X (4GB)]. Thus, when you just need data
without header information (combine or stacking images, simple image
arithmetics without header updates, etc) for MANY images, the gain is
enormous by using FITSIO. This also boosts the speed of some processes
which have to open the same FITS file repeatedly due to the memory limit.
```
!fitsinfo test.fits
Filename: test.fits
No. Name Ver Type Cards Dimensions Format
0 PRIMARY 1 PrimaryHDU 6 (1,) int64
1 a 1 ImageHDU 7 (1,) int64
2 a 1 ImageHDU 7 (1,) int64
3 a 2 ImageHDU 8 (1,) int64
%timeit fitsio.FITS("test.fits")["a", 2].read()
%timeit fitsio.FITS("test.fits")[0].read()
118 µs +/- 564 ns per loop (mean +/- std. dev. of 7 runs, 10000 loops each)
117 µs +/- 944 ns per loop (mean +/- std. dev. of 7 runs, 10000 loops each)
%timeit CCDData.read("test.fits")
%timeit CCDData.read("test.fits", hdu=("a", 2), unit='adu')
10.7 ms +/- 113 µs per loop (mean +/- std. dev. of 7 runs, 100 loops each)
11 ms +/- 114 µs per loop (mean +/- std. dev. of 7 runs, 100 loops each)
```
For a 1k by 1k image, it's ~ 6 times faster
```
np.random.seed(123)
ccd = CCDData(data=np.random.normal(
size=(1000, 1000)).astype('float32'), unit='adu'
)
ccd.write("test1k_32bit.fits")
%timeit fitsio.FITS("test10k_32bit.fits")[0].read()
1.49 ms +/- 91.1 µs per loop (mean +/- std. dev. of 7 runs, 1000 loops each)
%timeit CCDData.read("test10k_32bit.fits")
8.9 ms +/- 97.6 µs per loop (mean +/- std. dev. of 7 runs, 100 loops each)
```
For a 10k by 10k image, it's still ~ 6 times faster
```
ccd = CCDData(data=np.random.normal(
size=(10000, 10000)).astype('float32'), unit='adu'
)
%timeit fitsio.FITS("test10k_32bit.fits")[0].read()
1.4 ms +/- 123 µs per loop (mean +/- std. dev. of 7 runs, 1000 loops each)
%timeit CCDData.read("test10k_32bit.fits")
9.42 ms +/- 391 µs per loop (mean +/- std. dev. of 7 runs, 100 loops each)
```
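    A minimal usage sketch (the path is a placeholder):

    ```
    ccd = load_ccd("obj_0001.fits")                 # CCDData (WCS parsed from the header)
    arr = load_ccd("obj_0001.fits", ccddata=False)  # bare ndarray (uses fitsio if available)
    ```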
"""
def _ext_umf(ext):
""" Return None if ext is None, otherwise, parse it (usu. returns 0)
"""
return None if ext is None else _parse_extension(ext)
try:
path = Path(path)
except TypeError:
raise TypeError(f"You must provide Path-like, not {type(path)}.")
extension = _parse_extension(extension)
if HAS_FITSIO:
if ccddata and as_ccd: # if at least one of these is False, it uses fitsio.
reader_kw = dict(
hdu=extension,
hdu_uncertainty=_ext_umf(extension_uncertainty),
hdu_mask=_ext_umf(extension_mask),
hdu_flags=_ext_umf(extension_flags),
key_uncertainty_type=key_uncertainty_type,
memmap=memmap,
**kwd,
)
# ^ If hdu_flags is not None, CCDData raises this Error:
# NotImplementedError: loading flags is currently not supported.
# FIXME: Remove this `if` block in the future if WCS issue is resolved.
if use_wcs: # Because of the TPV WCS issue
hdr = fits.getheader(path)
reader_kw["wcs"] = WCS(hdr)
del hdr
try: # Use BUNIT if unit is None
ccd = CCDData.read(path, unit=unit, **reader_kw)
except ValueError: # e.g., user did not give unit and there's no BUNIT
ccd = CCDData.read(path, unit=u.adu, **reader_kw)
if trimsec is not None:
# Do imslice AFTER loading the data to easily add LTV/LTM...
                ccd = imslice(ccd, trimsec=trimsec)
if full: # Just for API consistency
return ccd, ccd.uncertainty, ccd.mask, ccd.flags
return ccd
else:
# Use fitsio and only load the data as soon as possible.
# This is much quicker than astropy's getdata
def _read_by_fitsio(_hdul, _ext, _trimsec=None):
if _ext is None:
return None
_ext = _ext_umf(_ext)
try:
if _trimsec is not None:
sl = slicefy(_trimsec)
if is_list_like(_ext):
# length == 2 is already checked in _parse_extension.
arr = _hdul[_ext[0], _ext[1]].read()[sl]
else:
arr = _hdul[_ext].read()[sl]
else:
if is_list_like(_ext):
# length == 2 is already checked in _parse_extension.
arr = _hdul[_ext[0], _ext[1]].read()
else:
arr = _hdul[_ext].read()
return arr
except (OSError, ValueError) as e:
print(e)
# "Extension `{_ext}` is not found (file: {_path})")
return None
with fitsio.FITS(path) as hdul:
if full:
dat = _read_by_fitsio(hdul, extension, trimsec)
unc = _read_by_fitsio(hdul, extension_uncertainty, trimsec)
msk = _read_by_fitsio(hdul, extension_mask, trimsec)
flg = _read_by_fitsio(hdul, extension_flags, trimsec)
return dat, unc, msk, flg
else:
return _read_by_fitsio(hdul, extension, trimsec)
else:
e_u = _ext_umf(extension_uncertainty)
e_m = _ext_umf(extension_mask)
e_f = _ext_umf(extension_flags)
# ^ If not None, this happens:
# NotImplementedError: loading flags is currently not supported.
reader_kw = dict(
hdu=extension,
hdu_uncertainty=e_u,
hdu_mask=e_m,
hdu_flags=e_f,
key_uncertainty_type=key_uncertainty_type,
memmap=memmap,
**kwd,
)
# FIXME: Remove this if block in the future if WCS issue is resolved.
if use_wcs: # Because of the TPV WCS issue
hdr = fits.getheader(path)
reader_kw["wcs"] = WCS(hdr)
del hdr
try:
ccd = CCDData.read(path, unit=unit, **reader_kw)
except ValueError: # e.g., user did not give unit and there's no BUNIT
ccd = CCDData.read(path, unit="adu", **reader_kw)
# Force them to be None if extension is not specified
# (astropy.NDData.CCDData forces them to be loaded, which is not desirable imho)
ccd.uncertainty = None if e_u is None else ccd.uncertainty
ccd.mask = None if e_m is None else ccd.mask
if trimsec is not None:
ccd = imslice(ccd, trimsec=trimsec)
if ccddata and as_ccd: # if at least one of these is False, it uses fitsio.
if full: # Just for API consistency
return ccd, ccd.uncertainty, ccd.mask, ccd.flags
else:
return ccd
elif full:
try:
unc = None if e_u is None else np.array(ccd.uncertainty.array)
except AttributeError:
unc = None
        mask = None if e_m is None else np.array(ccd.mask)  # ccd.mask is already an ndarray
flag = None if e_f is None else np.array(ccd.flags)
return ccd.data, unc, mask, flag
else:
return ccd.data
def inputs2list(
inputs,
sort=True,
accept_ccdlike=True,
path_to_text=False,
check_coherency=False
):
""" Convert glob pattern or list-like of path-like to list of Path
Parameters
----------
inputs : str, path-like, CCDData, fits.PrimaryHDU, fits.ImageHDU, DataFrame-convertable.
If DataFrame-convertable, e.g., dict, `~pandas.DataFrame` or
`~astropy.table.Table`, it must have column named ``"file"``, such that
``outlist = list(inputs["file"])`` is possible. Otherwise, please use,
e.g., ``inputs = list(that_table["filenamecolumn"])``. If a str starts
with ``"@"`` (e.g., ``"@darks.list"``), it assumes the file contains a
list of paths separated by ``"\n"``, as in IRAF.
sort : bool, optional.
Whether to sort the output list.
Default: `True`.
accept_ccdlike: bool, optional.
        Whether to accept `~astropy.nddata.CCDData`-like objects and simply
return ``[inputs]``.
Default: `True`.
path_to_text: bool, optional.
Whether to convert the `pathlib.Path` object to `str`.
        Default: `False`.
    check_coherency: bool, optional.
Whether to check if all elements of the `inputs` have the identical
type.
Default: `False`.
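    Example
    -------
    A small sketch (paths are placeholders):

    >>> inputs2list("calib/dark_*.fits")      # glob pattern -> sorted list of matching paths
    >>> inputs2list(["b.fits", "a.fits"])     # list-like -> [Path('a.fits'), Path('b.fits')]
    >>> inputs2list("@darks.list")            # "@file" -> paths listed (newline-separated) in the file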
"""
contains_ccdlike = False
if inputs is None:
return None
elif isinstance(inputs, str):
if inputs.startswith("@"):
with open(inputs[1:]) as ff:
outlist = ff.read().splitlines()
else:
# If str, "dir/file.fits" --> [Path("dir/file.fits")]
# "dir/*.fits" --> [Path("dir/file.fits"), ...]
outlist = glob.glob(inputs)
elif isinstance(inputs, (PosixPath, WindowsPath)):
# If Path, ``TOP/"file*.fits"`` --> [Path("top/file1.fits"), ...]
outlist = glob.glob(str(inputs))
elif isinstance(inputs, ASTROPY_CCD_TYPES):
if accept_ccdlike:
outlist = [inputs]
else:
raise TypeError(f"{type(inputs)} is given as `inputs`. "
+ "Turn off accept_ccdlike or use path-like.")
elif isinstance(inputs, (Table, dict, pd.DataFrame)):
# Do this before is_list_like because DataFrame returns True in
# is_list_like as it is iterable.
try:
outlist = list(inputs["file"])
except KeyError:
raise KeyError(
"If inputs is DataFrame convertible, it must have column named 'file'."
)
elif is_list_like(inputs):
type_ref = type(inputs[0])
outlist = []
for i, item in enumerate(inputs):
if check_coherency and (type(item) != type_ref):
raise TypeError(
f"The 0-th item has {type_ref} while {i}-th has {type(item)}."
)
if isinstance(item, ASTROPY_CCD_TYPES):
contains_ccdlike = True
if accept_ccdlike:
outlist.append(item)
else:
raise TypeError(f"{type(item)} is given in the {i}-th element. "
+ "Turn off accept_ccdlike or use path-like.")
else: # assume it is path-like
if path_to_text:
outlist.append(str(item))
else:
outlist.append(Path(item))
else:
raise TypeError(f"inputs type ({type(inputs)})not accepted.")
if sort and not contains_ccdlike:
outlist.sort()
return outlist
def load_ccds(
paths,
extension=None,
trimsec=None,
ccddata=True,
as_ccd=True,
use_wcs=True,
unit=None,
extension_uncertainty="UNCERT",
extension_mask='MASK',
extension_flags=None,
full=False,
key_uncertainty_type='UTYPE',
memmap=False,
**kwd
):
""" Simple recursion of load_ccd
    Parameters
    ----------
paths : [list of] path-like
The path, glob pattern, or list of such, e.g., ``"a.fits"``,
``"c*.fits"``, ``["a.fits", "c*.fits"]``
Notes
-----
Timing on MBP 14" [2021, macOS 12.2, M1Pro(6P+2E/G16c/N16c/32G)] using 10
FITS (each 4.3 MB) with ~ 100 header cards:
%timeit ccds = yfu.load_ccds("h_20191021_000*")
105 ms +- 2.11 ms per loop (mean +- std. dev. of 7 runs, 10 loops each)
"""
paths2load = []
for p in listify(paths):
paths2load += inputs2list(p, sort=True, accept_ccdlike=False)
return [load_ccd(
p,
extension=extension,
trimsec=trimsec,
ccddata=ccddata,
as_ccd=as_ccd,
use_wcs=use_wcs,
unit=unit,
extension_uncertainty=extension_uncertainty,
extension_mask=extension_mask,
extension_flags=extension_flags,
full=full,
key_uncertainty_type=key_uncertainty_type,
memmap=memmap,
**kwd,
)
for p in np.array(paths2load).ravel()]
def CCDData_astype(ccd, dtype='float32', uncertainty_dtype=None, copy=True):
""" Assign dtype to the CCDData object (numpy uses float64 default).
Parameters
----------
ccd : CCDData
The ccd to be astyped.
dtype : dtype-like
The dtype to be applied to the data
uncertainty_dtype : dtype-like
        The dtype to be applied to the uncertainty. By default, use the same
dtype as data (``uncertainty_dtype=dtype``).
Example
-------
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> ccd = CCDData.read("image_unitygain001.fits", 0)
>>> ccd.uncertainty = np.sqrt(ccd.data)
>>> ccd = yfu.CCDData_astype(ccd, dtype='int16', uncertainty_dtype='float32')
"""
if copy:
nccd = ccd.copy()
else:
nccd = ccd
nccd.data = nccd.data.astype(dtype)
try:
if uncertainty_dtype is None:
uncertainty_dtype = dtype
nccd.uncertainty.array = nccd.uncertainty.array.astype(uncertainty_dtype)
except AttributeError:
# If there is no uncertainty attribute in the input `ccd`
pass
update_tlm(nccd.header)
return nccd
# **************************************************************************************** #
#* SETTER * #
# **************************************************************************************** #
def set_ccd_attribute(
ccd,
name,
value=None,
key=None,
default=None,
unit=None,
header_comment=None,
update_header=True,
verbose=True,
wrapper=None,
wrapper_kw={},
):
""" Set attributes from given paramters.
Parameters
----------
ccd : CCDData
The ccd to add attribute.
value : Any, optional.
The value to be set as the attribute. If `None`, the
``ccd.header[key]`` will be searched.
name : str, optional.
The name of the attribute.
key : str, optional.
The key in the ``ccd.header`` to be searched if ``value=None``.
unit : astropy.Unit, optional.
The unit that will be applied to the found value.
header_comment : str, optional.
The comment string to the header if ``update_header=True``. If `None`
(default), search for existing comment in the original header by
``ccd.comments[key]`` and only overwrite the value by
``ccd.header[key]=found_value``. If it's not `None`, the comments will
also be overwritten if ``update_header=True``.
wrapper : function object, None, optional.
The wrapper function that will be applied to the found value. Other
keyword arguments should be given as a dict to `wrapper_kw`.
wrapper_kw : dict, optional.
The keyword argument to `wrapper`.
Example
-------
>>> set_ccd_attribute(ccd, 'gain', value=2, unit='electron/adu')
>>> set_ccd_attribute(ccd, 'ra', key='RA', unit=u.deg, default=0)
Notes
-----
"""
_t_start = Time.now()
str_history = "From {}, {} = {} [unit = {}]"
# value_from, name, value_Q.value, value_Q.unit
if unit is None:
try:
unit = value.unit
except AttributeError:
unit = u.dimensionless_unscaled
value_Q, value_from = get_if_none(
value=value,
header=ccd.header,
key=key,
unit=unit,
verbose=verbose,
default=default,
)
if wrapper is not None:
value_Q = wrapper(value_Q, **wrapper_kw)
if update_header:
s = [str_history.format(value_from, name, value_Q.value, value_Q.unit)]
if key is not None:
if header_comment is None:
try:
header_comment = ccd.header.comments[key]
except (KeyError, ValueError):
header_comment = ""
try:
v = ccd.header[key]
s.append(
f"[yfu.set_ccd_attribute] (Original {key} = {v} is overwritten.)"
)
except (KeyError, ValueError):
pass
ccd.header[key] = (value_Q.value, header_comment)
# add as history
cmt2hdr(ccd.header, "h", s, t_ref=_t_start)
setattr(ccd, name, value_Q)
update_tlm(ccd.header)
# TODO: This is quite much overlapping with get_gain_rdnoise...
def set_ccd_gain_rdnoise(
ccd,
verbose=True,
update_header=True,
gain=None,
rdnoise=None,
gain_key="GAIN",
rdnoise_key="RDNOISE",
gain_unit=u.electron/u.adu,
rdnoise_unit=u.electron
):
""" A convenience set_ccd_attribute for gain and readnoise.
Parameters
----------
gain, rdnoise : None, float, astropy.Quantity, optional.
The gain and readnoise value. If `gain` or `readnoise` is specified,
they are interpreted with `gain_unit` and `rdnoise_unit`, respectively.
If they are not specified, this function will seek for the header with
keywords of `gain_key` and `rdnoise_key`, and interprete the header
value in the unit of `gain_unit` and `rdnoise_unit`, respectively.
gain_key, rdnoise_key : str, optional.
See `gain`, `rdnoise` explanation above.
gain_unit, rdnoise_unit : str, astropy.Unit, optional.
See `gain`, `rdnoise` explanation above.
verbose : bool, optional.
The verbose option.
update_header : bool, optional
Whether to update the given header.
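    Example
    -------
    A small sketch (the numbers are placeholders):

    >>> set_ccd_gain_rdnoise(ccd)                          # take GAIN/RDNOISE from the header
    >>> set_ccd_gain_rdnoise(ccd, gain=1.5, rdnoise=9.0)   # force explicit values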
"""
gain_str = f"[{gain_unit:s}] Gain of the detector"
rdn_str = f"[{rdnoise_unit:s}] Readout noise of the detector"
set_ccd_attribute(
ccd=ccd,
name="gain",
value=gain,
key=gain_key,
unit=gain_unit,
default=1.0,
header_comment=gain_str,
update_header=update_header,
verbose=verbose,
)
set_ccd_attribute(
ccd=ccd,
name="rdnoise",
value=rdnoise,
key=rdnoise_key,
unit=rdnoise_unit,
default=0.0,
header_comment=rdn_str,
update_header=update_header,
verbose=verbose,
)
# **************************************************************************************** #
# * CCD MANIPULATIONS * #
# **************************************************************************************** #
def propagate_ccdmask(ccd, additional_mask=None):
""" Propagate the CCDData's mask and additional mask.
Parameters
----------
ccd : CCDData, ndarray
The ccd to extract mask. If ndarray, it will only return a copy of
`additional_mask`.
additional_mask : mask-like, None
The mask to be propagated.
Notes
-----
The original ``ccd.mask`` is not modified. To do so,
>>> ccd.mask = propagate_ccdmask(ccd, additional_mask=mask2)
"""
if additional_mask is None:
try:
mask = ccd.mask.copy()
except AttributeError: # i.e., if ccd.mask is None
mask = None
else:
try:
mask = ccd.mask | additional_mask
except (TypeError, AttributeError): # i.e., if ccd.mask is None:
mask = deepcopy(additional_mask)
return mask
def imslice(ccd, trimsec, fill_value=None, order_xyz=True,
update_header=True, verbose=False):
""" Slice the CCDData using one of trimsec, bezels, or slices.
    Parameters
    ----------
ccd : CCDData, ndarray
The ccd to be sliced. If ndarray, it will be converted to CCDData with
dummy unit ("ADU").
trimsec : str, int, list of int, list of slice, None, optional
It can have several forms::
* str: The FITS convention section to trim (e.g., IRAF TRIMSEC).
* [list of] int: The number of pixels to trim from the edge of the
image (bezel). If list, it must be [bezel_lower, bezel_upper].
* [list of] slice: The slice of each axis (`slice(start, stop,
step)`)
If a single int/slice is given, it will be applied to all the axes.
order_xyz : bool, optional
Whether the order of trimsec is in xyz order. Works only if the
`trimsec` is bezel-like (int or list of int). If it is slice-like,
`trimsec` must be in the pythonic order (i.e., ``[slice_for_axis0,
slice_for_axis1, ...]``).
    fill_value : None, float-like, optional.
        If `None`, the pixels outside the selected region are removed (the
        array is cropped). If given as float-like (including `np.nan`), the
        output keeps the original shape and the pixels outside the region
        are replaced with this value.
Notes
-----
Similar to ccdproc.trim_image or imcopy. Compared to ccdproc, it has
flexibility, and can add LTV/LTM to header.
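    Example
    -------
    A small sketch of the accepted `trimsec` forms (values are arbitrary;
    `ccd` is any 2-D CCDData):

    >>> nccd = imslice(ccd, trimsec="[11:110, 21:120]")   # FITS-convention section (1-indexed, xy)
    >>> nccd = imslice(ccd, trimsec=10)                   # trim a 10-pixel bezel on every edge
    >>> nccd = imslice(ccd, trimsec=[slice(20, 120), slice(10, 110)])  # pythonic slices (axis 0, axis 1)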
"""
_t = Time.now()
# Parse
sl = slicefy(trimsec, ndim=ccd.ndim, order_xyz=order_xyz)
if isinstance(ccd, np.ndarray):
ccd = CCDData(ccd, unit=u.adu)
if fill_value is None:
nccd = ccd[sl].copy() # CCDData supports this kind of slicing
else:
nccd = ccd.copy()
nccd.data = np.ones(nccd.shape) * fill_value
nccd.data[sl] = ccd.data[sl]
if update_header: # update LTV/LTM
ltms = [1 if s.step is None else 1/s.step for s in sl]
ndim = ccd.ndim # ndim == NAXIS keyword
shape = ccd.shape
if trimsec is not None:
ltvs = []
for axis_i_py, naxis_i in enumerate(shape):
# example: "[10:110]", we must have LTV = -9, not -10.
ltvs.append(-1*sl[axis_i_py].indices(naxis_i)[0])
ltvs = ltvs[::-1] # zyx -> xyz order
else:
ltvs = [0.0]*ndim
hdr = nccd.header
for i, ltv in enumerate(ltvs):
if (key := f"LTV{i+1}") in hdr:
hdr[key] += ltv
else:
hdr[key] = ltv
for i in range(ndim):
for j in range(ndim):
if i == j:
hdr[f"LTM_{i+1}_{i+1}"] = hdr.get(f"LTM{i+1}", ltms[i])
else:
hdr.setdefault(f"LTM{i+1}_{j+1}", 0.0)
if trimsec is not None:
infostr = [
f"[yfu.imslice] Sliced using `{trimsec = }`: converted to {sl}. "
]
if fill_value is not None:
infostr.append(f"Filled background with `{fill_value = }`.")
cmt2hdr(hdr, "h", infostr, t_ref=_t, verbose=verbose)
update_process(hdr, "T")
return nccd
# FIXME: not finished.
def trim_overlap(inputs, extension=None, coordinate="image"):
""" Trim only the overlapping regions of the two CCDs
Parameters
----------
coordinate : str, optional.
Ways to find the overlapping region. If ``'image'`` (default), output
size will be ``np.min([ccd.shape for ccd in ccds], axis=0)``. If
``'physical'``, overlapping region will be found based on the physical
coordinates.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
Notes
-----
WCS is not acceptable because no rotation/scaling is supported.
"""
items = inputs2list(inputs, sort=False, accept_ccdlike=True, check_coherency=False)
if len(items) < 2:
raise ValueError("inputs must have at least 2 objects.")
offsets = []
shapes = []
    reference, _, _ = _parse_image(
items[0], extension=extension, name=None, force_ccddata=True
)
for item in items:
ccd, _, _ = _parse_image(
item, extension=extension, name=None, force_ccddata=True
)
shapes.append(ccd.data.shape)
offsets.append(
calc_offset_physical(ccd, reference, order_xyz=False, ignore_ltm=True)
)
offsets, new_shape = _image_shape(
shapes, offsets, method="overlap", intify_offsets=False
)
# FIXME: docstring looks strange
def cut_ccd(ccd, position, size, mode="trim", fill_value=np.nan):
""" Converts the Cutout2D object to proper CCDData.
Parameters
----------
ccd: CCDData
The ccd to be trimmed.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to the ``data``
array. The position can be specified either as a ``(x, y)`` tuple of
pixel coordinates or a `~astropy.coordinates.SkyCoord`, in which case
wcs is a required input.
size : int, array-like, `~astropy.units.Quantity`
The size of the cutout array along each axis. If `size` is a scalar
number or a scalar `~astropy.units.Quantity`, then a square cutout of
`size` will be created. If `size` has two elements, they should be in
``(ny, nx)`` order. Scalar numbers in `size` are assumed to be in units
of pixels. `size` can also be a `~astropy.units.Quantity` object or
contain `~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or angular units.
For all cases, `size` will be converted to an integer number of pixels,
        rounding to the nearest integer. See the `mode` keyword for additional
details on the final cutout size.
.. note::
If `size` is in angular units, the cutout size is converted to
pixels using the pixel scales along each axis of the image at the
``CRPIX`` location. Projection and other non-linear distortions
are not taken into account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input `data` array. If `wcs` is not
`None`, then the returned cutout object will contain a copy of the
updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the cutout
array and the input `data` array is sufficient. For the ``'strict'``
mode, the cutout array has to be fully contained within the `data`
array, otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the cutout array that do not overlap with the `data` array
will be filled with `fill_value`. In ``'trim'`` mode only the
overlapping elements are returned, thus the resulting cutout array may
be smaller than the requested `shape`.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the cutout array
that do not overlap with the input `data`. `fill_value` must have the
same `dtype` as the input `data` array.
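    Example
    -------
    A small sketch (pixel coordinates and sizes below are placeholders):

    >>> nccd = cut_ccd(ccd, position=(100.5, 200.5), size=(51, 51))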
"""
hdr_orig = ccd.header
w = WCS(hdr_orig)
cutout = Cutout2D(
data=ccd.data,
position=position,
size=size,
wcs=w,
mode=mode,
fill_value=fill_value,
copy=True,
)
# Copy True just to avoid any contamination to the original ccd.
nccd = CCDData(data=cutout.data, header=hdr_orig, wcs=cutout.wcs, unit=ccd.unit)
ny, nx = nccd.data.shape
nccd.header["NAXIS1"] = nx
nccd.header["NAXIS2"] = ny
nonlin = False
try:
for ctype in ccd.wcs.get_axis_types():
if ctype["scale"] != "linear":
nonlin = True
break
except AttributeError:
nonlin = False
if nonlin:
warn(
"Since Cutout2D is for small image crop, astropy do not currently support "
+ "distortion in WCS. This may result in slightly inaccurate WCS calculation."
)
update_tlm(nccd.header)
return nccd
def bin_ccd(
ccd,
factor_x=1,
factor_y=1,
binfunc=np.mean,
trim_end=False,
update_header=True,
copy=True,
):
""" Bins the given ccd.
    Parameters
    ----------
ccd : CCDData
The ccd to be binned
factor_x, factor_y : int, optional.
The binning factors in x, y direction.
    binfunc : function object, optional.
The function to be applied for binning, such as ``np.sum``,
``np.mean``, and ``np.median``.
trim_end : bool, optional.
Whether to trim the end of x, y axes such that binning is done without
error.
update_header : bool, optional.
Whether to update header. Defaults to True.
Notes
-----
This is ~ 20-30 up to ~10^5 times faster than astropy.nddata's
block_reduce:
>>> from astropy.nddata.blocks import block_reduce
>>> import ysfitsutilpy as yfu
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> ccd = CCDData(data=np.arange(1000).reshape(20, 50), unit='adu')
>>> kw = dict(factor_x=5, factor_y=5, binfunc=np.sum, trim_end=True)
>>> %timeit yfu.binning(ccd.data, **kw)
>>> # 10.9 +- 0.216 us (7 runs, 100000 loops each)
>>> %timeit yfu.bin_ccd(ccd, **kw, update_header=False)
>>> # 32.9 µs +- 878 ns per loop (7 runs, 10000 loops each)
>>> %timeit -r 1 -n 1 block_reduce(ccd, block_size=5)
>>> # 518 ms, 2.13 ms, 250 us, 252 us, 257 us, 267 us
>>> # 5.e+5 ... ... ... ... 27 -- times slower
>>> # some strange caching happens?
Tested on MBP 15" [2018, macOS 10.14.6, i7-8850H (2.6 GHz; 6-core), RAM 16
GB (2400MHz DDR4), Radeon Pro 560X (4GB)]
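Example
-------
A minimal usage sketch (array values are illustrative, unrelated to the
timing test above):
>>> ccd = CCDData(data=np.arange(16.).reshape(4, 4), unit='adu')
>>> binned = bin_ccd(ccd, factor_x=2, factor_y=2, binfunc=np.mean)
>>> binned.data.shape
(2, 2)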
"""
_t_start = Time.now()
if not isinstance(ccd, CCDData):
raise TypeError("ccd must be CCDData object.")
if factor_x == 1 and factor_y == 1:
return ccd
if copy:
_ccd = ccd.copy()
else:
_ccd = ccd
_ccd.data = binning(
_ccd.data,
factor_x=factor_x,
factor_y=factor_y,
binfunc=binfunc,
trim_end=trim_end,
)
if update_header:
_ccd.header["BINFUNC"] = (binfunc.__name__,
"The function used for binning.")
_ccd.header["XBINNING"] = (factor_x,
"Binning done after the observation in X direction")
_ccd.header["YBINNING"] = (factor_y,
"Binning done after the observation in Y direction")
# add as history
cmt2hdr(_ccd.header, 'h', t_ref=_t_start,
s=f"[bin_ccd] Binned by (xbin, ybin) = ({factor_x}, {factor_y}) ")
return _ccd
# TODO: Need something (e.g., cython with pythran) to boost the speed of this function.
def fixpix(
ccd,
mask=None,
maskpath=None,
extension=None,
mask_extension=None,
priority=None,
update_header=True,
verbose=True,
):
""" Interpolate the masked location (N-D generalization of IRAF PROTO.FIXPIX)
Parameters
----------
ccd : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like, or number-like
The CCD data to be "fixed".
mask : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like
The mask to be used for fixing pixels (pixels to be fixed are where
`mask` is `True`). If `None`, nothing will happen and `ccd` is
returned.
extension, mask_extension: int, str, (str, int), None
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
priority: tuple of int, None, optional.
The priority of axis as a tuple of non-repeating `int` from ``0`` to
`ccd.ndim`. It will be used if the mask has the same size along two or
more of the directions. To specify, use the integers for axis
directions, descending priority. For example, ``(2, 1, 0)`` will be
identical to `priority=None` (default) for 3-D images.
Default is `None` to follow IRAF's PROTO.FIXPIX: Priority is higher for
larger axis number (e.g., in 2-D, x-axis (axis=1) has higher priority
than y-axis (axis=0)).
Examples
--------
Timing test: MBP 15" [2018, macOS 11.4, i7-8850H (2.6 GHz; 6-core), RAM 16
GB (2400MHz DDR4), Radeon Pro 560X (4GB)], 2021-11-05 11:14:04 (KST:
GMT+09:00)
>>> np.random.RandomState(123) # RandomState(MT19937) at 0x7FAECA768D40
>>> data = np.random.normal(size=(1000, 1000))
>>> mask = np.zeros_like(data).astype(bool)
>>> mask[10, 10] = True
>>> %timeit yfu.fixpix(data, mask)
19.7 ms +- 1.53 ms per loop (mean +- std. dev. of 7 runs, 100 loops each)
>>> print(data[9:12, 9:12], yfu.fixpix(data, mask)[9:12, 9:12])
# [[ 1.64164502 -1.00385046 -1.24748504]
# [-1.31877621 1.37965928 0.66008966]
# [-0.7960262 -0.14613834 -1.34513327]]
# [[ 1.64164502 -1.00385046 -1.24748504]
# [-1.31877621 -0.32934328 0.66008966]
# [-0.7960262 -0.14613834 -1.34513327]] adu
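A minimal sketch of the `priority` keyword (hypothetical 2-D case; it only
matters when a masked blob has the same extent along both axes): the default
`None` is equivalent to ``(1, 0)`` in 2-D, breaking ties in favor of the
x-axis, while ``(0, 1)`` prefers the y-axis:
>>> fixed = yfu.fixpix(data, mask, priority=(0, 1))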
"""
if mask is None:
return ccd.copy()
_t_start = Time.now()
_ccd, _, _ = _parse_image(ccd, extension=extension, force_ccddata=True)
mask, maskpath, _ = _parse_image(
mask, extension=mask_extension, name=maskpath, force_ccddata=True
)
mask = mask.data.astype(bool)
data = _ccd.data
naxis = _ccd.shape
if _ccd.shape != mask.shape:
raise ValueError(
f"ccd and mask must have the identical shape; now {_ccd.shape} VS {mask.shape}."
)
ndim = data.ndim
if priority is None:
priority = tuple([i for i in range(ndim)][::-1])
elif len(priority) != ndim:
raise ValueError(
"len(priority) and ccd.ndim must be the same; "
+ f"now {len(priority)} VS {ccd.ndim}."
)
if not isinstance(priority, tuple):
priority = tuple(priority)
if (np.min(priority) != 0) or (np.max(priority) != ndim - 1):
raise ValueError(
f"`priority` must be a tuple of int (0 <= int <= {ccd.ndim-1=}). "
+ f"Now it's {priority=}"
)
structures = [np.zeros([3]*ndim) for _ in range(ndim)]
for i in range(ndim):
sls = [[slice(1, 2, None)]*ndim for _ in range(ndim)][0]
sls[i] = slice(None, None, None)
structures[i][tuple(sls)] = 1
# structures[i] is the structure to obtain the num. of connected pix. along axis=i
pixels = []
n_axs = []
labels = []
for structure in structures:
_label, _nlabel = ndlabel(mask, structure=structure)
_pixels = {}
_n_axs = {}
for k in range(1, _nlabel + 1):
_label_k = (_label == k)
_pixels[k] = np.where(_label_k)
_n_axs[k] = np.count_nonzero(_label_k)
labels.append(_label)
pixels.append(_pixels)
n_axs.append(_n_axs)
idxs = np.where(mask)
for pos in np.array(idxs).T:
# The label of this position in each axis
label_pos = [lab.item(*pos) for lab in labels]
# number of pixels of the same label for each direction
n_ax = [_n_ax[lab] for _n_ax, lab in zip(n_axs, label_pos)]
# The shortest axis along which the interpolation will happen,
# OR, if two or more directions have the same minimum length, select
# the axis according to `priority`
interp_ax = np.where(n_ax == np.min(n_ax))[0]
if len(interp_ax) > 1:
for i_ax in priority: # check in the identical order to `priority`
if i_ax in interp_ax:
interp_ax = i_ax
break
else:
interp_ax = interp_ax[0]
# The coordinates of the pixels having the identical label to this
# pixel position, along the shortest axis
coord_samelabel = pixels[interp_ax][label_pos[interp_ax]]
coord_slice = []
coord_init = []
coord_last = []
for i in range(ndim):
invalid = False
if i == interp_ax:
init = np.min(coord_samelabel[i]) - 1
last = np.max(coord_samelabel[i]) + 1
# distance between the initial/last points to be used for the
# interpolation, along the interpolation axis:
delta = last - init
# grid for interpolation:
grid = np.arange(1, delta - 0.1, 1)
# Slice to be used for interpolation:
sl = slice(init + 1, last, None)
# Should be done here, BEFORE the if clause below.
# Check if lower/upper are all outside the image
if init < 0 and last >= naxis[i]:
invalid = True
break
elif init < 0: # if only one of lower/upper is outside the image
init = last
elif last >= naxis[i]:
last = init
else:
init = coord_samelabel[i][0]
last = coord_samelabel[i][0]
# coord_samelabel[i] is nothing but an array of same numbers
sl = slice(init, last + 1, None)
coord_init.append(init)
coord_last.append(last)
coord_slice.append(sl)
if not invalid:
val_init = data.item(tuple(coord_init))
val_last = data.item(tuple(coord_last))
data[tuple(coord_slice)].flat = (val_last - val_init)/delta*grid + val_init
if update_header:
nfix = np.count_nonzero(mask)
_ccd.header["MASKNPIX"] = (nfix, "No. of pixels masked (fixed) by fixpix.")
_ccd.header["MASKFILE"] = (maskpath, "Applied mask for fixpix.")
_ccd.header["MASKORD"] = (str(priority), "Axis priority for fixpix (python order)")
# MASKFILE: name identical to IRAF
# add as history
cmt2hdr(_ccd.header, "h", t_ref=_t_start, verbose=verbose,
s="[fixpix] Pixel values interpolated.")
update_process(_ccd.header, "P")
return _ccd
# # FIXME: Remove this after fixpix is completed
# def fixpix_griddata(ccd, mask, extension=None, method='nearest',
# fill_value=0, update_header=True):
# """ Interpolate the masked location (cf. IRAF's PROTO.FIXPIX)
# Parameters
# ----------
# ccd : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like, or number-like
# The CCD data to be "fixed".
# mask : ndarray (bool)
# The mask to be used for fixing pixels (pixels to be fixed are where
# `mask` is `True`).
# extension: int, str, (str, int)
# The extension of FITS to be used. It can be given as integer
# (0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple
# of str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the
# *first extension with data* will be used.
# method: str
# The interpolation method. Even the ``'linear'`` method takes too long
# time in many cases, so the default is ``'nearest'``.
# """
# _t_start = Time.now()
# _ccd, _, _ = _parse_image(ccd, extension=extension, force_ccddata=True)
# data = _ccd.data
# x_idx, y_idx = np.meshgrid(np.arange(0, data.shape[1] - 0.1),
# np.arange(0, data.shape[0] - 0.1))
# mask = mask.astype(bool)
# x_valid = x_idx[~mask]
# y_valid = y_idx[~mask]
# z_valid = data[~mask]
# _ccd.data = griddata((x_valid, y_valid),
# z_valid, (x_idx, y_idx), method=method, fill_value=fill_value)
# if update_header:
# _ccd.header["MASKMETH"] = (method,
# "The interpolation method for fixpix")
# _ccd.header["MASKFILL"] = (fill_value,
# "The fill value if interpol. fails in fixpix")
# _ccd.header["MASKNPIX"] = (np.count_nonzero(mask),
# "Total num of pixesl fixed by fixpix.")
# # add as history
# cmt2hdr(_ccd.header, 'h', t_ref=_t_start, s="Pixel values fixed by fixpix")
# update_tlm(_ccd.header)
# return _ccd
def find_extpix(
ccd,
mask=None,
npixs=(1, 1),
bezels=None,
order_xyz=True,
sort=True,
update_header=True,
verbose=0,
):
""" Finds the N extrema pixel values excluding masked pixels.
Parameters
----------
ccd : CCDData
The ccd to find extreme values
mask : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like, or number-like
The mask to be used. To reduce file I/O time, better to provide
ndarray.
npixs : length-2 tuple of int, optional
The numbers of extrema to find, in the form of ``[small, large]``, so
that ``small`` number of smallest and ``large`` number of largest pixel
values will be found. If an element is `None`, that extremum is not
computed and `None` is returned in its place.
Default: ``(1, 1)`` (find minimum and maximum)
bezels : list of list of int, optional.
If given, must be a list of list of int. Each list of int is in the
form of ``[lower, upper]``, i.e., the first ``lower`` and last
``upper`` rows/columns are ignored.
order_xyz : bool, optional.
Whether `bezels` is given in xyz order (`True`) or in pythonic (zyx)
order (`False`, i.e., ``bezels[::-1]`` of the former).
Default: `True`.
sort: bool, optional.
Whether to sort the extrema in ascending order.
Returns
-------
exts : list
The extrema pixel values in the order ``[smallest, largest]``; each
element is an ndarray of pixel values (or `None` if the corresponding
element of `npixs` is `None`).
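Example
-------
A minimal sketch (assumed data; header update suppressed):
>>> ccd = CCDData(data=np.arange(100.).reshape(10, 10), unit='adu')
>>> exts = find_extpix(ccd, npixs=(2, 2), update_header=False)
>>> # exts[0]: the two smallest pixel values, exts[1]: the two largest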
"""
if not len(npixs) == 2:
raise ValueError("npixs must be a length-2 tuple of int.")
_t = Time.now()
data = ccd.data.copy().astype("float32") # Not float64 to reduce memory usage
# slice first to reduce computation time
if bezels is not None:
sls = bezel2slice(bezels, order_xyz=order_xyz)
data = data[sls]
if mask is not None:
mask = mask[sls]
if mask is None:
maskname = "No mask"
mask = ~np.isfinite(data)
else:
if not isinstance(mask, np.ndarray):
mask, maskname, _ = _parse_image(mask, force_ccddata=True)
mask = mask.data | ~np.isfinite(data)
else:
maskname = "User-provided mask"
exts = []
for npix, sign, minmaxval in zip(npixs, [1, -1], [np.inf, -np.inf]):
if npix is None:
exts.append(None)
continue
data[mask] = minmaxval
# ^ if getting maximum/minimum pix vals, replace with minimum/maximum
extvals = np.partition(data.ravel(), sign*npix)
# ^^^^^^^^^^^^
# bn.partition has virtually no speed gain.
extvals = extvals[:npix] if sign > 0 else extvals[-npix:]
if sort:
extvals = np.sort(extvals)[::sign]
exts.append(extvals)
if update_header:
for ext, mm in zip(exts, ["min", "max"]):
if ext is not None:
for i, extval in enumerate(ext):
ccd.header.set(f"{mm.upper()}V{i+1:03d}", extval, f"{mm} pixel value")
bezstr = ""
if bezels is not None:
order = "xyz order" if order_xyz else "pythonic order"
bezstr = f" and bezel: {bezels} in {order}"
cmt2hdr(ccd.header, 'h', verbose=verbose, t_ref=_t,
s=("[yfu.find_extpix] Extrema pixel values found N(smallest, largest) = "
+ f"{npixs} excluding mask ({maskname}){bezstr}. "
+ "See MINViii and MAXViii.")
)
return exts
def find_satpix(
ccd,
mask=None,
satlevel=65535,
bezels=None,
order_xyz=True,
update_header=True,
verbose=0,
):
""" Finds saturated pixel values excluding masked pixels.
Parameters
----------
ccd : CCDData, ndarray
The ccd to find extreme values. If `ndarray`, `update_header` will
automatically be set to `False`.
mask : CCDData-like (e.g., PrimaryHDU, ImageHDU, HDUList), ndarray, path-like, or number-like
The mask to be used. To reduce file I/O time, better to provide
ndarray.
satlevel: numeric, optional.
The saturation level. Pixels >= `satlevel` will be regarded as
saturated pixels, except for those masked by `mask`.
bezels : list of list of int, optional.
If given, must be a list of list of int. Each list of int is in the
form of ``[lower, upper]``, i.e., the first ``lower`` and last
``upper`` rows/columns are ignored.
order_xyz : bool, optional.
Whether `bezels` is given in xyz order (`True`) or in pythonic (zyx)
order (`False`, i.e., ``bezels[::-1]`` of the former).
Default: `True`.
Returns
-------
satmask : ndarray of bool
The saturation mask (`True` where the pixel is saturated and not
masked).
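Example
-------
A minimal sketch (assumed values; passing an ndarray skips the header update):
>>> data = np.array([[1., 70000.], [65535., 10.]])
>>> satmask = find_satpix(data, satlevel=65535)
>>> # satmask is True at the two pixels with values >= 65535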
"""
_t = Time.now()
if isinstance(ccd, CCDData):
data = ccd.data.copy()
else:
data = ccd.copy()
update_header = False
satmask = np.zeros(data.shape, dtype=bool)
# slice first to reduce computation time
if bezels is not None:
sls = bezel2slice(bezels, order_xyz=order_xyz)
data = data[sls]
if mask is not None:
mask = mask[sls]
else:
sls = tuple(slice(None, None, None) for _ in range(data.ndim))
if mask is None:
maskname = "No mask"
satmask[sls] = data >= satlevel
else:
if not isinstance(mask, np.ndarray):
mask, maskname, _ = _parse_image(mask, force_ccddata=True)
mask = mask.data
else:
maskname = "User-provided mask"
satmask[sls] = (data >= satlevel) & (~mask) # saturated && not masked
if update_header:
nsat = np.count_nonzero(satmask[sls])
ccd.header["NSATPIX"] = (nsat, "No. of saturated pix")
ccd.header["SATLEVEL"] = (satlevel, "Saturation: pixels >= this value")
bezstr = ""
if bezels is not None:
order = "xyz order" if order_xyz else "pythonic order"
bezstr = f" and bezel: {bezels} in {order}"
cmt2hdr(ccd.header, 'h', verbose=verbose, t_ref=_t,
s=("[yfu.find_satpix] Saturated pixels calculated based on satlevel = "
+ f"{satlevel}, excluding mask ({maskname}){bezstr}. "
+ "See NSATPIX and SATLEVEL."))
return satmask
# def make_errormap(
# ccd,
# gain_epadu=1,
# rdnoise_electron=0,
# flat_err=0.0,
# subtracted_dark=None,
# return_variance=False
# ):
# print("Use `errormap` instead.")
# return errormap(ccd, gain_epadu=gain_epadu, rdnoise_electron=rdnoise_electron,
# subtracted_dark=subtracted_dark, flat_err=flat_err,
# return_variance=return_variance)
def errormap(
ccd_biassub,
gain_epadu=1,
rdnoise_electron=0,
subtracted_dark=0.0,
flat=1.0,
dark_std=0.0,
flat_err=0.0,
dark_std_min="rdnoise",
return_variance=False,
):
""" Calculate the detailed pixel-wise error map in ADU unit.
Parameters
----------
ccd_biassub : CCDData, PrimaryHDU, ImageHDU, ndarray.
The ccd data which will be used to generate error map. It must be
**bias subtracted**. If dark is subtracted, give `subtracted_dark`; it
will be added back to the data to calculate the Poisson noise term. If
the amount of the subtracted dark is negligible, you may leave
``subtracted_dark = 0`` (default).
gain_epadu, rdnoise_electron : float, array-like, or Quantity, optional.
The effective gain factor in ``electron/ADU`` unit and the readout
noise in ``electron`` unit.
subtracted_dark : array-like
The subtracted dark map.
Default: 0.
flat : ndarray, optional.
The flat field value. There is no need that flat values are normalized.
Default: 1.
flat_err : float, array-like, optional.
The uncertainty of the flat, which is obtained by the central limit
theorem (sample standard deviation of the pixel divided by the square
root of the number of flat frames). In IRAF and DAOPHOT, for example,
the flat-fielding uncertainty ``flat_err/flat`` is set to a constant
(see, e.g., eq 10 of StetsonPB 1987, PASP, 99, 191); Stetson used
0.0075 (0.75% fractional uncertainty), and the same is implemented in
IRAF DAOPHOT:
http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daopars
Default: 0.
dark_std : float, array-like, optional.
The sample standard deviation of dark pixels. It **should not be
divided by the number of dark frames**, because we are interested in
the uncertainty in the dark (prediction), not the confidence interval
of the *mean* of the dark.
Default: 0.
dark_std_min : 'rdnoise', float, optional.
The minimum value for `dark_std`. Any `dark_std` value below this will
be replaced by this value. If ``'rdnoise'`` (default), the
``rdnoise_electron/gain_epadu`` will be used.
return_variance: bool, optional
Whether to return as variance map. Default is `False`, i.e., return the
square-rooted standard deviation map. It's better to use variance for
large image size (computation speed issue).
Example
-------
>>> from astropy.nddata import CCDData, StdDevUncertainty
>>> ccd = CCDData.read("obj001.fits", 0)
>>> hdr = ccd.header
>>> dark = CCDData.read("master_dark.fits", 0)
>>> params = dict(gain_epadu=hdr["GAIN"], rdnoise_electron=hdr["RDNOISE"],
...               subtracted_dark=dark.data)
>>> ccd.uncertainty = StdDevUncertainty(errormap(ccd, **params))
"""
data, _ = _parse_data_header(ccd_biassub)
data[data < 0] = 0 # make all negative pixel to 0
if isinstance(gain_epadu, u.Quantity):
gain_epadu = gain_epadu.to(u.electron / u.adu).value
elif isinstance(gain_epadu, str):
gain_epadu = float(gain_epadu)
if isinstance(rdnoise_electron, u.Quantity):
rdnoise_electron = rdnoise_electron.to(u.electron).value
elif isinstance(rdnoise_electron, str):
rdnoise_electron = float(rdnoise_electron)
if dark_std_min == "rdnoise":
dark_std_min = rdnoise_electron/gain_epadu
if isinstance(dark_std, np.ndarray):
dark_std[dark_std < dark_std_min] = dark_std_min
# Calculate the full variance map
# restore dark for Poisson term calculation
eval_str = ("(data + subtracted_dark)/(gain_epadu*flat**2)"
+ "+ (dark_std/flat)**2"
+ "+ data**2*(flat_err/flat)**2"
+ "+ (rdnoise_electron/(gain_epadu*flat))**2"
)
if return_variance:
return NEVAL(eval_str)
else: # Sqrt is the most time-consuming part...
return NEVAL(f"{NPSTR}sqrt({eval_str})")
# var_pois = data / (gain_epadu * flat**2)
# var_rdn = (rdnoise_electron/(gain_epadu*flat))**2
# var_flat_err = data**2*(flat_err/flat)**2
# var_dark_err = (dark_err/flat)**2
# **************************************************************************************** #
# * HEADER MANIPULATION * #
# **************************************************************************************** #
def hedit(
item,
keys,
values,
comments=None,
befores=None,
afters=None,
add=False,
output=None,
overwrite=False,
output_verify="fix",
verbose=True,
):
""" Edit the header key (usu. to update value of a keyword).
Parameters
----------
item : `astropy` header, path-like, CCDData-like
The FITS file or header to edit. If `Header`, it is updated
**inplace**.
keys : str, list-like of str
The key to edit.
values : str, numeric, or list-like of such
The new value. To pass one single iterable (e.g., `[1, 2, 3]`) for one
single `key`, use a list of it (e.g., `[[1, 2, 3]]`) to circumvent
problem.
comments : str, list-like of str, optional.
The comment to add.
add : bool, optional.
Whether to add the key if it is not in the header.
befores : str, int, list-like of such, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument `before` takes
precedence over `after` if both specified.
afters : str, int, list-like of such, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
output: path-like, optional
The output file.
Returns
-------
ccd : CCDData
The header-updated CCDData. `None` if `item` was pure Header.
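Example
-------
A minimal sketch (hypothetical file and keyword values):
>>> ccd = hedit("image.fits", keys=["OBJECT", "EXPTIME"], values=["Vesta", 12.0],
... comments=["Target name", "Exposure time [s]"], add=True)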
"""
def _add_key(header, key, val, infostr, cmt=None, before=None, after=None):
header.set(key, value=val, comment=cmt, before=before, after=after)
# infostr += " (comment: {})".format(comment) if comment is not None else ""
if before is not None:
infostr += f" (moved: {before=})"
elif after is not None: # `after` is ignored if `before` is given
infostr += f" (moved: {after=})"
cmt2hdr(header, "h", infostr, verbose=verbose)
update_tlm(header)
if isinstance(item, fits.header.Header):
header = item
if verbose:
print("item is astropy Header. (any `output` is igrnoed).")
output = None
ccd = None
elif isinstance(item, ASTROPY_CCD_TYPES):
ccd, imname, _ = _parse_image(item, force_ccddata=True, copy=False)
# ^^^^^^^^^^
# Use copy=False to update header of the input CCD inplace.
header = ccd.header
keys, values, comments, befores, afters = listify(keys, values, comments,
befores, afters)
for key, val, cmt, bef, aft in zip(keys, values, comments, befores, afters):
if key in header:
oldv = header[key]
infostr = (f"[yfu.HEDIT] {key}={oldv} ({type(oldv).__name__}) "
+ f"--> {val} ({type(val).__name__})")
_add_key(header, key, val, infostr, cmt=cmt, before=bef, after=aft)
else:
if add: # add key only if `add` is True.
infostr = f"[yfu.HEDIT add] {key}= {val} ({type(val).__name__})"
_add_key(header, key, val, infostr, cmt=cmt, before=bef, after=aft)
elif verbose:
print(f"{key = } does not exist in the header. Skipped. (add=True to proceed)")
if output is not None:
ccd.write(output, overwrite=overwrite, output_verify=output_verify)
if verbose:
print(f"{imname} --> {output}")
return ccd
def key_remover(header, remove_keys, deepremove=True):
""" Removes keywords from the header.
Parameters
----------
header : Header
The header to be modified
remove_keys : list of str
The header keywords to be removed.
deepremove : True, optional
FITS standard does not have any specification of duplication of
keywords as discussed in the following issue:
https://github.com/astropy/ccdproc/issues/464
If it is set to `True`, ALL the keywords having the name specified in
`remove_keys` will be removed. If not, only the first occurrence of
each key in `remove_keys` will be removed. It usually makes more sense
to set it to `True`.
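Example
-------
A minimal sketch (hypothetical keywords):
>>> nhdr = key_remover(header, ["COMMENT", "HISTORY"], deepremove=True)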
"""
nhdr = header.copy()
if deepremove:
for key in remove_keys:
while True:
try:
nhdr.remove(key)
except KeyError:
break
else:
for key in remove_keys:
try:
nhdr.remove(key)
except KeyError:
continue
return nhdr
def key_mapper(header, keymap=None, deprecation=False, remove=False):
""" Update the header to meed the standard (keymap).
Parameters
----------
header : Header
The header to be modified
keymap : dict
The dictionary contains ``{<standard_key>:<original_key>}``
information. If it is `None` (default), the copied version of the
header is returned without any change.
deprecation : bool, optional
Whether to change the original keywords' comments to contain
deprecation warning. If `True`, the original keywords' comments will
become ``DEPRECATED. See <standard_key>.``. It has no effect if
``remove=True``.
Default is `False`.
remove : bool, optional.
Whether to remove the original keyword. `deprecation` is ignored if
``remove=True``.
Default is `False`.
Returns
-------
newhdr: Header
The updated (key-mapped) header.
Notes
-----
If the new keyword already exist in the given header, virtually nothing
will happen. If ``deprecation=True``, the old one's comment will be
changed, and if ``remove=True``, the old one will be removed; the new
keyword will never be changed or overwritten.
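Example
-------
A minimal sketch (hypothetical keywords): copy the value of ``EXPOSURE`` into
the standard ``EXPTIME`` key and remove the old one:
>>> newhdr = key_mapper(header, keymap={"EXPTIME": "EXPOSURE"}, remove=True)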
"""
def _rm_or_dep(hdr, old, new):
if remove:
hdr.remove(old)
elif deprecation: # do not remove but deprecate
hdr.comments[old] = f"DEPRECATED. See {new}"
newhdr = header.copy()
if keymap is not None:
for k_new, k_old in keymap.items():
if k_new == k_old:
continue
if k_old is not None:
if k_new in newhdr: # if k_new already in the header, JUST deprecate k_old.
_rm_or_dep(newhdr, k_old, k_new)
else: # if not, copy k_old to k_new and deprecate k_old.
try:
comment_ori = newhdr.comments[k_old]
newhdr[k_new] = (newhdr[k_old], comment_ori)
_rm_or_dep(newhdr, k_old, k_new)
except (KeyError, IndexError):
# don't even warn
pass
return newhdr
def chk_keyval(type_key, type_val, group_key):
""" Checks the validity of key and values used heavily in combutil.
Parameters
----------
type_key : None, str, list of str, optional
The header keyword for the ccd type you want to use for match.
type_val : None, int, str, float, etc and list of such
The header keyword values for the ccd type you want to match.
group_key : None, str, list of str, optional
The header keyword which will be used to make groups for the CCDs that
have been selected by `type_key` and `type_val`. If `None` (default),
no grouping will occur, but a `~pandas.DataFrameGroupBy` object will
still be returned for the sake of consistency.
Returns
-------
type_key, type_val, group_key
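Example
-------
A minimal sketch (hypothetical keywords):
>>> tk, tv, gk = chk_keyval("IMAGETYP", "BIAS", group_key=["EXPTIME", "FILTER"])
>>> # tk == ['IMAGETYP'], tv == ['BIAS'], gk == ['EXPTIME', 'FILTER']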
"""
# Make type_key to list
if type_key is None:
type_key = []
elif is_list_like(type_key):
try:
type_key = list(type_key)
if not all(isinstance(x, str) for x in type_key):
raise TypeError("Some of type_key are not str.")
except TypeError:
raise TypeError("type_key should be str or convertible to list.")
elif isinstance(type_key, str):
type_key = [type_key]
else:
raise TypeError(f"`type_key` not understood (type = {type(type_key)}): {type_key}")
# Make type_val to list
if type_val is None:
type_val = []
elif is_list_like(type_val):
try:
type_val = list(type_val)
except TypeError:
raise TypeError("type_val should be str or convertible to list.")
elif isinstance(type_val, str):
type_val = [type_val]
else:
raise TypeError(f"`type_val` not understood (type = {type(type_val)}): {type_val}")
# Make group_key to list
if group_key is None:
group_key = []
elif is_list_like(group_key):
try:
group_key = list(group_key)
if not all(isinstance(x, str) for x in group_key):
raise TypeError("Some of group_key are not str.")
except TypeError:
raise TypeError("group_key should be str or convertible to list.")
elif isinstance(group_key, str):
group_key = [group_key]
else:
raise TypeError(
f"`group_key` not understood (type = {type(group_key)}): {group_key}"
)
if len(type_key) != len(type_val):
raise ValueError("`type_key` and `type_val` must have the same length!")
# If there is overlap
overlap = set(type_key).intersection(set(group_key))
if len(overlap) > 0:
warn(f"{overlap} appear in both `type_key` and `group_key`."
+ "It may not be harmful but better to avoid.")
return type_key, type_val, group_key
def valinhdr(val=None, header=None, key=None, default=None, unit=None):
""" Get the value by priority: val > header[key] > default.
Parameters
----------
val : object, optional.
If not `None`, `header`, `key`, and `default` will **not** be used.
This is different from `header.get(key, default)`. It is therefore
useful if the API wants to override the header value by the
user-provided one.
header : Header, optional.
The header to extract the value from if `val` is `None`.
key : str, optional.
The header keyword to extract if `val` is `None`.
default : object, optional.
The default value. If `val` is `None`, then ``header.get(key,
default)`` is used.
unit : str, optional.
None to ignore unit. ``''`` (empty string) means `Unit(dimensionless)`.
Better to leave it as None unless astropy unit is truly needed.
Notes
-----
It takes << 10 us (when unit=None) or for any case for a reasonably lengthy
header. See `Tests` below. Tested on MBP 15" [2018, macOS 11.6, i7-8850H
(2.6 GHz; 6-core), RAM 16 GB (2400MHz DDR4), Radeon Pro 560X (4GB)].
Tests
-----
real_q = 20*u.s
real_v = 20
default_q = 0*u.s
default_v = 0
test_q = 3*u.s
test_v = 3
# w/o unit Times are the %timeit result of the LHS
assert valinhdr(None, hdr, "EXPTIME", default=0) == real_v # ~ 6.5 us
assert valinhdr(None, hdr, "EXPTIxx", default=0) == default_v # ~ 3.5 us
assert valinhdr(test_v, hdr, "EXPTIxx", default=0) == test_v # ~ 0.3 us
assert valinhdr(test_q, hdr, "EXPTIxx", default=0) == test_v # ~ 0.6 us
# w/ unit Times are the %timeit result of the LHS
assert valinhdr(None, hdr, "EXPTIME", default=0, unit='s') == real_q # ~ 23 us
assert valinhdr(None, hdr, "EXPTIxx", default=0, unit='s') == default_q # ~ 16 us
assert valinhdr(test_v, hdr, "EXPTIxx", default=0, unit='s') == test_q # ~ 11 us
assert valinhdr(test_q, hdr, "EXPTIxx", default=0, unit='s') == test_q # ~ 15 us
For a test CCDData, the following timing gave ~ 0.5 ms on MBP 15" [2018,
macOS 11.6, i7-8850H (2.6 GHz; 6-core), RAM 16 GB (2400MHz DDR4), Radeon
Pro 560X (4GB)]
%timeit ((yfu.valinhdr(None, ccd.header, "EXPTIME", unit=u.s)
/ yfu.valinhdr(3*u.s, ccd.header, "EXPTIME", unit=u.s)).si.value)
"""
uu = 1 if unit is None else u.Unit(unit)
# ^ NOT 1.0 to preserve the original dtype (e.g., int)
val = header.get(key, default) if val is None else val
if isinstance(val, u.Quantity):
return val.value if unit is None else val.to(unit)
else:
try:
return val*uu
except TypeError: # e.g., val is a str
return val
def get_from_header(header, key, unit=None, verbose=True, default=0):
""" Get a variable from the header object.
Parameters
----------
header : astropy.Header
The header to extract the value.
key : str
The header keyword to extract.
unit : astropy unit
The unit of the value.
default : str, int, float, ..., or Quantity
The default if not found from the header.
Returns
-------
q: Quantity or any object
The extracted quantity from the header. It's a Quantity if the unit is
given. Otherwise, appropriate type will be assigned.
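Example
-------
A minimal sketch (hypothetical header):
>>> exptime = get_from_header(header, "EXPTIME", unit=u.s, default=0)
>>> # -> Quantity in seconds if EXPTIME exists; otherwise 0 s (with a warning)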
"""
# If we used ``q = header.get(key, default=default)``,
# we could not print any meaningful verbose info.
# Anyway, the `header.get` source code is only a few lines:
# ``try: return header[key]; except (KeyError, IndexError): return default``.
key = key.upper()
try:
q = change_to_quantity(header[key], desired=unit)
if verbose:
print(f"header: {key:<8s} = {q}")
except (KeyError, IndexError):
q = change_to_quantity(default, desired=unit)
warn(f"The key {key} not found in header: setting to {default}.")
return q
def get_if_none(value, header, key, unit=None, verbose=True, default=0, to_value=False):
""" Similar to get_from_header, but a convenience wrapper.
"""
if value is None:
value_Q = get_from_header(header, key, unit=unit, verbose=verbose, default=default)
value_from = f"{key} in header"
else:
value_Q = change_to_quantity(value, unit, to_value=False)
value_from = "the user"
if to_value:
return value_Q.value, value_from
else:
return value_Q, value_from
def wcs_crota(wcs, degree=True):
"""
Notes
-----
https://iraf.net/forum/viewtopic.php?showtopic=108893
CROTA2 = arctan (-CD1_2 / CD2_2) = arctan ( CD2_1 / CD1_1)
"""
if isinstance(wcs, WCS):
wcsprm = wcs.wcs
elif isinstance(wcs, Wcsprm):
wcsprm = wcs
else:
raise TypeError("wcs type not understood. "
+ "It must be either astropy.wcs.WCS or astropy.wcs.Wcsprm")
# numpy arctan2 gets y-coord (numerator) and then x-coord(denominator)
crota = np.arctan2(wcsprm.cd[0, 0], wcsprm.cd[1, 0])
if degree:
crota = np.rad2deg(crota)
return crota
def midtime_obs(
header=None,
dateobs="DATE-OBS",
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
exptime="EXPTIME",
exptime_unit=u.s
):
"""Calculates the mid-obs time (exposure start + exposure/2)
Parameters
----------
header : astropy.Header, optional.
The header to extract the value. `midtime_obs` can be used without
header. But to do so, `dateobs` must be in `~astropy.time.Time` and
`exptime` must be given as float or `~astropy.units.Quantity`.
dateobs : str, `~astropy.Time`, optional.
The header keyword for DATE-OBS (start of exposure) or the
`~astropy.Time` object.
exptime : str, float, `~astropy.units.Quantity`, optional.
The header keyword for exposure time or the exposure time as float (in
seconds) or `~astropy.units.Quantity`.
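Example
-------
A minimal sketch; the first call assumes the header contains DATE-OBS and
EXPTIME, the second bypasses the header entirely (hypothetical values):
>>> t_mid = midtime_obs(header)
>>> t_mid = midtime_obs(dateobs=Time("2021-11-05T11:14:04"), exptime=120)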
"""
if isinstance(dateobs, str):
try:
time_0 = Time(header[dateobs], format=format, scale=scale,
precision=precision, in_subfmt=in_subfmt,
out_subfmt=out_subfmt, location=location)
except (KeyError, IndexError):
raise KeyError(f"The key '{dateobs=}' not found in header.")
else:
time_0 = dateobs
if isinstance(exptime, str):
try:
exptime = header.get(exptime, default=0)*exptime_unit
except (KeyError, IndexError):
raise KeyError(f"The key '{exptime=}' not found in header.")
elif isinstance(exptime, (int, float)):
exptime = exptime*exptime_unit
elif not isinstance(exptime, u.Quantity):
raise TypeError(f"exptime type ({type(exptime)}) not understood.")
return time_0 + exptime/2
def center_radec(
ccd_or_header,
center_of_image=True,
ra_key="RA",
dec_key="DEC",
equinox=None,
frame=None,
equinox_key="EPOCH",
frame_key="RADECSYS",
ra_unit=u.hourangle,
dec_unit=u.deg,
mode="all",
verbose=True,
plain=False,
):
""" Returns the central ra/dec from header or WCS.
Notes
-----
Even though RA or DEC is in sexagesimal, e.g., "20 53 20", astropy
correctly reads it in such a form, so no worries.
Parameters
----------
ccd_or_header : CCD-like, Header
The ccd or header to extract the central RA/DEC from keywords or WCS.
center_of_image : bool, optional
If `True`, WCS information will be extracted from the ccd or header,
rather than relying on the `ra_key` and `dec_key` keywords directly. If
`False`, `ra_key` and `dec_key` from the header will be understood as
the "center" and the RA, DEC of that location will be returned.
equinox, frame : str, optional
The `equinox` and `frame` for SkyCoord. Default (`None`) will use the
default of SkyCoord. Important only if ``center_of_image=False``.
XX_key : str, optional
The header key to find ``XX`` if ``XX`` is `None`. Important only if
``center_of_image=False``.
XX_unit : Quantity, optional
The unit of ``XX``. Important only if ``center_of_image=False``.
mode : 'all' or 'wcs', optional
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``). Important only if
``center_of_image=True``.
plain : bool
If `True`, only the values of RA/DEC in degrees will be returned.
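Example
-------
A minimal sketch (hypothetical `ccd` with a valid WCS):
>>> coo = center_radec(ccd)  # SkyCoord at the image center
>>> ra_deg, dec_deg = center_radec(ccd, plain=True)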
"""
if isinstance(ccd_or_header, CCDData):
header = ccd_or_header.header
w = ccd_or_header.wcs
elif isinstance(ccd_or_header, fits.Header):
header = ccd_or_header
w = WCS(header)
if center_of_image:
nx, ny = float(header["NAXIS1"]), float(header["NAXIS2"])
centx = nx / 2 - 0.5
centy = ny / 2 - 0.5
coo = SkyCoord.from_pixel(centx, centy, wcs=w, origin=0, mode=mode)
else:
ra = get_from_header(header, ra_key, verbose=verbose)
dec = get_from_header(header, dec_key, verbose=verbose)
if equinox is None:
equinox = get_from_header(header, equinox_key, verbose=verbose, default=None)
if frame is None:
frame = get_from_header(
header, frame_key, verbose=verbose, default=None
).lower()
coo = SkyCoord(
ra=ra, dec=dec, unit=(ra_unit, dec_unit), frame=frame, equinox=equinox
)
if plain:
return coo.ra.value, coo.dec.value
return coo
def calc_offset_wcs(
target,
reference,
loc_target="center",
loc_reference="center",
order_xyz=True,
intify_offset=False
):
""" The pixel offset of target's location when using WCS in referene.
Parameters
----------
target : CCDData, PrimaryHDU, ImageHDU, HDUList, Header, ndarray, number-like, path-like, WCS
The object to extract header to calculate the position
reference : CCDData, PrimaryHDU, ImageHDU, HDUList, Header, ndarray, number-like, path-like, WCS
The object to extract reference WCS (or header to extract WCS) to
calculate the position *from*.
loc_target : str (center, origin) or ndarray optional.
The location to calculate the position (in pixels and in xyz order).
Default is ``'center'`` (half of ``NAXISi`` keys in `target`). The
`location`'s world coordinate is calculated from the WCS information in
`target`. Then it will be transformed to the image coordinate of
`reference`.
loc_reference : str (center, origin) or ndarray optional.
The location of the reference point (in pixels and in xyz order) in
`reference`'s coordinate frame, from which the offset is calculated.
order_xyz : bool, optional.
Whether to return the position in xyz order or not (python order:
``[::-1]`` of the former). Default is `True`.
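Example
-------
A minimal sketch (hypothetical FITS files with valid WCS):
>>> dx, dy = calc_offset_wcs("target.fits", "reference.fits")
>>> # (dx, dy): where the center of `target` falls in `reference`'s pixel
>>> # frame, minus the center of `reference`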
"""
def _parse_loc(loc, obj):
if isinstance(obj, WCS):
w = obj
else:
_, hdr = _parse_data_header(obj, parse_data=False, copy=False)
w = WCS(hdr)
if loc == "center":
_loc = np.atleast_1d(w._naxis)/2
elif loc == "origin":
_loc = [0.]*w.naxis
else:
_loc = np.atleast_1d(loc)
return w, _loc
w_targ, _loc_target = _parse_loc(loc_target, target)
w_ref, _loc_ref = _parse_loc(loc_reference, reference)
_loc_target_coo = w_targ.all_pix2world(*_loc_target, 0)
_loc_target_pix_ref = w_ref.all_world2pix(*_loc_target_coo, 0)
offset = _loc_target_pix_ref - _loc_ref
if intify_offset:
offset = np.around(offset).astype(int)
if order_xyz:
return offset
else:
return offset[::-1]
def calc_offset_physical(
target,
reference=None,
order_xyz=True,
ignore_ltm=True,
intify_offset=False
):
""" The pixel offset by physical-coordinate information in referene.
Parameters
----------
target : CCDData, PrimaryHDU, ImageHDU, HDUList, Header, ndarray, number-like, path-like
The object to extract header to calculate the position
reference : CCDData, PrimaryHDU, ImageHDU, HDUList, Header, ndarray, number-like, path-like
The reference to extract header to calculate the position *from*. If
`None`, it is basically identical to extract the LTV values from
`target`.
Default is `None`.
order_xyz : bool, optional.
Whether to return the position in xyz order or not (python order:
``[::-1]`` of the former).
Default is `True`.
ignore_ltm : bool, optional.
Whether to assume the LTM matrix is the identity. If it is not and
``ignore_ltm=False``, a `NotImplementedError` will be raised, i.e.,
non-identity LTM matrices are not supported.
Notes
-----
Similar to `calc_offset_wcs`, but with locations fixed to origin (as
non-identity LTM matrix is not supported). Also, input of WCS is not
accepted because astropy's wcs module does not parse LTV/LTM from header.
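Example
-------
A minimal sketch (e.g., a trimmed frame versus the original full frame):
>>> offset = calc_offset_physical(trimmed_ccd, original_ccd)
>>> # the difference of the LTV keywords of the two headers, in (x, y) order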
"""
def _check_ltm(hdr):
ndim = hdr["NAXIS"]
for i in range(ndim):
for j in range(ndim):
try:
assert float(hdr["LTM{i}_{j}"]) == 1.0*(i == j)
except (KeyError, IndexError):
continue
except (AssertionError):
raise NotImplementedError("Non-identity LTM matrix is not supported.")
try: # Sometimes LTM matrix is saved as ``LTMi``, not ``LTMi_j``.
assert float(target["LTM{i}"]) == 1.0
except (KeyError, IndexError):
continue
except (AssertionError):
raise NotImplementedError("Non-identity LTM matrix is not supported.")
do_ref = reference is not None
_, target = _parse_data_header(target, parse_data=False)
if do_ref:
_, reference = _parse_data_header(reference, parse_data=False)
if not ignore_ltm:
_check_ltm(target)
if do_ref:
_check_ltm(reference)
ndim = target["NAXIS"]
ltvs_obj = []
for i in range(ndim):
try:
ltvs_obj.append(target[f"LTV{i + 1}"])
except (KeyError, IndexError):
ltvs_obj.append(0)
if do_ref:
ltvs_ref = []
for i in range(ndim):
try:
ltvs_ref.append(reference[f"LTV{i + 1}"])
except (KeyError, IndexError):
ltvs_ref.append(0)
offset = np.array(ltvs_obj) - np.array(ltvs_ref)
else:
offset = np.array(ltvs_obj)
if intify_offset:
offset = np.around(offset).astype(int)
if order_xyz:
return offset # This is already xyz order!
else:
return offset[::-1]
def fov_radius(header, unit=u.deg):
""" Calculates the rough radius (cone) of the (square) FOV using WCS.
Parameters
----------
header: Header
The header to extract WCS information.
Returns
-------
radius: `~astropy.Quantity`
The radius in degrees
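Example
-------
A minimal sketch (hypothetical header with a valid WCS):
>>> radius = fov_radius(ccd.header, unit=u.arcmin)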
"""
w = WCS(header)
nx, ny = float(header["NAXIS1"]), float(header["NAXIS2"])
# Rough calculation, so use mode='wcs'
c1 = SkyCoord.from_pixel(0, 0, wcs=w, origin=0, mode="wcs")
c2 = SkyCoord.from_pixel(nx, 0, wcs=w, origin=0, mode="wcs")
c3 = SkyCoord.from_pixel(0, ny, wcs=w, origin=0, mode="wcs")
c4 = SkyCoord.from_pixel(nx, ny, wcs=w, origin=0, mode="wcs")
# TODO: Can't we just do ``return max(r1, r2).to(unit)``???
# Why did I do this? I can't remember...
# 2020-11-09 14:29:29 (KST: GMT+09:00) ysBach
r1 = c1.separation(c3).value / 2
r2 = c2.separation(c4).value / 2
r = max(r1, r2) * u.deg
return r.to(unit)
# TODO: do not load data extension if not explicitly ordered
def wcsremove(
path=None,
additional_keys=None,
extension=None,
output=None,
output_verify="fix",
overwrite=False,
verbose=True,
close=True,
):
""" Remove most WCS related keywords from the header.
Parameters
----------
additional_keys : list of regex str, optional
Additional keys given by the user to be removed. They must be given as
regex patterns (a plain string such as 'NAXIS1' is also a valid regex).
output: str or Path
The output file path.
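Example
-------
A minimal sketch (hypothetical file paths):
>>> wcsremove("image.fits", output="image_nowcs.fits", overwrite=True,
... verbose=False)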
"""
# Define header keywords to be deleted in regex:
re2remove = [
"CD[0-9]_[0-9]", # Coordinate Description matrix
"CTYPE[0-9]", # e.g., 'RA---TAN' and 'DEC--TAN'
"C[0-9]YPE[0-9]", # FOCAS
"CUNIT[0-9]", # e.g., 'deg'
"C[0-9]NIT[0-9]", # FOCAS
"CRPIX[0-9]", # The reference pixels in image coordinate
"C[0-9]PIX[0-9]", # FOCAS
# The world cooordinate values at CRPIX[1, 2]
"CRVAL[0-9]",
"C[0-9]VAL[0-9]", # FOCAS
"CDELT[0-9]", # with CROTA, older version of CD matrix.
"C[0-9]ELT[0-9]", # FOCAS
# The angle between image Y and world Y axes
"CROTA[0-9]",
"CRDELT[0-9]",
"CFINT[0-9]",
"RADE[C]?SYS*" "WCS-ORIG", # RA/DEC system (frame) # FOCAS
"LTM[0-9]_[0-9]",
"LTV[0-9]*",
"PIXXMIT",
"PIXOFFST",
"WAT[0-9]_[0-9]", # For TNX and ZPX, e.g., "WAT1_001"
"C0[0-9]_[0-9]", # polynomial CD by imwcs
"PC[0-9]_[0-9]",
"P[A-Z]?[0-9]?[0-9][0-9][0-9][0-9][0-9][0-9]", # FOCAS
"PV[0-9]_[0-9]",
"[A,B][P]?_[0-9]_[0-9]", # astrometry.net
"[A,B][P]?_ORDER", # astrometry.net
"[A,B][P]?_DMAX", # astrometry.net
"WCS[A-Z]", # see below
"AST_[A-Z]", # astrometry.net
"ASTIRMS[0-9]", # astrometry.net
"ASTRRMS[0-9]", # astrometry.net
"FGROUPNO", # SCAMP field group label
"ASTINST", # SCAMP astrometric instrument label
"FLXSCALE", # SCAMP relative flux scale
"MAGZEROP", # SCAMP zero-point
"PHOTIRMS", # mag dispersion RMS (internal, high S/N)
"PHOTINST", # SCAMP photometric instrument label
"PHOTLINK", # True if linked to a photometric field
"SECPIX[0-9]",
]
# WCS[A-Z] captures, WCS[DIM, RFCAT, IMCAT, MATCH, NREF, TOL, SEP],
# but not [IM]WCS, for example. These are likely to have been inserted
# by WCS updating tools like astrometry.net or WCSlib/WCSTools. I
# intentionally ignored IMWCS just for future reference.
if additional_keys is not None:
re2remove = re2remove + listify(additional_keys)
# If following str is in comment, suggest it if verbose
candidate_re = ["wcs", "axis", "axes", "coord", "distortion", "reference"]
candidate_key = []
hdul = fits.open(path)
hdr = hdul[_parse_extension(extension)].header
if verbose:
print("Removed keywords: ", end="")
for k in list(hdr.keys()):
com = hdr.comments[k]
deleted = False
for re_i in re2remove:
if re.match(re_i, k) is not None and not deleted:
hdr.remove(k)
deleted = True
if verbose:
print(f"{k}", end=" ")
continue
if not deleted:
for re_cand in candidate_re:
if re.match(re_cand, com):
candidate_key.append(k)
if verbose:
print("\n")
if len(candidate_key) != 0:
print(f"\nFollowing keys may be related to WCS too:\n\t{candidate_key}")
hdul[_parse_extension(extension)].header = hdr
if output is not None:
hdul.writeto(output, output_verify=output_verify, overwrite=overwrite)
if close:
hdul.close()
return
else:
return hdul
# def center_coord(header, skycoord=False):
# """ Gives the sky coordinate of the center of the image field of view.
# Parameters
# ----------
# header: astropy.header.Header
# The header to be used to extract WCS information (and image size)
# skycoord: bool
# Whether to return in the astropy.coordinates.SkyCoord object. If
# `False`, a numpy array is returned.
# """
# wcs = WCS(header)
# cx = float(header['naxis1']) / 2 - 0.5
# cy = float(header['naxis2']) / 2 - 0.5
# center_coo = wcs.wcs_pix2world(cx, cy, 0)
# if skycoord:
# return SkyCoord(*center_coo, unit='deg')
# return np.array(center_coo)
def convert_bit(fname, original_bit=12, target_bit=16):
""" Converts a FIT(S) file's bit.
Notes
-----
In ASI1600MM, for example, the output data is 12-bit, but since the FITS
standard does not accept 12-bit (the closest accepted integer type is
16-bit), the pixel values can have, e.g., 0 and 15 but not any integer
between these two. So it is better to convert to 16-bit.
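Example
-------
A minimal sketch (hypothetical file):
>>> hdul = convert_bit("raw_asi1600mm.fits", original_bit=12, target_bit=16)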
"""
hdul = fits.open(fname)
dscale = 2**(target_bit - original_bit)
hdul[0].data = (hdul[0].data / dscale).astype("int")
hdul[0].header["MAXDATA"] = (2**original_bit - 1,
"maximum valid physical value in raw data")
# hdul[0].header['BITPIX'] = target_bit
# FITS ``BITPIX`` cannot have, e.g., 12, so the above is redundant line.
hdul[0].header["BUNIT"] = "ADU"
hdul.close()
return hdul
# TODO: add sigma-clipped statistics option (hdr key can be using "SIGC", e.g., SIGCAVG.)
def give_stats(
item,
mask=None,
extension=None,
statsecs=None,
percentiles=[1, 99],
N_extrema=None,
return_header=False,
):
""" Calculates simple statistics.
Parameters
----------
item: array-like, CCDData, HDUList, PrimaryHDU, ImageHDU, or path-like
The data or path to a FITS file to be analyzed.
mask : array-like, optional
The mask to be used. If given, it must have the same size as `item`
**before** applying `statsecs`.
extension: int, str, (str, int)
The extension of FITS to be used. It can be given as integer
(0-indexing) of the extension, ``EXTNAME`` (single str), or a tuple of
str and int: ``(EXTNAME, EXTVER)``. If `None` (default), the *first
extension with data* will be used.
statsecs : str, slice, int, list of such, optional.
The section information to calculate the statistics. It can be given
as a string (FITS-convention section, e.g., "[1:100,2:200]"), a slice
object (e.g., slice(1,100,2)), or as a bezel (e.g., 10 or (5, 10),
etc.). See `~ysfitsutilpy.slicefy` for more details.
percentiles: list-like, optional
The percentiles to be calculated.
N_extrema: int, optional
The number of low and high elements to be returned when the whole data
are sorted. If `None`, it will not be calculated. If ``1``, it is
identical to min/max values.
return_header : bool, optional.
Works only if you gave `item` as FITS file path or
`~astropy.nddata.CCDData`. The statistics information will be added to
the header and the updated header will be returned.
Returns
-------
result : dict
The dict which contains all the statistics.
hdr : Header
The updated header. Returned only if `return_header` is `True` and
`item` is a FITS file path or has a `header` attribute (e.g.,
`~astropy.nddata.CCDData` or `hdu`)
Notes
-----
If you have bottleneck package, the functions from bottleneck will be used.
Otherwise, numpy is used.
Example
-------
>>> bias = CCDData.read("bias_bin11.fits")
>>> dark = CCDData.read("pdark_300s_27C_bin11.fits")
>>> percentiles = [0.1, 1, 5, 95, 99, 99.9]
>>> give_stats(bias, percentiles=percentiles, N_extrema=5)
>>> give_stats(dark, percentiles=percentiles, N_extrema=5)
Or just simply
>>> give_stats("bias_bin11.fits", percentiles=percentiles, N_extrema=5)
To update the header
>>> ccd = CCDData.read("bias_bin11.fits", unit='adu')
>>> _, hdr = give_stats(ccd, N_extrema=10, return_header=True)
>>> ccd.header = hdr
# To read the stringfied list into python list (e.g., percentiles):
# >>> import json
# >>> percentiles = json.loads(ccd.header['percentiles'])
"""
data, hdr = _parse_data_header(item, extension=extension)
if mask is not None:
data[mask] = np.nan
if statsecs is not None:
statsecs = [statsecs] if isinstance(statsecs, str) else list(statsecs)
data = np.array([data[slicefy(sec)] for sec in statsecs])
data = data.ravel()
data = data[np.isfinite(data)]
minf = np.min
maxf = np.max
avgf = np.mean
medf = bn.median # Still median from bn seems faster!
stdf = np.std
pctf = np.percentile
result = dict(
num=np.size(data),
min=minf(data),
max=maxf(data),
avg=avgf(data),
med=medf(data),
std=stdf(data, ddof=1),
madstd=mad_std(data),
percentiles=percentiles,
pct=pctf(data, percentiles),
slices=statsecs,
)
# d_pct = np.percentile(data, percentiles)
# for i, pct in enumerate(percentiles):
# result[f"percentile_{round(pct, 4)}"] = d_pct[i]
zs = ImageNormalize(data, interval=ZScaleInterval())
d_zmin = zs.vmin
d_zmax = zs.vmax
result["zmin"] = d_zmin
result["zmax"] = d_zmax
if N_extrema is not None:
if 2*N_extrema > result["num"]:
warn(
f"Extrema overlaps (2*N_extrema ({2*N_extrema}) > N_pix ({result['num']}))"
)
data_flatten = np.sort(data, axis=None) # axis=None will do flatten.
d_los = data_flatten[:N_extrema]
d_his = data_flatten[-1*N_extrema:]
result["ext_lo"] = d_los
result["ext_hi"] = d_his
if return_header and hdr is not None:
hdr["STATNPIX"] = (result["num"], "Number of pixels used in statistics below")
hdr["STATMIN"] = (result["min"], "Minimum value of the pixels")
hdr["STATMAX"] = (result["max"], "Maximum value of the pixels")
hdr["STATAVG"] = (result["avg"], "Average value of the pixels")
hdr["STATMED"] = (result["med"], "Median value of the pixels")
hdr["STATSTD"] = (result["std"], "Sample standard deviation value of the pixels")
hdr["STATMED"] = (result["zmin"], "Median value of the pixels")
hdr["STATZMIN"] = (result["zmin"], "zscale minimum value of the pixels")
hdr["STATZMAX"] = (result["zmax"], "zscale minimum value of the pixels")
for i, p in enumerate(percentiles):
hdr[f"PERCTS{i+1:02d}"] = (p, "The percentile used in STATPCii")
hdr[f"STATPC{i+1:02d}"] = (result["pct"][i], "Percentile value at PERCTSii")
if statsecs is not None:
for i, sec in enumerate(statsecs):
hdr[f"STATSEC{i+1:01d}"] = (sec, "Sections used for statistics")
if N_extrema is not None:
if N_extrema > 99:
warn("N_extrema > 99 may not work properly in header.")
for i in range(N_extrema):
hdr[f"STATLO{i+1:02d}"] = (result["ext_lo"][i],
f"Lower extreme values (N_extrema={N_extrema})")
hdr[f"STATHI{i+1:02d}"] = (result["ext_hi"][i],
f"Upper extreme values (N_extrema={N_extrema})")
return result, hdr
return result
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/accounting/model/generate_remote_key_request.py
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
class GenerateRemoteKeyRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
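Example (a minimal sketch; the key name is hypothetical):
>>> body = GenerateRemoteKeyRequest(name="Remote Deployment Key")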
"""
allowed_values = {
}
validations = {
('name',): {
'min_length': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
defined_types = {
'name': (str,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501
"""GenerateRemoteKeyRequest - a model defined in OpenAPI
Args:
name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, *args, **kwargs): # noqa: E501
"""GenerateRemoteKeyRequest - a model defined in OpenAPI
Args:
name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name: Union[str] = name

/EMD_signal-1.5.1-py3-none-any.whl/PyEMD/EMD.py
import logging
from typing import Optional, Tuple
import numpy as np
from scipy.interpolate import interp1d
from PyEMD.splines import akima, cubic, cubic_hermite, cubic_spline_3pts, pchip
from PyEMD.utils import get_timeline
FindExtremaOutput = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
class EMD:
"""
.. _EMD:
**Empirical Mode Decomposition**
Method of decomposing a signal into Intrinsic Mode Functions (IMFs)
based on the algorithm presented in Huang et al. [Huang1998]_.
The algorithm was validated against the Matlab implementation (version from 03.2007) by Rilling et al. [Rilling2003]_.
Thresholds which control the goodness of the decomposition:
* `std_thr` --- Test for the proto-IMF how variance changes between siftings.
* `svar_thr` --- Test for the proto-IMF how energy changes between siftings.
* `total_power_thr` --- Test for the whole decomposition how much power remains in the residue.
* `range_thr` --- Test for the whole decomposition whether the residue's amplitude range is negligible.
References
----------
.. [Huang1998] N. E. Huang et al., "The empirical mode decomposition and the
Hilbert spectrum for nonlinear and non-stationary time series
analysis", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998
.. [Rilling2003] G. Rilling, P. Flandrin and P. Goncalves, "On Empirical Mode
Decomposition and its algorithms", IEEE-EURASIP Workshop on
Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003
Examples
--------
>>> import numpy as np
>>> T = np.linspace(0, 1, 100)
>>> S = np.sin(2*2*np.pi*T)
>>> emd = EMD(extrema_detection='parabol')
>>> IMFs = emd.emd(S)
>>> IMFs.shape
(1, 100)
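A sketch of decomposing against an explicit, uniformly sampled time vector
(the signal below is purely illustrative):

>>> t = np.linspace(0, 1, 200)
>>> s = np.sin(8*2*np.pi*t) + t**2
>>> imfs = EMD().emd(s, t)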
"""
logger = logging.getLogger(__name__)
def __init__(self, spline_kind: str = "cubic", nbsym: int = 2, **kwargs):
"""Initiate *EMD* instance.
Configuration, such as threshold values, can be passed as kwargs (keyword arguments).
Parameters
----------
FIXE : int (default: 0)
Number of sifting iterations to perform for each IMF, overriding the other stopping criteria (0 disables).
FIXE_H : int (default: 0)
Number of consecutive sifting iterations for which the numbers of zero-crossings and extrema differ at most by one, before the proto-IMF is accepted (0 disables).
MAX_ITERATION : int (default 1000)
Maximum number of iterations per single sifting in EMD.
energy_ratio_thr : float (default: 0.2)
Threshold value on energy ratio per IMF check.
std_thr : float (default: 0.2)
Threshold value on standard deviation per IMF check.
svar_thr : float (default: 0.001)
Threshold value on scaled variance per IMF check.
total_power_thr : float (default 0.005)
Threshold value on total power per EMD decomposition.
range_thr : float (default 0.001)
Threshold for amplitude range (after scaling) per EMD decomposition.
extrema_detection : str (default 'simple')
Method used to find extrema. Allowed values: 'simple', 'parabol'.
DTYPE : np.dtype (default np.float64)
Data type used.
Examples
--------
>>> emd = EMD(std_thr=0.01, range_thr=0.05)
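A further configuration sketch using the fixed-iteration options (the values are
illustrative, not recommendations):

>>> emd = EMD(FIXE_H=5, MAX_ITERATION=500, extrema_detection='parabol')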
"""
# Declare constants
self.energy_ratio_thr = float(kwargs.get("energy_ratio_thr", 0.2))
self.std_thr = float(kwargs.get("std_thr", 0.2))
self.svar_thr = float(kwargs.get("svar_thr", 0.001))
self.total_power_thr = float(kwargs.get("total_power_thr", 0.005))
self.range_thr = float(kwargs.get("range_thr", 0.001))
self.nbsym = int(kwargs.get("nbsym", nbsym))
self.scale_factor = float(kwargs.get("scale_factor", 1.0))
self.spline_kind = spline_kind
self.extrema_detection = kwargs.get("extrema_detection", "simple") # simple, parabol
assert self.extrema_detection in (
"simple",
"parabol",
), "Only 'simple' and 'parabol' values supported"
self.DTYPE = kwargs.get("DTYPE", np.float64)
self.FIXE = int(kwargs.get("FIXE", 0))
self.FIXE_H = int(kwargs.get("FIXE_H", 0))
self.MAX_ITERATION = int(kwargs.get("MAX_ITERATION", 1000))
# Instance global declaration
self.imfs = None # Optional[np.ndarray]
self.residue = None # Optional[np.ndarray]
def __call__(self, S: np.ndarray, T: Optional[np.ndarray] = None, max_imf: int = -1) -> np.ndarray:
return self.emd(S, T=T, max_imf=max_imf)
def extract_max_min_spline(
self, T: np.ndarray, S: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Extracts top and bottom envelopes based on the signal,
which are constructed based on maxima and minima, respectively.
Parameters
----------
T : numpy array
Position or time array.
S : numpy array
Input data S(T).
Returns
-------
max_spline : numpy array
Spline spanned on S maxima.
min_spline : numpy array
Spline spanned on S minima.
max_extrema : numpy array
Points indicating local maxima.
min_extrema : numpy array
Points indicating local minima.
"""
# Get indexes of extrema
ext_res = self.find_extrema(T, S)
max_pos, max_val = ext_res[0], ext_res[1]
min_pos, min_val = ext_res[2], ext_res[3]
if len(max_pos) + len(min_pos) < 3:
return [-1] * 4 # TODO: Fix this. Doesn't match the signature.
#########################################
# Extrapolation of signal (over boundaries)
max_extrema, min_extrema = self.prepare_points(T, S, max_pos, max_val, min_pos, min_val)
_, max_spline = self.spline_points(T, max_extrema)
_, min_spline = self.spline_points(T, min_extrema)
return max_spline, min_spline, max_extrema, min_extrema
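# A hedged usage sketch of the sifting step this method supports (`emd`, `T` and
# `imf` are assumed to already exist; only the method name is from this class):
#
#     max_env, min_env, _, _ = emd.extract_max_min_spline(T, imf)
#     mean_env = 0.5 * (max_env + min_env)  # local mean subtracted during sifting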
def prepare_points(
self,
T: np.ndarray,
S: np.ndarray,
max_pos: np.ndarray,
max_val: np.ndarray,
min_pos: np.ndarray,
min_val: np.ndarray,
):
"""
Performs extrapolation on edges by adding extra extrema, also known
as signal mirroring. The number of added points depends on the *nbsym*
variable.
Parameters
----------
T : numpy array
Position or time array.
S : numpy array
Input signal.
max_pos : iterable
Sorted time positions of maxima.
max_val : iterable
Signal values at max_pos positions.
min_pos : iterable
Sorted time positions of minima.
min_val : iterable
Signal values at min_pos positions.
Returns
-------
max_extrema : numpy array (2 rows)
Position (1st row) and values (2nd row) of maxima.
min_extrema : numpy array (2 rows)
Position (1st row) and values (2nd row) of minima.
"""
if self.extrema_detection == "parabol":
return self._prepare_points_parabol(T, S, max_pos, max_val, min_pos, min_val)
elif self.extrema_detection == "simple":
return self._prepare_points_simple(T, S, max_pos, max_val, min_pos, min_val)
else:
msg = "Incorrect extrema detection type. Please try: 'simple' or 'parabol'."
raise ValueError(msg)
def _prepare_points_parabol(self, T, S, max_pos, max_val, min_pos, min_val) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs mirroring on a signal whose extrema do not necessarily
lie on the position array.
See :meth:`EMD.prepare_points`.
"""
# Need at least two extrema to perform mirroring
max_extrema = np.zeros((2, len(max_pos)), dtype=self.DTYPE)
min_extrema = np.zeros((2, len(min_pos)), dtype=self.DTYPE)
max_extrema[0], min_extrema[0] = max_pos, min_pos
max_extrema[1], min_extrema[1] = max_val, min_val
# Local variables
nbsym = self.nbsym
end_min, end_max = len(min_pos), len(max_pos)
####################################
# Left bound
d_pos = max_pos[0] - min_pos[0]
left_ext_max_type = d_pos < 0 # True -> max, else min
# Left extremum is maximum
if left_ext_max_type:
if (S[0] > min_val[0]) and (np.abs(d_pos) > (max_pos[0] - T[0])):
# mirror signal to first extrema
expand_left_max_pos = 2 * max_pos[0] - max_pos[1 : nbsym + 1]
expand_left_min_pos = 2 * max_pos[0] - min_pos[0:nbsym]
expand_left_max_val = max_val[1 : nbsym + 1]
expand_left_min_val = min_val[0:nbsym]
else:
# mirror signal to beginning
expand_left_max_pos = 2 * T[0] - max_pos[0:nbsym]
expand_left_min_pos = 2 * T[0] - np.append(T[0], min_pos[0 : nbsym - 1])
expand_left_max_val = max_val[0:nbsym]
expand_left_min_val = np.append(S[0], min_val[0 : nbsym - 1])
# Left extremum is minimum
else:
if (S[0] < max_val[0]) and (np.abs(d_pos) > (min_pos[0] - T[0])):
# mirror signal to first extrema
expand_left_max_pos = 2 * min_pos[0] - max_pos[0:nbsym]
expand_left_min_pos = 2 * min_pos[0] - min_pos[1 : nbsym + 1]
expand_left_max_val = max_val[0:nbsym]
expand_left_min_val = min_val[1 : nbsym + 1]
else:
# mirror signal to beginning
expand_left_max_pos = 2 * T[0] - np.append(T[0], max_pos[0 : nbsym - 1])
expand_left_min_pos = 2 * T[0] - min_pos[0:nbsym]
expand_left_max_val = np.append(S[0], max_val[0 : nbsym - 1])
expand_left_min_val = min_val[0:nbsym]
if not expand_left_min_pos.shape:
expand_left_min_pos, expand_left_min_val = min_pos, min_val
if not expand_left_max_pos.shape:
expand_left_max_pos, expand_left_max_val = max_pos, max_val
expand_left_min = np.vstack((expand_left_min_pos[::-1], expand_left_min_val[::-1]))
expand_left_max = np.vstack((expand_left_max_pos[::-1], expand_left_max_val[::-1]))
####################################
# Right bound
d_pos = max_pos[-1] - min_pos[-1]
right_ext_max_type = d_pos > 0
# Right extremum is maximum
if not right_ext_max_type:
if (S[-1] < max_val[-1]) and (np.abs(d_pos) > (T[-1] - min_pos[-1])):
# mirror signal to last extrema
idx_max = max(0, end_max - nbsym)
idx_min = max(0, end_min - nbsym - 1)
expand_right_max_pos = 2 * min_pos[-1] - max_pos[idx_max:]
expand_right_min_pos = 2 * min_pos[-1] - min_pos[idx_min:-1]
expand_right_max_val = max_val[idx_max:]
expand_right_min_val = min_val[idx_min:-1]
else:
# mirror signal to end
idx_max = max(0, end_max - nbsym + 1)
idx_min = max(0, end_min - nbsym)
expand_right_max_pos = 2 * T[-1] - np.append(max_pos[idx_max:], T[-1])
expand_right_min_pos = 2 * T[-1] - min_pos[idx_min:]
expand_right_max_val = np.append(max_val[idx_max:], S[-1])
expand_right_min_val = min_val[idx_min:]
# Right extremum is minimum
else:
if (S[-1] > min_val[-1]) and len(max_pos) > 1 and (np.abs(d_pos) > (T[-1] - max_pos[-1])):
# mirror signal to last extremum
idx_max = max(0, end_max - nbsym - 1)
idx_min = max(0, end_min - nbsym)
expand_right_max_pos = 2 * max_pos[-1] - max_pos[idx_max:-1]
expand_right_min_pos = 2 * max_pos[-1] - min_pos[idx_min:]
expand_right_max_val = max_val[idx_max:-1]
expand_right_min_val = min_val[idx_min:]
else:
# mirror signal to end
idx_max = max(0, end_max - nbsym)
idx_min = max(0, end_min - nbsym + 1)
expand_right_max_pos = 2 * T[-1] - max_pos[idx_max:]
expand_right_min_pos = 2 * T[-1] - np.append(min_pos[idx_min:], T[-1])
expand_right_max_val = max_val[idx_max:]
expand_right_min_val = np.append(min_val[idx_min:], S[-1])
if not expand_right_min_pos.shape:
expand_right_min_pos, expand_right_min_val = min_pos, min_val
if not expand_right_max_pos.shape:
expand_right_max_pos, expand_right_max_val = max_pos, max_val
expand_right_min = np.vstack((expand_right_min_pos[::-1], expand_right_min_val[::-1]))
expand_right_max = np.vstack((expand_right_max_pos[::-1], expand_right_max_val[::-1]))
max_extrema = np.hstack((expand_left_max, max_extrema, expand_right_max))
min_extrema = np.hstack((expand_left_min, min_extrema, expand_right_min))
return max_extrema, min_extrema
def _prepare_points_simple(
self,
T: np.ndarray,
S: np.ndarray,
max_pos: np.ndarray,
max_val: Optional[np.ndarray],
min_pos: np.ndarray,
min_val: Optional[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs mirroring on a signal whose extrema can be indexed on
the position array.
See :meth:`EMD.prepare_points`.
"""
# Convert extrema positions to integer indices
ind_min = min_pos.astype(int)
ind_max = max_pos.astype(int)
# Local variables
nbsym = self.nbsym
end_min, end_max = len(min_pos), len(max_pos)
####################################
# Left bound - mirror nbsym points to the left
if ind_max[0] < ind_min[0]:
if S[0] > S[ind_min[0]]:
lmax = ind_max[1 : min(end_max, nbsym + 1)][::-1]
lmin = ind_min[0 : min(end_min, nbsym + 0)][::-1]
lsym = ind_max[0]
else:
lmax = ind_max[0 : min(end_max, nbsym)][::-1]
lmin = np.append(ind_min[0 : min(end_min, nbsym - 1)][::-1], 0)
lsym = 0
else:
if S[0] < S[ind_max[0]]:
lmax = ind_max[0 : min(end_max, nbsym + 0)][::-1]
lmin = ind_min[1 : min(end_min, nbsym + 1)][::-1]
lsym = ind_min[0]
else:
lmax = np.append(ind_max[0 : min(end_max, nbsym - 1)][::-1], 0)
lmin = ind_min[0 : min(end_min, nbsym)][::-1]
lsym = 0
####################################
# Right bound - mirror nbsym points to the right
if ind_max[-1] < ind_min[-1]:
if S[-1] < S[ind_max[-1]]:
rmax = ind_max[max(end_max - nbsym, 0) :][::-1]
rmin = ind_min[max(end_min - nbsym - 1, 0) : -1][::-1]
rsym = ind_min[-1]
else:
rmax = np.append(ind_max[max(end_max - nbsym + 1, 0) :], len(S) - 1)[::-1]
rmin = ind_min[max(end_min - nbsym, 0) :][::-1]
rsym = len(S) - 1
else:
if S[-1] > S[ind_min[-1]]:
rmax = ind_max[max(end_max - nbsym - 1, 0) : -1][::-1]
rmin = ind_min[max(end_min - nbsym, 0) :][::-1]
rsym = ind_max[-1]
else:
rmax = ind_max[max(end_max - nbsym, 0) :][::-1]
rmin = np.append(ind_min[max(end_min - nbsym + 1, 0) :], len(S) - 1)[::-1]
rsym = len(S) - 1
# In case any array missing
if not lmin.size:
lmin = ind_min
if not rmin.size:
rmin = ind_min
if not lmax.size:
lmax = ind_max
if not rmax.size:
rmax = ind_max
# Mirror points
tlmin = 2 * T[lsym] - T[lmin]
tlmax = 2 * T[lsym] - T[lmax]
trmin = 2 * T[rsym] - T[rmin]
trmax = 2 * T[rsym] - T[rmax]
# If the mirrored points do not extend beyond the passed time range, re-mirror about the edge.
if tlmin[0] > T[0] or tlmax[0] > T[0]:
if lsym == ind_max[0]:
lmax = ind_max[0 : min(end_max, nbsym)][::-1]
else:
lmin = ind_min[0 : min(end_min, nbsym)][::-1]
if lsym == 0:
raise Exception("Left edge BUG")
lsym = 0
tlmin = 2 * T[lsym] - T[lmin]
tlmax = 2 * T[lsym] - T[lmax]
if trmin[-1] < T[-1] or trmax[-1] < T[-1]:
if rsym == ind_max[-1]:
rmax = ind_max[max(end_max - nbsym, 0) :][::-1]
else:
rmin = ind_min[max(end_min - nbsym, 0) :][::-1]
if rsym == len(S) - 1:
raise Exception("Right edge BUG")
rsym = len(S) - 1
trmin = 2 * T[rsym] - T[rmin]
trmax = 2 * T[rsym] - T[rmax]
zlmax = S[lmax]
zlmin = S[lmin]
zrmax = S[rmax]
zrmin = S[rmin]
tmin = np.append(tlmin, np.append(T[ind_min], trmin))
tmax = np.append(tlmax, np.append(T[ind_max], trmax))
zmin = np.append(zlmin, np.append(S[ind_min], zrmin))
zmax = np.append(zlmax, np.append(S[ind_max], zrmax))
max_extrema = np.array([tmax, zmax])
min_extrema = np.array([tmin, zmin])
# Make doubly sure that each extremum is significant by dropping duplicated positions
max_dup_idx = np.where(max_extrema[0, 1:] == max_extrema[0, :-1])
max_extrema = np.delete(max_extrema, max_dup_idx, axis=1)
min_dup_idx = np.where(min_extrema[0, 1:] == min_extrema[0, :-1])
min_extrema = np.delete(min_extrema, min_dup_idx, axis=1)
return max_extrema, min_extrema
def spline_points(self, T: np.ndarray, extrema: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Constructs spline over given points.
Parameters
----------
T : numpy array
Position or time array.
extrema : numpy array
Position (1st row) and values (2nd row) of points.
Returns
-------
T : numpy array
Position array (same as input).
spline : numpy array
Spline array over given positions T.
"""
kind = self.spline_kind.lower()
t = T[np.r_[T >= extrema[0, 0]] & np.r_[T <= extrema[0, -1]]]
if kind == "akima":
return t, akima(extrema[0], extrema[1], t)
elif kind == "cubic":
if extrema.shape[1] > 3:
return t, cubic(extrema[0], extrema[1], t)
else:
return cubic_spline_3pts(extrema[0], extrema[1], t)
elif kind == "pchip":
return t, pchip(extrema[0], extrema[1], t)
elif kind == "cubic_hermite":
return t, cubic_hermite(extrema[0], extrema[1], t)
elif kind in ["slinear", "quadratic", "linear"]:
return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)
else:
raise ValueError("No such interpolation method!")
@staticmethod
def _not_duplicate(S: np.ndarray) -> np.ndarray:
"""
Returns indices of points that are not interior points of a flat (repeated-value) segment.
Example
-------
>>> S = np.array([0, 1, 1, 1, 2, 3])
>>> EMD._not_duplicate(S)
array([0, 1, 3, 4, 5])
"""
dup = np.r_[S[1:-1] == S[0:-2]] & np.r_[S[1:-1] == S[2:]]
not_dup_idx = np.arange(1, len(S) - 1)[~dup]
idx = np.empty(len(not_dup_idx) + 2, dtype=np.int64)
idx[0] = 0
idx[-1] = len(S) - 1
idx[1:-1] = not_dup_idx
return idx
def find_extrema(self, T: np.ndarray, S: np.ndarray) -> FindExtremaOutput:
"""
Returns extrema (minima and maxima) for given signal S.
Detection and definition of the extrema depends on
``extrema_detection`` variable, set on initiation of EMD.
Parameters
----------
T : numpy array
Position or time array.
S : numpy array
Input data S(T).
Returns
-------
local_max_pos : numpy array
Position of local maxima.
local_max_val : numpy array
Values of local maxima.
local_min_pos : numpy array
Position of local minima.
local_min_val : numpy array
Values of local minima.
"""
if self.extrema_detection == "parabol":
return self._find_extrema_parabol(T, S)
elif self.extrema_detection == "simple":
return self._find_extrema_simple(T, S)
else:
raise ValueError("Incorrect extrema detection type. Please try: 'simple' or 'parabol'.")
def _find_extrema_parabol(self, T: np.ndarray, S: np.ndarray) -> FindExtremaOutput:
"""
Performs parabolic estimation of extrema, i.e. an extremum is the vertex
of a parabola fitted to 3 consecutive points, where the middle point is
the closest to the extremum.
See :meth:`EMD.find_extrema()`.
"""
# Finds indexes of zero-crossings
S1, S2 = S[:-1], S[1:]
indzer = np.nonzero(S1 * S2 < 0)[0]
if np.any(S == 0):
indz = np.nonzero(S == 0)[0]
if np.any(np.diff(indz) == 1):
zer = S == 0
dz = np.diff(np.append(np.append(0, zer), 0))
debz = np.nonzero(dz == 1)[0]
finz = np.nonzero(dz == -1)[0] - 1
indz = np.round((debz + finz) / 2.0)
indzer = np.sort(np.append(indzer, indz))
dt = float(T[1] - T[0])
scale = 2.0 * dt * dt
idx = self._not_duplicate(S)
T = T[idx]
S = S[idx]
# p - previous
# 0 - current
# n - next
Tp, T0, Tn = T[:-2], T[1:-1], T[2:]
Sp, S0, Sn = S[:-2], S[1:-1], S[2:]
# a = Sn + Sp - 2*S0
# b = 2*(Tn+Tp)*S0 - ((Tn+T0)*Sp+(T0+Tp)*Sn)
# c = Sp*T0*Tn -2*Tp*S0*Tn + Tp*T0*Sn
TnTp, T0Tn, TpT0 = Tn - Tp, T0 - Tn, Tp - T0
scale = Tp * Tn * Tn + Tp * Tp * T0 + T0 * T0 * Tn - Tp * Tp * Tn - Tp * T0 * T0 - T0 * Tn * Tn
a = T0Tn * Sp + TnTp * S0 + TpT0 * Sn
b = (S0 - Sn) * Tp**2 + (Sn - Sp) * T0**2 + (Sp - S0) * Tn**2
c = T0 * Tn * T0Tn * Sp + Tn * Tp * TnTp * S0 + Tp * T0 * TpT0 * Sn
a = a / scale
b = b / scale
c = c / scale
a[a == 0] = 1e-14
tVertex = -0.5 * b / a
idx = np.r_[tVertex < T0 + 0.5 * (Tn - T0)] & np.r_[tVertex >= T0 - 0.5 * (T0 - Tp)]
a, b, c = a[idx], b[idx], c[idx]
tVertex = tVertex[idx]
sVertex = a * tVertex * tVertex + b * tVertex + c
local_max_pos, local_max_val = tVertex[a < 0], sVertex[a < 0]
local_min_pos, local_min_val = tVertex[a > 0], sVertex[a > 0]
return local_max_pos, local_max_val, local_min_pos, local_min_val, indzer
@staticmethod
def _find_extrema_simple(T: np.ndarray, S: np.ndarray) -> FindExtremaOutput:
"""
Performs extrema detection, where an extremum is defined as a point
that is above/below its neighbours.
See :meth:`EMD.find_extrema`.
"""
# Finds indexes of zero-crossings
S1, S2 = S[:-1], S[1:]
indzer = np.nonzero(S1 * S2 < 0)[0]
if np.any(S == 0):
indz = np.nonzero(S == 0)[0]
if np.any(np.diff(indz) == 1):
zer = S == 0
dz = np.diff(np.append(np.append(0, zer), 0))
debz = np.nonzero(dz == 1)[0]
finz = np.nonzero(dz == -1)[0] - 1
indz = np.round((debz + finz) / 2.0)
indzer = np.sort(np.append(indzer, indz))
# Finds local extrema
d = np.diff(S)
d1, d2 = d[:-1], d[1:]
indmin = np.nonzero(np.r_[d1 * d2 < 0] & np.r_[d1 < 0])[0] + 1
indmax = np.nonzero(np.r_[d1 * d2 < 0] & np.r_[d1 > 0])[0] + 1
# When two or more points have the same value
if np.any(d == 0):
imax, imin = [], []
bad = d == 0
dd = np.diff(np.append(np.append(0, bad), 0))
debs = np.nonzero(dd == 1)[0]
fins = np.nonzero(dd == -1)[0]
if debs[0] == 1:
if len(debs) > 1:
debs, fins = debs[1:], fins[1:]
else:
debs, fins = [], []
if len(debs) > 0:
if fins[-1] == len(S) - 1:
if len(debs) > 1:
debs, fins = debs[:-1], fins[:-1]
else:
debs, fins = [], []
lc = len(debs)
if lc > 0:
for k in range(lc):
if d[debs[k] - 1] > 0:
if d[fins[k]] < 0:
imax.append(np.round((fins[k] + debs[k]) / 2.0))
else:
if d[fins[k]] > 0:
imin.append(np.round((fins[k] + debs[k]) / 2.0))
if len(imax) > 0:
indmax = indmax.tolist()
for x in imax:
indmax.append(int(x))
indmax.sort()
if len(imin) > 0:
indmin = indmin.tolist()
for x in imin:
indmin.append(int(x))
indmin.sort()
local_max_pos = T[indmax]
local_max_val = S[indmax]
local_min_pos = T[indmin]
local_min_val = S[indmin]
return local_max_pos, local_max_val, local_min_pos, local_min_val, indzer
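# A worked sketch on a toy signal (values are illustrative only): for
#
#     T = np.arange(5, dtype=float)
#     S = np.array([0., 2., 1., 3., 0.])
#     max_pos, max_val, min_pos, min_val, indzer = EMD._find_extrema_simple(T, S)
#
# the sign changes of np.diff(S) give local maxima at positions 1 and 3 and a
# local minimum at position 2.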
def end_condition(self, S: np.ndarray, IMF: np.ndarray) -> bool:
"""Tests for end condition of whole EMD. The procedure will stop if:
* Absolute amplitude (max - min) is below *range_thr* threshold, or
* Metric L1 (mean absolute difference) is below *total_power_thr* threshold.
Parameters
----------
S : numpy array
Original signal on which EMD was performed.
IMF : numpy 2D array
Set of IMFs where each row is an IMF. Their order is not important.
Returns
-------
end : bool
Whether sifting is finished.
"""
# When to stop EMD
tmp = S - np.sum(IMF, axis=0)
if np.max(tmp) - np.min(tmp) < self.range_thr:
self.logger.debug("FINISHED -- RANGE")
return True
if np.sum(np.abs(tmp)) < self.total_power_thr:
self.logger.debug("FINISHED -- SUM POWER")
return True
return False
def check_imf(
self,
imf_new: np.ndarray,
imf_old: np.ndarray,
eMax: np.ndarray,
eMin: np.ndarray,
) -> bool:
"""
Huang criteria for an **IMF** (similar to the Cauchy convergence test).
A signal is an IMF if consecutive siftings do not change it
in a significant manner.
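The standard-deviation test implemented below can be written as (this restates
the code, it is not an additional criterion):

    SD = sum_t ((imf_new[t] - imf_old[t]) / imf_new[t]) ** 2  <  std_thr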
"""
# local max are >0 and local min are <0
if np.any(eMax[1] < 0) or np.any(eMin[1] > 0):
return False
# Convergence
if np.sum(imf_new**2) < 1e-10:
return False
# Precompute values
imf_diff = imf_new - imf_old
imf_diff_sqrd_sum = np.sum(imf_diff * imf_diff)
# Scaled variance test
svar = imf_diff_sqrd_sum / (max(imf_old) - min(imf_old))
if svar < self.svar_thr:
self.logger.debug("Scaled variance -- PASSED")
return True
# Standard deviation test
std = np.sum((imf_diff / imf_new) ** 2)
if std < self.std_thr:
self.logger.debug("Standard deviation -- PASSED")
return True
energy_ratio = imf_diff_sqrd_sum / np.sum(imf_old * imf_old)
if energy_ratio < self.energy_ratio_thr:
self.logger.debug("Energy ratio -- PASSED")
return True
return False
@staticmethod
def _common_dtype(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Casts inputs (x, y) into a common numpy DTYPE."""
dtype = np.find_common_type([x.dtype, y.dtype], [])
if x.dtype != dtype:
x = x.astype(dtype)
if y.dtype != dtype:
y = y.astype(dtype)
return x, y
@staticmethod
def _normalize_time(t: np.ndarray) -> np.ndarray:
"""
Normalizes the time array so that it doesn't explode on tiny values.
The returned array starts at 0 and its smallest increment is 1.
"""
d = np.diff(t)
assert np.all(d != 0), "All time domain values need to be unique"
return (t - t[0]) / np.min(d)
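# Worked sketch (illustrative values): for t = np.array([10.0, 10.5, 11.5]) the diffs
# are [0.5, 1.0], so the normalized timeline is (t - 10.0) / 0.5 -> array([0., 1., 3.]).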
def emd(self, S: np.ndarray, T: Optional[np.ndarray] = None, max_imf: int = -1) -> np.ndarray:
"""
Performs Empirical Mode Decomposition on signal S.
The decomposition is limited to *max_imf* imfs.
Returns IMF functions and residue in a single numpy array format.
Parameters
----------
S : numpy array,
Input signal.
T : numpy array, (default: None)
Position or time array. If None is passed or self.extrema_detection == "simple",
then numpy range is created.
max_imf : int, (default: -1)
IMF number to which decomposition should be performed.
Negative value means *all*.
Returns
-------
IMFs and residue : numpy array
A numpy array which contains both the IMFs and the residue, if any, appended as
the last slice.
"""
if T is not None and len(S) != len(T):
raise ValueError("Time series have different sizes: len(S) -> {} != {} <- len(T)".format(len(S), len(T)))
if T is None or self.extrema_detection == "simple":
T = get_timeline(len(S), S.dtype)
# Normalize T so that it doesn't explode
T = self._normalize_time(T)
# Make sure same types are dealt
S, T = self._common_dtype(S, T)
self.DTYPE = S.dtype
N = len(S)
residue = S.astype(self.DTYPE)
imf = np.zeros(len(S), dtype=self.DTYPE)
imf_old = np.nan
if S.shape != T.shape:
raise ValueError("Position or time array should be the same size as signal.")
# Create arrays
imfNo = 0
extNo = -1
IMF = np.empty((imfNo, N)) # Numpy container for IMF
finished = False
while not finished:
self.logger.debug("IMF -- %s", imfNo)
residue[:] = S - np.sum(IMF[:imfNo], axis=0)
imf = residue.copy()
mean = np.zeros(len(S), dtype=self.DTYPE)
# Counters
n = 0 # All iterations for current imf.
n_h = 0 # counts when |#zero - #ext| <=1
while True:
n += 1
if n >= self.MAX_ITERATION:
self.logger.info("Max iterations reached for IMF. Continuing with another IMF.")
break
ext_res = self.find_extrema(T, imf)
max_pos, min_pos, indzer = ext_res[0], ext_res[2], ext_res[4]
extNo = len(min_pos) + len(max_pos)
nzm = len(indzer)
if extNo > 2:
max_env, min_env, eMax, eMin = self.extract_max_min_spline(T, imf)
mean[:] = 0.5 * (max_env + min_env)
imf_old = imf.copy()
imf[:] = imf - mean
# Fix number of iterations
if self.FIXE:
if n >= self.FIXE:
break
# Fix number of iterations after number of zero-crossings
# and extrema differ at most by one.
elif self.FIXE_H:
tmp_residue = self.find_extrema(T, imf)
max_pos, min_pos, ind_zer = (
tmp_residue[0],
tmp_residue[2],
tmp_residue[4],
)
extNo = len(max_pos) + len(min_pos)
nzm = len(ind_zer)
if n == 1:
continue
# If proto-IMF add one, or reset counter otherwise
n_h = n_h + 1 if abs(extNo - nzm) < 2 else 0
# STOP
if n_h >= self.FIXE_H:
break
# Stops after default stopping criteria are met
else:
ext_res = self.find_extrema(T, imf)
max_pos, _, min_pos, _, ind_zer = ext_res
extNo = len(max_pos) + len(min_pos)
nzm = len(ind_zer)
if imf_old is np.nan:
continue
f1 = self.check_imf(imf, imf_old, eMax, eMin)
f2 = abs(extNo - nzm) < 2
# STOP
if f1 and f2:
break
else: # Less than 2 ext, i.e. trend
finished = True
break
# END OF IMF SIFTING
IMF = np.vstack((IMF, imf.copy()))
imfNo += 1
if self.end_condition(S, IMF) or imfNo == max_imf:
finished = True
break
# If the last sifting had 2 or fewer extrema then that's a trend (residue)
if extNo <= 2:
IMF = IMF[:-1]
# Saving imfs and residue for external references
self.imfs = IMF.copy()
self.residue = S - np.sum(self.imfs, axis=0)
# If residue isn't 0 then add it to the output
if not np.allclose(self.residue, 0):
IMF = np.vstack((IMF, self.residue))
return IMF
def get_imfs_and_residue(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Provides access to separated imfs and residue from recently analysed signal.
Returns
-------
imfs : np.ndarray
Obtained IMFs
residue : np.ndarray
Residue.
"""
if self.imfs is None or self.residue is None:
raise ValueError("No IMF found. Please, run EMD method or its variant first.")
return self.imfs, self.residue
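# A hedged call-sequence sketch (the 1D numpy signal `s` is an assumed input):
#
#     emd = EMD()
#     emd.emd(s)
#     imfs, residue = emd.get_imfs_and_residue()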
def get_imfs_and_trend(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Provides access to separated imfs and trend from recently analysed signal.
Note that this may differ from `get_imfs_and_residue`, as the trend isn't
necessarily the residue. The residue is the point-wise difference between the input signal
and all obtained components, whereas the trend is the slowest component (which can be zero).
Returns
-------
imfs : np.ndarray
Obtained IMFs
trend : np.ndarray
The main trend.
"""
if self.imfs is None or self.residue is None:
raise ValueError("No IMF found. Please, run EMD method or its variant first.")
imfs, residue = self.get_imfs_and_residue()
if np.allclose(residue, 0):
return imfs[:-1].copy(), imfs[-1].copy()
else:
return imfs, residue
###################################################
if __name__ == "__main__":
import pylab as plt
# Logging options
logging.basicConfig(level=logging.DEBUG)
# EMD options
max_imf = -1
DTYPE = np.float64
# Signal options
N = 400
tMin, tMax = 0, 2 * np.pi
T = np.linspace(tMin, tMax, N, dtype=DTYPE)
S = np.sin(20 * T * (1 + 0.2 * T)) + T**2 + np.sin(13 * T)
S = S.astype(DTYPE)
print("Input S.dtype: " + str(S.dtype))
# Prepare and run EMD
emd = EMD()
emd.FIXE_H = 5
emd.nbsym = 2
emd.spline_kind = "cubic"
emd.DTYPE = DTYPE
imfs = emd.emd(S, T, max_imf)
imfNo = imfs.shape[0]
# Plot results
c = 1
r = int(np.ceil((imfNo + 1) / c))  # subplot grid dimensions must be integers
plt.ioff()
plt.subplot(r, c, 1)
plt.plot(T, S, "r")
plt.xlim((tMin, tMax))
plt.title("Original signal")
for num in range(imfNo):
plt.subplot(r, c, num + 2)
plt.plot(T, imfs[num], "g")
plt.xlim((tMin, tMax))
plt.ylabel("Imf " + str(num + 1))
plt.tight_layout()
plt.show()

/ConSSL-0.0.1-py3-none-any.whl/CSSL/models/regression/linear_regression.py
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
class LinearRegression(pl.LightningModule):
"""
Linear regression model with optional L1/L2 regularization, minimizing
$$\min_{W} \|(Wx + b) - y\|_2^2$$
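A minimal forward-pass sketch (shapes are illustrative only):

>>> import torch
>>> model = LinearRegression(input_dim=3)
>>> model(torch.randn(8, 3)).shape
torch.Size([8, 1])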
"""
def __init__(
self,
input_dim: int,
output_dim: int = 1,
bias: bool = True,
learning_rate: float = 1e-4,
optimizer: Optimizer = Adam,
l1_strength: float = 0.0,
l2_strength: float = 0.0,
**kwargs
):
"""
Args:
input_dim: number of dimensions of the input (1+)
output_dim: number of dimensions of the output (default=1)
bias: If false, will not use $+b$
learning_rate: learning_rate for the optimizer
optimizer: the optimizer to use (default='Adam')
l1_strength: L1 regularization strength (default=0.0)
l2_strength: L2 regularization strength (default=0.0)
"""
super().__init__()
self.save_hyperparameters()
self.optimizer = optimizer
self.linear = nn.Linear(in_features=self.hparams.input_dim, out_features=self.hparams.output_dim, bias=bias)
def forward(self, x):
y_hat = self.linear(x)
return y_hat
def training_step(self, batch, batch_idx):
x, y = batch
# flatten any input
x = x.view(x.size(0), -1)
y_hat = self(x)
loss = F.mse_loss(y_hat, y, reduction='sum')
# L1 regularizer
if self.hparams.l1_strength > 0:
l1_reg = sum(param.abs().sum() for param in self.parameters())
loss += self.hparams.l1_strength * l1_reg
# L2 regularizer
if self.hparams.l2_strength > 0:
l2_reg = sum(param.pow(2).sum() for param in self.parameters())
loss += self.hparams.l2_strength * l2_reg
loss /= x.size(0)
tensorboard_logs = {'train_mse_loss': loss}
progress_bar_metrics = tensorboard_logs
return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}
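# The objective optimized above, written out (N is the batch size, lambda_1/lambda_2
# are l1_strength/l2_strength, and theta ranges over all model parameters):
#
#     loss = ( sum_i ||W x_i + b - y_i||^2 + lambda_1 * sum|theta| + lambda_2 * sum theta^2 ) / N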
def validation_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)
return {'val_loss': F.mse_loss(y_hat, y)}
def validation_epoch_end(self, outputs):
val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_mse_loss': val_loss}
progress_bar_metrics = tensorboard_logs
return {'val_loss': val_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
return {'test_loss': F.mse_loss(y_hat, y)}
def test_epoch_end(self, outputs):
test_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
tensorboard_logs = {'test_mse_loss': test_loss}
progress_bar_metrics = tensorboard_logs
return {'test_loss': test_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}
def configure_optimizers(self):
return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--input_dim', type=int, default=None)
parser.add_argument('--output_dim', type=int, default=1)
parser.add_argument('--bias', action='store_true')
parser.add_argument('--batch_size', type=int, default=16)
return parser
def cli_main():
from CSSL.datamodules.sklearn_datamodule import SklearnDataModule
from CSSL.utils import _SKLEARN_AVAILABLE
pl.seed_everything(1234)
# create dataset
if _SKLEARN_AVAILABLE:
from sklearn.datasets import load_boston
else: # pragma: no cover
raise ModuleNotFoundError(
'You want to use `sklearn` which is not installed yet, install it with `pip install scikit-learn`.'
)
# args
parser = ArgumentParser()
parser = LinearRegression.add_model_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# model
model = LinearRegression(input_dim=13, l1_strength=1, l2_strength=1)
# model = LinearRegression(**vars(args))
# data
X, y = load_boston(return_X_y=True) # these are numpy arrays
loaders = SklearnDataModule(X, y, batch_size=args.batch_size)
# train
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, train_dataloader=loaders.train_dataloader(), val_dataloaders=loaders.val_dataloader())
if __name__ == '__main__':
cli_main() | PypiClean |