content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value)
---|---|---|---|---|---|---|---|---
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowSecurityGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'security_group_id': 'str'
}
attribute_map = {
'security_group_id': 'security_group_id'
}
def __init__(self, security_group_id=None):
"""ShowSecurityGroupRequest - a model defined in huaweicloud sdk"""
self._security_group_id = None
self.discriminator = None
self.security_group_id = security_group_id
@property
def security_group_id(self):
"""Gets the security_group_id of this ShowSecurityGroupRequest.
Security group resource ID
:return: The security_group_id of this ShowSecurityGroupRequest.
:rtype: str
"""
return self._security_group_id
@security_group_id.setter
def security_group_id(self, security_group_id):
"""Sets the security_group_id of this ShowSecurityGroupRequest.
Security group resource ID
:param security_group_id: The security_group_id of this ShowSecurityGroupRequest.
:type: str
"""
self._security_group_id = security_group_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowSecurityGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
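# Minimal usage sketch (added for illustration): the id below is a made-up placeholder;
# in practice the request object is passed to the corresponding VPC client call.
if __name__ == "__main__":
    _request = ShowSecurityGroupRequest(security_group_id="sg-0123")
    print(_request.to_dict())  # {'security_group_id': 'sg-0123'}
    print(_request)            # JSON string produced by to_str() / __repr__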
| 27.657895 | 89 | 0.574691 | [
"Apache-2.0"
] | huaweicloud/huaweicloud-sdk-python-v3 | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py | 3,173 | Python |
from lark import Lark, Transformer, v_args
from lark.visitors import Interpreter, visit_children_decor
p = Lark.open("rules.lark", parser="lalr", rel_to=__file__)
code = """
// First win in my book
b = 4;
a = b*2;
print a+1
x = 7;
p = [1, 2, 3, 4]
print p
"""
tree = p.parse(code)
@v_args(inline=True)
class MyEval(Transformer):
from operator import add, mul, neg, sub
from operator import truediv as div
number = float
def __init__(self, ns):
self.ns = ns
def var(self, name):
return self.ns[name]
# def num_list(self, value):
# print(value)
def eval_expr(tree, ns):
return MyEval(ns).transform(tree)
@v_args(inline=True)
class MyInterp(Interpreter):
def __init__(self):
self.namespace = {}
def assign(self, var, expr):
self.namespace[var] = eval_expr(expr, self.namespace)
def print_statement(self, expr):
# print(expr)
res = eval_expr(expr, self.namespace)
print(res)
print(tree.pretty())
# MyInterp().visit(tree)
| 18.553571 | 61 | 0.638114 | [
"Apache-2.0"
] | RLToolsWorkshop/dubdub | tests/cmdexpr/ruler.py | 1,039 | Python |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="error_details.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
import json
class ErrorDetails(object):
"""The error details.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_date_time': 'datetime',
'request_id': 'str'
}
attribute_map = {
'error_date_time': 'ErrorDateTime',
'request_id': 'RequestId'
}
def __init__(self, error_date_time=None, request_id=None): # noqa: E501
"""ErrorDetails - a model defined in Swagger""" # noqa: E501
self._error_date_time = None
self._request_id = None
self.discriminator = None
if error_date_time is not None:
self.error_date_time = error_date_time
if request_id is not None:
self.request_id = request_id
@property
def error_date_time(self):
"""Gets the error_date_time of this ErrorDetails. # noqa: E501
Error datetime. # noqa: E501
:return: The error_date_time of this ErrorDetails. # noqa: E501
:rtype: datetime
"""
return self._error_date_time
@error_date_time.setter
def error_date_time(self, error_date_time):
"""Sets the error_date_time of this ErrorDetails.
Error datetime. # noqa: E501
:param error_date_time: The error_date_time of this ErrorDetails. # noqa: E501
:type: datetime
"""
self._error_date_time = error_date_time
@property
def request_id(self):
"""Gets the request_id of this ErrorDetails. # noqa: E501
The request id. # noqa: E501
:return: The request_id of this ErrorDetails. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ErrorDetails.
The request id. # noqa: E501
:param request_id: The request_id of this ErrorDetails. # noqa: E501
:type: str
"""
self._request_id = request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
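# Note (added for clarity): to_dict() keys the result by the python attribute names, while
# to_json() maps them through attribute_map to the API field names. For example, a
# hypothetical ErrorDetails(request_id="abc") gives {'error_date_time': None, 'request_id': 'abc'}
# from to_dict() but '{"ErrorDateTime": null, "RequestId": "abc"}' from to_json().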
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 33.772727 | 87 | 0.580585 | [
"MIT"
] | rizwanniazigroupdocs/aspose-words-cloud-python | asposewordscloud/models/error_details.py | 5,944 | Python |
import datetime
from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user
from flask import request
from ..util import query_util, coco_util, profile
from config import Config
from database import (
ImageModel,
CategoryModel,
AnnotationModel,
SessionEvent
)
api = Namespace('annotator', description='Annotator related operations')
@api.route('/data')
class AnnotatorData(Resource):
@profile
@login_required
def post(self):
"""
Called when saving data from the annotator client
"""
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if image_model is None:
return {'success': False, 'message': 'Image does not exist'}, 400
# Check if current user can access dataset
db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first()
if db_dataset is None or dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
db_dataset.update(annotate_url=dataset.get('annotate_url', ''))
categories = CategoryModel.objects.all()
annotations = AnnotationModel.objects(image_id=image_id)
current_user.update(preferences=data.get('user', {}))
annotated = False
# Iterate every category passed in the data
for category in data.get('categories', []):
category_id = category.get('id')
# Find corresponding category object in the database
db_category = categories.filter(id=category_id).first()
if db_category is None:
continue
category_update = {'color': category.get('color')}
if current_user.can_edit(db_category):
category_update['keypoint_edges'] = category.get('keypoint_edges', [])
category_update['keypoint_labels'] = category.get('keypoint_labels', [])
db_category.update(**category_update)
# Iterate every annotation from the data annotations
for annotation in category.get('annotations', []):
# Find corresponding annotation object in database
annotation_id = annotation.get('id')
db_annotation = annotations.filter(id=annotation_id).first()
if db_annotation is None:
continue
# Paperjs objects are complex, so they will not always be passed. Therefore we update
# the annotation twice, checking if the paperjs exists.
# Update annotation in database
sessions = []
total_time = 0
for session in annotation.get('sessions', []):
date = datetime.datetime.fromtimestamp(int(session.get('start')) / 1e3)
model = SessionEvent(
user=current_user.username,
created_at=date,
milliseconds=session.get('milliseconds'),
tools_used=session.get('tools')
)
total_time += session.get('milliseconds')
sessions.append(model)
db_annotation.update(
add_to_set__events=sessions,
inc__milliseconds=total_time,
set__isbbox=annotation.get('isbbox', False),
set__keypoints=annotation.get('keypoints', []),
set__metadata=annotation.get('metadata'),
set__color=annotation.get('color')
)
paperjs_object = annotation.get('compoundPath', [])
# Update paperjs if it exists
if len(paperjs_object) == 2:
width = db_annotation.width
height = db_annotation.height
# Generate coco formatted segmentation data
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)
db_annotation.update(
set__segmentation=segmentation,
set__area=area,
set__isbbox=annotation.get('isbbox', False),
set__bbox=bbox,
set__paper_object=paperjs_object,
)
if area > 0:
annotated = True
image_model.update(
set__metadata=image.get('metadata', {}),
set__annotated=annotated,
set__category_ids=image.get('category_ids', []),
set__regenerate_thumbnail=True,
set__num_annotations=annotations\
.filter(deleted=False, area__gt=0).count()
)
return {"success": True}
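# Sketch (added for clarity) of the request payload shape the POST handler above expects,
# inferred from the data.get(...) calls in the code; field names are the ones actually read,
# the values are illustrative only:
#
#   {
#     "image": {"id": 1, "metadata": {}, "category_ids": []},
#     "dataset": {"annotate_url": ""},
#     "user": {},
#     "categories": [{
#       "id": 2, "color": "#f00", "keypoint_edges": [], "keypoint_labels": [],
#       "annotations": [{
#         "id": 3, "isbbox": False, "keypoints": [], "metadata": {}, "color": "#0f0",
#         "sessions": [{"start": 1560000000000, "milliseconds": 1200, "tools": []}],
#         "compoundPath": []
#       }]
#     }]
#   }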
@api.route('/data/<int:image_id>')
class AnnotatorId(Resource):
@profile
@login_required
def get(self, image_id):
""" Called when loading from the annotator client """
image = ImageModel.objects(id=image_id)\
.exclude('events').first()
if image is None:
return {'success': False, 'message': 'Could not load image'}, 400
dataset = current_user.datasets.filter(id=image.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
categories = CategoryModel.objects(deleted=False)\
.in_bulk(dataset.categories).items()
# Get next and previous image
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first()
nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first()
preferences = {}
if not Config.LOGIN_DISABLED:
preferences = current_user.preferences
# Generate data about the image to return to client
data = {
'image': query_util.fix_ids(image),
'categories': [],
'dataset': query_util.fix_ids(dataset),
'preferences': preferences,
'permissions': {
'dataset': dataset.permissions(current_user),
'image': image.permissions(current_user)
}
}
data['image']['previous'] = pre.id if pre else None
data['image']['next'] = nex.id if nex else None
for category in categories:
category = query_util.fix_ids(category[1])
category_id = category.get('id')
annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
.exclude('events').all()
category['show'] = True
category['visualize'] = False
category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
data.get('categories').append(category)
return data
| 36.338384 | 109 | 0.577623 | [
"MIT"
] | Cheol-H-Jeong/Deep-POC-2019 | coco-annotator/backend/webserver/api/annotator.py | 7,195 | Python |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_connection_details import UpdateConnectionDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateConnectionFromAmazonS3(UpdateConnectionDetails):
"""
The details to update an Amazon s3 connection.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute
of this class is ``AMAZON_S3_CONNECTION`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this UpdateConnectionFromAmazonS3.
Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION", "BIP_CONNECTION"
:type model_type: str
:param key:
The value to assign to the key property of this UpdateConnectionFromAmazonS3.
:type key: str
:param model_version:
The value to assign to the model_version property of this UpdateConnectionFromAmazonS3.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this UpdateConnectionFromAmazonS3.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this UpdateConnectionFromAmazonS3.
:type name: str
:param description:
The value to assign to the description property of this UpdateConnectionFromAmazonS3.
:type description: str
:param object_status:
The value to assign to the object_status property of this UpdateConnectionFromAmazonS3.
:type object_status: int
:param object_version:
The value to assign to the object_version property of this UpdateConnectionFromAmazonS3.
:type object_version: int
:param identifier:
The value to assign to the identifier property of this UpdateConnectionFromAmazonS3.
:type identifier: str
:param connection_properties:
The value to assign to the connection_properties property of this UpdateConnectionFromAmazonS3.
:type connection_properties: list[oci.data_integration.models.ConnectionProperty]
:param registry_metadata:
The value to assign to the registry_metadata property of this UpdateConnectionFromAmazonS3.
:type registry_metadata: oci.data_integration.models.RegistryMetadata
:param access_key:
The value to assign to the access_key property of this UpdateConnectionFromAmazonS3.
:type access_key: oci.data_integration.models.SensitiveAttribute
:param secret_key:
The value to assign to the secret_key property of this UpdateConnectionFromAmazonS3.
:type secret_key: oci.data_integration.models.SensitiveAttribute
"""
self.swagger_types = {
'model_type': 'str',
'key': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_status': 'int',
'object_version': 'int',
'identifier': 'str',
'connection_properties': 'list[ConnectionProperty]',
'registry_metadata': 'RegistryMetadata',
'access_key': 'SensitiveAttribute',
'secret_key': 'SensitiveAttribute'
}
self.attribute_map = {
'model_type': 'modelType',
'key': 'key',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'name': 'name',
'description': 'description',
'object_status': 'objectStatus',
'object_version': 'objectVersion',
'identifier': 'identifier',
'connection_properties': 'connectionProperties',
'registry_metadata': 'registryMetadata',
'access_key': 'accessKey',
'secret_key': 'secretKey'
}
self._model_type = None
self._key = None
self._model_version = None
self._parent_ref = None
self._name = None
self._description = None
self._object_status = None
self._object_version = None
self._identifier = None
self._connection_properties = None
self._registry_metadata = None
self._access_key = None
self._secret_key = None
self._model_type = 'AMAZON_S3_CONNECTION'
@property
def access_key(self):
"""
Gets the access_key of this UpdateConnectionFromAmazonS3.
:return: The access_key of this UpdateConnectionFromAmazonS3.
:rtype: oci.data_integration.models.SensitiveAttribute
"""
return self._access_key
@access_key.setter
def access_key(self, access_key):
"""
Sets the access_key of this UpdateConnectionFromAmazonS3.
:param access_key: The access_key of this UpdateConnectionFromAmazonS3.
:type: oci.data_integration.models.SensitiveAttribute
"""
self._access_key = access_key
@property
def secret_key(self):
"""
Gets the secret_key of this UpdateConnectionFromAmazonS3.
:return: The secret_key of this UpdateConnectionFromAmazonS3.
:rtype: oci.data_integration.models.SensitiveAttribute
"""
return self._secret_key
@secret_key.setter
def secret_key(self, secret_key):
"""
Sets the secret_key of this UpdateConnectionFromAmazonS3.
:param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.
:type: oci.data_integration.models.SensitiveAttribute
"""
self._secret_key = secret_key
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 39.551724 | 266 | 0.671898 | [
"Apache-2.0",
"BSD-3-Clause"
] | pabs3/oci-python-sdk | src/oci/data_integration/models/update_connection_from_amazon_s3.py | 6,882 | Python |
import logging
import threading
import time
import traceback
import warnings
from ..machine.machine import TlMachine
LOG = logging.getLogger(__name__)
class Invoker:
def __init__(self, data_controller):
self.data_controller = data_controller
self.exception = None
threading.excepthook = self._threading_excepthook
def _threading_excepthook(self, args):
self.exception = args
def invoke(self, vmid, run_async=True):
LOG.info(f"Invoking {vmid} (new thread? {run_async})")
m = TlMachine(vmid, self)
if run_async:
thread = threading.Thread(target=m.run)
LOG.info(f"New thread: {thread}")
thread.start()
else:
m.run()
| 24.766667 | 62 | 0.650067 | [
"Apache-2.0"
] | condense9/hark-lang | src/hark_lang/executors/thread.py | 743 | Python |
#!/usr/bin/env python
"""
<Program Name>
test_root_versioning_integration.py
<Author>
Evan Cordell.
<Started>
July 21, 2016.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Test root versioning for efficient root key rotation.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import tempfile
import shutil
import sys
# 'unittest2' required for testing under Python < 2.7.
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
import tuf
import tuf.log
import tuf.formats
import tuf.exceptions
import tuf.roledb
import tuf.keydb
import tuf.repository_tool as repo_tool
import securesystemslib
logger = logging.getLogger('tuf.test_root_versioning')
repo_tool.disable_console_log_messages()
class TestRepository(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temporary_directory)
def tearDown(self):
tuf.roledb.clear_roledb()
tuf.keydb.clear_keydb()
def test_init(self):
# Test normal case.
repository = repo_tool.Repository('repository_directory/',
'metadata_directory/',
'targets_directory/')
self.assertTrue(isinstance(repository.root, repo_tool.Root))
self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot))
self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp))
self.assertTrue(isinstance(repository.targets, repo_tool.Targets))
# Test improperly formatted arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3,
'metadata_directory/', 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 3, 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 'metadata_directory', 3)
def test_root_role_versioning(self):
# Test root role versioning
#
# 1. Import public and private keys.
# 2. Add verification keys.
# 3. Load signing keys.
# 4. Add target files.
# 5. Perform delegation.
# 6. writeall()
#
# Copy the target files from 'tuf/tests/repository_data' so that writeall()
# has target fileinfo to include in metadata.
temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
targets_directory = os.path.join(temporary_directory, 'repository',
repo_tool.TARGETS_DIRECTORY_NAME)
original_targets_directory = os.path.join('repository_data',
'repository', 'targets')
shutil.copytree(original_targets_directory, targets_directory)
# In this case, create_new_repository() creates the 'repository/'
# sub-directory in 'temporary_directory' if it does not exist.
repository_directory = os.path.join(temporary_directory, 'repository')
metadata_directory = os.path.join(repository_directory,
repo_tool.METADATA_STAGED_DIRECTORY_NAME)
repository = repo_tool.create_new_repository(repository_directory)
# (1) Load the public and private keys of the top-level roles, and one
# delegated role.
keystore_directory = os.path.join('repository_data', 'keystore')
# Load the public keys.
root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub')
targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub')
snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub')
timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub')
role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub')
root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path)
targets_pubkey = repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path)
snapshot_pubkey = \
repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path)
timestamp_pubkey = \
repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path)
role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path)
# Load the private keys.
root_privkey_path = os.path.join(keystore_directory, 'root_key')
targets_privkey_path = os.path.join(keystore_directory, 'targets_key')
snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key')
timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key')
role1_privkey_path = os.path.join(keystore_directory, 'delegation_key')
root_privkey = \
repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password')
targets_privkey = \
repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, 'password')
snapshot_privkey = \
repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path,
'password')
timestamp_privkey = \
repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path,
'password')
role1_privkey = \
repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path,
'password')
# (2) Add top-level verification keys.
repository.root.add_verification_key(root_pubkey)
repository.targets.add_verification_key(targets_pubkey)
repository.snapshot.add_verification_key(snapshot_pubkey)
repository.timestamp.add_verification_key(timestamp_pubkey)
# (3) Load top-level signing keys.
repository.root.load_signing_key(root_privkey)
repository.targets.load_signing_key(targets_privkey)
repository.snapshot.load_signing_key(snapshot_privkey)
repository.timestamp.load_signing_key(timestamp_privkey)
# (4) Add target files.
target1 = os.path.join(targets_directory, 'file1.txt')
target2 = os.path.join(targets_directory, 'file2.txt')
target3 = os.path.join(targets_directory, 'file3.txt')
repository.targets.add_target(target1)
repository.targets.add_target(target2)
# (5) Perform delegation.
repository.targets.delegate('role1', [role1_pubkey], [target3])
repository.targets('role1').load_signing_key(role1_privkey)
# (6) Write repository.
repository.targets.compressions = ['gz']
repository.writeall()
self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json')))
self.assertTrue(os.path.exists(os.path.join(metadata_directory, '1.root.json')))
# Verify that the expected metadata is written.
root_filepath = os.path.join(metadata_directory, 'root.json')
root_1_filepath = os.path.join(metadata_directory, '1.root.json')
root_2_filepath = os.path.join(metadata_directory, '2.root.json')
old_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_1_signable = securesystemslib.util.load_json_file(root_1_filepath)
# Make a change to the root keys
repository.root.add_verification_key(targets_pubkey)
repository.root.load_signing_key(targets_privkey)
repository.root.threshold = 2
repository.writeall()
new_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_2_signable = securesystemslib.util.load_json_file(root_2_filepath)
for role_signable in [old_root_signable, new_root_signable, root_1_signable, root_2_signable]:
# Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is an
# invalid signable.
tuf.formats.check_signable_object_format(role_signable)
# Verify contents of versioned roots
self.assertEqual(old_root_signable, root_1_signable)
self.assertEqual(new_root_signable, root_2_signable)
self.assertEqual(root_1_signable['signed']['version'], 1)
self.assertEqual(root_2_signable['signed']['version'], 2)
repository.root.remove_verification_key(root_pubkey)
repository.root.unload_signing_key(root_privkey)
repository.root.threshold = 2
# Errors, not enough signing keys to satisfy old threshold
self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
# No error, write() ignore's root's threshold and allows it to be written
# to disk partially signed.
repository.write('root')
if __name__ == '__main__':
unittest.main()
| 37.349138 | 98 | 0.734795 | [
"MIT"
] | ninox-iot/tuf | tests/test_root_versioning_integration.py | 8,665 | Python |
from datetime import datetime
from django.db import models
# Create your models here.
class JD(models.Model):
appkey = models.CharField(max_length=100,verbose_name='appkey')
secret = models.CharField(max_length=100,verbose_name='secret')
add_time = models.DateTimeField(default=datetime.now,verbose_name='添加时间')
def __str__(self):
return self.appkey
class Meta:
verbose_name = '配置'
verbose_name_plural = verbose_name
"""
1 - Featured coupon items,
2 - Super marketplace,
10 - 9.9 yuan zone,
22 - Hot-selling items,
24 - Digital & home appliances,
25 - Supermarket,
26 - Mother, baby & toys,
27 - Furniture & daily necessities,
28 - Beauty & fashion,
29 - Medicine & health care,
30 - Books & stationery,
31 - Today's must-push,
32 - Top picks
"""
class Category(models.Model):
CHOOSE = (
('1','导航'),
('2','九宫格'),
)
pid = models.CharField(max_length=10,verbose_name='分类id')
name = models.CharField(max_length=20,verbose_name='分类名')
sort = models.IntegerField(verbose_name='排序',default=0)
type = models.CharField(max_length=10,choices=CHOOSE,verbose_name='显示',default='1')
add_time = models.DateTimeField(default=datetime.now,verbose_name='添加时间')
def __str__(self):
return self.name
class Meta:
verbose_name = '类别'
verbose_name_plural = verbose_name
class Banner(models.Model):
title = models.CharField(max_length=100,verbose_name='活动名称')
url = models.TextField(verbose_name='跳转地址')
img = models.URLField(verbose_name='图片地址',default='')
start_time = models.DateField(default=datetime.now,verbose_name='活动开始时间')
end_time = models.DateField(default=datetime.now,verbose_name='活动结束时间')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
def __str__(self):
return self.title
class Meta:
verbose_name = '活动'
verbose_name_plural = verbose_name | 24.9 | 87 | 0.690189 | [
"MIT"
] | fengjinqi/linjuanbang | apps/jd_app/models.py | 1,953 | Python |
from mle_scheduler.cluster.slurm.helpers_launch_slurm import slurm_generate_startup_file
job_arguments = {
"num_logical_cores": 5,
"partition": "standard",
"job_name": "test_job",
"num_gpus": 1,
"gpu_type": "RTX2080",
"env_name": "test_env",
"use_conda_venv": True,
"script": "python run.py",
"memory_per_cpu": 2000,
"time_per_job": "10:05:02",
"modules_to_load": "nvidia/cuda/10.0",
}
job_script = """#!/bin/bash
#SBATCH --job-name=test_job
#SBATCH --output=log.txt
#SBATCH --error=err.err
#SBATCH --partition=standard
#SBATCH --cpus-per-task=5
#SBATCH --gres=gpu:RTX2080:1
#SBATCH --mem-per-cpu=2000
#SBATCH --time=10:05:02
echo "------------------------------------------------------------------------"
source ~/miniconda3/etc/profile.d/conda.sh
echo "------------------------------------------------------------------------"
. ~/.bashrc && conda activate test_env
echo "Successfully activated virtual environment - Ready to start job"
module load nvidia/cuda/10.0
echo "------------------------------------------------------------------------"
echo "Job started on" `date`
echo "------------------------------------------------------------------------"
python run.py
echo "------------------------------------------------------------------------"
echo "Job ended on" `date`
echo "------------------------------------------------------------------------"
conda deactivate
"""
def test_job_slurm():
startup_script = slurm_generate_startup_file(job_arguments).format(**job_arguments)
assert job_script == startup_script
return
| 32.44898 | 88 | 0.510063 | [
"MIT"
] | boazbk/mle-scheduler | tests/test_job_slurm.py | 1,590 | Python |
"""
This file imports `__all__` from the solvers directory, thus populating the solver registry.
"""
from pysperf.solvers import *
from .config import solvers
__all__ = ['solvers']
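# Note (added; an assumption inferred from the docstring and imports above): each module in
# pysperf.solvers presumably registers itself in the shared `solvers` mapping from .config as
# an import-time side effect, so importing this module is enough to expose every solver, e.g.
#
#   from pysperf.solver_library import solvers
#   print(list(solvers))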
| 20.333333 | 92 | 0.748634 | [
"BSD-2-Clause"
] | ZedongPeng/pysperf | pysperf/solver_library.py | 183 | Python |
font = CurrentFont()
one = font['A']
two = font['A.2']
steps = 4
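# Note (added): with steps = 4 the factor f below works out to 0.2, 0.4, 0.6 and 0.8,
# i.e. four evenly spaced interpolations strictly between 'A' (0.0) and 'A.2' (1.0).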
if one.isCompatible(two):
for x in range(steps):
n = "A.interp" + str(x+1)
g = font.newGlyph(n)
f = (x + 1) / float(steps + 1)
print(f)
g.interpolate(f, one, two) | 18.714286 | 34 | 0.515267 | [
"MIT"
] | benkiel/python_workshops | 2018_3_Cooper_Type/RoboFont/simple_interpolation.py | 262 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *  # import assumed; provides CMakePackage, version, depends_on
class Volk(CMakePackage):
"""VOLK is the Vector-Optimized Library of Kernels. It is a
library that contains kernels of hand-written SIMD code for
different mathematical operations. Since each SIMD architecture
can be very different and no compiler has yet come along to handle
vectorization properly or highly efficiently, VOLK approaches the
problem differently.
For each architecture or platform that a developer wishes to
vectorize for, a new proto-kernel is added to VOLK. At runtime,
VOLK will select the correct proto-kernel. In this way, the users
of VOLK call a kernel for performing the operation that is
platform/architecture agnostic. This allows us to write portable
SIMD code."""
homepage = "https://github.com/gnuradio/volk"
url = "https://github.com/gnuradio/volk/archive/v2.3.0.tar.gz"
maintainers = ['aweits']
version('2.3.0', sha256='f42c928f561b128acfe4adb21227e4a62a3f6ab8103592fc3233765ff326d5fc')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| 41.967742 | 95 | 0.734051 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | Bambi/spack | var/spack/repos/builtin/packages/volk/package.py | 1,301 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_add_steps`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_add_steps import EmrAddStepsOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_add_steps`.",
DeprecationWarning, stacklevel=2
)
| 40.333333 | 100 | 0.771074 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | AdrianMirandaTailor/RaWorkflowOrchestrator | airflow/contrib/operators/emr_add_steps_operator.py | 1,210 | Python |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning models for NER and POS tagging."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
from dataclasses import dataclass, field
from typing import Optional
import json
import numpy as np
import scipy
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
# import lang2vec.lang2vec as l2v
from scipy.spatial import distance
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
MultiLingAdapterArguments,
AdapterConfig,
AdapterType,
)
#from xlm import XLMForTokenClassification
DEFAULT_LANGUAGES = {
'mr': 'hi',
'bn': 'hi',
'ta': 'ta',
'fo': 'fo',
'no': 'da',
'da': 'da',
'be': 'be',
'uk': 'uk',
'bg': 'bg'
}
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f'Seed = {args.seed}')
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
"""Train the model."""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
print(f'Local Rank = {args.local_rank}')
print(len(train_dataset))
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
logging.info([n for (n, p) in model.named_parameters() if p.requires_grad])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args) # Add here for reproducibility (even between python 2 and 3)
cur_epoch = 0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
cur_epoch += 1
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3]}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == "xlm":
inputs["langs"] = batch[4]
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scheduler.step() # Update learning rate schedule
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training:
# Only evaluate on single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.save_only_best_checkpoint:
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
if result["f1"] > best_score:
logger.info("result['f1']={} > best_score={}".format(result["f1"], best_score))
best_score = result["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
else:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
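# Note (added for clarity): calc_weight_multi learns one weight vector per layer (13 in total)
# over the language adapters at inference time. Starting from uniform or lang2vec-based
# weights, it repeatedly runs the model with softmax-normalised weights, computes the entropy
# of the output distribution over the kept logits, and takes gradient steps on the raw weights
# to reduce that entropy, returning the final softmax-normalised per-layer weights.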
def calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weights, step=10, lang=None):
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"return_sequence_out": True,
"labels": batch[3]}
# logger.info(f'Language Adapters are {lang_adapter_names}')
adapter_weights = [torch.FloatTensor([0.5 for _ in range(len(lang_adapter_names))]).to(args.device) for _ in range(13)]
if args.lang_to_vec:
logger.info(lang)
logger.info(lang_adapter_names)
adapter_weights = calc_l2v_weights(args, lang, lang_adapter_names)
logger.info(adapter_weights)
for step_no in range(step):
for w in adapter_weights: w.requires_grad = True
if args.lang_to_vec and step_no == 0:
normed_adapter_weights = adapter_weights
else:
normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
# logger.info(f'Initial Adapter Weights = {normed_adapter_weights}')
model.set_active_adapters([lang_adapter_names, [task_name]])
inputs["adapter_names"] = [lang_adapter_names, [task_name]]
inputs["adapter_weights"] = normed_adapter_weights
outputs = model(**inputs)
loss, logits, orig_sequence_output = outputs[:3]
kept_logits = outputs[-1]
entropy = torch.nn.functional.softmax(kept_logits, dim=1)*torch.nn.functional.log_softmax(kept_logits, dim=1)
entropy = -entropy.sum() / kept_logits.size(0)
grads = torch.autograd.grad(entropy, adapter_weights)
#print(adapter_weights)
#print(grads)
#print(grads)
for i, w in enumerate(adapter_weights):
adapter_weights[i] = adapter_weights[i].data - 10*grads[i].data
normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
#print(normed_adapter_weights)
# logger.info(f'Final Adapter Weights = {normed_adapter_weights}')
return normed_adapter_weights
def jaccard_sim(vec1, vec2):
intersection = 0
union = 0
for i in range(len(vec1)):
if vec1[i] == '--' or vec2[i] == '--':
continue
if vec1[i] == 1 or vec2[i] == 1:
union += 1
if vec1[i] == 1 and vec2[i] == 1:
intersection += 1
return intersection/union
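# Example (added): jaccard_sim treats '--' entries as missing on either side, so for
# vec1 = [1, 0, '--', 1] and vec2 = [1, 1, '--', 0] the union is 3 and the intersection
# is 1, giving a similarity of 1/3.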
def get_sim(lang1, lang2):
features = l2v.get_features(f'{DEFAULT_LANGUAGES[lang1]} {lang2}', 'learned')
similarity = 1 - distance.cosine(features[DEFAULT_LANGUAGES[lang1]], features[lang2])
return similarity
def get_syntax_sim(lang1, lang2):
features = l2v.get_features(f'{lang1} {lang2}', "syntax_wals|syntax_sswl|syntax_ethnologue")
similarity = jaccard_sim(features[lang1], features[lang2])
return similarity
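# Note (added for clarity): calc_l2v_weights scores every language adapter against the target
# language using lang2vec ('learned' cosine similarity or 'syntax' Jaccard similarity above),
# turns the scores into a distribution with a temperature-scaled softmax, and, when
# args.en_weight is set, pins the English adapter to that fixed weight and rescales the rest
# to sum to 1 - en_weight. The similarity helpers rely on the lang2vec import (`l2v`) that is
# commented out near the top of this file.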
def calc_l2v_weights(args, lang, lang_adapter_names):
adapter_weight = []
for adapter_lang in lang_adapter_names:
if args.en_weight is not None and adapter_lang == 'en':
continue
if args.lang_to_vec == 'learned':
adapter_weight.append(get_sim(lang, adapter_lang))
elif args.lang_to_vec == 'syntax':
adapter_weight.append(get_syntax_sim(lang, adapter_lang))
else:
logger.info('INVALID FEATURE TYPE')
exit()
logger.info(adapter_weight)
adapter_weight = torch.FloatTensor(adapter_weight)
adapter_weight = torch.nn.functional.softmax(adapter_weight/args.temperature).tolist()
if args.en_weight is not None:
adapter_weight = [(1 - args.en_weight)*aw for aw in adapter_weight]
en_index = lang_adapter_names.index('en')
adapter_weight.insert(en_index, args.en_weight)
return adapter_weight
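# Note (added for clarity): scaled_input builds the interpolation path used for the
# integrated-gradients style head attribution in evaluate() when args.get_attr is set: it
# returns num_batch * batch_size copies of the attention map linearly scaled between a zero
# baseline and the observed map, plus the per-step increment, optionally restricted to the
# segment of the path between start_i and end_i.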
def scaled_input(emb, batch_size=16, num_batch=1, baseline=None, start_i=None, end_i=None):
# shape of emb: (num_head, seq_len, seq_len)
if baseline is None:
baseline = torch.zeros_like(emb)
num_points = batch_size * num_batch
scale = 1.0 / num_points
if start_i is None:
step = (emb.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step*i) for i in range(num_points)], dim=0)
return res, step[0]
else:
step = (emb - baseline) * scale
start_emb = torch.add(baseline, step*start_i)
end_emb = torch.add(baseline, step*end_i)
step_new = (end_emb.unsqueeze(0) - start_emb.unsqueeze(0)) * scale
res = torch.cat([torch.add(start_emb.unsqueeze(0), step_new*i) for i in range(num_points)], dim=0)
return res, step_new[0]
#Changed the default of calc_weight_step to 0
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", lang2id=None, print_result=True, adapter_weight=None, lang_adapter_names=None, task_name=None, calc_weight_step=0):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang, lang2id=lang2id)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
if args.get_attr:
eval_sampler = RandomSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
else:
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
counter = 0
head_importances = None
all_head_importances = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
counter += 1
logger.info(f'Batch number = {counter}')
batch = tuple(t.to(args.device) for t in batch)
if calc_weight_step > 0:
adapter_weight = calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weight, calc_weight_step, lang=lang)
if args.get_attr:
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
inputs["output_attentions"] = True
outputs = model(**inputs)
tmp_eval_loss, logits, attentions, kept_labels, kl_logits = outputs
attr_all = []
res_attr = []
input_len = int(inputs["attention_mask"][0].sum())
example_head_importances = None
#Remove the batch_size dim since batch_size=1
logits = logits[0]
for tar_layer in range(12):
att = attentions[tar_layer][0]
pred_labels = torch.argmax(logits, dim=-1)
scale_att, step = scaled_input(att.data)
scale_att.requires_grad_(True)
attr_all = None
prob_all = None
for j_batch in range(1):
one_batch_att = scale_att[j_batch*16:(j_batch+1)*16]
_, grad = model(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'], labels=inputs['labels'], tar_layer=tar_layer, tmp_score=one_batch_att, pred_labels=pred_labels)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
# prob_all = tar_prob if prob_all is None else torch.cat([prob_all, tar_prob])
attr_all = attr_all[:,0:input_len,0:input_len] * step[:,0:input_len,0:input_len]
if example_head_importances is None:
example_head_importances = torch.amax(attr_all, dim=(1,2)).unsqueeze(0)
else:
tmp = torch.amax(attr_all, dim=(1,2))
tmp = tmp.unsqueeze(0)
example_head_importances = torch.cat((example_head_importances, tmp), dim=0)
# att = att[:,0:input_len,0:input_len]
res_attr.append(attr_all.data)
# logger.info(f'Example Head Importances = {example_head_importances}')
all_head_importances = example_head_importances.unsqueeze(0) if all_head_importances is None else torch.cat((all_head_importances, example_head_importances.unsqueeze(0)), dim=0)
head_importances = example_head_importances if head_importances is None else torch.add(head_importances, example_head_importances)
if counter == 100:
break
continue
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
# logger.info(f'Labels = {batch[3]}')
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if args.get_attr:
head_importances = head_importances/counter
logger.info(f'Head Importances = {head_importances}')
torch.save(head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_importances_100.pt'))
torch.save(all_head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_all_importances_100.pt'))
return None, None
if nb_eval_steps == 0:
results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, lang2id=None, few_shot=-1):
# Make sure only the first process in distributed training process
# the dataset, and the others will use the cache
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier()
# Load data features from cache or dataset file
bpe_dropout = args.bpe_dropout
if mode != 'train': bpe_dropout = 0
if bpe_dropout > 0:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_drop{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length), bpe_dropout))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
langs = lang.split(',')
logger.info("all languages = {}".format(lang))
features = []
for lg in langs:
data_file = os.path.join(args.data_dir, lg, "{}.{}".format(mode, args.model_name_or_path))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lg))
examples = read_examples_from_file(data_file, lg, lang2id)
print(examples)
features_lg = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lg,
bpe_dropout=bpe_dropout,
)
features.extend(features_lg)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training processes
# the dataset; the others will use the cache
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
model_type: Optional[str] = field(
default=None, metadata={"help": "Model type (e.g. bert, xlm, xlmr); if not set it is inferred from the loaded config"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
labels: str = field(
default=None, metadata={"help": "Path to a file containing all labels for the task"}
)
data_dir: str = field(
default=None, metadata={"help": "The input data directory containing the train/dev/test files"}
)
output_dir: str = field(
default=None, metadata={"help": "The output directory where model predictions and checkpoints will be written"}
)
max_seq_length: Optional[int] = field(
default=128, metadata={"help": "The maximum total input sequence length after tokenization"}
)
do_train: Optional[bool] = field(default=False )
do_eval: Optional[bool] = field(default=False )
do_predict: Optional[bool] = field(default=False )
do_adapter_predict: Optional[bool] = field(default=False )
do_predict_dev: Optional[bool] = field(default=False )
do_predict_train: Optional[bool] = field(default=False )
init_checkpoint: Optional[str] = field(default=None )
evaluate_during_training: Optional[bool] = field(default=False )
do_lower_case: Optional[bool] = field(default=False )
few_shot: Optional[int] = field(default=-1 )
per_gpu_train_batch_size: Optional[int] = field(default=8)
per_gpu_eval_batch_size: Optional[int] = field(default=8)
gradient_accumulation_steps: Optional[int] = field(default=1)
learning_rate: Optional[float] = field(default=5e-5)
weight_decay: Optional[float] = field(default=0.0)
adam_epsilon: Optional[float] = field(default=1e-8)
max_grad_norm: Optional[float] = field(default=1.0)
num_train_epochs: Optional[float] = field(default=3.0)
max_steps: Optional[int] = field(default=-1)
save_steps: Optional[int] = field(default=-1)
warmup_steps: Optional[int] = field(default=0)
logging_steps: Optional[int] = field(default=50)
save_only_best_checkpoint: Optional[bool] = field(default=False)
eval_all_checkpoints: Optional[bool] = field(default=False)
no_cuda: Optional[bool] = field(default=False)
overwrite_output_dir: Optional[bool] = field(default=False)
overwrite_cache: Optional[bool] = field(default=False)
seed: Optional[int] = field(default=42)
fp16: Optional[bool] = field(default=False)
fp16_opt_level: Optional[str] = field(default="O1")
local_rank: Optional[int] = field(default=-1)
server_ip: Optional[str] = field(default="")
server_port: Optional[str] = field(default="")
predict_langs: Optional[str] = field(default="en")
train_langs: Optional[str] = field(default="en")
log_file: Optional[str] = field(default=None)
eval_patience: Optional[int] = field(default=-1)
bpe_dropout: Optional[float] = field(default=0)
do_save_adapter_fusions: Optional[bool] = field(default=False)
task_name: Optional[str] = field(default="ner")
predict_task_adapter: Optional[str] = field(default=None)
predict_lang_adapter: Optional[str] = field(default=None)
test_adapter: Optional[bool] = field(default=False)
adapter_weight: Optional[str] = field(default=None)
lang_to_vec: Optional[str] = field(default=None)
calc_weight_step: Optional[int] = field(default=0)
predict_save_prefix: Optional[str] = field(default=None)
en_weight: Optional[float] = field(default=None)
temperature: Optional[float] = field(default=1.0)
get_attr: Optional[bool] = field(default=False)
topk: Optional[int] = field(default=1)
task: Optional[str] = field(default='udpos')
def setup_adapter(args, adapter_args, model, train_adapter=True, load_adapter=None, load_lang_adapter=None):
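# Attach the task adapter (loading a pre-trained one if a path is given, otherwise
# adding a fresh one), resolve any language-adapter configs, freeze the base model
# weights, and activate the adapter stack for the forward pass.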
task_name = args.task_name or "ner"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters.adapter_list(AdapterType.text_task):
logging.info("Trying to decide if add adapter")
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter or load_adapter:
logging.info("loading task adapter")
model.load_adapter(
adapter_args.load_adapter if load_adapter is None else load_adapter,
AdapterType.text_task,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
logging.info("Adding task adapter")
model.add_adapter(task_name, AdapterType.text_task, config=adapter_config)
# optionally load a pre-trained language adapter
if adapter_args.load_lang_adapter or load_lang_adapter:
if load_lang_adapter is None:
# load a set of language adapters
logging.info("loading lang adpater {}".format(adapter_args.load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# if adapter_args.language == 'topk':
# assert len(args.predict_langs.split(',')) == 1
# filename = f'scripts/{args.task}/en/{args.predict_langs}.json'
# logger.info(f'Loading Adapter Languages from {filename}')
# languages = []
# with open(filename) as f:
# for i,line in enumerate(f):
# if i == args.topk:
# break
# line = json.loads(line)
# languages.append(line['adapter'].strip())
# adapter_names = [f'{lang}/wiki@ukp' for lang in languages]
# else:
# languages = adapter_args.language.split(",")
# adapter_names = adapter_args.load_lang_adapter.split(",")
# logger.info(f'Adapter Languages : {languages}, Length : {len(languages)}')
# logger.info(f'Adapter Names {adapter_names}, Length : {len(adapter_names)}')
# assert len(languages) == len(adapter_names)
# lang_adapter_names = []
# for language, adapter_name in zip(languages, adapter_names):
# logger.info(f'Language = {language}')
# logger.info(f'Adapter Name = {adapter_name}')
# lang_adapter_name = model.load_adapter(
# adapter_name,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as=language,
# )
# lang_adapter_names.append(lang_adapter_name)
else:
logging.info("loading lang adpater {}".format(load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# lang_adapter_name = model.load_adapter(
# load_lang_adapter,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as="lang",
# )
# lang_adapter_names = [lang_adapter_name]
else:
lang_adapter_name = None
lang_adapter_names = []
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_names, [task_name]])
else:
model.set_active_adapters([task_name])
return model, lang_adapter_names, task_name
def load_model(args, num_labels):
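# Load config, tokenizer and token-classification model from either an init
# checkpoint or the pretrained name; lang2id is returned only for XLM models.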
logger.info('Loading pretrained model and tokenizer')
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir,
)
args.model_type = config.model_type
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir,
use_fast=False,
)
if args.init_checkpoint:
logger.info("loading from init_checkpoint={}".format(args.init_checkpoint))
model = AutoModelForTokenClassification.from_pretrained(
args.init_checkpoint,
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("loading from existing model {}".format(args.model_name_or_path))
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
lang2id = config.lang2id if args.model_type == "xlm" else None
logger.info("Using lang2id = {}".format(lang2id))
return model, tokenizer, lang2id
def predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, split):
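# For every language in args.predict_langs, run evaluation on the given split,
# append the metrics as one JSON line per language, and write per-token predictions.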
output_test_results_file = os.path.join(args.output_dir, f"{split}_results.txt")
with open(output_test_results_file, "a") as result_writer:
for lang in args.predict_langs.split(','):
#Check if language data exists
if not os.path.exists(os.path.join(args.data_dir, lang, '{}.{}'.format(split, args.model_name_or_path))):
logger.info("Language {}, split {} does not exist".format(lang, split))
continue
#Activate the required language adapter
adapter_weight = None
# if not args.adapter_weight and not args.lang_to_vec:
# if (adapter_args.train_adapter or args.test_adapter) and not args.adapter_weight:
# if lang in lang_adapter_names:
# logger.info(f'Language adapter for {lang} found')
# logger.info("Set active language adapter to {}".format(lang))
# model.set_active_adapters([[lang], [task_name]])
# else:
# logger.info(f'Language adapter for {lang} not found, using {lang_adapter_names[0]} instead')
# logger.info("Set active language adapter to {}".format(lang_adapter_names[0]))
# model.set_active_adapters([[lang_adapter_names[0]], [task_name]])
# else:
# if args.adapter_weight == 'equal':
# adapter_weight = [1/len(lang_adapter_names) for _ in lang_adapter_names]
# elif args.adapter_weight == 'equal_en':
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = [(1-args.en_weight)/(len(lang_adapter_names)-1) for _ in lang_adapter_names]
# en_index = lang_adapter_names.index('en')
# adapter_weight[en_index] = args.en_weight
# elif args.lang_to_vec:
# if args.en_weight is not None:
# logger.info(lang_adapter_names)
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = calc_l2v_weights(args, lang, lang_adapter_names)
# elif args.adapter_weight == 'load':
# filename = f'weights/{args.task}/{lang}/weights_s{args.seed}'
# logger.info(f'Loading adapter weights from {filename}')
# with open(filename) as f:
# adapter_weight = json.loads(next(f))
# elif args.adapter_weight != "0" and args.adapter_weight is not None:
# adapter_weight = [float(w) for w in args.adapter_weight.split(",")]
logger.info('Args Adapter Weight = {}'.format(args.adapter_weight))
logger.info('Adapter Languages = {}'.format(lang_adapter_names))
if adapter_weight is not None:
logger.info("Adapter Weights = {}".format(adapter_weight))
logger.info('Sum of Adapter Weights = {}'.format(sum(adapter_weight)))
logger.info("Length of Adapter Weights = {}".format(len(adapter_weight)))
# model.set_active_adapters([ lang_adapter_names, [task_name]])
#Evaluate
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=split, lang=lang, lang2id=lang2id, adapter_weight=adapter_weight, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
if args.get_attr:
continue
result_json = {}
# Save results
if args.predict_save_prefix is not None and args.predict_save_prefix:
result_json['language'] = f'{args.predict_save_prefix}_{lang}'
else:
result_json['language'] = f'{lang}'
result_json['seed'] = args.seed
result_json['language_adapters'] = lang_adapter_names
if args.adapter_weight:
result_json['adapter_weights'] = args.adapter_weight
for key in sorted(result.keys()):
result_json[key] = result[key]
result_writer.write(json.dumps(result_json) + '\n')
# Save predictions
if args.predict_save_prefix is not None and args.predict_save_prefix:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_{}_s{}_predictions.txt".format(split, args.predict_save_prefix, lang, args.seed))
else:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_s{}_predictions.txt".format(split, lang, args.seed))
infile = os.path.join(args.data_dir, lang, "{}.{}".format(split, args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
def main():
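# End-to-end driver: parse arguments, set up distributed/CUDA and logging,
# train if requested, then run the requested evaluation/prediction modes.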
parser = argparse.ArgumentParser()
parser = HfArgumentParser((ModelArguments, MultiLingAdapterArguments))
args, adapter_args = parser.parse_args_into_dataclasses()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
# Initializes the distributed backend which synchronizes nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(handlers = [logging.FileHandler(args.log_file), logging.StreamHandler()],
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logging.info("Input args: %r" % args)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare NER/POS task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id
# so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.do_save_full_model= (not adapter_args.train_adapter)
args.do_save_adapters=adapter_args.train_adapter
if args.do_save_adapters:
logging.info('save adapters')
logging.info(adapter_args.train_adapter)
if args.do_save_full_model:
logging.info('save model')
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank == 0:
torch.distributed.barrier()
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter:
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model)
logger.info("lang adapter names: {}".format(" ".join(lang_adapter_names)))
else:
lang_adapter_names = []
task_name = None
model.to(args.device)
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang=args.train_langs, lang2id=lang2id, few_shot=args.few_shot)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
# Save model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", args.output_dir)
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
logging.info("Save adapter")
model_to_save.save_all_adapters(args.output_dir)
if args.do_save_adapter_fusions:
logging.info("Save adapter fusion")
model_to_save.save_all_adapter_fusions(args.output_dir)
if args.do_save_full_model:
logging.info("Save full model")
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Initialization for evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
# Evaluation
#This evaluates only if the entire model is saved, something we are not doing
if args.do_eval and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
logger.info('Evaluating the model on dev set of training language(en)')
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='debugging', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
results.update(result)
# for checkpoint in checkpoints:
# global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
# model = AutoModelForTokenClassification.from_pretrained(checkpoint)
# if adapter_args.train_adapter:
# load_adapter = checkpoint + "/" + args.task_name
# load_lang_adapter = "{}/{}".format(checkpoint, adapter_args.language)
# model.model_name = args.model_name_or_path
# model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
#
# model.to(args.device)
# result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
# if result["f1"] > best_f1:
# best_checkpoint = checkpoint
# best_f1 = result["f1"]
# if global_step:
# result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
# results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
# writer.write("best checkpoint = {}, best f1 = {}\n".format(best_checkpoint, best_f1))
if args.do_predict and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
# Prediction
logger.info('Evaluating the model on test set of all the languages specified')
#Set up the task adapter
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'test')
if args.do_predict_train and args.local_rank in [-1, 0]:
logger.info('Evaluating on the train set of all specified languages')
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'train')
#Predict dev set
if args.do_predict_dev and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
logger.info('Evaluating on the dev sets of all the specified languages')
#Set up task and language adapters
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'dev')
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
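# Walk the original text/idx files in parallel and write the predicted label for
# each token, starting a new block whenever the sentence index changes.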
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id
if __name__ == "__main__":
main()
| 46.527256 | 259 | 0.688153 | [
"Apache-2.0"
] | rohanshah13/cloud-emea-copy | third_party/ridayesh_run_tag.py | 52,064 | Python |
# The MIT License (MIT)
#
# Copyright (c) 2016 Oracle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Michael Shanley (Oracle A-Team)"
__copyright__ = "Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved."
__version__ = "1.0.0.0"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
from oc_provision_wrappers import commerce_setup_helper
import os
import time
import logging
logger = logging.getLogger(__name__)
json_key = 'ORACLE_11g_clone'
service_name = "Oracle DB clone"
def clone_oracle(configData, full_path):
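# Rehost a cloned Oracle 11g instance: swap the old hostname for the new one in
# tnsnames.ora and listener.ora, rename the per-host DB directory and, if requested,
# recreate the DB Console repository via emca.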
if json_key in configData:
jsonData = configData[json_key]
else:
logging.error(json_key + " config data missing from json. will not install")
return
logging.info("installing " + service_name)
INSTALL_OWNER = jsonData['installOwner']
ORACLE_HOME = jsonData['oracleHome']
ORIG_HOST = jsonData['originalHost']
NEW_HOST = jsonData['newHost']
ORACLE_SID = jsonData['oracleSID']
UPDATE_DB_CONSOLE = jsonData['updateDBConsole']
db_script = "/etc/init.d/oracleDatabase"
db_console_script = "/etc/init.d/oracleDBconsole"
stop_db_cmd = db_script + " stop"
stop_db_console_cmd = db_console_script + " stop"
start_db_cmd = db_script + " start"
start_db_console_cmd = db_console_script + " start"
tns_path = ORACLE_HOME + "/network/admin/tnsnames.ora"
lsnr_path = ORACLE_HOME + "/network/admin/listener.ora"
if not os.path.exists(tns_path):
logging.error("tnsnames.ora not found at " + tns_path + " - will not proceed")
return False
# stop db
commerce_setup_helper.exec_cmd(stop_db_cmd)
# stop console
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
tns_replacements = {}
lsnr_replacements = {}
if (ORIG_HOST and NEW_HOST):
tns_replacements[ORIG_HOST] = NEW_HOST
lsnr_replacements[ORIG_HOST] = NEW_HOST
# update tnsnames
if tns_replacements:
if not os.path.exists(tns_path):
logging.warn("tnsnames.ora not found at " + tns_path + " - cannot modify")
else:
# backup tnsnames
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + tns_path + " " + tns_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
commerce_setup_helper.substitute_file_fields(tns_path, tns_path, tns_replacements)
# update listener
if lsnr_replacements:
if not os.path.exists(lsnr_path):
logging.warn("listener.ora not found at " + lsnr_path + " - cannot modify")
else:
# backup listener
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + lsnr_path + " " + lsnr_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
commerce_setup_helper.substitute_file_fields(lsnr_path, lsnr_path, lsnr_replacements)
# update db name
orig_db_name = ORACLE_HOME + "/" + ORIG_HOST + "_" + ORACLE_SID
new_db_name = ORACLE_HOME + "/" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_name):
logging.error("db path not found at " + orig_db_name + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_name + " " + new_db_name + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
# update db console
if (UPDATE_DB_CONSOLE == "true") :
PORT = jsonData['lsnrPort']
ORACLE_PW = jsonData['adminPW']
orig_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + ORIG_HOST + "_" + ORACLE_SID
new_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_console):
logging.warn("db console not found at " + orig_db_console + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_console + " " + new_db_console + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
# the DB must be running for emca to execute, so make sure it is started
# start db
commerce_setup_helper.exec_cmd(start_db_cmd)
emca_params = "-SID " + ORACLE_SID + " -PORT " + PORT + " -SYS_PWD " + ORACLE_PW + " -SYSMAN_PWD " + ORACLE_PW + " -DBSNMP_PWD " + ORACLE_PW
drop_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -deconfig dbcontrol db -repos drop -silent " + emca_params + "\""
create_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -config dbcontrol db -repos create -silent " + emca_params + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, drop_repo_cmd)
commerce_setup_helper.exec_as_user(INSTALL_OWNER, create_repo_cmd)
# stop db
commerce_setup_helper.exec_cmd(stop_db_cmd)
# stop console
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
# start db
commerce_setup_helper.exec_cmd(start_db_cmd)
if (UPDATE_DB_CONSOLE == "true") :
# start dbconsole
commerce_setup_helper.exec_cmd(start_db_console_cmd)
| 42.946309 | 153 | 0.643694 | [
"MIT"
] | LaudateCorpus1/atg-commerce-iaas | common-python/oc_provisioning/oc_provision_wrappers/database/v11g/oracle_rdbms_clone.py | 6,399 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
*args: List of inputs to be converted (should be Tf.Tensor).
Returns:
Wrapped inputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
This is uses to accumulate found hints in the graphdef into a single
conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
uuids.
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
a list of `_LiteFuncCall` objects, one per hinted function found in the graph.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
if OpHint.FUNCTION_UUID_ATTR not in attr:
continue
uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
call_def = func_calls[uuid]
call_def.uuid = uuid
if OpHint.FUNCTION_UUID_ATTR in attr:
call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
call_def.params[a.replace("_tflite_attr_,", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
| 37.993528 | 80 | 0.725639 | [
"Apache-2.0"
] | 188080501/tensorflow | tensorflow/contrib/lite/python/op_hint.py | 11,740 | Python |
#!/usr/bin/python
# vim:fileencoding=utf-8
#
# Lookup of MX, A and NS records
#
import unbound
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_MX, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.mx_list:
print " priority:%d address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.address_list:
print " address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_NS, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.domain_list:
print " host: %s" % k
| 28.6875 | 79 | 0.650327 | [
"BSD-3-Clause"
] | 1Blackdiamondsc/haven | external/unbound/libunbound/python/doc/examples/example8-1.py | 918 | Python |
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r /holding_folder/* in shell script instead?'
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
'''
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
'''
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)
for suffix in balanced_bioindex.filesuffix.unique():
volsplit_file = 'volsplit'+str(suffix)+'.zip'
volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix,:]
try:
with zipfile.ZipFile('/media/secure_volume/'+volsplit_file, 'r') as myzip:
for idx, row in volsplit_df.iterrows():
filename = row['mainid']+'.zip'
myzip.extract(filename, '/media/secure_volume/holding_folder')
except Exception as e:
print('ERROR:',filename,'not found in',volsplit_file,'!', e)
def slicer(outfile):
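# Build a tab-separated slice file for the extracted volumes: one row per zip in the
# holding folder with its HathiTrust id, its path, and two constant columns (0 and
# 1001), presumably the page bounds expected downstream.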
idx_file_path = '/media/secure_volume/index/bioindex.tsv'
holding_folder_path = '/media/secure_volume/holding_folder/'
bio_idx_df = pd.read_table(idx_file_path)
bio_idx_df.set_index('mainid', inplace = True)
mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
# remove '.zip' from file names
mainid_list_clean = [item[0:-4] for item in mainid_list]
#subset bioindex on holding_folder IDs
htid_series = bio_idx_df.htid[mainid_list_clean]
file_path_list = glob.glob(holding_folder_path+'*.zip')
# print('file path list has: ',len(file_path_list))
# print('htid_list has', len(htid_list))
slice_df = pd.DataFrame(htid_series)
slice_df['path'] = file_path_list
slice_df['c'] = 0
slice_df['d'] = 1001
with open(outfile, 'w') as outf:
slice_df.to_csv(outfile, sep='\t', header=False, index=False)
print("Wrote", len(slice_df), "rows to", outfile)
extract(infile)
slicer(outfile)
| 34.042857 | 90 | 0.684012 | [
"MIT"
] | afcarl/biographies | code/extract_balanced.py | 2,383 | Python |
# Write an algorithm using the while statement that shows a
# countdown on the screen, starting at 10 and ending at 0.
# Show a "FIM!" ("END!") message after the countdown.
i=11
while(i!=0):
i-=1
print(i)
print("FIM") | 28 | 60 | 0.71875 | [
"MIT"
] | AlexandrePeBrito/CursoUdemyPython | exercicios/Lista3/Q3.py | 228 | Python |
# Columbus - A Smart Navigation System for the Visually-Impaired
# Ike Kilinc
# This file integrates Columbus' primary start location and destination input
# features with its core pathfinding algorithm. This file also facilitates
# Columbus' speech recognition and audio functionalities.
from speech_to_text import *
from node_mapper import *
from path_finder import *
#####################################################################
#####################################################################
def run():
# Columbus asks what the user would like to do (with a help option): specific destination, nearest restroom/printer, popular or saved destinations.
pathMode = startupModeSelection()
if pathMode == "specificDestination":
# User inputs destination.
destination = destinationInput()
startLocation = startLocationInput()
elif pathMode == "nearestRestroom":
# Columbus asks where user is (TEMP).
startLocation = startLocationInput()
# Columbus finds nearest Restroom and sets as destination
destination = None
elif pathMode == "nearestPrinter":
# Columbus asks where user is (TEMP).
startLocation = startLocationInput()
# Columbus finds nearest Printer and sets as destination
destination = None
elif pathMode == "popularDestinations":
# Columbus gives user choice options of popular destinations.
# Sets user input as the destination.
destination = popularLocationsInput(data)
startLocation = startLocationInput()
elif pathMode == "savedDestinations":
# Columbus gives user choice of previously saved destinations and sets
# user input as the destination.
destination = savedLocationsInput(data)
startLocation = startLocationInput()
elif pathMode == "findGod":
pass
# Columbus searches for and determines path to destination.
nodesPath = pathFinder(startLocation, destination, pathMode)
#####################################################################
#####################################################################
class Segment(object):
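# A straight run of the route between two path nodes on a single floor; stores its
# bounding coordinates, ordering index and whether it represents a floor change.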
def __init__(self, startCoords, endCoords, segNumber, isActive, isFloorChange):
self.segmentBounds = (startCoords[0], startCoords[1], endCoords[0], endCoords[1])
self.floor = startCoords[2]
self.segNumber = segNumber
self.isActive = isActive
self.isFloorChange = isFloorChange
# self.direction = direction
def __repr__(self):
return str(self.segNumber)
def __hash__(self):
return hash(self.segNumber)
def getSegBounds(self):
return self.segmentBounds
def getSegNum(self):
return self.segNumber
def getSegFloor(self):
return self.floor
def getIsActive(self):
return self.isActive
def getIsFloorChange(self):
return self.isFloorChange
def getCenter(self):
centerX = (self.segmentBounds[0] + self.segmentBounds[2])/2
centerY = (self.segmentBounds[1] + self.segmentBounds[3])/2
return (centerX, centerY)
def getSegmentDirection(self):
(x0,y0,x1,y1) = self.segmentBounds
if (x1-x0) > 0: return "E"
elif (x1-x0) < 0: return "W"
elif (y1-y0) > 0: return "S"
elif (y1-y0) < 0: return "N"
else: return None
def createAllSegments(nodesPath):
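# Collapse the node path down to its intersections/elevators (plus endpoints) and
# build a Segment between each consecutive pair, flagging elevator-to-elevator
# hops as floor changes.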
allSegments = []
isFloorChange = False
intNodesPath = []
for i in range(len(nodesPath)):
node = nodesPath[i]
if (isinstance(node, Intersection) or isinstance(node, Elevator) or
i==0 or i==(len(nodesPath)-1)):
intNodesPath.append(node)
for i in range(len(intNodesPath)-1):
(node, nextNode) = (intNodesPath[i], intNodesPath[i+1])
if (isinstance(node, Elevator) and isinstance(nextNode, Elevator)):
isFloorChange = True
segment = Segment(node.getCoords(), nextNode.getCoords(), i, False, isFloorChange)
isFloorChange = False
allSegments.append(segment)
allSegments.append(Segment(intNodesPath[-1].getCoords(), intNodesPath[-1].getCoords(), i, False, False))
return allSegments
#####################################################################
#####################################################################
def startupModeSelection(repeat=False):
# Used to select mode for operating Columbus. Mode options include:
# Finding directions to a specific destination, directions to the nearest
# restroom, directions to popular destinations, and directions to previously
# saved destinations.
if repeat == True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
play("voiceCommands/modeSelectionInputPrompt.wav")
userInput = recognizeSpeech("mode")
if userInput == "help":
play("voiceCommands/modeSelectionHelp.wav")
userInput = recognizeSpeech("mode")
if userInput in ["nearestRestroom", "popularDestinations",
"savedDestinations", "nearestPrinter",
"specificDestination", "findGod", "help"]:
return userInput
else:
return startupModeSelection(True)
def destinationInput(repeat=False):
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPrompt.wav")
# User inputs destination
destination = recognizeSpeech("location")
if isLegalNode(destination):
return destination
else:
return destinationInput(True)
def startLocationInput(repeat=False):
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user is now.
play("voiceCommands/startLocationInputPrompt.wav")
# User inputs start location.
startLocation = recognizeSpeech("location")
if isLegalNode(startLocation):
return startLocation
else:
return startLocationInput(True)
def popularLocationsInput(data, repeat=False):
print("popLocsInput")
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPromptWithHelp.wav")
userInput = recognizeSpeech("popularDest")
if userInput == "help":
play("voiceCommands/popularLocationSelectionHelp.wav")
userInput = recognizeSpeech("popularDest")
if userInput in ["5Prima", "4Sorrells"]:
return userInput
else:
return popularLocationsInput(data, True)
def savedLocationsInput(data, repeat=False):
if len(data.savedLocations) == 0:
play("voiceCommands/noSavedDestinations.wav")
else:
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPromptWithHelp.wav")
userInput = recognizeSpeech("savedDest")
if userInput == "help":
play("voiceCommands/modeSelectionHelp.wav")
userInput = recognizeSpeech("savedDest")
if userInput in data.savedLocations:
return userInput
else:
return savedLocationsInput(data, True)
def isLegalNode(string):
allNodesMap = mapAllNodes()
for floor in allNodesMap:
for roomStr in allNodesMap[floor]:
if string == roomStr:
return True
return False
| 32.212766 | 108 | 0.628666 | [
"MIT"
] | ikekilinc/Columbus | main_algo.py | 7,570 | Python |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_emperor.plugin_setup import plugin as emperor_plugin
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(emperor_plugin.name, 'emperor')
| 31.333333 | 78 | 0.556738 | [
"BSD-3-Clause"
] | ChrisKeefe/q2-emperor | q2_emperor/tests/test_plugin_setup.py | 564 | Python |
"""
Generate configuration files into :ref:`generated_dir<directories>`.
"""
from fabric.api import task
from gusset.output import status
from gusset.validation import with_validation
from confab.iter import iter_conffiles
@task
@with_validation
def generate(directory=None):
"""
Generate configuration files.
"""
for conffiles in iter_conffiles(directory):
status("Generating templates for '{environment}' and '{role}'",
environment=conffiles.environment,
role=conffiles.role)
conffiles.generate()
| 24.565217 | 71 | 0.707965 | [
"Apache-2.0"
] | locationlabs/confab | confab/generate.py | 565 | Python |
import unittest
from cloudmitigator_semantic import version
class VersionTestCase(unittest.TestCase):
def test_proper_version_parsing(self):
proper_version = version.Version("v5.1.2")
self.assertEqual(proper_version.major, 5)
self.assertEqual(proper_version.minor, 1)
self.assertEqual(proper_version.patch, 2)
def test_bump_major(self):
major_version = version.Version("v1.3.2")
major_version.bump_major()
self.assertEqual(major_version.major, 2)
self.assertEqual(major_version.minor, 0)
self.assertEqual(major_version.patch, 0)
def test_bump_minor(self):
minor_version = version.Version("v1.3.2")
minor_version.bump_minor()
self.assertEqual(minor_version.major, 1)
self.assertEqual(minor_version.minor, 4)
self.assertEqual(minor_version.patch, 0)
def test_bump_patch(self):
patch_version = version.Version("v1.3.2")
patch_version.bump_patch()
self.assertEqual(patch_version.major, 1)
self.assertEqual(patch_version.minor, 3)
self.assertEqual(patch_version.patch, 3)
def test_pre_release(self):
pre_version = version.Version("v1.3.2-alpha")
self.assertEqual(pre_version.version, "v1.3.2-alpha")
def test_meta_data(self):
meta_data = version.Version("v1.3.2-alpha+361nh")
self.assertEqual(meta_data.version, "v1.3.2-alpha+361nh")
def test_version_changed_true(self):
version_changed = version.Version("v1.1.0")
version_changed.bump_minor()
self.assertTrue(version_changed.version_changed)
def test_version_changed_false(self):
version_changed = version.Version("v1.1.0")
self.assertFalse(version_changed.version_changed)
def test_return_original_version(self):
original_version = version.Version("v1.1.0")
original_version.bump_patch()
self.assertEqual(original_version.original_version, "v1.1.0")
self.assertEqual(original_version.version, "v1.1.1")
| 34.847458 | 69 | 0.695525 | [
"MPL-2.0"
] | reflexivesecurity/semantic | tests/test_version_object.py | 2,056 | Python |
from habit.habit_model import HabitHistory
from habit.complete_habit import complete
def test_overdue_habit(datasett):
"""
please note the 'double tt' for datasett. This stands to differentiate
the functional test data from the data used for unit tests.
habit 1 is the overdue habit since its added first in the func/conftest
module.
:param datasett: from func/conftest
:return:
"""
session = datasett
complete(1, session)
result = session.query(HabitHistory.broken_count).\
filter(HabitHistory.habitid == 1).all()
assert result == [(1,)]
def test_a_habit_due_for_completion(datasett):
"""
Habit 2 is the due habit since it's added second in the func/conftest
module.
:param datasett: from func/conftest
:return:
"""
session = datasett
complete(2, session)
result = session.query(HabitHistory.streak).\
filter(HabitHistory.habitid == 2).all()
assert result == [(1,)]
| 29.515152 | 75 | 0.687885 | [
"MIT"
] | takavarasha-desire/habittracker1_1 | tests/func/test_complete_habit.py | 974 | Python |
#!/usr/bin/python3
"""Alta3 Research - Exploring OpenAPIs with requests"""
# documentation for this API is at
# https://anapioficeandfire.com/Documentation
import requests
AOIF = "https://www.anapioficeandfire.com/api"
def main():
## Send HTTPS GET to the API of ICE and Fire
gotresp = requests.get(AOIF)
## Decode the response
got_dj = gotresp.json()
## print the response
print(got_dj)
if __name__ == "__main__":
main()
| 20.727273 | 55 | 0.686404 | [
"MIT"
] | AgentKD6-37/2022-01-04-Python | Day 6/iceAndFire01.py | 456 | Python |
import time
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class LossWriter(SummaryWriter):
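# Thin SummaryWriter wrapper that defaults to a timestamped log directory and
# namespaces every scalar under the 'Loss/' tag.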
def __init__(self, log_dir=None, comment=''):
if log_dir == None:
log_dir = './logs/tensorboard/' + time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime(time.time()))
super(LossWriter, self).__init__(log_dir=log_dir, comment=comment)
def write_loss(self, loss_name, scalar, n_iter):
self.add_scalar('Loss/'+loss_name, scalar, n_iter)
if __name__=='__main__':
testwriter = LossWriter()
for n_iter in range(100):
testwriter.write_loss('test_loss', np.random.random(), n_iter)  # 'test_loss' is an arbitrary tag name
| 29.090909 | 110 | 0.676563 | [
"MIT"
] | wooseoklee4/AP-BSN | src/util/summary_logging.py | 640 | Python |
from __future__ import unicode_literals
from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel, StatusModel
from django.conf import settings
class Booking(TimeStampedModel, StatusModel):
TRAVEL_CLASSES = Choices('economy', 'first_class')
STATUS = Choices(('pending', 'Pending'), ('paid', 'Paid'), ('failed', 'Failed'))
date_of_travel = models.DateTimeField()
travel_class = models.CharField(choices=TRAVEL_CLASSES, default=TRAVEL_CLASSES.economy, max_length=30)
status = models.CharField(choices=STATUS, default=STATUS.pending, max_length=20)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False)
payment_reference = models.CharField(max_length=100, blank=True)
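# Minimal usage sketch (assumes migrations are applied, `some_user` is an
# existing AUTH_USER_MODEL instance, and `timezone` is django.utils.timezone):
#   Booking.objects.create(date_of_travel=timezone.now(), user=some_user)
# status and travel_class fall back to their defaults ('pending', 'economy').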
| 48 | 106 | 0.778646 | ["MIT"] | savioabuga/lipame | lipame/lipa/models.py | 768 | Python |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class OuterEnumIntegerDefaultValue(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
}
@cached_property
def discriminator():
return None
attribute_map = {
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OuterEnumIntegerDefaultValue - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 38.380117 | 174 | 0.590279 | ["Apache-2.0"] | jetbridge/openapi-generator | samples/openapi3/client/petstore/python-experimental/petstore_api/model/outer_enum_integer_default_value.py | 6,563 | Python |
from math import pi, sqrt
from typing import List
import numpy as np
import pytest
from src.kinematics.forward_kinematics import get_tform
from src.prechecks.spatial_interpolation import linear_interpolation, circular_interpolation
@pytest.mark.parametrize("start,end,ds,expected_points",
[
(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [300, 0, 0]],
50,
7
),
(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [50, 0, 0]],
50,
2
)
]
)
def test_linear_interpolation(start, end, ds, expected_points):
# Create the start and end point matrices
start = get_tform(*start)
end = get_tform(*end)
# Calculate the interpolated tforms
interpolated_tforms = list(linear_interpolation(start, end, ds=ds))
helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
# Check that the points are equidistant
if expected_points > 2:
for i in range(expected_points - 1):
ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
assert pytest.approx(ds, rel=0.1) == ds_actual
@pytest.mark.parametrize("start,end,nvec,cw,ds,expected_points",
[
# XY plane half circle (start, intermediate, end)
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi / 2,
3
),
# XY plane half circle (start, end)
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi,
2
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi / 2 * 1.1,
2
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
False,
pi / 5,
6
),
# XY plane 3/4 circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
[0, 0, 1],
True,
6 / 16 * pi,
5
),
# XY plane full circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[0, 0, 1],
False,
2 / 3 * pi,
4
),
# YZ plane 3/4 circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, -1]],
[1, 0, 0],
True,
6 / 16 * pi,
5
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -0.5 * sqrt(2), 0.5 * sqrt(2)]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0.5 * sqrt(2), -0.5 * sqrt(2)]],
[0, 1, 1],
False,
pi / 5,
6
)
]
)
def test_circular_interpolation(start, end, nvec, cw, ds, expected_points):
# Create the start and end point matrices
start = get_tform(*start)
end = get_tform(*end)
# Calculate the interpolated tforms
interpolated_tforms = list(circular_interpolation(start, end, [0, 0, 0], nvec, cw, ds=ds))
print(interpolated_tforms)
helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
# Check that the points all have distance of the radius to the center point
r = np.linalg.norm(start[0:3, 3])
for tform in interpolated_tforms:
assert pytest.approx(r, rel=0.01) == np.linalg.norm(tform[0:3, 3])
# Check that the points are equidistant
if expected_points > 3:
ds_straight_line_ref = np.linalg.norm(interpolated_tforms[1][0:3, 3] - interpolated_tforms[0][0:3, 3])
for i in range(1, expected_points - 1):
ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
assert pytest.approx(ds_straight_line_ref, rel=0.1) == ds_actual
def helper_spatial_interpolation_test(interpolated_tforms: List[np.ndarray], start, end, expected_points):
# Test that the number of interpolated points is correct
assert len(interpolated_tforms) == expected_points
# Test that the start and end points are included
np.testing.assert_allclose(interpolated_tforms[0], start)
np.testing.assert_allclose(interpolated_tforms[-1], end)
| 47.64 | 110 | 0.34047 | ["MIT"] | pat-bert/gcode | test/test_spatial_interpolation.py | 7,146 | Python |
# Read the number of vertices, the start vertex and the flattened adjacency matrix
n = int(input())
c = int(input())
lista = input().split()
graph = [[0 for i in range(n)] for j in range(n)]
cont = 0
for i in range(n):
    for j in range(n):
        graph[i][j] = int(lista[cont])
        cont += 1
        if i == j:
            graph[i][j] = 0
# Walk the graph starting from c, consuming each edge at most once
listaMemoria = [c]
contaminados = []
contaminados.append(c)
k = 1  # counts the start vertex plus every edge followed
while True:
    veLinha = listaMemoria[-1]
    check = 0
    # Follow the first unused edge leaving the current vertex
    for i in range(n):
        if graph[veLinha][i] == 1:
            graph[veLinha][i] = 0
            graph[i][veLinha] = 0
            listaMemoria.append(i)
            contaminados.append(i)
            check = 1
            k += 1
            break
    # No unused edge left: stop if the current vertex is the start, otherwise backtrack
    if check == 0:
        if listaMemoria[-1] == c:
            break
        else:
            listaMemoria.pop()
print(k)
| 23.181818 | 49 | 0.48366 | ["MIT"] | Vith-MCB/UFV | UFV---Python/Trabalho Mat. Disc/grafo_4675.py | 765 | Python |
import csv
import six
import sys
import time
from datetime import (
datetime,
date,
timedelta,
)
from xml.etree import cElementTree as ElementTree
from django.core.management.base import BaseCommand
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL, CaseReindexAccessor
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.apps.locations.models import SQLLocation
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.backends.sql.dbaccessors import iter_all_rows
from casexml.apps.case.mock import CaseBlock
DOMAIN = "icds-cas"
CASE_TYPE = "person"
CUT_OFF_AGE_IN_YEARS = 6
date_today = date.today()
CUT_OFF_DOB = str(date_today.replace(year=date_today.year - CUT_OFF_AGE_IN_YEARS))
DOB_PROPERTY = "dob"
MOTHER_NAME_PROPERTY = "mother_name"
MOTHER_INDEX_IDENTIFIER = "mother"
CASE_ITERATION_COUNT = 10000
MAX_RESCUE_EXCEPTIONS_ON_UPDATE = 5
CSV_HEADERS = ['Case ID', 'Mother Case ID', 'Mother Name']
class Command(BaseCommand):
help = """
    Iterate person cases updated in the last 100 days (3 months with buffer) in a single partition,
    and find the ones which are
        - not deleted
        - not belonging to test locations,
        - with age less than 6 years using dob case property,
        - if there is a related mother case, populate mother_name case property with its name
    Returns two lists of case ids, the ones updated and the ones that could not be updated
"""
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.db_alias = None
self.log_progress = False
self.test_locations = None
self.case_accessor = CaseAccessors(DOMAIN)
def add_arguments(self, parser):
parser.add_argument('db_alias')
parser.add_argument(
'--log',
action='store_true',
dest='log_progress',
default=False,
help="log progress"
)
def handle(self, db_alias, log_progress, **options):
self.db_alias = db_alias
self.log_progress = log_progress
self.test_locations = find_test_awc_locations()
filename = self._find_case_ids_without_mother_name()
if self.log_progress:
print('starting update now for cases')
self._update_cases(filename)
def _find_case_ids_without_mother_name(self):
start_date = date.today() - timedelta(days=100)
reindex_accessor = CaseReindexAccessor(
domain=DOMAIN,
case_type=CASE_TYPE, limit_db_aliases=[self.db_alias],
start_date=start_date
)
filename = 'cases_without_mother_name_part_%s_%s.csv' % (self.db_alias, datetime.utcnow())
cases_with_no_mother_name_filename = 'cases_with_no_mother_name_part_%s_%s.csv' % (
self.db_alias, datetime.utcnow())
with open(filename, 'w') as output:
with open(cases_with_no_mother_name_filename, 'w') as no_mother_name_file:
cases_iterated = 0
writer = csv.writer(output)
writer.writerow(CSV_HEADERS)
no_mother_name_writer = csv.writer(no_mother_name_file)
no_mother_name_writer.writerow(['Case ID'])
if self.log_progress:
print('iterating now')
for case in iter_all_rows(reindex_accessor):
if self.log_progress and cases_iterated % CASE_ITERATION_COUNT == 0:
print("cases iterated: %s" % cases_iterated)
cases_iterated += 1
if self._case_needs_to_be_updated(case):
mother_case_id, mother_name = self._find_mother_case_id_and_name(case)
if mother_case_id and mother_name:
writer.writerow([case.case_id, mother_case_id, mother_name])
else:
no_mother_name_writer.writerow([case.case_id])
return filename
def _find_mother_case_id_and_name(self, case):
mother_case_ids = [i.referenced_id for i in CaseAccessorSQL.get_indices(DOMAIN, case.case_id)
if i.identifier == MOTHER_INDEX_IDENTIFIER]
if len(mother_case_ids) == 1:
try:
mother_case = self.case_accessor.get_case(mother_case_ids[0])
except CaseNotFound:
pass
else:
return mother_case.case_id, mother_case.name
return None, None
def _case_needs_to_be_updated(self, case):
if case.deleted:
return False
assert case.type == CASE_TYPE
if bool(case.owner_id) and case.owner_id in self.test_locations:
return False
dob = case.get_case_property(DOB_PROPERTY)
if dob and dob > CUT_OFF_DOB and not case.get_case_property(MOTHER_NAME_PROPERTY):
return True
return False
def _update_cases(self, filename):
exceptions_raised = 0
updates = {} # case id: mother name
counter = 0
with open(filename, 'r') as _input:
reader = csv.DictReader(_input)
with open('cases_without_mother_name_part_%s_updated.csv' % self.db_alias, 'w') as output:
writer = csv.writer(output)
writer.writerow(['Case ID', 'Mother Name'])
for row in reader:
updates[row['Case ID']] = row['Mother Name']
counter += 1
if counter > 0 and counter % 100 == 0:
case_ids = self._reassured_case_ids_to_update(list(updates.keys()))
skip_ids = updates.keys() - case_ids
for case_id in skip_ids:
updates.pop(case_id)
for case_id, mother_name in updates.items():
writer.writerow([case_id, mother_name])
exceptions_raised = self._submit_update_form(updates, exceptions_raised)
if self.log_progress:
print("cases updated: %s" % counter)
updates = {}
counter = 0
# update the pending batch
for case_id, mother_name in updates.items():
writer.writerow([case_id, mother_name])
exceptions_raised = self._submit_update_form(updates, exceptions_raised)
def _submit_update_form(self, updates, exceptions_raised):
update_case_blocks = self.create_case_blocks(updates)
if not update_case_blocks:
return exceptions_raised
for attempt in range(MAX_RESCUE_EXCEPTIONS_ON_UPDATE):
try:
submit_case_blocks(update_case_blocks, DOMAIN, user_id=SYSTEM_USER_ID)
except Exception as e:
exc = sys.exc_info()
exceptions_raised += 1
if self.log_progress:
print("rescuing exception %s %s" % (exceptions_raised, str(e)))
if exceptions_raised > MAX_RESCUE_EXCEPTIONS_ON_UPDATE:
six.reraise(*exc)
else:
time.sleep(60) # wait for 1 min before trying again
else:
break
return exceptions_raised
def create_case_blocks(self, updates):
case_blocks = []
for case_id, mother_name in updates.items():
case_block = CaseBlock.deprecated_init(case_id,
update={MOTHER_NAME_PROPERTY: mother_name},
user_id=SYSTEM_USER_ID)
case_block = ElementTree.tostring(case_block.as_xml()).decode('utf-8')
case_blocks.append(case_block)
return case_blocks
def _reassured_case_ids_to_update(self, case_ids):
# reconfirm the cases before updating to avoid removing updates in between
# fetching case ids and updating
invalid_cases = self.case_accessor.get_cases(case_ids)
case_ids_list = set()
for invalid_case in invalid_cases:
if self._case_needs_to_be_updated(invalid_case):
case_ids_list.add(invalid_case.case_id)
return case_ids_list
def find_test_awc_locations():
test_locations = set()
for location in SQLLocation.active_objects.filter(location_type__code='state', domain=DOMAIN):
if location.metadata.get('is_test_location') == 'test':
test_locations.update(
location.get_descendants(include_self=True).
filter(location_type__code='awc').values_list('location_id', flat=True)
)
return test_locations
| 41.84507 | 102 | 0.62392 | ["BSD-3-Clause"] | dungeonmaster51/commcare-hq | custom/icds/management/commands/populate_mother_name.py | 8,913 | Python |
"""Class to manage the entities for a single platform."""
import asyncio
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.core import callback, valid_entity_id, split_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
from .event import async_track_time_interval, async_call_later
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityPlatform:
"""Manage the entities for a single platform."""
def __init__(self, *, hass, logger, domain, platform_name, platform,
scan_interval, entity_namespace,
async_entities_added_callback):
"""Initialize the entity platform.
hass: HomeAssistant
logger: Logger
domain: str
platform_name: str
scan_interval: timedelta
entity_namespace: str
async_entities_added_callback: @callback method
"""
self.hass = hass
self.logger = logger
self.domain = domain
self.platform_name = platform_name
self.platform = platform
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.async_entities_added_callback = async_entities_added_callback
self.config_entry = None
self.entities = {}
self._tasks = []
# Method to cancel the state change listener
self._async_unsub_polling = None
# Method to cancel the retry of setup
self._async_cancel_retry_setup = None
self._process_updates = asyncio.Lock()
# Platform is None for the EntityComponent "catch-all" EntityPlatform
# which powers entity_component.add_entities
if platform is None:
self.parallel_updates = None
self.parallel_updates_semaphore = None
return
self.parallel_updates = getattr(platform, 'PARALLEL_UPDATES', None)
# semaphore will be created on demand
self.parallel_updates_semaphore = None
def _get_parallel_updates_semaphore(self):
"""Get or create a semaphore for parallel updates."""
if self.parallel_updates_semaphore is None:
self.parallel_updates_semaphore = asyncio.Semaphore(
self.parallel_updates if self.parallel_updates else 1,
loop=self.hass.loop
)
return self.parallel_updates_semaphore
async def async_setup(self, platform_config, discovery_info=None):
"""Set up the platform from a config file."""
platform = self.platform
hass = self.hass
@callback
def async_create_setup_task():
"""Get task to set up platform."""
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(
hass, platform_config,
self._async_schedule_add_entities, discovery_info
)
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
return hass.loop.run_in_executor(
None, platform.setup_platform, hass, platform_config,
self._schedule_add_entities, discovery_info
)
await self._async_setup_platform(async_create_setup_task)
async def async_setup_entry(self, config_entry):
"""Set up the platform from a config entry."""
# Store it so that we can save config entry ID in entity registry
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task():
"""Get task to set up platform."""
return platform.async_setup_entry(
self.hass, config_entry, self._async_schedule_add_entities)
return await self._async_setup_platform(async_create_setup_task)
async def _async_setup_platform(self, async_create_setup_task, tries=0):
"""Set up a platform via config file or config entry.
async_create_setup_task creates a coroutine that sets up platform.
"""
logger = self.logger
hass = self.hass
full_name = '{}.{}'.format(self.domain, self.platform_name)
logger.info("Setting up %s", full_name)
warn_task = hass.loop.call_later(
SLOW_SETUP_WARNING, logger.warning,
"Setup of platform %s is taking over %s seconds.",
self.platform_name, SLOW_SETUP_WARNING)
try:
task = async_create_setup_task()
await asyncio.wait_for(
asyncio.shield(task),
SLOW_SETUP_MAX_WAIT)
# Block till all entities are done
if self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
await asyncio.wait(
pending)
hass.config.components.add(full_name)
return True
except PlatformNotReady:
tries += 1
wait_time = min(tries, 6) * 30
logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
self.platform_name, wait_time)
async def setup_again(now):
"""Run setup again."""
self._async_cancel_retry_setup = None
await self._async_setup_platform(
async_create_setup_task, tries)
self._async_cancel_retry_setup = \
async_call_later(hass, wait_time, setup_again)
return False
except asyncio.TimeoutError:
logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
self.platform_name, SLOW_SETUP_MAX_WAIT)
return False
except Exception: # pylint: disable=broad-except
logger.exception(
"Error while setting up platform %s", self.platform_name)
return False
finally:
warn_task.cancel()
def _schedule_add_entities(self, new_entities, update_before_add=False):
"""Schedule adding entities for a single platform, synchronously."""
run_callback_threadsafe(
self.hass.loop,
self._async_schedule_add_entities, list(new_entities),
update_before_add
).result()
@callback
def _async_schedule_add_entities(self, new_entities,
update_before_add=False):
"""Schedule adding entities for a single platform async."""
self._tasks.append(self.hass.async_add_job(
self.async_add_entities(
new_entities, update_before_add=update_before_add)
))
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
        # Warn when update_before_add is used here, since outside of tests it can deadlock
if update_before_add:
self.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.hass.loop).result()
async def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
hass = self.hass
device_registry = await \
hass.helpers.device_registry.async_get_registry()
entity_registry = await \
hass.helpers.entity_registry.async_get_registry()
tasks = [
self._async_add_entity(entity, update_before_add,
entity_registry, device_registry)
for entity in new_entities]
# No entities for processing
if not tasks:
return
await asyncio.wait(tasks)
self.async_entities_added_callback()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.entities.values()):
return
self._async_unsub_polling = async_track_time_interval(
self.hass, self._update_entity_states, self.scan_interval
)
async def _async_add_entity(self, entity, update_before_add,
entity_registry, device_registry):
"""Add an entity to the platform."""
if entity is None:
raise ValueError('Entity cannot be None')
entity.hass = self.hass
entity.platform = self
# Async entity
# PARALLEL_UPDATE == None: entity.parallel_updates = None
# PARALLEL_UPDATE == 0: entity.parallel_updates = None
# PARALLEL_UPDATE > 0: entity.parallel_updates = Semaphore(p)
# Sync entity
# PARALLEL_UPDATE == None: entity.parallel_updates = Semaphore(1)
# PARALLEL_UPDATE == 0: entity.parallel_updates = None
# PARALLEL_UPDATE > 0: entity.parallel_updates = Semaphore(p)
if hasattr(entity, 'async_update') and not self.parallel_updates:
entity.parallel_updates = None
elif (not hasattr(entity, 'async_update')
and self.parallel_updates == 0):
entity.parallel_updates = None
else:
entity.parallel_updates = self._get_parallel_updates_semaphore()
# Update properties before we generate the entity_id
if update_before_add:
try:
await entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"%s: Error on device update!", self.platform_name)
return
suggested_object_id = None
# Get entity_id from unique ID registration
if entity.unique_id is not None:
if entity.entity_id is not None:
suggested_object_id = split_entity_id(entity.entity_id)[1]
else:
suggested_object_id = entity.name
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(
self.entity_namespace, suggested_object_id)
if self.config_entry is not None:
config_entry_id = self.config_entry.entry_id
else:
config_entry_id = None
device_info = entity.device_info
device_id = None
if config_entry_id is not None and device_info is not None:
processed_dev_info = {
'config_entry_id': config_entry_id
}
for key in (
'connections',
'identifiers',
'manufacturer',
'model',
'name',
'sw_version',
'via_hub',
):
if key in device_info:
processed_dev_info[key] = device_info[key]
device = device_registry.async_get_or_create(
**processed_dev_info)
if device:
device_id = device.id
entry = entity_registry.async_get_or_create(
self.domain, self.platform_name, entity.unique_id,
suggested_object_id=suggested_object_id,
config_entry_id=config_entry_id,
device_id=device_id,
known_object_ids=self.entities.keys())
if entry.disabled:
self.logger.info(
"Not adding entity %s because it's disabled",
entry.name or entity.name or
'"{} {}"'.format(self.platform_name, entity.unique_id))
return
entity.entity_id = entry.entity_id
entity.registry_name = entry.name
entity.async_on_remove(entry.add_update_listener(entity))
# We won't generate an entity ID if the platform has already set one
# We will however make sure that platform cannot pick a registered ID
elif (entity.entity_id is not None and
entity_registry.async_is_registered(entity.entity_id)):
# If entity already registered, convert entity id to suggestion
suggested_object_id = split_entity_id(entity.entity_id)[1]
entity.entity_id = None
# Generate entity ID
if entity.entity_id is None:
suggested_object_id = \
suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(self.entity_namespace,
suggested_object_id)
entity.entity_id = entity_registry.async_generate_entity_id(
self.domain, suggested_object_id, self.entities.keys())
# Make sure it is valid in case an entity set the value themselves
if not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
if (entity.entity_id in self.entities or
entity.entity_id in self.hass.states.async_entity_ids(
self.domain)):
msg = 'Entity id already exists: {}'.format(entity.entity_id)
if entity.unique_id is not None:
msg += '. Platform {} does not generate unique IDs'.format(
self.platform_name)
raise HomeAssistantError(msg)
entity_id = entity.entity_id
self.entities[entity_id] = entity
entity.async_on_remove(lambda: self.entities.pop(entity_id))
await entity.async_added_to_hass()
await entity.async_update_ha_state()
async def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
if not self.entities:
return
tasks = [self.async_remove_entity(entity_id)
for entity_id in self.entities]
await asyncio.wait(tasks)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
async def async_remove_entity(self, entity_id):
"""Remove entity id from platform."""
await self.entities[entity_id].async_remove()
# Clean up polling job if no longer needed
if (self._async_unsub_polling is not None and
not any(entity.should_poll for entity
in self.entities.values())):
self._async_unsub_polling()
self._async_unsub_polling = None
async def _update_entity_states(self, now):
"""Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
        in parallel and other entities sequentially.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform_name, self.domain,
self.scan_interval)
return
async with self._process_updates:
tasks = []
for entity in self.entities.values():
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
await asyncio.wait(tasks)
| 38.491726 | 78 | 0.605577 | ["Apache-2.0"] | crazyfish1111/home-assistant | homeassistant/helpers/entity_platform.py | 16,282 | Python |
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from typing import Optional, Union, Any, Dict
from pypbbot.driver import AffairDriver
from pypbbot.typing import Event
from pypbbot.utils import Clips
from pypbbot.protocol import GroupMessageEvent, PrivateMessageEvent
from enum import Enum
import asyncio
from pypbbot.logging import logger
from pypbbot.utils import sendBackClipsTo
__all__ = ['HandlerPriority', 'BaseAffair', 'ChatAffair']
class HandlerPriority(Enum):
    SYSTEM = 0  # SHOULD NOT BE USED BY PLUGINS
VERY_HIGH = 1
HIGH = 2
NORMAL = 3
LOW = 4
VERY_LOW = 5
def __lt__(self, other: object) -> bool:
if not isinstance(other, HandlerPriority):
return NotImplemented
return self.value < other.value
class BaseAffair:
def __init__(self, driver: AffairDriver, event: Event) -> None:
logger.debug(
'A new affair has been created for event [{}]'.format(type(event)))
self.event: Optional[Event] = event
self.driver: AffairDriver = driver
self.states: Dict[str, Any] = {}
self.finished: bool = False
return
class ChatAffair(BaseAffair):
def __init__(self, driver: AffairDriver, event: Union[GroupMessageEvent, PrivateMessageEvent], sender_id: int) -> None:
self.event: Union[GroupMessageEvent, PrivateMessageEvent] = event
self.driver: AffairDriver = driver
self.receiver_id: int = event.self_id
self.sender_id: int = sender_id
self.raw_message: str = event.raw_message
return
async def send(self, clips: Union[Clips, str, int, float]) -> Any:
return await sendBackClipsTo(self.event, clips)
def sendAndWait(self, clips: Union[Clips, str, int, float]) -> Any:
return asyncio.run(self.send(clips))
| 31.87931 | 123 | 0.685776 | ["MIT"] | PHIKN1GHT/pypbbot_archived | pypbbot/affairs/builtin.py | 1,849 | Python |
# The most basic of settings to get the app to run as an example, should *never* be used in a
# production environment.
import os
import dj_database_url
DATABASES = {}
db_url = os.environ.get('DATABASE_URL', '')
if db_url:
DATABASES['default'] = dj_database_url.parse(db_url, conn_max_age=600, ssl_require=True)
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dr.sqlite3',
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.messages',
'keybase_proofs',
'test_app',
)
DEBUG = True
ALLOWED_HOSTS = ['*']
SECRET_KEY = '_'
SITE_ID = 1
ROOT_URLCONF = 'test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
],
},
},
]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Must match the `domain` set in the config.
KEYBASE_PROOFS_DOMAIN = '<your-domain.com>'
| 27.885714 | 93 | 0.653176 | ["BSD-3-Clause"] | AngelKey/Angelkey.proofintegration | test_app/settings.py | 1,952 | Python |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.input_utils import concatenate
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.comms import CommsContext
from cuml.dask.common.comms import worker_state
from cuml.dask.common.utils import raise_exception_from_futures
from dask.distributed import wait
from cuml.common.memory_utils import with_cupy_rmm
class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
"""
Multi-Node Multi-GPU implementation of KMeans.
This version minimizes data transfer by sharing only
the centroids between workers in each iteration.
Predictions are done embarrassingly parallel, using cuML's
single-GPU version.
For more information on this implementation, refer to the
documentation for single-GPU K-Means.
Parameters
----------
handle : cuml.Handle
If it is None, a new one is created just for this class.
n_clusters : int (default = 8)
The number of centroids or clusters you want.
max_iter : int (default = 300)
The more iterations of EM, the more accurate, but slower.
tol : float (default = 1e-4)
Stopping criterion when centroid means do not change much.
verbose : int or boolean (default = False)
Logging level for printing diagnostic information
random_state : int (default = 1)
If you want results to be the same when you restart Python,
select a state.
init : {'scalable-kmeans++', 'k-means||' , 'random' or an ndarray}
(default = 'scalable-k-means++')
'scalable-k-means++' or 'k-means||': Uses fast and stable scalable
        kmeans++ initialization.
'random': Choose 'n_cluster' observations (rows) at random
from data for the initial centroids. If an ndarray is passed,
it should be of shape (n_clusters, n_features) and gives the
initial centers.
    oversampling_factor : int (default = 2) The number of points to sample
in scalable k-means++ initialization for potential centroids.
Increasing this value can lead to better initial centroids at the
cost of memory. The total number of centroids sampled in scalable
k-means++ is oversampling_factor * n_clusters * 8.
max_samples_per_batch : int (default = 32768) The number of data
samples to use for batches of the pairwise distance computation.
        This computation is done throughout both fit and predict. The default
should suit most cases. The total number of elements in the
batched pairwise distance computation is max_samples_per_batch
* n_clusters. It might become necessary to lower this number when
n_clusters becomes prohibitively large.
Attributes
----------
cluster_centers_ : cuDF DataFrame or CuPy ndarray
The coordinates of the final clusters. This represents of "mean" of
each data cluster.
"""
def __init__(self, client=None, verbose=False, **kwargs):
super(KMeans, self).__init__(client=client,
verbose=verbose,
**kwargs)
@staticmethod
@mnmg_import
def _func_fit(sessionId, objs, datatype, **kwargs):
from cuml.cluster.kmeans_mg import KMeansMG as cumlKMeans
handle = worker_state(sessionId)["handle"]
inp_data = concatenate(objs)
return cumlKMeans(handle=handle, output_type=datatype,
**kwargs).fit(inp_data)
@staticmethod
def _score(model, data):
ret = model.score(data)
return ret
@with_cupy_rmm
def fit(self, X):
"""
Fit a multi-node multi-GPU KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Training data to cluster.
"""
data = DistributedDataHandler.create(X, client=self.client)
self.datatype = data.datatype
comms = CommsContext(comms_p2p=False)
comms.init(workers=data.workers)
kmeans_fit = [self.client.submit(KMeans._func_fit,
comms.sessionId,
wf[1],
self.datatype,
**self.kwargs,
workers=[wf[0]],
pure=False)
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(kmeans_fit)
raise_exception_from_futures(kmeans_fit)
comms.destroy()
self.local_model = kmeans_fit[0].result()
self.cluster_centers_ = self.local_model.cluster_centers_
return self
def fit_predict(self, X, delayed=True):
"""
Compute cluster centers and predict cluster index for each sample.
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self.fit(X).predict(X, delayed=delayed)
def predict(self, X, delayed=True):
"""
Predict labels for the input
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to do a lazy prediction (and return Delayed objects) or an
eagerly executed one.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self._predict(X, delayed=delayed)
def fit_transform(self, X, delayed=True):
"""
Calls fit followed by transform using a distributed KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self.fit(X).transform(X, delayed=delayed)
def transform(self, X, delayed=True):
"""
Transforms the input into the learned centroid space
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm
def score(self, X):
"""
Computes the inertia score for the trained KMeans centroids.
Parameters
----------
X : dask_cudf.Dataframe
Dataframe to compute score
Returns
-------
Inertial score
"""
scores = self._run_parallel_func(KMeans._score,
X,
n_dims=1,
delayed=False,
output_futures=True)
return -1 * cp.sum(cp.asarray(
self.client.compute(scores, sync=True))*-1.0)
def get_param_names(self):
return list(self.kwargs.keys())
| 33.582677 | 78 | 0.617468 | ["Apache-2.0"] | Chetank99/cuml | python/cuml/dask/cluster/kmeans.py | 8,530 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.VariableV1(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
self.assertEqual(p._weight_sparsity_map["conv1"], 0.8)
self.assertEqual(p._weight_sparsity_map["conv2/kernel"], 0.8)
def testInitWithExternalSparsity(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.cached_session():
weights = variables.VariableV1(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.cached_session() as session:
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.95, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 5)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.VariableV1(0.0, name="threshold")
sparsity = variables.VariableV1(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.cached_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.cached_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.VariableV1(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.cached_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
    # Weights pruned at steps 0, 2, 4, and 6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
def testWeightSpecificSparsity(self):
param_list = [
"begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
"target_sparsity=0.5",
"weight_sparsity_map=[layer1:0.6,layer2/weights:0.75,.*kernel:0.6]",
"threshold_decay=0.0"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
with variable_scope.variable_scope("layer1"):
w1 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w1)
with variable_scope.variable_scope("layer2"):
w2 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w2)
with variable_scope.variable_scope("layer3"):
w3 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="kernel")
_ = pruning.apply_mask(w3)
p = pruning.Pruning(pruning_hparams)
mask_update_op = p.conditional_mask_update_op()
increment_global_step = state_ops.assign_add(self.global_step, 1)
with self.cached_session() as session:
variables.global_variables_initializer().run()
for _ in range(110):
session.run(mask_update_op)
session.run(increment_global_step)
self.assertAllClose(
session.run(pruning.get_weight_sparsity()), [0.6, 0.75, 0.6])
if __name__ == "__main__":
test.main()
| 41.84252 | 80 | 0.684136 | ["Apache-2.0"] | 1244783394/tensorflow | tensorflow/contrib/model_pruning/python/pruning_test.py | 10,628 | Python |
"""Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
import torch.utils.data as data
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/ssd300_mAP_77.43_v2.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.5, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=False, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
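# Typical invocation (paths are placeholders; the script name depends on how
# this file is saved):
#   python <this_script>.py --trained_model weights/ssd300_mAP_77.43_v2.pth \
#       --voc_root ~/data/VOCdevkit/ --cuda true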
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
if sys.platform.startswith("linux"):
    imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets', 'Main', '{:s}.txt')  # on Linux
if sys.platform.startswith("win"):
    imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets', 'Main', '{}.txt')  # on Windows
YEAR = '2007'
devkit_path = args.voc_root + 'VOC' + YEAR
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_output_dir(name, phase):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
    A canonical path is built from the given name and phase.
"""
filedir = os.path.join(name, phase)
if not os.path.exists(filedir):
os.makedirs(filedir)
return filedir
def get_voc_results_file_template(image_set, cls):
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_' + image_set + '_%s.txt' % (cls)
filedir = os.path.join(devkit_path, 'results')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def write_voc_results_file(all_boxes, dataset):
for cls_ind, cls in enumerate(labelmap):
print('Writing {:s} VOC results file'.format(cls))
filename = get_voc_results_file_template(set_type, cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(dataset.ids):
dets = all_boxes[cls_ind+1][im_ind]
if dets == []:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index[1], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = use_07
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(labelmap):
filename = get_voc_results_file_template(set_type, cls)
rec, prec, ap = voc_eval(
filename, annopath, imgsetpath.format(set_type), cls, cachedir,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
    if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
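# --- Illustrative sketch (not part of the original script) ---
# voc_eval reads the per-class detection file written above, where each line is
# "<image_id> <score> <xmin> <ymin> <xmax> <ymax>". The call below mirrors
# do_python_eval for a single class ('dog' is one of the VOC labels); the paths
# come from the same module-level settings used elsewhere in this script.
def _voc_eval_example():
    detfile = get_voc_results_file_template(set_type, 'dog')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    return voc_eval(detfile, annopath, imgsetpath.format(set_type), 'dog',
                    cachedir, ovthresh=0.5, use_07_metric=True)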
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(labelmap)+1)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer()}
output_dir = get_output_dir('ssd300_120000', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
_t['im_detect'].tic()
detections = net(x).data
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
write_voc_results_file(box_list, dataset)
do_python_eval(output_dir)
if __name__ == '__main__':
# load net
num_classes = len(labelmap) + 1 # +1 for background
net = build_ssd('test', 300, num_classes) # initialize SSD
#net.load_state_dict(torch.load(args.trained_model))
net.load_state_dict(torch.load(args.trained_model, map_location='cpu')) # running on a CPU-only machine
net.eval()
print('Finished loading model!')
# load data
dataset = VOCDetection(args.voc_root,
[('2007', set_type)],
BaseTransform(300, dataset_mean),
VOCAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
| 36.43018 | 109 | 0.563277 | [
"MIT"
] | FLyingLSJ/ssd.pytorch | eval.py | 16,187 | Python |
# Webhooks for external integrations.
from functools import partial
from typing import Any, Callable, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, \
check_send_webhook_message, get_http_headers_from_filename, \
validate_extract_webhook_http_header
from zerver.models import UserProfile
TICKET_STARTED_TEMPLATE = """
{customer_name} submitted new ticket [#{number}: {title}]({app_url}):
``` quote
{summary}
```
""".strip()
TICKET_ASSIGNED_TEMPLATE = "[#{number}: {title}]({app_url}) ({state}) assigned to {assignee_info}."
AGENT_REPLIED_TEMPLATE = """
{actor} {action} [ticket #{number}]({app_ticket_url}):
``` quote
{plain_text_body}
```
""".strip()
def ticket_started_body(payload: Dict[str, Any]) -> str:
return TICKET_STARTED_TEMPLATE.format(**payload)
def ticket_assigned_body(payload: Dict[str, Any]) -> Optional[str]:
state = payload['state']
kwargs = {
'state': 'open' if state == 'opened' else state,
'number': payload['number'],
'title': payload['title'],
'app_url': payload['app_url']
}
assignee = payload['assignee']
assigned_group = payload['assigned_group']
if assignee or assigned_group:
if assignee and assigned_group:
kwargs['assignee_info'] = '{assignee} from {assigned_group}'.format(**payload)
elif assignee:
kwargs['assignee_info'] = '{assignee}'.format(**payload)
elif assigned_group:
kwargs['assignee_info'] = '{assigned_group}'.format(**payload)
return TICKET_ASSIGNED_TEMPLATE.format(**kwargs)
else:
return None
def replied_body(payload: Dict[str, Any], actor: str, action: str) -> str:
actor_url = "http://api.groovehq.com/v1/{}/".format(actor + 's')
actor = payload['links']['author']['href'].split(actor_url)[1]
number = payload['links']['ticket']['href'].split("http://api.groovehq.com/v1/tickets/")[1]
body = AGENT_REPLIED_TEMPLATE.format(
actor=actor,
action=action,
number=number,
app_ticket_url=payload['app_ticket_url'],
plain_text_body=payload['plain_text_body']
)
return body
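# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the payload shape replied_body expects; every field value
# below is made up and only shows how the template above gets filled in.
def _example_replied_body() -> str:
    sample_payload = {
        'links': {
            'author': {'href': 'http://api.groovehq.com/v1/agents/alice'},
            'ticket': {'href': 'http://api.groovehq.com/v1/tickets/42'},
        },
        'app_ticket_url': 'https://example.groovehq.com/tickets/42',
        'plain_text_body': 'Thanks, looking into it now.',
    }
    return replied_body(sample_payload, actor='agent', action='replied to')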
def get_event_handler(event: str) -> Callable[..., str]:
    # The main reason this function exists is to keep mypy happy
handler = EVENTS_FUNCTION_MAPPER.get(event) # type: Any
if handler is None:
raise UnexpectedWebhookEventType("Groove", event)
return handler
@api_key_only_webhook_view('Groove')
@has_request_variables
def api_groove_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
event = validate_extract_webhook_http_header(request, 'X_GROOVE_EVENT', 'Groove')
assert event is not None
handler = get_event_handler(event)
body = handler(payload)
topic = 'notifications'
if body is not None:
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
EVENTS_FUNCTION_MAPPER = {
'ticket_started': ticket_started_body,
'ticket_assigned': ticket_assigned_body,
'agent_replied': partial(replied_body, actor='agent', action='replied to'),
'customer_replied': partial(replied_body, actor='customer', action='replied to'),
'note_added': partial(replied_body, actor='agent', action='left a note on')
}
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GROOVE_EVENT")
| 34.308411 | 99 | 0.701989 | [
"Apache-2.0"
] | D-MaaS/zulip | zerver/webhooks/groove/view.py | 3,671 | Python |
import unittest
import bibdeskparser
from bibdeskparser.bparser import BibTexParser
from tempfile import TemporaryFile
class TestbibdeskparserParserMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
input_bom_file_path = 'tests/data/book_bom.bib'
entries_expected = [
{
'ENTRYTYPE': 'book',
'year': '1987',
'edition': '2',
'publisher': 'Wiley Edition',
'ID': 'Bird1987',
'volume': '1',
'title': 'Dynamics of Polymeric Liquid',
'author': 'Bird, R.B. and Armstrong, R.C. and Hassager, O.',
}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_str(self):
parser = BibTexParser()
with open(self.input_bom_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_bom_bytes(self):
parser = BibTexParser()
with open(self.input_bom_file_path, 'rb') as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = parser.parse(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file(self):
parser = BibTexParser()
with open(self.input_file_path) as bibtex_file:
bibtex_database = parser.parse_file(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_str_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = bibdeskparser.loads(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
def test_parse_file_module(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
self.assertEqual(bibtex_database.entries, self.entries_expected)
class TestBibtexparserWriteMethods(unittest.TestCase):
input_file_path = 'tests/data/book.bib'
expected = """@book{Bird1987,
author = {Bird, R.B. and Armstrong, R.C. and Hassager, O.},
edition = {2},
publisher = {Wiley Edition},
title = {Dynamics of Polymeric Liquid},
volume = {1},
year = {1987}
}
"""
def test_write_str(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
result = bibdeskparser.dumps(bibtex_database)
self.assertEqual(result, self.expected)
def test_write_file(self):
with open(self.input_file_path) as bibtex_file:
bibtex_database = bibdeskparser.load(bibtex_file)
with TemporaryFile(mode='w+') as bibtex_out_file:
bibdeskparser.dump(bibtex_database, bibtex_out_file)
bibtex_out_file.seek(0)
bibtex_out_str = bibtex_out_file.read()
self.assertEqual(bibtex_out_str, self.expected)
class TestbibdeskparserFieldNames(unittest.TestCase):
input_file_path = 'tests/data/fieldname.bib'
entries_expected = [
{'ENTRYTYPE': 'book', 'ID': 'Bird1987', 'dc.date': '2004-01'}
]
def test_parse_immediately(self):
with open(self.input_file_path) as bibtex_file:
bibtex_str = bibtex_file.read()
bibtex_database = BibTexParser(bibtex_str)
self.assertEqual(bibtex_database.entries, self.entries_expected)
if __name__ == '__main__':
unittest.main()
| 35.587719 | 72 | 0.676608 | [
"BSD-3-Clause"
] | goerz/bibdeskparser | tests/test_bibtexparser.py | 4,057 | Python |
import pandas as pd
import tweepy
from textblob import TextBlob
from wordcloud import WordCloud
import plotly.graph_objs as go
import os
import re
import pystan
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from GoogleNews import GoogleNews
from ta.volatility import BollingerBands
from ta.trend import MACD
from ta.momentum import RSIIndicator
import datetime
import base64
import plotly.express as px
import requests
from bs4 import BeautifulSoup
from datetime import date
from plotly import graph_objs
st.set_page_config(
layout="wide",
initial_sidebar_state="auto",
page_title= "Finance-Forcasting-Dashboard",
page_icon= "Images/growth.png",
)
col1, col2, col3 = st.beta_columns([1,2,1])
col1.write("")
col2.image("Images/LL.png", width = 500)
col3.write("")
st.set_option('deprecation.showPyplotGlobalUse', False)
main_bg = "Images/BACK.png"
main_bg_ext = "Images/BACK.png"
st.markdown(
f"""
<style>
.reportview-container {{
background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
}}
</style>
""",
unsafe_allow_html=True
)
###############################Funtions############################
# load data from yahoo finance
def load_data(ticker):
start = "2020-01-01"
today = date.today().strftime("%Y-%m-%d")
data = yf.download(ticker, start, today)
data.reset_index(inplace=True)
return data
# Plot raw data
def plot_raw_data():
fig = graph_objs.Figure()
fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
def get_forecast(data):
model = Prophet()
model.fit(data)
future = model.make_future_dataframe(periods=7)
forecast = model.predict(future)
return model, forecast
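# --- Illustrative sketch (not part of the original app) ---
# A small hedged helper showing the Prophet output columns this app relies on
# ('ds', 'yhat', 'yhat_lower', 'yhat_upper'); it is defined for illustration and
# never called by the dashboard.
def forecast_summary(forecast, days=7):
    return forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(days)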
@st.cache
def read_data():
url = "https://raw.githubusercontent.com/emrecanaltinsoy/forex_data/main/forex_usd_data.csv"
data = pd.read_csv(url)
cols = data.columns
return data, cols[1:]
@st.cache
def get_range(data, date_range):
start_index = data.index[data["date(y-m-d)"] == str(date_range[0])].tolist()[0]
end_index = data.index[data["date(y-m-d)"] == str(date_range[1])].tolist()[0]
data = data.iloc[start_index : end_index + 1]
cols = data.columns
dates = data["date(y-m-d)"]
return data, dates
@st.cache
def scrape_currency():
today = datetime.date.today()
base_url = "https://www.x-rates.com/historical/?from=USD&amount=1&date"
year = today.year
month = today.month if today.month > 9 else f"0{today.month}"
day = today.day if today.day > 9 else f"0{today.day}"
URL = f"{base_url}={year}-{month}-{day}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
table = soup.find_all("tr")[12:]
currencies = [table[i].text.split("\n")[1:3][0] for i in range(len(table))]
currencies.insert(0, "date(y-m-d)")
currencies.insert(1, "American Dollar")
rates = [table[i].text.split("\n")[1:3][1] for i in range(len(table))]
rates.insert(0, f"{year}-{month}-{day}")
rates.insert(1, "1")
curr_data = {currencies[i]: rates[i] for i in range(len(rates))}
curr_data = pd.DataFrame(curr_data, index=[0])
cols = curr_data.columns
return curr_data, cols[1:]
@st.cache
def train_model(data, currency, period):
df_train = data[["date(y-m-d)", currency]]
df_train = df_train.iloc[-365*2 :]
df_train = df_train.rename(columns={"date(y-m-d)": "ds", currency: "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
return forecast, m
df_all, columns = read_data()
################################################################################
st.sidebar.image("Images/Menu.png", width = 330)
menu = ["Home","STOCKS Live Forcasting", "Crypto-Live Forcasting","View Historical Currency Charts", "Check Live Currency Exchange rates", "Forecast Currency Live Prices"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Home":
st.write("")
st.write(""" <p style=" font-size: 15px; font-weight:normal; font-family:verdana"> Finance Dashboard is a special web service that allows you to view Cryptocurrencies,Stocks,and Live Currency Values by many useful methods (technical indicators, graphical patterns, sentimental analysis, and more). Trading and crypto investing requires constant analysis and monitoring. Traders need to track all their trades in order to improve results and find errors. If you don't use additional instruments, then trading will be unsystematic, and the results will be uncertain. Such a service will be useful and even extremely necessary for those who trade and invest in cryptocurrencies and Stocks. Competent selection of cryptocurrencies is at least half of investment success. Finance Dashboard has a simple interface and is great for quick analysis of the Stock market. </p>
""", unsafe_allow_html=True)
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write(""" <p style=" color:#E75480; font-size: 30px; font-weight:bold"> How does it work? </p>
""", unsafe_allow_html=True)
st.write("")
st.image("Images/How.png", width = 1300)
st.sidebar.write(" ")
st.sidebar.write(" ")
st.sidebar.image("Images/info.png", width = 300)
elif choice == "STOCKS Live Forcasting":
st.title('Stocks Weekly Forecast')
st.subheader('Enter the stock ticker:')
ticker = st.text_input('example: GOOG')
    ticker = ticker.upper()
if len(ticker)>0:
data_load_state = st.text('Loading data...')
data = load_data(ticker)
if data.empty:
data_load_state.text(f'No ticker named {ticker}')
ticker = ''
else:
data_load_state.text('Loading data... done!')
st.subheader(f'Company: {yf.Ticker(ticker).info["longName"]}')
st.write(data.head())
plot_raw_data()
# prepare data for forecasting
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
# train and forecast
model, forecast = get_forecast(df_train)
st.subheader('Forecast')
# plot forecast
st.write(f'Forecast plot for the next week')
fig = plot_plotly(model, forecast)
st.plotly_chart(fig)
elif choice == "View Historical Currency Charts":
st.write("This app can be used to view historical **currency** charts!")
date_range = st.date_input(
"Choose date range",
value=(
datetime.date(2011, 1, 1),
datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
),
min_value=datetime.date(2011, 1, 1),
max_value=datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
)
df, dates = get_range(df_all, date_range)
selected_curr = st.multiselect("Select currencies", columns)
ok = st.button("View")
if ok:
if selected_curr:
# st.write(df[selected_curr])
for curr in selected_curr:
fig = px.line(
x=dates,
y=df[curr],
)
fig.update_layout(
xaxis_title="Date",
yaxis_title=curr,
)
st.write(fig)
elif choice == "Check Live Currency Exchange rates":
st.write("This app can be used to check current **currency** data!")
daily_df, columns = scrape_currency()
base_curr = st.selectbox("Select the base currency", columns)
selected_curr = st.multiselect("Select currencies", columns)
if selected_curr:
base = daily_df[base_curr].astype(float)
selected = daily_df[selected_curr].astype(float)
converted = selected / float(base)
st.write(converted)
elif choice == "Forecast Currency Live Prices":
currency = st.selectbox("Select the currency for prediction", columns)
n_weeks = st.slider("Weeks of prediction", 4, 20, 8, 1)
ok = st.button("Predict")
if ok:
train_state = st.text("Training the model...")
pred, model = train_model(df_all, currency, period=n_weeks * 7)
train_state.text("Model training completed!!")
st.subheader("Forecast data")
fig1 = plot_plotly(model, pred)
st.plotly_chart(fig1)
elif choice == "Crypto-Live Forcasting":
st.sidebar.header("Please select cryptocurrency")
option = st.sidebar.selectbox("Ticker Symbol",("BTC-USD", "ETH-USD", "XRP-USD", "DOGE-USD", "ADA-USD", "BNB-USD", "LTC-USD",))
today = datetime.date.today()
before = today - datetime.timedelta(days=1400)
start_date = st.sidebar.date_input('Start date', before)
end_date = st.sidebar.date_input('End date', today)
if start_date < end_date:
st.sidebar.success("Start date: `%s`\n\nEnd date: `%s` " % (start_date, end_date))
else:
st.sidebar.error("Error: End date must fall after start date.")
@st.cache(allow_output_mutation = True)
def get_data(option, start_date, end_date):
df = yf.download(option,start= start_date,end = end_date, progress=False)
return df
# Getting API_KEYS
api_key = os.environ.get("Key")
api_secret = os.environ.get("Secret")
# Function for getting tweets
# Create authentication
@st.cache(allow_output_mutation = True)
def get_tweets(key, secret, search_term):
        # use the credentials passed in rather than re-reading the globals
        authentication = tweepy.OAuthHandler(key, secret)
        api = tweepy.API(authentication)
        term = search_term + " -filter:retweets"
# Create a cursor object
tweets = tweepy.Cursor(api.search, q = term, lang = "en",
since = today, tweet_mode = "extended").items(100)
# Store the tweets
tweets_text = [tweet.full_text for tweet in tweets]
df = pd.DataFrame(tweets_text, columns = ["Tweets"])
return df
# Clean text
@st.cache(allow_output_mutation = True)
def Clean(twt):
twt = re.sub("#cryptocurrency", "cryptocurrency", twt)
twt = re.sub("#Cryptocurrency", "Cryptocurrency", twt)
twt = re.sub("#[A-Za-z0-9]+", "", twt)
twt = re.sub("RT[\s]+", "", twt)
twt = re.sub("\\n", "", twt)
twt = re.sub("https?\://\S+", '', twt)
twt = re.sub("<br />", "", twt)
twt = re.sub("\d","", twt)
twt = re.sub("it\'s", "it is", twt)
twt = re.sub("can\'t", "cannot", twt)
twt = re.sub("<(?:a\b[^>]*>|/a>)", "", twt)
return twt
# Subjectivity and Polarity
@st.cache(allow_output_mutation = True)
def subjectivity(text):
return TextBlob(text).sentiment.subjectivity
@st.cache(allow_output_mutation = True)
def polarity(text):
return TextBlob(text).sentiment.polarity
# Create a function to get sentiment text
@st.cache(allow_output_mutation = True)
def sentiment(score):
if score < 0:
return "Negative"
elif score == 0:
return "Neutral"
else:
return "Positive"
if option == "BTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
#Plot
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Bitcoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ETH-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ETH-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Etherium")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "DOGE-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Dogecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
st.write(" ")
elif option == "XRP-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("XRP")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ADA-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ADA-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("cryptocurrency")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "BNB-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BNB-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("BNB")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "LTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> LTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Litecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
# Sentiment Analysis
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> How generally users feel about cryptocurrency? </p>
""", unsafe_allow_html=True)
st.write(" ")
df = get_tweets(api_key, api_secret, "#cryptocurrency")
df["Tweets"] = df["Tweets"].apply(Clean)
df["Subjectivity"] = df["Tweets"].apply(subjectivity)
df["Polarity"] = df["Tweets"].apply(polarity)
#WordCloud
words = " ".join([twts for twts in df["Tweets"]])
cloud = WordCloud(random_state = 21, max_font_size = 100).generate(words)
plt.imshow(cloud, interpolation = "bilinear")
plt.axis("off")
st.pyplot()
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Sentiment Bar Plot </p>
""", unsafe_allow_html=True)
st.write(" ")
# Get Sentiment tweets
df["Sentiment"] = df["Polarity"].apply(sentiment)
df["Sentiment"].value_counts().plot(kind = "bar", figsize = (10,5))
plt.title("Sentiment Analysis Bar Plot")
plt.xlabel("Sentiment")
plt.ylabel("Number of Tweets")
st.pyplot()
| 27.208048 | 870 | 0.6082 | [
"Apache-2.0"
] | krishnaaxo/Finance-Forcasting-Dashboard | app.py | 31,779 | Python |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="faust_pydantic_validate",
version="0.0.1",
author="Alexey Kuzyashin",
author_email="[email protected]",
description="A small decorator for post data view validation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Kuzyashin/faust-pydantic-validate",
packages=['faust_pydantic_validate'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
"pydantic",
"faust",
],
)
| 28.703704 | 66 | 0.651613 | [
"MIT"
] | Kuzyashin/faust-pydantic-validate | setup.py | 775 | Python |
"""adding cluster control options on every level
Revision ID: a987c6ce888d
Revises: 00c5cc87408d
Create Date: 2018-08-01 18:34:00.415937
"""
import logging
from alembic import op
import sqlalchemy as sa
revision = 'a987c6ce888d'
down_revision = '8079a1cb5874'
branch_labels = None
depends_on = None
logger = logging.getLogger('alembic.' + revision)
CLUSTER_CONF_OPTIONS = ['cluster_enabled', 'cluster_tfidf_enabled',
'cluster_same_category', 'cluster_same_feed',
'cluster_wake_up']
def upgrade():
op.drop_column('category', 'cluster_on_title')
for table in 'user', 'feed', 'category':
logger.info('adding cluster control options on %s', table)
for option in CLUSTER_CONF_OPTIONS:
op.add_column(table, sa.Column(option, sa.Boolean(),
default=None, nullable=True))
op.add_column(table, sa.Column('cluster_conf', sa.PickleType(),
default={}, nullable=True))
logger.info('setting default options to true for users')
op.execute('UPDATE "user" SET %s;'
% ', '.join(["%s=true" % opt for opt in CLUSTER_CONF_OPTIONS]))
for option in CLUSTER_CONF_OPTIONS:
op.alter_column('user', option, nullable=False)
def downgrade():
for table in 'user', 'feed', 'category':
for option in CLUSTER_CONF_OPTIONS:
op.drop_column(table, option)
op.add_column('category', sa.Column('cluster_on_title',
sa.BOOLEAN(), autoincrement=False, nullable=True))
| 35.111111 | 78 | 0.643038 | [
"MIT"
] | hanakhry/Crime_Admin | python-news_aggregator/migrations/versions/20180830_cluster_control.py | 1,580 | Python |
import os
import time
import re
from flask import url_for
from . util import set_original_response, set_modified_response, live_server_setup
import logging
from changedetectionio.notification import default_notification_body, default_notification_title
# Hard to just add more live server URLs when one test is already running (I think)
# So we add our test here (was in a different file)
def test_check_notification(client, live_server):
live_server_setup(live_server)
set_original_response()
# Give the endpoint time to spin up
time.sleep(3)
# Re 360 - new install should have defaults set
res = client.get(url_for("settings_page"))
assert default_notification_body.encode() in res.data
assert default_notification_title.encode() in res.data
# When test mode is in BASE_URL env mode, we should see this already configured
env_base_url = os.getenv('BASE_URL', '').strip()
if len(env_base_url):
logging.debug(">>> BASE_URL enabled, looking for %s", env_base_url)
res = client.get(url_for("settings_page"))
assert bytes(env_base_url.encode('utf-8')) in res.data
else:
logging.debug(">>> SKIPPING BASE_URL check")
# re #242 - when you edited an existing new entry, it would not correctly show the notification settings
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("api_watch_add"),
data={"url": test_url, "tag": ''},
follow_redirects=True
)
assert b"Watch added" in res.data
# Give the thread time to pick up the first version
time.sleep(3)
# Goto the edit page, add our ignore text
# Add our URL to the import page
url = url_for('test_notification_endpoint', _external=True)
notification_url = url.replace('http', 'json')
print (">>>> Notification URL: "+notification_url)
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "BASE URL: {base_url}\n"
"Watch URL: {watch_url}\n"
"Watch UUID: {watch_uuid}\n"
"Watch title: {watch_title}\n"
"Watch tag: {watch_tag}\n"
"Preview: {preview_url}\n"
"Diff URL: {diff_url}\n"
"Snapshot: {current_snapshot}\n"
"Diff: {diff}\n"
"Diff Full: {diff_full}\n"
":-)",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Updated watch." in res.data
assert b"Test notification queued" in res.data
# Hit the edit page, be sure that we saved it
res = client.get(
url_for("edit_page", uuid="first"))
assert bytes(notification_url.encode('utf-8')) in res.data
    # Re #242 - wasn't saving?
assert bytes("New ChangeDetection.io Notification".encode('utf-8')) in res.data
# Because we hit 'send test notification on save'
time.sleep(3)
notification_submission = None
# Verify what was sent as a notification, this file should exist
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
# Did we see the URL that had a change, in the notification?
assert test_url in notification_submission
os.unlink("test-datastore/notification.txt")
set_modified_response()
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(3)
# Did the front end see it?
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
notification_submission=None
# Verify what was sent as a notification
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
# Did we see the URL that had a change, in the notification?
assert test_url in notification_submission
# Diff was correctly executed
assert "Diff Full: Some initial text" in notification_submission
assert "Diff: (changed) Which is across multiple lines" in notification_submission
assert "(-> into) which has this one new line" in notification_submission
if env_base_url:
# Re #65 - did we see our BASE_URl ?
logging.debug (">>> BASE_URL checking in notification: %s", env_base_url)
assert env_base_url in notification_submission
else:
logging.debug(">>> Skipping BASE_URL check")
## Now configure something clever, we go into custom config (non-default) mode, this is returned by the endpoint
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write(";jasdhflkjadshf kjhsdfkjl ahslkjf haslkjd hfaklsj hf\njl;asdhfkasj stuff we will detect\n")
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_urls": "json://foobar.com", #Re #143 should not see that it sent without [test checkbox]
"minutes_between_check": 180,
"fetch_backend": "html_requests",
},
follow_redirects=True
)
assert b"Settings updated." in res.data
    # Re #143 - should not see this if we didn't hit the test box
assert b"Test notification queued" not in res.data
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(3)
# Did the front end see it?
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
print ("Notification submission was:", notification_submission)
# Re #342 - check for accidental python byte encoding of non-utf8/string
assert "b'" not in notification_submission
assert re.search('Watch UUID: [0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}', notification_submission, re.IGNORECASE)
assert "Watch title: my title" in notification_submission
assert "Watch tag: my tag" in notification_submission
assert "diff/" in notification_submission
assert "preview/" in notification_submission
assert ":-)" in notification_submission
assert "New ChangeDetection.io Notification - {}".format(test_url) in notification_submission
# This should insert the {current_snapshot}
assert "stuff we will detect" in notification_submission
# Prove that "content constantly being marked as Changed with no Updating causes notification" is not a thing
# https://github.com/dgtlmoon/changedetection.io/discussions/192
os.unlink("test-datastore/notification.txt")
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
assert os.path.exists("test-datastore/notification.txt") == False
# Now adding a wrong token should give us an error
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "Rubbish: {rubbish}\n",
"notification_format": "Text",
"notification_urls": "json://foobar.com",
"minutes_between_check": 180,
"fetch_backend": "html_requests"
},
follow_redirects=True
)
assert bytes("is not a valid token".encode('utf-8')) in res.data
# Re #360 some validation
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "",
"notification_body": "",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Notification Body and Title is required when a Notification URL is used" in res.data
| 38.991266 | 121 | 0.634786 | [
"Apache-2.0"
] | Pritam-Patra/changedetection.io | changedetectionio/tests/test_notification.py | 8,929 | Python |
import json
import pickle
from TwitterAPI import TwitterAPI
with open("api_key.json") as json_data:
all_keys = json.load(json_data)
consumer_key = all_keys["consumer_key"]
consumer_secret = all_keys["consumer_secret"]
access_token_key = all_keys["access_token_key"]
access_token_secret = all_keys["access_token_secret"]
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
master_ID = "116568685"
count = 25
def who_follows(ID):
page_cursor = get_pickle()
r = api.request("followers/ids", {"user_id":ID, "cursor":page_cursor, "count":count})
print(r.status_code)
parse_response = r.json()
    IDS = list(parse_response["ids"])
    # advance to the next page with the cursor returned by the Twitter API
    page_cursor = parse_response["next_cursor"]
print(page_cursor)
make_pickle(page_cursor)
print(IDS)
return IDS
def make_pickle(obj):
with open("objs.pkl", "wb") as f:
pickle.dump(obj, f)
def get_pickle():
with open("objs.pkl", "rb") as f:
obj = pickle.load(f)
print(obj)
return obj
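# --- Illustrative sketch (not part of the original script) ---
# A hedged example of how the cursor-based pagination above might be driven:
# each call to who_follows() fetches the next page of follower IDs while the
# pickled cursor remembers where the previous call stopped. The page count is
# arbitrary and the function is not called automatically.
def fetch_follower_pages(pages=3):
    all_ids = []
    for _ in range(pages):
        all_ids.extend(who_follows(master_ID))
    return all_ids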
| 25.093023 | 89 | 0.677479 | [
"MIT"
] | hallowf/MotivationalBinary | search_to_follow.py | 1,079 | Python |
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
New 'minconn' connections are created immediately calling 'connfunc'
with given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
Note that this can lead to some code fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
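# A minimal usage sketch (not part of psycopg2 itself): the DSN below is a
# placeholder and error handling is kept to the bare minimum.
def _threaded_pool_example():
    pool = ThreadedConnectionPool(1, 5, dsn="dbname=example user=example")
    conn = pool.getconn()
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT 1")
            cur.fetchone()
    finally:
        pool.putconn(conn)
    pool.closeall()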
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
    def closeall(self):
        """Close all connections (even the ones currently in use)."""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
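# --- Editor's note: a minimal usage sketch of the ThreadedConnectionPool
# defined above, kept as comments. The DSN is a placeholder, not a real
# database; adapt it before running.
#
#   pool = ThreadedConnectionPool(minconn=1, maxconn=5,
#                                 dsn="dbname=test user=postgres")
#   conn = pool.getconn()
#   try:
#       with conn.cursor() as cur:
#           cur.execute("SELECT 1")
#   finally:
#       pool.putconn(conn)
#   pool.closeall()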
| 34.474576 | 78 | 0.615044 | [
"MIT"
] | ALEXIS2ES/sherom-Serve | lexis/Lib/site-packages/psycopg2/pool.py | 8,136 | Python |
import frappe, re
from renovation_service_provider_manager import invoke_mediator
@frappe.whitelist(allow_guest=True)
def get_service_provider_client_id(provider):
k = f"client_id_{re.sub('[^0-9a-zA-Z]+', '_', provider.lower())}"
client_id = frappe.cache().get_value(k)
if client_id:
return client_id
client_id = get_client_id_from_mediator(provider)
frappe.cache().set_value(k, client_id, expires_in_sec=18000) # 5hr
return client_id
def get_client_id_from_mediator(provider):
  r = invoke_mediator(
      "/api/method/renovation_mediator.api.get_service_provider_client_id",
      {"provider": provider})
  try:
    r.raise_for_status()
    return r.json()["message"]
  except Exception:
    # surface the mediator's error body rather than failing with a NameError on r
    frappe.throw(r.text)
| 31.434783 | 117 | 0.749654 | [
"MIT"
] | leam-tech/renovation_service_provider_manager | renovation_service_provider_manager/api/__init__.py | 723 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from math import pi
from numpy import sign, nan, append, zeros, array, sqrt, where
from numpy import max as max_
from pandas import Series, DataFrame, concat
from pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS
from pandapower.pypower.idx_cost import COST, NCOST
from pandapower.pypower.idx_bus import BUS_I, BASE_KV
import pandapower as pp
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from pypower import ppoption, runpf, runopf, rundcpf, rundcopf
ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)
pypower_import = True
except ImportError:
pypower_import = False
ppc_elms = ["bus", "branch", "gen"]
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
raise NotImplementedError
pp.create_pwl_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
ncost = ppc['gencost'][idx, NCOST]
if ncost == 1:
cp2 = 0
cp1 = 0
cp0 = ppc['gencost'][idx, COST]
elif ncost == 2:
cp2 = 0
cp1 = ppc['gencost'][idx, COST]
cp0 = ppc['gencost'][idx, COST + 1]
elif ncost == 3:
cp2 = ppc['gencost'][idx, COST]
cp1 = ppc['gencost'][idx, COST + 1]
cp0 = ppc['gencost'][idx, COST + 2]
elif ncost > 3:
logger.warning("The pandapower poly_cost table only supports up to 2nd order " +
"polynomials. The ppc higher order polynomials cannot be converted.")
cp2 = ppc['gencost'][idx, COST + ncost - 3]
cp1 = ppc['gencost'][idx, COST + ncost - 2]
cp0 = ppc['gencost'][idx, COST + ncost - 1]
else:
            raise ValueError("'ncost' must be a positive integer but is " + str(ncost))
pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def _gen_bus_info(ppc, idx_gen):
bus_name = int(ppc["gen"][idx_gen, GEN_BUS])
# assumption: there is only one bus with this bus_name:
idx_bus = int(where(ppc["bus"][:, BUS_I] == bus_name)[0][0])
current_bus_type = int(ppc["bus"][idx_bus, 1])
same_bus_gen_idx = where(ppc["gen"][:, GEN_BUS] == ppc["gen"][idx_gen, GEN_BUS])[0].astype(int)
same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc["gen"][same_bus_gen_idx, GEN_STATUS] > 0)]
first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(
same_bus_in_service_gen_idx) else None
last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(
same_bus_in_service_gen_idx) else None
return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
"""
This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
For running the validation, the ppc must already contain the pypower
powerflow results or pypower must be importable.
****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True
OUTPUT:
**net** : pandapower net.
EXAMPLE:
import pandapower.converter as pc
from pypower import case4gs
ppc_net = case4gs.case4gs()
net = pc.from_ppc(ppc_net, f_hz=60)
"""
# --- catch common failures
if Series(ppc['bus'][:, BASE_KV] <= 0).any():
logger.info('There are false baseKV given in the pypower case file.')
# --- general_parameters
baseMVA = ppc['baseMVA'] # MVA
omega = pi * f_hz # 1/s
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)
# --- bus data -> create buses, sgen, load, shunt
for i in range(len(ppc['bus'])):
# create buses
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 10], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
# create sgen, load
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
# create shunt
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],
q_mvar=-ppc['bus'][i, 5])
# unused data of ppc: Vm, Va (partwise: in ext_grid), zone
# --- gen data -> create ext_grid, gen, sgen
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
for i in range(len(ppc['gen'][:, 0])):
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)
# create ext_grid
if current_bus_type == 3:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create gen
elif current_bus_type == 2:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
p_mw=ppc['gen'][i, 1],
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create sgen
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],
q_mvar=ppc['gen'][i, 2], type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
# unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
# Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
# --- branch data -> create line, trafo
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0): # create line
Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol', max_loading_percent=100,
in_service=bool(ppc['branch'][i, 10]))
else: # create transformer
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tap_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tap_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5]
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn
if i0_percent < 0:
                logger.info('A transformer always behaves inductively (consuming reactive power), but the '
'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1),
tap_side=tap_side, tap_neutral=0)
# unused data of ppc: rateB, rateC
# --- gencost -> create polynomial_cost, piecewise_cost
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
# reshape gencost if only one gencost is given -> no indexError
ppc['gencost'] = ppc['gencost'].reshape((1, -1))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
# areas are unconverted
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net, **kwargs):
logger.error("Validation failed.")
net._options = {}
net._options["gen_lookup"] = gen_lookup
return net
def _validate_diff_res(diff_res, max_diff_values):
to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',
'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}
if not len(to_iterate):
logger.warning("There are no keys to validate.")
val = True
for i in to_iterate:
elm = i.split("_")[0]
sought = ["p", "q"] if elm != "bus" else ["vm", "va"]
col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != "branch" else \
list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])
val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])
return val
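# Editor's note: a brief, assumed illustration of the helper above. diff_res
# maps the result tables ("bus", "branch", "gen") to arrays of deviations, and
# max_diff_values names the tolerated maximum per quantity, e.g.:
#   _validate_diff_res(diff_res, {"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5})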
def validate_from_ppc(ppc_net, net, pf_type="runpp", max_diff_values={
"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
"gen_p_mw": 1e-6, "gen_q_mvar": 1e-6}, run=True):
"""
This function validates the pypower case files to pandapower net structure conversion via a \
comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file, which must already contain the pypower powerflow
results or pypower must be importable.
**net** - The pandapower network.
OPTIONAL:
**pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",
"rundcpp", "runopp", "rundcopp")
**max_diff_values** - Dict of maximal allowed difference values. The keys must be
'vm_pu', 'va_degree', 'p_branch_mw', 'q_branch_mvar', 'p_gen_mw' and 'q_gen_mvar' and
the values floats.
**run** (True, bool or list of two bools) - changing the value to False avoids trying to run
(optimal) loadflows. Giving a list of two bools addresses first pypower and second
pandapower.
OUTPUT:
**conversion_success** - conversion_success is returned as False if pypower or pandapower
cannot calculate a powerflow or if the maximum difference values (max_diff_values )
cannot be hold.
EXAMPLE:
import pandapower.converter as pc
        net = pc.from_ppc(ppc_net, f_hz=50)
        conversion_success = pc.validate_from_ppc(ppc_net, net)
NOTE:
The user has to take care that the loadflow results already are included in the provided \
ppc_net or pypower is importable.
"""
# check in case of optimal powerflow comparison whether cost information exist
if "opp" in pf_type:
if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):
if "gencost" in ppc_net:
if not len(ppc_net["gencost"]):
logger.debug('ppc and pandapower net do not include cost information.')
return True
else:
logger.error('The pandapower net does not include cost information.')
return False
else:
logger.debug('ppc and pandapower net do not include cost information.')
return True
# guarantee run parameter as list, for pypower and pandapower (optimal) powerflow run
run = [run, run] if isinstance(run, bool) else run
# --- check pypower powerflow success, if possible
if pypower_import and run[0]:
try:
if pf_type == "runpp":
ppc_net = runpf.runpf(ppc_net, ppopt)[0]
elif pf_type == "rundcpp":
ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]
elif pf_type == "runopp":
ppc_net = runopf.runopf(ppc_net, ppopt)
elif pf_type == "rundcopp":
ppc_net = rundcopf.rundcopf(ppc_net, ppopt)
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
except:
logger.debug("The pypower run did not work.")
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
# --- try to run a pandapower powerflow
if run[1]:
if pf_type == "runpp":
try:
pp.runpp(net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, trafo_model="pi", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
elif pf_type == "rundcpp":
try:
pp.rundcpp(net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower dc powerflow does not converge.')
elif pf_type == "runopp":
try:
pp.runopp(net, init="flat", calculate_voltage_angles=True)
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=True)
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
try:
pp.runopp(net, init="flat", calculate_voltage_angles=False)
logger.info("voltage_angles could be calculated.")
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
logger.error('The pandapower optimal powerflow does not converge.')
elif pf_type == "rundcopp":
try:
pp.rundcopp(net)
except pp.LoadflowNotConverged:
logger.error('The pandapower dc optimal powerflow does not converge.')
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
# --- prepare powerflow result comparison by reordering pp results as they are in ppc results
if not ppc_success:
return False
if "opp" in pf_type:
if not net.OPF_converged:
            return False
elif not net.converged:
return False
# --- store pypower powerflow results
ppc_res = dict.fromkeys(ppc_elms)
ppc_res["branch"] = ppc_net['branch'][:, 13:17]
ppc_res["bus"] = ppc_net['bus'][:, 7:9]
ppc_res["gen"] = ppc_net['gen'][:, 1:3]
# --- pandapower bus result table
pp_res = dict.fromkeys(ppc_elms)
pp_res["bus"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])
# --- pandapower gen result table
pp_res["gen"] = zeros([1, 2])
# consideration of parallel generators via storing how much generators have been considered
# each node
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc_net["gen"].shape) == 1:
ppc_net["gen"] = array(ppc_net["gen"], ndmin=2)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
GEN_uniq = GENS.drop_duplicates()
already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),
index=[int(v) for v in GEN_uniq.values])
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)
if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_ext_grid[
net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_gen[
net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
else:
pp_res["gen"] = append(pp_res["gen"], array(net.res_sgen[
net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[
already_used_gen.at[int(j)]].reshape((1, 2)), 0)
already_used_gen.at[int(j)] += 1
change_q_compare += [int(j)]
pp_res["gen"] = pp_res["gen"][1:, :] # delete initial zero row
# --- pandapower branch result table
pp_res["branch"] = zeros([1, 4])
# consideration of parallel branches via storing how often branches were considered
# each node-to-node-connection
try:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,
sort=True).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,
sort=True).drop_duplicates()
except TypeError:
# legacy pandas < 0.21
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()
init1['hv_bus'] = nan
init1['lv_bus'] = nan
init2['from_bus'] = nan
init2['to_bus'] = nan
try:
already_used_branches = concat([init1, init2], axis=0, sort=True)
except TypeError:
# pandas < 0.21 legacy
already_used_branches = concat([init1, init2], axis=0)
already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])
for i in BRANCHES.index:
from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 1]))
from_vn_kv = ppc_net['bus'][from_bus, 9]
to_vn_kv = ppc_net['bus'][to_bus, 9]
ratio = BRANCHES[2].at[i]
angle = BRANCHES[3].at[i]
# from line results
if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):
pp_res["branch"] = append(pp_res["branch"], array(net.res_line[
(net.line.from_bus == from_bus) &
(net.line.to_bus == to_bus)]
[['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)] += 1
# from trafo results
else:
if from_vn_kv >= to_vn_kv:
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == from_bus) &
(net.trafo.lv_bus == to_bus)]
[['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)] += 1
else: # switch hv-lv-connection of pypower connection buses
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == to_bus) &
(net.trafo.lv_bus == from_bus)]
[['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)] += 1
pp_res["branch"] = pp_res["branch"][1:, :] # delete initial zero row
# --- do the powerflow result comparison
diff_res = dict.fromkeys(ppc_elms)
diff_res["bus"] = ppc_res["bus"] - pp_res["bus"]
diff_res["bus"][:, 1] -= diff_res["bus"][0, 1] # remove va_degree offset
diff_res["branch"] = ppc_res["branch"] - pp_res["branch"]
diff_res["gen"] = ppc_res["gen"] - pp_res["gen"]
# comparison of buses with several generator units only as q sum
for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:
next_is = GEN_uniq.index[GEN_uniq.index > i]
if len(next_is) > 0:
next_i = next_is[0]
else:
next_i = GENS.index[-1] + 1
if (next_i - i) > 1:
diff_res["gen"][i:next_i, 1] = sum(diff_res["gen"][i:next_i, 1])
# logger info
logger.debug("Maximum voltage magnitude difference between pypower and pandapower: "
"%.2e pu" % max_(abs(diff_res["bus"][:, 0])))
logger.debug("Maximum voltage angle difference between pypower and pandapower: "
"%.2e degree" % max_(abs(diff_res["bus"][:, 1])))
logger.debug("Maximum branch flow active power difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["branch"][:, [0, 2]])))
logger.debug("Maximum branch flow reactive power difference between pypower and "
"pandapower: %.2e MVAr" % max_(abs(diff_res["branch"][:, [1, 3]])))
logger.debug("Maximum active power generation difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["gen"][:, 0])))
logger.debug("Maximum reactive power generation difference between pypower and pandapower: "
"%.2e MVAr" % max_(abs(diff_res["gen"][:, 1])))
if _validate_diff_res(diff_res, {"bus_vm_pu": 1e-3, "bus_va_degree": 1e-3, "branch_p_mw": 1e-6,
"branch_q_mvar": 1e-6}) and \
(max_(abs(diff_res["gen"])) > 1e-1).any():
logger.debug("The active/reactive power generation difference possibly results "
"because of a pypower error. Please validate "
"the results via pypower loadflow.") # this occurs e.g. at ppc case9
# give a return
if isinstance(max_diff_values, dict):
return _validate_diff_res(diff_res, max_diff_values)
else:
logger.debug("'max_diff_values' must be a dict.")
| 48.93517 | 103 | 0.568509 | [
"BSD-3-Clause"
] | BaraaUniKassel/pandapower | pandapower/converter/pypower/from_ppc.py | 30,193 | Python |
from setuptools import setup, find_packages
import os
setup(name='avenue',
      version="0.1",
description='Element AI car Simulator',
url='https://github.com/cyrilibrahim/Avenue',
author='ElementAI',
author_email='[email protected]',
license='',
zip_safe=False,
install_requires=[
"gdown",
# "mlagents==0.5.0",
"gym",
# "mlagents_frozen",
"mlagents @ git+https://[email protected]/rmst/ml-agents-frozen@fd10e3544472b365701da2526a8262e0c8a15784#egg=mlagents",
],
extras_require={},
packages=find_packages()
)
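# Editor's note: an assumed development workflow for this setup script,
# executed from the repository root:
#   pip install -e .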
| 28.909091 | 128 | 0.613208 | [
"MIT"
] | ElementAI/avenue | setup.py | 636 | Python |
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Concat:
def setup(self):
N = 10**5
self.s = pd.Series(list('aabbcd') * N).astype('category')
self.a = pd.Categorical(list('aabbcd') * N)
self.b = pd.Categorical(list('bbcdjk') * N)
def time_concat(self):
pd.concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
class Constructor:
def setup(self):
N = 10**5
self.categories = list('abcde')
self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',
periods=N / 10,
freq='s'))
self.datetimes_with_nat = self.datetimes.copy()
self.datetimes_with_nat.iloc[-1] = pd.NaT
self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
self.values_all_nan = [np.nan] * len(self.values)
self.values_all_int8 = np.ones(N, 'int8')
self.categorical = pd.Categorical(self.values, self.categories)
self.series = pd.Series(self.categorical)
def time_regular(self):
pd.Categorical(self.values, self.categories)
def time_fastpath(self):
pd.Categorical(self.codes, self.cat_idx, fastpath=True)
def time_datetimes(self):
pd.Categorical(self.datetimes)
def time_datetimes_with_nat(self):
pd.Categorical(self.datetimes_with_nat)
def time_with_nan(self):
pd.Categorical(self.values_some_nan)
def time_all_nan(self):
pd.Categorical(self.values_all_nan)
def time_from_codes_all_int8(self):
pd.Categorical.from_codes(self.values_all_int8, self.categories)
def time_existing_categorical(self):
pd.Categorical(self.categorical)
def time_existing_series(self):
pd.Categorical(self.series)
class ValueCounts:
params = [True, False]
param_names = ['dropna']
def setup(self, dropna):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
class Repr:
def setup(self):
self.sel = pd.Series(['s1234']).astype('category')
def time_rendering(self):
str(self.sel)
class SetCategories:
def setup(self):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class RemoveCategories:
def setup(self):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_remove_categories(self):
self.ts.cat.remove_categories(self.ts.cat.categories[::2])
class Rank:
def setup(self):
N = 10**5
ncats = 100
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = self.s_str.astype('category')
with warnings.catch_warnings(record=True):
self.s_str_cat_ordered = self.s_str.astype('category',
ordered=True)
self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
self.s_int_cat = self.s_int.astype('category')
with warnings.catch_warnings(record=True):
self.s_int_cat_ordered = self.s_int.astype('category',
ordered=True)
def time_rank_string(self):
self.s_str.rank()
def time_rank_string_cat(self):
self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
self.s_str_cat_ordered.rank()
def time_rank_int(self):
self.s_int.rank()
def time_rank_int_cat(self):
self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
class Isin:
params = ['object', 'int64']
param_names = ['dtype']
def setup(self, dtype):
np.random.seed(1234)
n = 5 * 10**5
sample_size = 100
arr = [i for i in np.random.randint(0, n // 10, size=n)]
if dtype == 'object':
arr = ['s{:04d}'.format(i) for i in arr]
self.sample = np.random.choice(arr, sample_size)
self.series = pd.Series(arr).astype('category')
def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
class IsMonotonic:
def setup(self):
N = 1000
self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
self.s = pd.Series(self.c)
def time_categorical_index_is_monotonic_increasing(self):
self.c.is_monotonic_increasing
def time_categorical_index_is_monotonic_decreasing(self):
self.c.is_monotonic_decreasing
def time_categorical_series_is_monotonic_increasing(self):
self.s.is_monotonic_increasing
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
class Contains:
def setup(self):
N = 10**5
self.ci = tm.makeCategoricalIndex(N)
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
self.key in self.ci
def time_categorical_contains(self):
self.key in self.c
class CategoricalSlicing:
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**6
categories = ['a', 'b', 'c']
values = [0] * N + [1] * N + [2] * N
if index == 'monotonic_incr':
self.data = pd.Categorical.from_codes(values,
categories=categories)
elif index == 'monotonic_decr':
self.data = pd.Categorical.from_codes(list(reversed(values)),
categories=categories)
elif index == 'non_monotonic':
self.data = pd.Categorical.from_codes([0, 1, 2] * N,
categories=categories)
else:
raise ValueError('Invalid index param: {}'.format(index))
self.scalar = 10000
self.list = list(range(10000))
self.cat_scalar = 'b'
def time_getitem_scalar(self, index):
self.data[self.scalar]
def time_getitem_slice(self, index):
self.data[:self.scalar]
def time_getitem_list_like(self, index):
self.data[[self.scalar]]
def time_getitem_list(self, index):
self.data[self.list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
class Indexing:
def setup(self):
N = 10**5
self.index = pd.CategoricalIndex(range(N), range(N))
self.series = pd.Series(range(N), index=self.index).sort_index()
self.category = self.index[500]
def time_get_loc(self):
self.index.get_loc(self.category)
def time_shape(self):
self.index.shape
def time_shallow_copy(self):
self.index._shallow_copy()
def time_align(self):
pd.DataFrame({'a': self.series, 'b': self.series[:500]})
def time_intersection(self):
self.index[:750].intersection(self.index[250:])
def time_unique(self):
self.index.unique()
def time_reindex(self):
self.index.reindex(self.index[:500])
def time_reindex_missing(self):
self.index.reindex(['a', 'b', 'c', 'd'])
def time_sort_values(self):
self.index.sort_values(ascending=False)
from .pandas_vb_common import setup # noqa: F401
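# Editor's note: these classes follow the airspeed velocity (asv) convention in
# which `setup` builds fixtures and the `time_*` methods are what gets timed.
# Outside of asv they can be exercised manually, e.g. (an assumed snippet):
#   bench = Concat()
#   bench.setup()
#   bench.time_concat()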
| 28.316498 | 77 | 0.592271 | [
"BSD-3-Clause"
] | FerhatYilmaz1986/pandas | asv_bench/benchmarks/categoricals.py | 8,410 | Python |
import astropy.io.fits as fits
import argparse, os, re, copy
parser = argparse.ArgumentParser()
parser.add_argument('--list', default='list.list')
parser.add_argument('--rename_by', default='DATA-TYP')
parser.add_argument('--reparse', default=0, type=int)
args = parser.parse_args()
def log(description):
print(description)
try:
lst_f = open(args.list, 'r')
except OSError:
    log("List file not found: " + args.list)
    raise SystemExit(1)
lst = lst_f.read()
lst = lst.replace('\r\n', '\n')
lst = lst.replace('\r', '\n')
lst = lst.split('\n')
log("Loading file(s)...")
for i in lst:
try:
#if True:
hdulist = fits.open(i)
hdulist.verify('fix')
log("Loading file: " + i)
headers = dict(hdulist[0].header)
typ = headers[args.rename_by].strip()
if args.reparse == 1:
newname = typ + '_' + i.split('_')[-1]
else:
newname = typ + '_' + i
log("Renamed to " + newname)
os.rename(i, newname)
except:
log("Error while reading file " + i)
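# Editor's note: an assumed example invocation, based on the argparse options
# defined above (FITS file names listed one per line in list.list, renamed by
# their DATA-TYP header value):
#   python autorenamer.py --list list.list --rename_by DATA-TYP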
| 21.431818 | 54 | 0.645811 | [
"MIT"
] | hletrd/PyAstrophotography | autorenamer.py | 943 | Python |
import collections.abc
Set = set
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.abc.MutableSet):
"""
From: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
if __name__=="__main__":
a = OrderedSet()
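    # Editor's addition: a short, assumed demonstration of the class above.
    a |= "abracadabra"            # MutableSet supplies |=; insertion order is kept
    b = OrderedSet("simsalabim")
    print(a | b)                  # OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
    print(a & b)                  # elements common to both, ordered by b
    print(a - b)                  # elements only in a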
| 22.828947 | 74 | 0.591931 | [
"Apache-2.0"
] | gantech/fastv8DriverProgram | fastv8/doc/_extensions/backports.py | 1,735 | Python |
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
import pandas as pd
import numpy as np
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
from rpiweather import temphumid
from rpiweather import temppressure
from rpiweather import data
from rpiweather import outside_weather
from rpiweather import dust
temppressure.start_recording()
temphumid.start_recording()
outside_weather.start_recording()
dust.start_recording()
app = Flask("rpiweather")
def format_timestamps(series):
local_tz = get_localzone()
return list(
str(dt.tz_localize("UTC").tz_convert(local_tz)) for dt in series
)
@app.route("/")
def index():
lookbehind = int(request.args.get('lookbehind', 24))
bigarray = data.get_recent_datapoints(lookbehind)
logger.info("Total datapoint count: %d" % len(bigarray))
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
agg_interval = "15T" if lookbehind < 168 else "1H" if lookbehind < 5040 else "1D"
df2 = df.pivot(columns='type', values='value').resample(agg_interval).mean()
temp_df = df2['temperature'].dropna()
temp_values = {
'x': format_timestamps(temp_df.index),
'y': list(temp_df),
'name': 'Temperature',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)'
}
}
outside_temp_df = df2['outside_temperature'].dropna()
ot_values = {
'x': format_timestamps(outside_temp_df.index),
'y': list(outside_temp_df),
'name': 'Temperature Outside',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)',
'dash': 'longdash'
}
}
pres_df = df2['pressure'].dropna()
pressure_values = {
'x': format_timestamps(pres_df.index),
'y': list(pres_df),
'name': 'Pressure',
'type': 'line',
'yaxis': 'y2',
'line': {
'dash': 'dot',
'color': 'rgb(151,138,155)'
}
}
hum_df = df2['humidity'].dropna()
humidity_values = {
'x': format_timestamps(hum_df.index),
'y': list(hum_df),
'name': 'Humidity',
'type': 'scatter',
'fill': 'tozeroy',
'yaxis': 'y3',
'marker': {
'color': 'rgb(66,131,244)'
}
}
dust_df = df2['dust'].dropna()
dust_values = {
'x': format_timestamps(dust_df.index),
'y': list(dust_df),
'name': 'Dust level',
'type': 'line',
'yaxis': 'y4',
'line': {
'dash': 'dot',
'color': 'rgb(224, 205, 31)'
}
}
chart_data = [
temp_values, pressure_values, humidity_values, ot_values, dust_values
]
#import pdb; pdb.set_trace()
lookbehind_options = [(24, "1d"),
(24*7, "1w"),
(24*7*30, "30d")]
return render_template("index.html",
weather_data=chart_data,
lookbehind_options=lookbehind_options,
lookbehind=lookbehind)
def make_agg_df(rec):
df = pd.DataFrame.from_records(rec, index="time")
df.index = pd.to_datetime(df.index, unit="s")
return df.resample("T").mean()
def magic():
df_tp = make_agg_df(temppressure.get_records())
df_th = make_agg_df(temphumid.get_records())
df_th = df_th.rename(columns={'temp': 'bad_temp'})
total_view = pd.concat([df_tp, df_th], axis=1)
return total_view
#import IPython
# IPython.embed()
if False:
bigarray = data.get_recent_datapoints()
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
df2 = df.pivot(columns='type', values='value').resample("5T").mean()
temp_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['temperature']
))
pressure_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['pressure']
))
humidity_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['humidity']
))
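# Editor's note: nothing in this module starts the web server itself; an
# assumed way to serve the dashboard locally during development would be:
#   if __name__ == "__main__":
#       app.run(host="0.0.0.0", port=5000)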
| 27.886792 | 99 | 0.58525 | [
"MIT"
] | wbkang/rpi-repo | rpiweather/server.py | 4,434 | Python |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/30 11:28
Desc: Sina Finance - A-shares - real-time quotes and historical quotes (including forward (qfq) and backward (hfq) adjustment factors)
"""
import re
import demjson
import execjs
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
"""
    Total number of pages covering all stocks
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
    :return: total number of pages of stocks that need to be fetched
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
def stock_zh_a_spot() -> pd.DataFrame:
"""
    Fetch real-time quotes of all A-shares from Sina Finance; running this function repeatedly may get your IP temporarily blocked by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol code name trade pricechange changepercent buy \
0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920
1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110
2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410
3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240
4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310
... ... ... ... ... ... ...
3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270
3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180
3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540
3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540
3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200
sell settlement open high low volume amount \
0 12.930 12.950 12.950 13.100 12.860 46023920 597016896
1 18.120 18.480 18.510 18.510 17.880 24175071 437419344
2 4.420 4.440 4.490 4.490 4.410 4304900 19130233
3 17.280 17.600 17.670 17.670 17.220 684801 11879731
4 3.320 3.350 3.360 3.360 3.300 8284294 27579688
... ... ... ... ... ... ...
3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172
3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669
3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901
3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997
3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447
ticktime per pb mktcap nmc turnoverratio
0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376
1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826
2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525
3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798
4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185
... ... ... ... ... ...
3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386
3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268
3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323
3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167
3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
def stock_zh_a_daily(symbol: str = "sz000613", adjust: str = "qfq") -> pd.DataFrame:
"""
    Sina Finance - A-shares - historical quotes of an individual stock; heavy scraping may get your IP blocked
:param symbol: sh600000
:type symbol: str
    :param adjust: empty string (default): return unadjusted data; qfq: return forward-adjusted data; hfq: return backward-adjusted data; hfq-factor: return the backward-adjustment factors; qfq-factor: return the forward-adjustment factors
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = execjs.compile(hk_js_decode)
dict_list = js_code.call(
        'd', res.text.split("=")[1].split(";")[0].replace(
            '"', ""))  # run the JS decode routine to decrypt the data
data_df = pd.DataFrame(dict_list)
data_df["date"] = data_df["date"].str.split("T", expand=True).iloc[:, 0]
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="left")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
if __name__ == "__main__":
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol="sh600582", adjust="qfq-factor")
print(stock_zh_a_daily_hfq_df)
stock_zh_a_daily_df = stock_zh_a_daily(symbol="sz000613", adjust="qfq")
print(stock_zh_a_daily_df)
stock_zh_a_spot_df = stock_zh_a_spot()
print(stock_zh_a_spot_df)
| 44.979695 | 107 | 0.586616 | [
"MIT"
] | fellowfun/akshare | akshare/stock/zh_stock_a_sina.py | 9,239 | Python |
"""Support for HomematicIP Cloud lights."""
import logging
from typing import Any, Dict
from homematicip.aio.device import (
AsyncBrandDimmer,
AsyncBrandSwitchMeasuring,
AsyncBrandSwitchNotificationLight,
AsyncDimmer,
AsyncFullFlushDimmer,
AsyncPluggableDimmer,
)
from homematicip.base.enums import RGBColorState
from homematicip.base.functionalChannels import NotificationLightChannel
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_NAME,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
Light,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
) -> None:
"""Old way of setting up HomematicIP Cloud lights."""
pass
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP Cloud lights from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
entities.append(HomematicipLightMeasuring(hap, device))
elif isinstance(device, AsyncBrandSwitchNotificationLight):
entities.append(HomematicipLight(hap, device))
entities.append(
HomematicipNotificationLight(hap, device, device.topLightChannelIndex)
)
entities.append(
HomematicipNotificationLight(
hap, device, device.bottomLightChannelIndex
)
)
elif isinstance(
device,
(AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer),
):
entities.append(HomematicipDimmer(hap, device))
if entities:
async_add_entities(entities)
class HomematicipLight(HomematicipGenericDevice, Light):
"""Representation of a HomematicIP Cloud light device."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the light device."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._device.on
async def async_turn_on(self, **kwargs) -> None:
"""Turn the device on."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the device off."""
await self._device.turn_off()
class HomematicipLightMeasuring(HomematicipLight):
"""Representation of a HomematicIP Cloud measuring light device."""
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the generic device."""
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if current_power_w > 0.05:
state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2)
state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2)
return state_attr
class HomematicipDimmer(HomematicipGenericDevice, Light):
"""Representation of HomematicIP Cloud dimmer light device."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the dimmer light device."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._device.dimLevel is not None and self._device.dimLevel > 0.0
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._device.dimLevel or 0.0) * 255)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
await self._device.set_dim_level(kwargs[ATTR_BRIGHTNESS] / 255.0)
else:
await self._device.set_dim_level(1)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
await self._device.set_dim_level(0)
class HomematicipNotificationLight(HomematicipGenericDevice, Light):
"""Representation of HomematicIP Cloud dimmer light device."""
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
"""Initialize the dimmer light device."""
self.channel = channel
if self.channel == 2:
super().__init__(hap, device, "Top")
else:
super().__init__(hap, device, "Bottom")
self._color_switcher = {
RGBColorState.WHITE: [0.0, 0.0],
RGBColorState.RED: [0.0, 100.0],
RGBColorState.YELLOW: [60.0, 100.0],
RGBColorState.GREEN: [120.0, 100.0],
RGBColorState.TURQUOISE: [180.0, 100.0],
RGBColorState.BLUE: [240.0, 100.0],
RGBColorState.PURPLE: [300.0, 100.0],
}
@property
def _func_channel(self) -> NotificationLightChannel:
return self._device.functionalChannels[self.channel]
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return (
self._func_channel.dimLevel is not None
and self._func_channel.dimLevel > 0.0
)
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._func_channel.dimLevel or 0.0) * 255)
@property
def hs_color(self) -> tuple:
"""Return the hue and saturation color value [float, float]."""
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the generic device."""
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr
@property
def name(self) -> str:
"""Return the name of the generic device."""
return f"{super().name} Notification"
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.__class__.__name__}_{self.post}_{self._device.id}"
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
# Use hs_color from kwargs,
# if not applicable use current hs_color.
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
# Use brightness from kwargs,
# if not applicable use current brightness.
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
# If no kwargs, use default value.
if not kwargs:
brightness = 255
# Minimum brightness is 10, otherwise the led is disabled
brightness = max(10, brightness)
dim_level = brightness / 255.0
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=dim_level,
onTime=0,
rampTime=transition,
)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=0.0,
onTime=0,
rampTime=transition,
)
def _convert_color(color: tuple) -> RGBColorState:
"""
Convert the given color to the reduced RGBColorState color.
RGBColorStat contains only 8 colors including white and black,
so a conversion is required.
"""
if color is None:
return RGBColorState.WHITE
hue = int(color[0])
saturation = int(color[1])
if saturation < 5:
return RGBColorState.WHITE
if 30 < hue <= 90:
return RGBColorState.YELLOW
if 90 < hue <= 160:
return RGBColorState.GREEN
if 150 < hue <= 210:
return RGBColorState.TURQUOISE
if 210 < hue <= 270:
return RGBColorState.BLUE
if 270 < hue <= 330:
return RGBColorState.PURPLE
return RGBColorState.RED
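# Editor's note: a few worked examples of the mapping above, (hue, saturation):
#   _convert_color((0.0, 0.0))     -> RGBColorState.WHITE   (saturation < 5)
#   _convert_color((120.0, 100.0)) -> RGBColorState.GREEN   (90 < hue <= 160)
#   _convert_color((240.0, 100.0)) -> RGBColorState.BLUE    (210 < hue <= 270)
#   _convert_color((0.0, 100.0))   -> RGBColorState.RED     (fallback)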
| 32.391459 | 88 | 0.652054 | [
"Apache-2.0"
] | 0x00-0xFF/home-assistant | homeassistant/components/homematicip_cloud/light.py | 9,102 | Python |
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import logging
import sqlite3 as sqlite
from pprint import pformat
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# endregion[Imports]
__updated__ = '2020-11-26 17:04:37'
# region [AppUserData]
# endregion [AppUserData]
# region [Logging]
log = logging.getLogger('gidsql')
glog.import_notification(log, __name__)
# endregion[Logging]
# region [Constants]
# endregion[Constants]
class GidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
"""
        checks if the db exists and logs it
Returns
-------
bool
            whether the file exists or not
"""
if os.path.isfile(self.db_loc):
            log.info("database at %s does EXIST", self.db_loc)
return True
else:
            log.info("database at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
def _execute_pragmas(self, in_cursor):
if self.pragmas is not None and self.pragmas != '':
in_cursor.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
class AioGidSqliteActionBase:
def __init__(self, in_db_loc, in_pragmas=None):
self.db_loc = in_db_loc
self.pragmas = in_pragmas
glog.class_init_notification(log, self)
@property
def exists(self):
"""
        checks if the db exists and logs it
Returns
-------
bool
            whether the file exists or not
"""
if os.path.isfile(self.db_loc):
            log.info("database at %s does EXIST", self.db_loc)
return True
else:
log.info("databse at %s does NOT EXIST", self.db_loc)
return False
@staticmethod
async def _handle_error(error, sql_phrase, variables):
log.critical("%s - with SQL --> %s and args[%s]", str(error), sql_phrase, pformat(variables))
if 'syntax error' in str(error):
raise SyntaxError(error)
raise sqlite.Error(error)
async def _execute_pragmas(self, in_connection):
if self.pragmas not in [None, '', []]:
await in_connection.executescript(self.pragmas)
log.debug("Executed pragmas '%s' successfully", self.pragmas)
def __repr__(self):
return f"{self.__class__.__name__} ('{self.db_loc}')"
def __str__(self):
return self.__class__.__name__
# region[Main_Exec]
if __name__ == '__main__':
pass
# endregion[Main_Exec]
| 26.624 | 106 | 0.578425 | ["MIT"] | official-antistasi-community/Antipetros_Discord_Bot | antipetros_discordbot/utility/gidsql/db_action_base.py | 3,328 | Python |
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples. (the keys is still index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
    This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
    to access the value for the next step, e.g. `next_observations` for the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
    Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy, it's attributes refer to the respective slice of the
corresponding variable. The only required result variable are `rewards`, observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
# print (data)
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
            done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
            done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate dict entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
                value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
        Sample a random batch of steps together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
        if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Either batch_size or num_batches must not be None, but not both or none!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
            method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
        Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
        :param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`
        :param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
        :param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `process_fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
    :param data_format: data format of the given rollouts, 'torch' or 'numpy'
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
        return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulmann, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
    :param lamb: lambda factor of the generalized advantage estimation, trading off bias and variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
| 40.965142 | 120 | 0.625725 | ["MIT"] | nifunk/GNNMushroomRL | mushroom_rl/core/parallelization_tools/step_sequence.py | 37,606 | Python |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask_caching import Cache
from flask_caching.backends.rediscache import RedisCache
from flask_caching.backends.simplecache import SimpleCache
from redis import RedisError
from indico.core.logger import Logger
_logger = Logger.get('cache')
class CachedNone:
__slots__ = ()
@classmethod
def wrap(cls, value):
return cls() if value is None else value
@classmethod
def unwrap(cls, value, default=None):
if value is None:
return default
elif isinstance(value, cls):
return None
else:
return value
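# Illustrative round trip (sketch): wrapping turns a legitimate `None` value into
# a CachedNone marker before storage, so a later read can tell a cached `None`
# apart from a cache miss:
#     CachedNone.wrap(None)                  # -> CachedNone()
#     CachedNone.unwrap(CachedNone())        # -> None (cache hit that stored None)
#     CachedNone.unwrap(None, default=42)    # -> 42   (cache miss)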
class IndicoCacheMixin:
def get(self, key, default=None):
return CachedNone.unwrap(super().get(key), default)
def get_many(self, *keys, default=None):
return [CachedNone.unwrap(val, default) for val in super().get_many(*keys)]
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
class IndicoRedisCache(IndicoCacheMixin, RedisCache):
"""
This is similar to the original RedisCache from Flask-Caching, but it
allows specifying a default value when retrieving cache data and
distinguishing between a cached ``None`` value and a cache miss.
"""
def dump_object(self, value):
        # We are not overriding the `load_object` counterpart to this method on
        # purpose because we need to have access to the wrapped value in `get`
# and `get_many`.
return super().dump_object(CachedNone.wrap(value))
class IndicoSimpleCache(IndicoCacheMixin, SimpleCache):
"""
This is similar to the original SimpleCache from Flask-Caching, but it
allows specifying a default value when retrieving cache data and
distinguishing between a cached ``None`` value and a cache miss.
"""
def set(self, key, value, timeout=None):
return super().set(key, CachedNone.wrap(value), timeout=timeout)
def add(self, key, value, timeout=None):
return super().add(key, CachedNone.wrap(value), timeout=timeout)
def make_indico_simple_cache(app, config, args, kwargs):
return IndicoSimpleCache(*args, **kwargs)
def make_indico_redis_cache(app, config, args, kwargs):
from redis import from_url as redis_from_url
key_prefix = config.get('CACHE_KEY_PREFIX')
if key_prefix:
kwargs['key_prefix'] = key_prefix
kwargs['host'] = redis_from_url(config['CACHE_REDIS_URL'], socket_timeout=1)
return IndicoRedisCache(*args, **kwargs)
class ScopedCache:
def __init__(self, cache, scope):
self.cache = cache
self.scope = scope
def _scoped(self, key):
return f'{self.scope}/{key}'
def get(self, key, default=None):
return self.cache.get(self._scoped(key), default=default)
def set(self, key, value, timeout=None):
self.cache.set(self._scoped(key), value, timeout=timeout)
def add(self, key, value, timeout=None):
self.cache.add(self._scoped(key), value, timeout=timeout)
def delete(self, key):
self.cache.delete(self._scoped(key))
def delete_many(self, *keys):
keys = [self._scoped(key) for key in keys]
self.cache.delete_many(*keys)
def clear(self):
raise NotImplementedError('Clearing scoped caches is not supported')
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
def get_many(self, *keys, default=None):
keys = [self._scoped(key) for key in keys]
return self.cache.get_many(*keys, default=default)
def set_many(self, mapping, timeout=None):
mapping = {self._scoped(key): value for key, value in mapping.items()}
self.cache.set_many(mapping, timeout=timeout)
def __repr__(self):
return f'<ScopedCache: {self.scope}>'
class IndicoCache(Cache):
"""
    This is basically the Cache class from Flask-Caching but it silences all
exceptions that happen during a cache operation since cache failures should
not take down the whole page.
While this cache can in principle support many different backends, we only
consider redis and (for unittests) a simple dict-based cache. This allows
us to be more specific in catching exceptions since the Redis cache has
exactly one base exception.
"""
def get(self, key, default=None):
try:
return super().get(key, default)
except RedisError:
_logger.exception('get(%r) failed', key)
return default
def set(self, key, value, timeout=None):
try:
super().set(key, value, timeout=timeout)
except RedisError:
_logger.exception('set(%r) failed', key)
def add(self, key, value, timeout=None):
try:
super().add(key, value, timeout=timeout)
except RedisError:
_logger.exception('add(%r) failed', key)
def delete(self, key):
try:
super().delete(key)
except RedisError:
_logger.exception('delete(%r) failed', key)
def delete_many(self, *keys):
try:
super().delete_many(*keys)
except RedisError:
_logger.exception('delete_many(%s) failed', ', '.join(map(repr, keys)))
def clear(self):
try:
super().clear()
except RedisError:
_logger.exception('clear() failed')
def get_many(self, *keys, default=None):
try:
return super().get_many(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_many(%s) failed', logkeys)
return [default] * len(keys)
def set_many(self, mapping, timeout=None):
try:
super().set_many(mapping, timeout=timeout)
except RedisError:
_logger.exception('set_many(%r) failed', mapping)
def get_dict(self, *keys, default=None):
try:
return super().get_dict(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_dict(%s) failed', logkeys)
return dict(zip(keys, [default] * len(keys)))
def make_scoped_cache(scope):
"""Create a new scoped cache.
In most cases the global cache should not be used directly but rather
with a scope depending on the module a cache is used for. This is
especially important when passing user-provided data as the cache key
to prevent reading other unrelated cache keys.
"""
return ScopedCache(cache, scope)
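# Illustrative usage (sketch; the scope name and keys are made up):
#     user_cache = make_scoped_cache('user-prefs')
#     user_cache.set('123', {'theme': 'dark'}, timeout=3600)
#     user_cache.get('123', default={})
# Keys are transparently prefixed with 'user-prefs/', so unrelated modules cannot
# collide even if they use the same raw keys.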
cache = IndicoCache()
| 31.971963 | 83 | 0.653464 | ["MIT"] | errikos/indico | indico/core/cache.py | 6,842 | Python |
import numpy as np # linear algebra
np.random.seed(42)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os, glob
import cv2
# parameters
format = "%H%M"
ts = time.strftime(format)
base_name = os.path.splitext(__file__)[0] + "_ts" + ts
input_size = 128
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GaussianNoise
from keras.layers import GlobalMaxPooling2D, Reshape, UpSampling3D, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras import backend as K
def get_callbacks(save_path, lr=0.001, patience=64):
csv_logger = CSVLogger(save_path + '_log.csv', append=True)
# check_path = save_path + '_e{epoch:02d}_vl{val_loss:.5f}.hdf5'
check_path = save_path
save_checkpoint = ModelCheckpoint(filepath=check_path, monitor='val_loss', save_best_only=True)
    learning_rate_scheduler = ReduceLROnPlateau(patience=8, min_lr=lr * 0.00001)
early_stopping = EarlyStopping(monitor='val_loss',
patience=16,
verbose=1,
min_delta=1e-4,
mode='min')
Callbacks = [csv_logger,
save_checkpoint,
                 # learning_rate_scheduler,
early_stopping
]
return Callbacks
def swish(x):
return x * K.sigmoid(x)
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
def get_model(num_class):
base_model = VGG16(weights='imagenet', include_top=False,
input_shape=[input_size,input_size,3], classes=1)
x = base_model.get_layer('block5_pool').output
x = GlobalMaxPooling2D()(x)
x = Dense(512, activation='relu', name='fc2')(x)
x = Dropout(0.3)(x)
x = Dense(512, activation='relu', name='fc3')(x)
x = Dropout(0.3)(x)
predictions = Dense(num_class, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
sat_shift_limit=(-255, 255),
val_shift_limit=(-255, 255), u=0.5):
if np.random.random() < u:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(image) # sikisou, saido, meido
hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
v = cv2.add(v, val_shift)
image = cv2.merge((h, s, v))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def randomShiftScaleRotate(image,
shift_limit=(-0.0625, 0.0625),
scale_limit=(-0.1, 0.1),
rotate_limit=(-45, 45), aspect_limit=(0, 0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1]) # degree
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
return image
def randomHorizontalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 1)
return image
def randomVerticalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 0)
return image
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
def eraser(input_img):
img_h, img_w, img_c = input_img.shape
p_1 = np.random.rand()
if p_1 > p:
return input_img
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
if pixel_level:
c = np.random.uniform(v_l, v_h, (h, w, img_c))
else:
c = np.random.uniform(v_l, v_h)
input_img[top:top + h, left:left + w, :] = c
return input_img
return eraser
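# Illustrative use of the eraser (sketch): train_generator below builds it with
# get_random_eraser(v_h=0.) and applies it per image; with pixel_level=True each
# erased pixel gets its own random value instead of one constant patch.
#     eraser = get_random_eraser(v_h=0.)
#     augmented = eraser(img)   # img is an HxWxC array, patched in place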
from multiprocessing import Pool
def load_img(args):
img_path = args
img = cv2.imread(img_path)
# print("img shape", img.shape)
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
img = randomShiftScaleRotate(img,
shift_limit=(-0.2, 0.2),
scale_limit=(-0.2, 0.5),
rotate_limit=(-30, 30),
aspect_limit=(-0.2, 0.2),
u=0.5)
img = randomHorizontalFlip(img)
img = randomVerticalFlip(img)
return img
def train_generator(x_train, y_train, img_dir, batch_size, shuffle=True):
# x_train = x_train.as_matrix()
# y_train = y_train.as_matrix()
y_train = np.eye(55)[y_train]
batch_index = 0
n = x_train.shape[0]
# print("n", n)
eraser = get_random_eraser(v_h=0.)
pool = Pool()
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_id = index_array[current_index: current_index + current_batch_size]
batch_x = pool.map(load_img,
[img_dir + '/{}'.format(x_train[id])
for id in batch_id])
for id in range(len(batch_x)):
img = batch_x[id]
img =eraser(img)
# img =eraser(img)
# img =eraser(img)
# img =eraser(img)
# img =eraser(img)
batch_x[id] = img
batch_x = np.array(batch_x, np.float32) / 255
batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
# print("batch shape", batch_x.shape, batch_y.shape)
yield (batch_x, batch_y)
def get_mixer(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
def mixer(img1, img2, mask1, mask2):
img_h, img_w, img_c = img1.shape
p_1 = np.random.rand()
if p_1 > p:
return img1, mask1
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w
r = np.random.uniform(r_1, r_2)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:
break
img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
mask1[top:top + h, left:left + w, :] = mask2[top:top + h, left:left + w, :]
return img1, mask1
return mixer
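# mix_generator below implements mixup-style augmentation: two independently
# augmented batches are blended with a weight l drawn from Beta(alpha, alpha):
#     x = l * x1 + (1 - l) * x2
#     y = l * y1 + (1 - l) * y2
# get_mixer above is a patch-swapping alternative that appears unused in this script.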
def mix_generator(X_train, Y_train, img_dir, batch_size, shuffle=True):
alpha = 0.2
gen1 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
gen2 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
while True:
batch1 = next(gen1)
batch2 = next(gen2)
current_batch_size = batch1[0].shape[0]
l = np.random.beta(alpha, alpha, current_batch_size)
X_l = l.reshape(current_batch_size, 1, 1, 1)
Y_l = l.reshape(current_batch_size, 1)
batch_x = batch1[0] * X_l + batch2[0] * (1 - X_l)
batch_y = batch1[1] * Y_l + batch2[1] * (1 - Y_l)
yield (batch_x, batch_y)
def test_generator(x_train, img_dir, batch_size, shuffle=True):
# x_train = x_train.as_matrix()
# y_train = y_train.as_matrix()
batch_index = 0
n = x_train.shape[0]
# print("n", n)
eraser = get_random_eraser(v_h=0.)
while 1:
if batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (batch_index * batch_size) % n
if n >= current_index + batch_size:
current_batch_size = batch_size
batch_index += 1
else:
current_batch_size = n - current_index
batch_index = 0
batch_x = []
batch_id = index_array[current_index: current_index + current_batch_size]
# print(batch_x_base)
for id in batch_id:
# print(x_train[0])
# print(x_train[id])
# print(img_dir + '/{}'.format(x_train[id]))
img = cv2.imread(img_dir + '/{}'.format(x_train[id]))
# print("img shape", img.shape)
img = cv2.resize(img, (input_size, input_size))
img = randomHueSaturationValue(img,
hue_shift_limit=(-5, 5),
sat_shift_limit=(-1, 1),
val_shift_limit=(-2, 2),
u=0.5)
img = randomShiftScaleRotate(img,
shift_limit=(-0.2, 0.2),
scale_limit=(-0.2, 0.2),
rotate_limit=(-30, 30),
aspect_limit = (-0.2, 0.2),
u=0.5)
img = randomHorizontalFlip(img)
# img =eraser(img)
batch_x.append(img)
batch_x = np.array(batch_x, np.float32) / 255
# batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
# print("batch shape", batch_x.shape, batch_y.shape)
yield batch_x
def load_data(train_path="input/train_master.tsv", test_path="input/sample_submit.tsv"):
train = pd.read_csv(train_path, delimiter="\t", index_col=False)
test = pd.read_csv(test_path, delimiter="\t", index_col=False, header=None)
print("train shape", train.shape)
print(train.head())
X_train = train['file_name'].as_matrix()
y_train = train['category_id'].as_matrix()
# y_train = np.eye(55)[y_train]
# print(y_train[:5])
# print(y_train.shape)
X_test = test.iloc[:,0]
return X_train, y_train, X_test
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import log_loss
def train(epochs, seed):
# parameter
batch_size = 128
num_class = 55
save_path = base_name + "_seed" + str(seed)
model_path = "_"
# Load data
X_train, y_train, X_test = load_data()
# CV
ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
random_state=42, test_size=0.05,
stratify=y_train)
# data process
X_train_cv = X_train[ids_train_split]
y_train_cv = y_train[ids_train_split]
X_holdout = X_train[ids_valid_split]
Y_holdout = y_train[ids_valid_split]
# print(X_train_cv.head())
# define file path and get callbacks
weight_path = "model/" + save_path + '.hdf5'
callbacks = get_callbacks(weight_path, patience=16)
gen = mix_generator(X_train_cv, y_train_cv, "input/train", batch_size)
gen_val = train_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
gen_val_pred = test_generator(X_holdout, "input/train", batch_size, shuffle=False)
gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False)
model = get_model(num_class)
model.fit_generator(generator=gen,
steps_per_epoch=np.ceil(X_train_cv.shape[0] / batch_size),
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=gen_val,
validation_steps=np.ceil(X_holdout.shape[0] / batch_size),
)
# Getting the Best Model
model.load_weights(filepath=weight_path)
# Getting Training Score
# score = model.evaluate_generator(generator=gen_trn_eval,
# steps=np.ceil(X_train.shape[0]/batch_size))
# print('Train loss:', score[0])
# print('Train accuracy:', score[1])
# Getting Valid Score
score = model.evaluate_generator(generator=gen_val,
steps=np.ceil(X_holdout.shape[0]/batch_size))
print('Valid loss:', score[0])
print('Valid accuracy:', score[1])
# Getting validation prediction
pred_valid = model.predict_generator(generator=gen_val_pred,
steps=np.ceil(X_holdout.shape[0]/batch_size))
# Getting Test prediction
pred_test = model.predict_generator(generator=gen_tst_pred,
steps=np.ceil(X_test.shape[0]/batch_size))
submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test, axis=1)})
submit_path = "output/submission" + save_path + "_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
submission.to_csv(submit_path, index=False, header=False, sep='\t')
np.save("input/" + base_name + "_valid.npy", pred_valid)
np.save("input/" + base_name + "_test.npy", pred_test)
def main():
train(epochs=250, seed=0)
if __name__ == "__main__": main()
| 35.669683 | 117 | 0.571356 | ["MIT"] | OsciiArt/Cookpad | train_180131_2.py | 15,766 | Python |
import logging
import numpy as np
from cvlib.object_detection import populate_class_labels, draw_bbox, detect_common_objects
from traffic_monitor.services.detectors.detector_abstract import DetectorAbstract
logger = logging.getLogger('detector')
class DetectorCVlib(DetectorAbstract):
"""
    Implementation of DetectorAbstract, built on the cvlib library's
    OpenCV-based common object detection.
https://github.com/arunponnusamy/cvlib
Yolov4 cfg and weights are available at: https://github.com/AlexeyAB/darknet
Supports models:
yolov3-tiny
yolov3
    Requires that the .cfg and .weights files are in ~/.cvlib/object_detection/yolo/yolov3
"""
def __init__(self, monitor_config: dict):
DetectorAbstract.__init__(self, monitor_config)
self.detector_name: str = monitor_config.get('detector_name')
self.detector_model: str = monitor_config.get('detector_model')
self.detector_confidence: float = monitor_config.get('detector_confidence')
        # note that cvlib expects colors in BGR, not RGB, order
self.bgr_colors = np.float64([monitor_config.get('class_colors').get(o)[::-1] for o in populate_class_labels()])
def set_detector_value(self, kwargs_list: list):
""" Only allow changes to confidence or the model """
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if field in ['detector_confidence', 'detector_model']:
logger.info(f"{self.detector_name}: setting value: {field}: {value}")
self.monitor_config[field] = value
except Exception as e:
logger.error(f"{self.__class__.__name__}: Error setting value: {e}")
def detect(self, frame: np.array) -> (np.array, list):
# colors is a list of BGR values in a list ([[#b,#g,#r],[#b,#g,#r], ... ])
try:
bbox, labels, conf = detect_common_objects(frame, confidence=self.detector_confidence, model=self.detector_model)
frame = draw_bbox(img=frame, bbox=bbox, labels=labels, confidence=conf, write_conf=False, colors=self.bgr_colors)
return frame, labels
except Exception as e:
logger.error(f"{self.__class__.__name__} Exception: {e}")
@classmethod
def get_trained_objects(cls) -> list:
return populate_class_labels()
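# Illustrative usage (sketch; the config values are made up and class_colors is
# assumed to map each trained label to an RGB triple):
#     cfg = {
#         'detector_name': 'cvlib',
#         'detector_model': 'yolov3',
#         'detector_confidence': 0.5,
#         'class_colors': {label: (0, 255, 0) for label in DetectorCVlib.get_trained_objects()},
#     }
#     detector = DetectorCVlib(cfg)
#     annotated_frame, labels = detector.detect(frame)   # frame assumed to be a BGR np.array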
| 41.457627 | 125 | 0.674571 | ["MIT"] | mcdomx/monitor | traffic_monitor/services/detectors/detector_cvlib.py | 2,446 | Python |
import os
import json
import datetime
import numpy as np
from matplotlib import pyplot as plt
class MetaLogger(object):
def __init__(self, meta_config, config, task_directory, load_directory=None, load_epoch=None):
self.results_directory = os.path.join('meta_results', str(datetime.datetime.now()))
self.results = {
'task_directory': task_directory,
'load_directory': load_directory,
'load_epoch': load_epoch,
'train_losses': [],
'train_accuracies': [],
'validation_losses': [],
'validation_accuracies': [],
'baseline_test_loss': 0,
'baseline_test_accuracy': 0,
'sgd_test_loss': 0,
'sgd_test_accuracy': 0,
'adam_test_loss': 0,
'adam_test_accuracy': 0,
'meta_optimizer_test_loss': 0,
'meta_optimizer_test_accuracy': 0,
'config': config,
'meta_config': meta_config
}
def load(self, file_path):
self.results_directory, _ = os.path.split(file_path)
with open(file_path, 'r') as file_obj:
self.results = json.load(file_obj)
def log(self):
if not os.path.exists(self.results_directory):
os.makedirs(self.results_directory)
with open('{}/results.json'.format(self.results_directory), 'w') as file_obj:
json.dump(self.results, file_obj, indent=4)
def plot(self):
plt.figure()
plt.title('Loss')
plt.xlabel('Meta Epochs')
plt.ylabel('Loss')
plt.xticks(np.arange(0, len(self.results['train_losses']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_losses']) + 1) * .125, .125), self.results['train_losses'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_losses']) + 1) * .125, .125), self.results['validation_losses'], label='validation')
plt.legend()
plt.savefig('{}/loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Accuracy')
plt.xlabel('Meta Epochs')
plt.ylabel('Accuracy')
plt.xticks(np.arange(0, len(self.results['train_accuracies']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_accuracies']) + 1) * .125, .125), self.results['train_accuracies'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_accuracies']) + 1) * .125, .125), self.results['validation_accuracies'], label='validation')
plt.legend()
plt.savefig('{}/accuracy.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Losses')
plt.ylabel('Mean Test Loss')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_loss'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Accuracies')
plt.ylabel('Mean Test Accuracy')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_accuracy'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_accuracy.pdf'.format(self.results_directory))
plt.close()
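# Illustrative usage (sketch; the config dicts and paths are placeholders):
#     logger = MetaLogger(meta_config, config, task_directory='tasks/task_0')
#     logger.results['train_losses'].append(0.7)
#     logger.log()    # writes <results_directory>/results.json
#     logger.plot()   # writes loss/accuracy curves and test bar charts as PDFs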
| 41.897727 | 155 | 0.606184 | ["MIT"] | rlaboulaye/transformer | meta_logger.py | 3,687 | Python |
import torch.nn.functional as F
from torch import nn
from torchvision.ops import MultiScaleRoIAlign
from ..._internally_replaced_utils import load_state_dict_from_url
from ...ops import misc as misc_nn_ops
from ..mobilenetv3 import mobilenet_v3_large
from ..resnet import resnet50
from ._utils import overwrite_eps
from .anchor_utils import AnchorGenerator
from .backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers, _mobilenet_extractor
from .generalized_rcnn import GeneralizedRCNN
from .roi_heads import RoIHeads
from .rpn import RPNHead, RegionProposalNetwork
from .transform import GeneralizedRCNNTransform
__all__ = [
"FasterRCNN",
"fasterrcnn_resnet50_fpn",
"fasterrcnn_mobilenet_v3_large_320_fpn",
"fasterrcnn_mobilenet_v3_large_fpn",
]
class FasterRCNN(GeneralizedRCNN):
"""
Implements Faster R-CNN.
The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
image, and should be in 0-1 range. Different images can have different sizes.
The behavior of the model changes depending if it is in training or evaluation mode.
During training, the model expects both the input tensors, as well as a targets (list of dictionary),
containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the class label for each ground-truth box
The model returns a Dict[Tensor] during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
follows:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
Args:
backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
num_classes (int): number of output classes of the model (including the background).
If box_predictor is specified, num_classes should be None.
min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
image_mean (Tuple[float, float, float]): mean values used for input normalization.
They are generally the mean values of the dataset on which the backbone has been trained
on
image_std (Tuple[float, float, float]): std values used for input normalization.
They are generally the std values of the dataset on which the backbone has been trained on
rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
considered as positive during training of the RPN.
rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
considered as negative during training of the RPN.
rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
for computing the loss
rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
of the RPN
rpn_score_thresh (float): during inference, only return proposals with a classification score
greater than rpn_score_thresh
box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
the locations indicated by the bounding boxes
box_head (nn.Module): module that takes the cropped feature maps as input
box_predictor (nn.Module): module that takes the output of box_head and returns the
classification logits and box regression deltas.
box_score_thresh (float): during inference, only return proposals with a classification score
greater than box_score_thresh
box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
box_detections_per_img (int): maximum number of detections per image, for all classes.
box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
considered as positive during training of the classification head
box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
considered as negative during training of the classification head
box_batch_size_per_image (int): number of proposals that are sampled during training of the
classification head
box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
of the classification head
bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
bounding boxes
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import FasterRCNN
>>> from torchvision.models.detection.rpn import AnchorGenerator
>>> # load a pre-trained model for classification and return
>>> # only the features
>>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
>>> # FasterRCNN needs to know the number of
>>> # output channels in a backbone. For mobilenet_v2, it's 1280
>>> # so we need to add it here
>>> backbone.out_channels = 1280
>>>
>>> # let's make the RPN generate 5 x 3 anchors per spatial
>>> # location, with 5 different sizes and 3 different aspect
>>> # ratios. We have a Tuple[Tuple[int]] because each feature
>>> # map could potentially have different sizes and
>>> # aspect ratios
>>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
>>> aspect_ratios=((0.5, 1.0, 2.0),))
>>>
>>> # let's define what are the feature maps that we will
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> # put the pieces together inside a FasterRCNN model
>>> model = FasterRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
>>> box_roi_pool=roi_pooler)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
"""
def __init__(
self,
backbone,
num_classes=None,
# transform parameters
min_size=800,
max_size=1333,
image_mean=None,
image_std=None,
# RPN parameters
rpn_anchor_generator=None,
rpn_head=None,
rpn_pre_nms_top_n_train=2000,
rpn_pre_nms_top_n_test=1000,
rpn_post_nms_top_n_train=2000,
rpn_post_nms_top_n_test=1000,
rpn_nms_thresh=0.7,
rpn_fg_iou_thresh=0.7,
rpn_bg_iou_thresh=0.3,
rpn_batch_size_per_image=256,
rpn_positive_fraction=0.5,
rpn_score_thresh=0.0,
# Box parameters
box_roi_pool=None,
box_head=None,
box_predictor=None,
box_score_thresh=0.05,
box_nms_thresh=0.5,
box_detections_per_img=100,
box_fg_iou_thresh=0.5,
box_bg_iou_thresh=0.5,
box_batch_size_per_image=512,
box_positive_fraction=0.25,
bbox_reg_weights=None,
):
if not hasattr(backbone, "out_channels"):
raise ValueError(
"backbone should contain an attribute out_channels "
"specifying the number of output channels (assumed to be the "
"same for all the levels)"
)
assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
if num_classes is not None:
if box_predictor is not None:
raise ValueError("num_classes should be None when box_predictor is specified")
else:
if box_predictor is None:
raise ValueError("num_classes should not be None when box_predictor is not specified")
out_channels = backbone.out_channels
if rpn_anchor_generator is None:
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
if rpn_head is None:
rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])
rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
rpn = RegionProposalNetwork(
rpn_anchor_generator,
rpn_head,
rpn_fg_iou_thresh,
rpn_bg_iou_thresh,
rpn_batch_size_per_image,
rpn_positive_fraction,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_nms_thresh,
score_thresh=rpn_score_thresh,
)
if box_roi_pool is None:
box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
if box_head is None:
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
if box_predictor is None:
representation_size = 1024
box_predictor = FastRCNNPredictor(representation_size, num_classes)
roi_heads = RoIHeads(
# Box
box_roi_pool,
box_head,
box_predictor,
box_fg_iou_thresh,
box_bg_iou_thresh,
box_batch_size_per_image,
box_positive_fraction,
bbox_reg_weights,
box_score_thresh,
box_nms_thresh,
box_detections_per_img,
)
if image_mean is None:
image_mean = [0.485, 0.456, 0.406]
if image_std is None:
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
super().__init__(backbone, rpn, roi_heads, transform)
class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
Args:
in_channels (int): number of input channels
representation_size (int): size of the intermediate representation
"""
def __init__(self, in_channels, representation_size):
super().__init__()
self.fc6 = nn.Linear(in_channels, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
def forward(self, x):
x = x.flatten(start_dim=1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
class FastRCNNPredictor(nn.Module):
"""
Standard classification + bounding box regression layers
for Fast R-CNN.
Args:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
def __init__(self, in_channels, num_classes):
super().__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
def forward(self, x):
if x.dim() == 4:
assert list(x.shape[2:]) == [1, 1]
x = x.flatten(start_dim=1)
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
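# A common fine-tuning sketch (illustrative, not part of the original module):
# replace the box predictor of a pre-trained model with a fresh FastRCNNPredictor
# sized for a custom number of classes. The attribute paths follow the
# FasterRCNN/RoIHeads layout defined above; `num_classes=2` is an assumed value.
#
#     >>> model = fasterrcnn_resnet50_fpn(pretrained=True)
#     >>> in_features = model.roi_heads.box_predictor.cls_score.in_features
#     >>> model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes=2)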
model_urls = {
"fasterrcnn_resnet50_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth",
"fasterrcnn_mobilenet_v3_large_320_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth",
"fasterrcnn_mobilenet_v3_large_fpn_coco": "https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth",
}
def fasterrcnn_resnet50_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.
Reference: `"Faster R-CNN: Towards Real-Time Object Detection with
Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be in ``0-1`` range. Different images can have different sizes.
    The behavior of the model changes depending on whether it is in training or evaluation mode.
    During training, the model expects both the input tensors and a targets argument (a list of
    dictionaries), containing:
- boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the class label for each ground-truth box
The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
losses for both the RPN and the R-CNN.
During inference, the model requires only the input tensors, and returns the post-processed
predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
follows, where ``N`` is the number of detections:
- boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
- labels (``Int64Tensor[N]``): the predicted labels for each detection
- scores (``Tensor[N]``): the scores of each detection
For more details on the output, you may refer to :ref:`instance_seg_output`.
Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.
Example::
>>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
>>> # For training
>>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
>>> boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]
>>> labels = torch.randint(1, 91, (4, 11))
>>> images = list(image for image in images)
>>> targets = []
>>> for i in range(len(images)):
>>> d = {}
>>> d['boxes'] = boxes[i]
>>> d['labels'] = labels[i]
>>> targets.append(d)
>>> output = model(images, targets)
>>> # For inference
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
>>>
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
# no need to download the backbone if pretrained is set
pretrained_backbone = False
backbone = resnet50(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
model = FasterRCNN(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["fasterrcnn_resnet50_fpn_coco"], progress=progress)
model.load_state_dict(state_dict)
overwrite_eps(model, 0.0)
return model
def _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=False,
progress=True,
num_classes=91,
pretrained_backbone=True,
trainable_backbone_layers=None,
**kwargs,
):
is_trained = pretrained or pretrained_backbone
trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 6, 3)
norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d
if pretrained:
pretrained_backbone = False
backbone = mobilenet_v3_large(pretrained=pretrained_backbone, progress=progress, norm_layer=norm_layer)
backbone = _mobilenet_extractor(backbone, True, trainable_backbone_layers)
anchor_sizes = (
(
32,
64,
128,
256,
512,
),
) * 3
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
model = FasterRCNN(
backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), **kwargs
)
if pretrained:
if model_urls.get(weights_name, None) is None:
raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
def fasterrcnn_mobilenet_v3_large_320_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
    Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
        trainable_backbone_layers (int): number of trainable (not frozen) backbone layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
weights_name = "fasterrcnn_mobilenet_v3_large_320_fpn_coco"
defaults = {
"min_size": 320,
"max_size": 640,
"rpn_pre_nms_top_n_test": 150,
"rpn_post_nms_top_n_test": 150,
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
def fasterrcnn_mobilenet_v3_large_fpn(
pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs
):
"""
Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True)
>>> model.eval()
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
        trainable_backbone_layers (int): number of trainable (not frozen) backbone layers starting from final block.
Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. If ``None`` is
passed (the default) this value is set to 3.
"""
weights_name = "fasterrcnn_mobilenet_v3_large_fpn_coco"
defaults = {
"rpn_score_thresh": 0.05,
}
kwargs = {**defaults, **kwargs}
return _fasterrcnn_mobilenet_v3_large_fpn(
weights_name,
pretrained=pretrained,
progress=progress,
num_classes=num_classes,
pretrained_backbone=pretrained_backbone,
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)
| 44.154426 | 139 | 0.66425 | [
"BSD-3-Clause"
] | Bethhhh/vision | torchvision/models/detection/faster_rcnn.py | 23,446 | Python |
# @Author: Pieter Blok
# @Date: 2021-03-25 15:33:17
# @Last Modified by: Pieter Blok
# @Last Modified time: 2021-03-25 15:36:30
from .uncertainty import * | 26.5 | 42 | 0.685535 | [
"Apache-2.0"
] | bpanahij/maskal | active_learning/heuristics/__init__.py | 159 | Python |
#!/usr/bin/env python
"""Tests for the export mechanisms of tulip.dumpsmach."""
from __future__ import print_function
import logging
import networkx as nx
from nose.tools import assert_raises
from tulip import spec, synth, dumpsmach
logging.getLogger('tulip').setLevel('ERROR')
logging.getLogger('astutils').setLevel('ERROR')
logging.getLogger('omega').setLevel('ERROR')
class basic_test(object):
def setUp(self):
self.triv = spec.GRSpec(env_vars="x", sys_vars="y",
env_init="x & y", env_prog="x",
sys_init="y", sys_prog="y && x")
self.triv_M = synth.synthesize(
self.triv, solver='omega')
self.dcounter = spec.GRSpec(
sys_vars={"y": (0, 5)},
env_init=['y = 0'],
sys_prog=["y=0", "y=5"])
self.dcounter_M = synth.synthesize(
self.dcounter, solver='omega')
self.enumf = spec.GRSpec(
sys_vars={'y': ['a', 'b']},
env_init=['y="a"'],
sys_safety=['y = "a" -> X(y = "b")',
'y = "b" -> X(y = "a")'])
self.enumf_M = synth.synthesize(
self.enumf, solver='omega')
def tearDown(self):
self.dcounter = None
self.dcounter_M = None
def test_python_case(self):
compile(dumpsmach.python_case(self.triv_M),
filename="<string>", mode="exec")
# print(dumpsmach.python_case(self.dcounter_M))
compile(dumpsmach.python_case(self.dcounter_M),
filename="<string>", mode="exec")
exec(compile(dumpsmach.python_case(self.enumf_M)
+'\nM = TulipStrategy(); M.move()',
filename="<string>", mode="exec"))
def test_nx():
g = nx.DiGraph()
g.inputs = {'a': '...', 'b': '...'}
g.outputs = {'c': '...', 'd': '...'}
start = 'Sinit'
g.add_edge(start, 0, a=0, b=0, c=0, d=0)
g.add_edge(0, 1, a=0, b=1, c=0, d=1)
g.add_edge(1, 2, a=1, b=0, c=1, d=1)
print(dumpsmach.python_case(g, classname='Machine', start='Sinit'))
exe_globals = dict()
exec(dumpsmach.python_case(g, classname='Machine', start='Sinit'), exe_globals)
m = exe_globals['Machine']() # previous line creates the class `Machine`
# Sinit -> 0
out = m.move(a=0, b=0)
assert out == dict(c=0, d=0)
# 0 -> 1
out = m.move(a=0, b=1)
assert out == dict(c=0, d=1)
# invalid input for index 2 in time sequence
with assert_raises(ValueError):
m.move(a=1, b=1)
# 1 -> 2
out = m.move(a=1, b=0)
assert out == dict(c=1, d=1)
# dead-end
with assert_raises(Exception):
m.move(a=1, b=0)
| 32.46988 | 83 | 0.547681 | [
"BSD-3-Clause"
] | arw12625/tulip-control | tests/dumpsmach_test.py | 2,695 | Python |
# coding=utf-8
import requests
import json
def robot(content, userid):
    """Send `content` to the Turing (tuling123) chatbot API and return the text reply."""
    api = r'http://openapi.tuling123.com/openapi/api/v2'
    # Request payload: the user's text plus the API key and a per-user id,
    # as required by the openapi/api/v2 endpoint.
    data = {
        "perception": {
            "inputText": {
                "text": content
            }
        },
        "userInfo": {
            "apiKey": "fece0dcdbe4845559492c26d5de40119",
            "userId": userid
        }
    }
    # POST the JSON payload and extract the first text result from the response.
    response = requests.post(api, data=json.dumps(data))
    robot_res = json.loads(response.content)
    return robot_res["results"][0]['values']['text']
| 26.590909 | 65 | 0.499145 | [
"MIT"
] | 91MrGeng/wechatrobot | tuling.py | 585 | Python |
import argparse
import os
import sys
import numpy as np
import soundfile
from mir_eval.util import midi_to_hz
from onsets_and_frames import *
def load_and_process_audio(flac_path, sequence_length, device):
random = np.random.RandomState(seed=42)
audio, sr = soundfile.read(flac_path, dtype='int16')
assert sr == SAMPLE_RATE
audio = torch.ShortTensor(audio)
if sequence_length is not None:
audio_length = len(audio)
step_begin = random.randint(audio_length - sequence_length) // HOP_LENGTH
n_steps = sequence_length // HOP_LENGTH
begin = step_begin * HOP_LENGTH
end = begin + sequence_length
audio = audio[begin:end].to(device)
else:
audio = audio.to(device)
audio = audio.float().div_(32768.0)
return audio
def transcribe(model, audio):
mel = melspectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]).transpose(-1, -2)
onset_pred, offset_pred, _, frame_pred, velocity_pred = model(mel)
predictions = {
'onset': onset_pred.reshape((onset_pred.shape[1], onset_pred.shape[2])),
'offset': offset_pred.reshape((offset_pred.shape[1], offset_pred.shape[2])),
'frame': frame_pred.reshape((frame_pred.shape[1], frame_pred.shape[2])),
'velocity': velocity_pred.reshape((velocity_pred.shape[1], velocity_pred.shape[2]))
}
return predictions
def transcribe_file(model_file, flac_paths, save_path, sequence_length,
onset_threshold, frame_threshold, device):
model = torch.load(model_file, map_location=device).eval()
summary(model)
for flac_path in flac_paths:
print(f'Processing {flac_path}...', file=sys.stderr)
audio = load_and_process_audio(flac_path, sequence_length, device)
predictions = transcribe(model, audio)
p_est, i_est, v_est = extract_notes(predictions['onset'], predictions['frame'], predictions['velocity'], onset_threshold, frame_threshold)
scaling = HOP_LENGTH / SAMPLE_RATE
i_est = (i_est * scaling).reshape(-1, 2)
p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
os.makedirs(save_path, exist_ok=True)
pred_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.png')
save_pianoroll(pred_path, predictions['onset'], predictions['frame'])
midi_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.mid')
save_midi(midi_path, p_est, i_est, v_est)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model_file', type=str)
parser.add_argument('flac_paths', type=str, nargs='+')
parser.add_argument('--save-path', type=str, default='.')
parser.add_argument('--sequence-length', default=None, type=int)
parser.add_argument('--onset-threshold', default=0.5, type=float)
parser.add_argument('--frame-threshold', default=0.5, type=float)
parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
transcribe_file(**vars(parser.parse_args()))
| 34.733333 | 146 | 0.680742 | [
"MIT"
] | RKelln/onsets-and-frames | transcribe.py | 3,126 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script. Based on Jeff Knupp's Demo + Cookiecutter"""
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
NAME = 'tiingo'
AUTHOR = "Cameron Yick"
EMAIL = '[email protected]'
URL = 'https://github.com/hydrosquall/tiingo-python'
DESCRIPTION = "REST Client for Tiingo Data Platform API"
LONG_DESCRIPTION = read('README.rst', 'HISTORY.rst')
requirements = [
'requests',
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
'vcrpy',
]
# Metadata about the module
# Load the package's __version__.py module as a dictionary.
# Via https://github.com/kennethreitz/setup.py/blob/master/setup.py
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(include=[NAME]),
include_package_data=True,
install_requires=requirements,
extras_require={'pandas': ['pandas>=0.18']},
license="MIT license",
zip_safe=False,
keywords=['tiingo', 'finance', 'stocks', 'rest'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: Office/Business :: Financial :: Investment',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| 27.4 | 67 | 0.654197 | [
"MIT"
] | GenusGeoff/tiingo-python | setup.py | 2,192 | Python |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.ccpm.ipynb (unless otherwise specified).
__all__ = ['CCPM']
# Cell
import torch
from torch import nn
from .layers.embedding import EmbeddingLayer
from .layers.common import KMaxPooling
from .bases.ctr import CTRModel
# Internal Cell
def get_activation(activation):
if isinstance(activation, str):
if activation.lower() == "relu":
return nn.ReLU()
elif activation.lower() == "sigmoid":
return nn.Sigmoid()
elif activation.lower() == "tanh":
return nn.Tanh()
else:
return getattr(nn, activation)()
else:
return activation
# Internal Cell
class CCPM_ConvLayer(nn.Module):
"""
Input X: tensor of shape (batch_size, 1, num_fields, embedding_dim)
"""
def __init__(self, num_fields, channels=[3], kernel_heights=[3], activation="Tanh"):
super(CCPM_ConvLayer, self).__init__()
if not isinstance(kernel_heights, list):
kernel_heights = [kernel_heights] * len(channels)
elif len(kernel_heights) != len(channels):
raise ValueError("channels={} and kernel_heights={} should have the same length."\
.format(channels, kernel_heights))
module_list = []
self.channels = [1] + channels
layers = len(kernel_heights)
for i in range(1, len(self.channels)):
in_channels = self.channels[i - 1]
out_channels = self.channels[i]
kernel_height = kernel_heights[i - 1]
module_list.append(nn.ZeroPad2d((0, 0, kernel_height - 1, kernel_height - 1)))
module_list.append(nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_height, 1)))
if i < layers:
k = max(3, int((1 - pow(float(i) / layers, layers - i)) * num_fields))
else:
k = 3
module_list.append(KMaxPooling(k, dim=2))
module_list.append(get_activation(activation))
self.conv_layer = nn.Sequential(*module_list)
def forward(self, X):
return self.conv_layer(X)
# Cell
class CCPM(CTRModel):
def __init__(self,
feature_map,
model_id="CCPM",
task="binary_classification",
learning_rate=1e-3,
embedding_initializer="torch.nn.init.normal_(std=1e-4)",
embedding_dim=10,
channels=[4, 4, 2],
kernel_heights=[6, 5, 3],
activation="Tanh",
**kwargs):
super(CCPM, self).__init__(feature_map,
model_id=model_id,
**kwargs)
self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
self.conv_layer = CCPM_ConvLayer(feature_map.num_fields,
channels=channels,
kernel_heights=kernel_heights,
activation=activation)
conv_out_dim = 3 * embedding_dim * channels[-1] # 3 is k-max-pooling size of the last layer
self.fc = nn.Linear(conv_out_dim, 1)
self.output_activation = self.get_final_activation(task)
self.init_weights(embedding_initializer=embedding_initializer)
def forward(self, inputs):
feature_emb = self.embedding_layer(inputs)
conv_in = torch.unsqueeze(feature_emb, 1) # shape (bs, 1, field, emb)
conv_out = self.conv_layer(conv_in)
flatten_out = torch.flatten(conv_out, start_dim=1)
y_pred = self.fc(flatten_out)
if self.output_activation is not None:
y_pred = self.output_activation(y_pred)
return y_pred | 40.382979 | 102 | 0.58667 | [
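# A minimal construction sketch (illustrative only; `feature_map` is assumed to
# be a dataset feature-map object exposing `num_fields` and whatever
# EmbeddingLayer expects, and `batch` an input batch in the format consumed by
# EmbeddingLayer):
#
#     >>> model = CCPM(feature_map, embedding_dim=10,
#     ...              channels=[4, 4, 2], kernel_heights=[6, 5, 3])
#     >>> y_pred = model(batch)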
"Apache-2.0"
] | RecoHut-Projects/recohut | _docs/py/models/ccpm.py | 3,796 | Python |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 28000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| 87.857923 | 710 | 0.803458 | [
"MIT"
] | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | log_mito/model_112.py | 16,078 | Python |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-pause-server"
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
class PauseServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(PauseServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
class PauseServer(extensions.V3APIExtensionBase):
"""Enable pause/unpause server actions."""
name = "PauseServer"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = PauseServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| 36.88172 | 79 | 0.680466 | [
"Apache-2.0"
] | PFZheng/nova | nova/api/openstack/compute/plugins/v3/pause_server.py | 3,430 | Python |
# coding=utf-8
import websocket
import datetime
import csv
import time
import logging
import redis
import json
import copy
import pytz
from hftcoin.mdagent.ccws.configs import REDIS_HOST
from hftcoin.mdagent.ccws.configs import TIMEZONE
from hftcoin.mdagent.ccws.configs import ExConfigs
from hftcoin.mdagent.ccws.configs import HOME_PATH
class Exchange(object):
ExchangeId = ''
WebSocketConnection = None
RedisConnection = None
def __init__(self):
self.Logger = logging.getLogger(self.ExchangeId)
[self.ExConfig, self._WebSocketAddress] = ExConfigs[self.ExchangeId]
self.Config = {}
def set_market(self, currency, mode):
self.Config = self.ExConfig[currency][mode]
self.Logger = logging.getLogger('%s.%s.%s' % (self.ExchangeId, currency, mode))
def run_websocketapp(self, **kwargs):
self.Logger.info('Begin Connection')
url = self._WebSocketAddress + kwargs.pop('url_append', '')
on_error = kwargs.pop('on_error', self.on_error)
on_close = kwargs.pop('on_close', self.on_close)
on_message = kwargs.pop('on_message', self.on_message)
self.WebSocketConnection = websocket.WebSocketApp(
url,
on_error=on_error,
on_close=on_close,
on_message=on_message,
**kwargs,
)
while True:
try:
self.WebSocketConnection.run_forever()
except Exception as e:
self.Logger.exception(e)
def on_message(self, _ws, msg):
ts = int(time.time()*1000)
rdk = self.Config['RedisCollectKey']
# self.Logger.debug(msg)
self.RedisConnection.lpush(rdk, json.dumps([ts, msg]))
def on_error(self, _ws, error):
self.Logger.exception(error)
def on_close(self, _ws):
self.Logger.info('Connection closed.')
def connect_redis(self):
try:
self.RedisConnection = redis.StrictRedis(host=REDIS_HOST)
self.RedisConnection.ping()
except Exception as e:
self.Logger.exception(e)
def write_data_csv(self):
self.connect_redis()
[fn, rdk] = [self.Config.get(item) for item in ['FileName', 'RedisOutputKey']]
error_count = 100
while True:
try:
if self.RedisConnection.llen(rdk) > 0:
data = json.loads(self.RedisConnection.rpop(rdk).decode('utf8'))
# data[1] is timestamp
dt = datetime.datetime.fromtimestamp(data[1] / 1000, TIMEZONE)
calendar_path = '%4d/%02d/%02d' % (dt.year, dt.month, dt.day)
with open('%s/%s/%s' % (HOME_PATH, calendar_path, fn), 'a+') as csvFile:
csvwriter = csv.writer(csvFile)
csvwriter.writerow(data)
else:
time.sleep(60)
except RuntimeWarning:
break
except Exception as e:
self.Logger.exception(e)
error_count -= 1
if error_count < 0:
break
def collect_data(self):
pass
def process_data(self):
self.connect_redis()
getattr(self, self.Config.get('DataHandler', object))()
def _check_price_eq(self, p1, p2):
        # compare within half a tick so floating-point rounding does not break equality checks
return abs(p1-p2) < self.Config['TickSize']/2
def _binary_search(self, find, list1, low, high):
while low <= high:
mid = int((low + high) / 2)
if self._check_price_eq(list1[mid][0], find):
return [mid, 'True']
elif list1[mid][0] > find:
high = mid - 1
else:
low = mid + 1
return [low, 'False']
def _update_order_book(self, bids, asks, side, price, remaining):
if side in ['bid', 'buy']:
book = bids
cut = int(99*(len(book)-1)/100)
else:
book = asks
cut = int((len(book)-1)/100)
if price < book[cut][0]:
res = self._binary_search(price, book, 0, cut-1)
else:
res = self._binary_search(price, book, cut, len(book)-1)
if res[1] == 'True':
if remaining < self.Config['AmountMin']:
del book[res[0]]
else:
book[res[0]][1] = remaining
else:
if remaining >= self.Config['AmountMin']:
book.insert(res[0], [price, remaining])
def check_data_validation(self, book):
length = int(len(book)/2)
for i in range(0, length - 2, 2):
if book[i] <= book[i + 2]:
return False
for i in range(length, 2 * length - 2, 2):
if book[i] >= book[i + 2]:
return False
for i in range(1, 2 * length, 2):
if book[i] < self.Config['AmountMin']:
return False
if book[0] > book[length]:
return False
return True
@staticmethod
def _cut_order_book(bids, asks, depth):
if len(bids) >= depth:
book = bids[-depth:]
book.reverse()
else:
book = copy.deepcopy(bids)
book.reverse()
book += [['None', 'None']] * (depth - len(bids))
if len(asks) >= depth:
book += asks[:depth]
else:
book += asks + [['None', 'None']] * (depth - len(asks))
book = [x[0:2] for x in book]
return sum(book, [])
@staticmethod
def fmt_date(ts):
return datetime.datetime.fromtimestamp(ts / 1000, TIMEZONE).strftime('%Y-%m-%d %H:%M:%S.%f %z')
@staticmethod
def date_from_str(ts):
return pytz.utc.localize(datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ'))
| 32.631285 | 103 | 0.546995 | [
"MIT"
] | applezjm/testsub | ccws/base.py | 5,841 | Python |
import json
import os
import re
import sys
import sysconfig
RX_VERSION = re.compile(r"\d\.\d")
INSIGHTS = {
"_gdbm": "_GDBM_VERSION",
"_tkinter": "TCL_VERSION TK_VERSION",
"_sqlite3": "sqlite_version version",
"_ssl": "OPENSSL_VERSION",
"dbm.gnu": "_GDBM_VERSION",
"ensurepip": "_PIP_VERSION",
"pyexpat": "version_info",
"readline": "_READLINE_LIBRARY_VERSION",
"tkinter": "TclVersion TkVersion",
"zlib": "ZLIB_VERSION ZLIB_RUNTIME_VERSION",
}
def get_version(text):
if text:
if isinstance(text, bytes):
text = text.decode("utf-8")
elif isinstance(text, tuple):
text = ".".join(str(x) for x in text)
else:
text = str(text)
if text and RX_VERSION.search(text):
return text.splitlines()[0]
def pymodule_version_info(key, value, pymodule):
version = get_version(value)
if version:
result = dict(version_field=key, version=version)
if hasattr(pymodule, "__file__"):
result["path"] = pymodule.__file__
return result
def pymodule_info(module_name, pymodule):
fields = INSIGHTS.get(module_name)
fields = fields.split() if fields else ["__version__", "version", "VERSION"]
for f in fields:
v = pymodule_version_info(f, getattr(pymodule, f, None), pymodule)
if v:
return v
if hasattr(pymodule, "__file__"):
return dict(path=pymodule.__file__)
if hasattr(pymodule, "__spec__"):
v = getattr(pymodule.__spec__, "origin")
if v == "built-in":
return dict(version=v)
return dict(note=str(dir(pymodule)))
def module_report(module_name):
try:
return pymodule_info(module_name, __import__(module_name))
except Exception as e:
note = str(e)
if "No module named" in note:
return dict(version="*absent*")
return dict(version="*absent*", note=note)
def get_srcdir():
srcdir = sysconfig.get_config_var("srcdir")
if not srcdir or len(srcdir) < 3:
srcdir = sysconfig.get_config_var("DESTSHARED") # edge case: py2 reports an odd '.' as srcdir
return srcdir
def get_simplified_dirs(path):
result = []
if path:
path = os.path.dirname(path)
result.append(path)
if path.startswith("/private"):
result.append(path[8:]) # whoever compiled didn't use realpath(tmp)
elif not path.startswith("/tmp"): # nosec, just simplifying paths
result.append(os.path.dirname(result[0]))
return result
def main(arg):
if arg == "sysconfig":
marker = "$^"
simplified_dirs = get_simplified_dirs(sysconfig.get_config_var("abs_builddir"))
if simplified_dirs:
print("# '%s' is original abs_builddir:" % marker)
print("%s: %s\n" % (marker, simplified_dirs[0]))
for k, v in sorted(sysconfig.get_config_vars().items()):
for sp in simplified_dirs:
v = str(v).replace(sp, marker)
print("%s: %s" % (k, v))
return
if arg and not arg.startswith("-"):
report = dict((k, module_report(k)) for k in arg.split(","))
report = dict(report=report, srcdir=get_srcdir(), prefix=sysconfig.get_config_var("prefix"))
print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == "__main__":
main(sys.argv[1] if len(sys.argv) > 1 else "")
| 27.536 | 102 | 0.613597 | [
"MIT"
] | codrsquad/portable-python | src/portable_python/external/_inspect.py | 3,442 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRrblup(RPackage):
"""Ridge Regression and Other Kernels for Genomic Selection.
Software for genomic prediction with the RR-BLUP mixed model (Endelman
2011, <doi:10.3835/plantgenome2011.08.0024>). One application is to
estimate marker effects by ridge regression; alternatively, BLUPs can be
calculated based on an additive relationship matrix or a Gaussian
kernel."""
cran = "rrBLUP"
version('4.6.1', sha256='e9230e74cc430a83ac5567071cb1c7f00b35c368f7d79bcc1cfde7225446c4db')
version('4.6', sha256='28b475a1466fcdc1780caace75cf34155338fda496cebd5799315598a4bc84af')
depends_on('[email protected]:', type=('build', 'run'))
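# Typical Spack commands for this recipe (standard workflow, not part of the package itself):
#   spack info r-rrblup
#   spack install [email protected]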
| 36.75 | 95 | 0.756236 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | FJ-NaokiMatsumura/spack | var/spack/repos/builtin/packages/r-rrblup/package.py | 882 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import selenium.common.exceptions as Exceptions
from selenium.webdriver.common import by
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
class BaseWebObject(unittest.TestCase):
"""Base class for all web objects."""
_spinner_locator = (by.By.CSS_SELECTOR, '.modal-body > .spinner')
def __init__(self, driver, conf):
self.driver = driver
self.conf = conf
self.explicit_wait = self.conf.selenium.explicit_wait
def _is_element_present(self, *locator):
try:
self._turn_off_implicit_wait()
self._get_element(*locator)
return True
except Exceptions.NoSuchElementException:
return False
finally:
self._turn_on_implicit_wait()
def _is_element_visible(self, *locator):
try:
return self._get_element(*locator).is_displayed()
except (Exceptions.NoSuchElementException,
Exceptions.ElementNotVisibleException):
return False
def _is_element_displayed(self, element):
try:
return element.is_displayed()
except Exception:
return False
def _is_text_visible(self, element, text, strict=True):
try:
if strict:
return element.text == text
else:
return text in element.text
except Exception:
return False
def _get_element(self, *locator):
return self.driver.find_element(*locator)
def _get_elements(self, *locator):
return self.driver.find_elements(*locator)
def _fill_field_element(self, data, field_element):
field_element.clear()
field_element.send_keys(data)
return field_element
def _select_dropdown(self, value, element):
select = Support.Select(element)
select.select_by_visible_text(value)
def _select_dropdown_by_value(self, value, element):
select = Support.Select(element)
select.select_by_value(value)
def _turn_off_implicit_wait(self):
self.driver.implicitly_wait(0)
def _turn_on_implicit_wait(self):
self.driver.implicitly_wait(self.conf.selenium.page_timeout)
def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"""Wait until the value returned by predicate is not False or
the timeout is elapsed.
'predicate' takes the driver as argument.
"""
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout, poll_frequency).until(
predicate)
def _wait_till_text_present_in_element(self, element, text, timeout=None):
"""Waiting for a text to appear in a certain element very often is
actually waiting for a _different_ element with a different text to
appear in place of an old element. So a way to avoid capturing stale
element reference should be provided for this use case.
Better to wrap getting entity status cell in a lambda
to avoid problems with cell being replaced with totally different
element by Javascript
"""
def predicate(_):
elt = element() if hasattr(element, '__call__') else element
return self._is_text_visible(elt, text)
self._wait_until(predicate, timeout)
def _wait_till_element_visible(self, element, timeout=None):
self._wait_until(lambda x: self._is_element_displayed(element),
timeout)
def _wait_till_element_disappears(self, element, timeout=None):
self._wait_until(lambda x: not self._is_element_displayed(element),
timeout)
def wait_till_element_disappears(self, element_getter):
try:
self._turn_off_implicit_wait()
self._wait_till_element_disappears(element_getter())
except Exceptions.NoSuchElementException:
# NOTE(mpavlase): This is valid state. When request completes
# even before Selenium get a chance to get the spinner element,
# it will raise the NoSuchElementException exception.
pass
finally:
self._turn_on_implicit_wait()
def wait_till_spinner_disappears(self):
getter = lambda: self.driver.find_element(*self._spinner_locator)
self.wait_till_element_disappears(getter)
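# Minimal usage sketch (illustrative only; LoginPage, its locator and its
# behaviour are hypothetical and not part of this module):
#
#   class LoginPage(BaseWebObject):
#       _submit_locator = (by.By.CSS_SELECTOR, 'button[type="submit"]')
#
#       def submit(self):
#           self._get_element(*self._submit_locator).click()
#           self.wait_till_spinner_disappears()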
| 37.192593 | 78 | 0.671779 | [
"Apache-2.0"
] | JerryDog/horizon-f-road | openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | 5,021 | Python |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import deepspeed
# import mpi4py
# import pandas
import torch
import transformers
import wandb
#%env WANDB_PROJECT=wine_gpt2_Trainer_42
MODEL_NAME = "gpt2-medium"
# wandb.login(anonymous='never', key="<WANDB_API_KEY>")
wandb.init()
# wandb.watch(log='all')
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")
# Tokenizers
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
tokenizer.add_special_tokens(
{"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
tokenizer.add_tokens(
[
"[prompt]",
"[response]",
"[category_1]",
"[category_2]",
"[origin]",
"[description]",
"<|endoftext|>",
]
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __len__(self):
return len(self.encodings["input_ids"])
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = item["input_ids"]
return item
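# Note: for causal language modeling the labels are simply a copy of input_ids;
# the transformers GPT-2 LM head is expected to shift them internally when
# computing the loss, so no manual shifting is done here.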
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
wines_raw_test = file.read().splitlines()
print("Loaded dataset")
# wines_raw_train, wines_raw_test = train_test_split(wines_raw,test_size=0.2)
# wine_encodings_train = tokenizer(wines_raw_train, max_length=200, truncation=True, padding=True)
wine_encodings_test = tokenizer(
wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
# wine_dataset_train = wineDataset(wine_encodings_train)
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
# train_loader = torch.utils.data.DataLoader(wine_dataset_train)
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# model.to('cuda')
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
output_dir="data/modeling/trainer_42/",
overwrite_output_dir=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_steps=100,
save_total_limit=2,
fp16=True,
# deepspeed='data/ds_config.json'
)
trainer = transformers.Trainer(
model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
| 26.433962 | 98 | 0.732691 | [
"Apache-2.0"
] | cipher982/Wine-o-matic | extra_code/transformers-gpt2-finetune.py | 2,802 | Python |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available """
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available')) | 35.210526 | 78 | 0.654709 | [
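# Usage sketch (standard Django management command invocation; the chained
# migrate call is just an example):
#   python manage.py wait_for_db
#   python manage.py wait_for_db && python manage.py migrate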
"MIT"
] | nagarjunand/receipe-app-api | app/core/management/commands/wait_for_db.py | 669 | Python |
from .base import * # noqa pylint: disable=wildcard-import, unused-wildcard-import
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
SECRET_KEY = env("DJANGO_SECRET_KEY")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["coronacircles.de"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
# 'default': {
# 'BACKEND': 'django_redis.cache.RedisCache',
# 'LOCATION': env('REDIS_URL'),
# 'OPTIONS': {
# 'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# 'IGNORE_EXCEPTIONS': True,
# }
# }
# }
# SECURITY
# ------------------------------------------------------------------------------
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
# set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = env("DJANGO_SECURE_HSTS_SECONDS", default="60")
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = "DENY"
# STORAGES
# ------------------------------------------------------------------------------
# INSTALLED_APPS += ["storages"] # noqa F405
# AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# AWS_AUTO_CREATE_BUCKET = False
# AWS_QUERYSTRING_AUTH = False
# _AWS_EXPIRY = 60 * 60 * 24 * 7
# AWS_S3_OBJECT_PARAMETERS = {
# "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
# }
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="CoronaCircles <[email protected]>",
)
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[Coronacircles]")
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default="localhost")
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default="")
EMAIL_PORT = env("DJANGO_EMAIL_PORT", default="465")
EMAIL_USE_SSL = env.bool("DJANGO_EMAIL_USE_SSL", default=False)
EMAIL_USE_TLS = env.bool("DJANGO_EMAIL_USE_TLS", default=False)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = env("DJANGO_ADMIN_URL") # no admin in use here
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] "
"pathname=%(pathname)s lineno=%(lineno)s "
"funcname=%(funcName)s %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["console", "mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
} | 37.496732 | 87 | 0.569984 | [
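# Environment sketch: a minimal set of variables this settings module reads
# (all values below are placeholders, not real configuration):
#   DJANGO_SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:password@db:5432/coronacircles
#   DJANGO_ALLOWED_HOSTS=coronacircles.de
#   DJANGO_EMAIL_HOST=smtp.example.com
#   DJANGO_EMAIL_HOST_USER=mailer
#   DJANGO_EMAIL_HOST_PASSWORD=change-me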
"MIT"
] | CoronaCircle/coronacircles | settings/production.py | 5,737 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import io
import logging
import os
import subprocess
import sys
import unittest
from importlib import util
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, find_packages, setup
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
spec = util.spec_from_file_location("airflow.version", os.path.join('airflow', 'version.py')) # noqa
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
version = mod.version # type: ignore
PY3 = sys.version_info[0] == 3
PY38 = PY3 and sys.version_info[1] >= 8
my_dir = dirname(__file__)
try:
with io.open(os.path.join(my_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def airflow_test_suite():
"""Test suite for Airflow tests"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
"""
Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
"""
description = "Tidy up the project root"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run command to remove temporary files and directories."""
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
"""
description = "Compile and build the frontend assets"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run a command to compile and build assets."""
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
"""
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
"""
description = "List available extras"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""List extras."""
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
"""
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version_, sha=sha)
else:
return 'no_git_version'
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
"""
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
"""
text = "{}".format(git_version(version))
with open(filename, 'w') as file:
file.write(text)
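# Shape of the strings produced by git_version() above (the sha and version
# shown here are placeholders):
#   dirty working tree          -> ".dev0+0123abcd....dirty"
#   clean checkout              -> ".release:1.10.12+0123abcd..."
#   no .git dir / no gitpython  -> ""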
# 'Start dependencies group' and 'End dependencies group' are markers for ./scripts/ci/check_order_setup.py
# If you change this mark you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
amazon = [
'boto3>=1.12.0,<2.0.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
'azure-storage-blob<12.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'tornado>=4.2.0, <6.0', # Dep of flower. Pin to a version that works on Py3.5.2
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = [
'cloudpickle>=1.4.1, <1.5.0',
'distributed>=2.11.1, <2.20'
]
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1"
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib==1.1.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=0.5.0, <0.8', # TODO: we should migrate to 1.0 likely and add <2.0.0 then
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=1.2.1,<2.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-pubsub>=1.0.0,<2.0.0',
'google-cloud-redis>=0.3.0,<2.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
grpc = [
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac~=0.10',
]
hdfs = [
'snakebite-py3',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
'kubernetes>=3.0.0',
]
kylin = [
'kylinpy>=2.6'
]
ldap = [
'ldap3>=2.5.1',
]
mongo = [
'dnspython>=1.13.0,<2.0.0',
'pymongo>=3.6.0',
]
mssql = [
'pymssql~=2.1.1',
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.18',
'mysqlclient>=1.3.6,<1.4',
]
odbc = [
'pyodbc',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pypd>=1.1.0',
]
papermill = [
'papermill[all]>=1.2.1',
'nteract-scrapbook[all]>=0.3.1',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
'pinotdb==0.1.1',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = [
'presto-python-client>=0.7.0,<0.8'
]
qds = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp',
]
redis = [
'redis~=3.2',
]
salesforce = [
'simple-salesforce>=1.0.0',
]
samba = [
'pysmbclient>=0.1.3',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0,<7',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
'snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.1.4,<0.2',
]
statsd = [
'statsd>=3.3.0, <4.0',
]
tableau = [
'tableauserverclient~=0.12',
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm~=0.4',
]
yandexcloud = [
'yandexcloud>=0.22.0',
]
zendesk = [
'zdesk',
]
# End dependencies group
all_dbs = (cassandra + cloudant + druid + exasol + hdfs + hive + mongo + mssql + mysql +
pinot + postgres + presto + vertica)
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel = [
'beautifulsoup4~=4.7.1',
'blinker',
'bowler',
'click==6.7',
'contextdecorator;python_version<"3.4"',
'coverage',
'docutils',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
'github3.py',
'gitpython',
'ipdb',
'jira',
'mongomock',
'moto>=1.3.14,<2.0.0',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pylint==2.5.3',
'pysftp',
'pytest',
'pytest-cov',
'pytest-instafail',
'pytest-rerunfailures',
'pytest-timeouts',
'pytest-xdist',
'pywinrm',
'qds-sdk>=1.9.6',
'requests_mock',
'setuptools',
'wheel',
'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
if PY3:
devel += ['mypy==0.770']
else:
devel += ['unittest2']
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
PROVIDERS_REQUIREMENTS: Dict[str, Iterable[str]] = {
"amazon": amazon,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"celery": celery,
"cloudant": cloudant,
"cncf.kubernetes": kubernetes,
"databricks": databricks,
"datadog": datadog,
"dingding": [],
"discord": [],
"docker": docker,
"elasticsearch": [],
"exasol": exasol,
"facebook": facebook,
"ftp": [],
"google": google,
"grpc": grpc,
"hashicorp": hashicorp,
"http": [],
"imap": [],
"jdbc": jdbc,
"jenkins": jenkins,
"jira": jira,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
"mongo": mongo,
"mysql": mysql,
"odbc": odbc,
"openfaas": [],
"opsgenie": [],
"oracle": oracle,
"pagerduty": pagerduty,
"papermill": papermill,
"plexus": plexus,
"postgres": postgres,
"presto": presto,
"qubole": qds,
"redis": redis,
"salesforce": salesforce,
"samba": samba,
"segment": segment,
"sftp": ssh,
"singularity": singularity,
"slack": slack,
"snowflake": snowflake,
"sqlite": [],
"ssh": ssh,
"vertica": vertica,
"yandex": yandexcloud,
"zendesk": zendesk,
}
EXTRAS_REQUIREMENTS: Dict[str, Iterable[str]] = {
'all_dbs': all_dbs,
'amazon': amazon,
'apache.atlas': atlas,
'apache.beam': apache_beam,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.pinot": pinot,
"apache.webhdfs": webhdfs,
'async': async_packages,
'atlas': atlas, # TODO: remove this in Airflow 2.1
'aws': amazon, # TODO: remove this in Airflow 2.1
'azure': azure, # TODO: remove this in Airflow 2.1
'cassandra': cassandra, # TODO: remove this in Airflow 2.1
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'cncf.kubernetes': kubernetes,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid, # TODO: remove this in Airflow 2.1
'elasticsearch': elasticsearch,
'exasol': exasol,
'facebook': facebook,
'gcp': google, # TODO: remove this in Airflow 2.1
'gcp_api': google, # TODO: remove this in Airflow 2.1
'github_enterprise': flask_oauth,
'google': google,
'google_auth': flask_oauth,
'grpc': grpc,
'hashicorp': hashicorp,
'hdfs': hdfs, # TODO: remove this in Airflow 2.1
'hive': hive, # TODO: remove this in Airflow 2.1
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes, # TODO: remove this in Airflow 2.1
'ldap': ldap,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
'mongo': mongo,
'mssql': mssql, # TODO: remove this in Airflow 2.1
'mysql': mysql,
'odbc': odbc,
'oracle': oracle,
'pagerduty': pagerduty,
'papermill': papermill,
'password': password,
'pinot': pinot, # TODO: remove this in Airflow 2.1
'plexus': plexus,
'postgres': postgres,
'presto': presto,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
'salesforce': salesforce,
'samba': samba,
'segment': segment,
'sendgrid': sendgrid,
'sentry': sentry,
'singularity': singularity,
'slack': slack,
'snowflake': snowflake,
'spark': spark,
'ssh': ssh,
'statsd': statsd,
'tableau': tableau,
'vertica': vertica,
'virtualenv': virtualenv,
'webhdfs': webhdfs, # TODO: remove this in Airflow 2.1
'winrm': winrm, # TODO: remove this in Airflow 2.1
'yandexcloud': yandexcloud,
}
# Make devel_all contain all providers + extras + unique
devel_all = list(set(devel +
[req for req_list in EXTRAS_REQUIREMENTS.values() for req in req_list] +
[req for req_list in PROVIDERS_REQUIREMENTS.values() for req in req_list]))
PACKAGES_EXCLUDED_FOR_ALL = [
]
if PY3:
PACKAGES_EXCLUDED_FOR_ALL.extend([
'snakebite',
])
if PY38:
PACKAGES_EXCLUDED_FOR_ALL.extend([
'pymssql',
])
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite.
PACKAGES_EXCLUDED_FOR_CI = [
'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]):
"""
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
"""
return any([package.startswith(excluded_package) for excluded_package in exclusion_list])
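# Behaviour sketch (prefix matching, illustrative package names):
#   is_package_excluded('apache-beam[gcp]', ['apache-beam'])  -> True
#   is_package_excluded('pymssql~=2.1.1', ['pymssql'])        -> True
#   is_package_excluded('pandas-gbq', ['pandas'])             -> True (any shared prefix matches)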
devel_all = [package for package in devel_all if not is_package_excluded(
package=package,
exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = [package for package in devel_all if not is_package_excluded(
package=package,
exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL)
]
EXTRAS_REQUIREMENTS.update(
{
'all': devel_all,
'devel_ci': devel_ci,
}
)
#####################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
#####################################################################################################
INSTALL_REQUIREMENTS = [
'alembic>=1.2, <2.0',
'argcomplete~=1.10',
'attrs~=19.3',
'cached_property~=1.5',
'cattrs~=1.0',
'colorlog==4.0.2',
'connexion[swagger-ui,flask]>=2.6.0,<3',
'croniter>=0.3.17, <0.4',
'cryptography>=0.9.3',
'dill>=0.2.2, <0.4',
'flask>=1.1.0, <2.0',
'flask-appbuilder>2.3.4,~=3.0',
'flask-caching>=1.3.3, <2.0.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs>=1.0.0, <2.0.0',
'graphviz>=0.12',
'gunicorn>=19.5.0, <20.0',
'iso8601>=0.1.12',
'jinja2>=2.10.1, <2.12.0',
'json-merge-patch==0.2',
'jsonschema~=3.0',
'lazy_object_proxy~=1.3',
'lockfile>=0.12.2',
'markdown>=2.5.2, <3.0',
'markupsafe>=1.1.1, <2.0',
'marshmallow-oneofschema>=2.0.1',
'pandas>=0.17.1, <2.0',
'pendulum~=2.0',
'pep562~=1.0;python_version<"3.7"',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1',
'python-dateutil>=2.3, <3',
'python-nvd3~=0.15.0',
'python-slugify>=3.0.0,<5.0',
'requests>=2.20.0, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy~=1.3',
'sqlalchemy_jsonfield~=0.9',
'tabulate>=0.7.5, <0.9',
'tenacity>=4.12.0, <5.2',
'termcolor>=1.1.0',
'thrift>=0.9.2',
'typing;python_version<"3.6"',
'typing-extensions>=3.7.4;python_version<"3.8"',
'tzlocal>=1.4,<2.0.0',
'unicodecsv>=0.14.1',
'werkzeug<1.0.0',
]
def do_setup():
"""Perform the Airflow package setup."""
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version,
packages=find_packages(include=['airflow', 'airflow.*']),
package_data={
'airflow': ['py.typed'],
'': ['airflow/alembic.ini', "airflow/git_version", "*.ipynb",
"airflow/providers/cncf/kubernetes/example_dags/*.yaml"],
'airflow.api_connexion.openapi': ['*.yaml'],
'airflow.serialization': ["*.json"],
},
include_package_data=True,
zip_safe=False,
entry_points={
"console_scripts": [
"airflow = airflow.__main__:main",
],
},
install_requires=INSTALL_REQUIREMENTS,
setup_requires=[
'bowler',
'docutils',
'gitpython',
'setuptools',
'wheel',
],
extras_require=EXTRAS_REQUIREMENTS,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/airflow/' + version),
cmdclass={
'extra_clean': CleanCommand,
'compile_assets': CompileAssets,
'list_extras': ListExtras,
},
test_suite='setup.airflow_test_suite',
python_requires='~=3.6',
)
if __name__ == "__main__":
do_setup()
| 27.471023 | 108 | 0.586786 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 312day/airflow | setup.py | 22,279 | Python |
#!/usr/bin/env python
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Watch files for changes and rebuild.
pw watch runs Ninja in a build directory when source files change. It works with
any Ninja project (GN or CMake).
Usage examples:
# Find a build directory and build the default target
pw watch
# Find a build directory and build the stm32f429i target
pw watch python.lint stm32f429i
# Build pw_run_tests.modules in the out/cmake directory
pw watch -C out/cmake pw_run_tests.modules
# Build the default target in out/ and pw_apps in out/cmake
pw watch -C out -C out/cmake pw_apps
# Find a directory and build python.tests, and build pw_apps in out/cmake
pw watch python.tests -C out/cmake pw_apps
"""
import argparse
from dataclasses import dataclass
import logging
import os
from pathlib import Path
import shlex
import subprocess
import sys
import threading
from typing import (Iterable, List, NamedTuple, NoReturn, Optional, Sequence,
Tuple)
from watchdog.events import FileSystemEventHandler # type: ignore[import]
from watchdog.observers import Observer # type: ignore[import]
import pw_cli.branding
import pw_cli.color
import pw_cli.env
import pw_cli.plugins
from pw_watch.debounce import DebouncedFunction, Debouncer
_COLOR = pw_cli.color.colors()
_LOG = logging.getLogger(__name__)
_ERRNO_INOTIFY_LIMIT_REACHED = 28
# Suppress events under 'fsevents', generated by watchdog on every file
# event on MacOS.
# TODO(b/182281481): Fix file ignoring, rather than just suppressing logs
_FSEVENTS_LOG = logging.getLogger('fsevents')
_FSEVENTS_LOG.setLevel(logging.WARNING)
_PASS_MESSAGE = """
██████╗ █████╗ ███████╗███████╗██╗
██╔══██╗██╔══██╗██╔════╝██╔════╝██║
██████╔╝███████║███████╗███████╗██║
██╔═══╝ ██╔══██║╚════██║╚════██║╚═╝
██║ ██║ ██║███████║███████║██╗
╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝╚═╝
"""
# Pick a visually-distinct font from "PASS" to ensure that readers can't
# possibly mistake the difference between the two states.
_FAIL_MESSAGE = """
▄██████▒░▄▄▄ ██▓ ░██▓
▓█▓ ░▒████▄ ▓██▒ ░▓██▒
▒████▒ ░▒█▀ ▀█▄ ▒██▒ ▒██░
░▓█▒ ░░██▄▄▄▄██ ░██░ ▒██░
░▒█░ ▓█ ▓██▒░██░░ ████████▒
▒█░ ▒▒ ▓▒█░░▓ ░ ▒░▓ ░
░▒ ▒ ▒▒ ░ ▒ ░░ ░ ▒ ░
░ ░ ░ ▒ ▒ ░ ░ ░
░ ░ ░ ░ ░
"""
# TODO(keir): Figure out a better strategy for exiting. The problem with the
# watcher is that doing a "clean exit" is slow. However, by directly exiting,
# we remove the possibility of the wrapper script doing anything on exit.
def _die(*args) -> NoReturn:
_LOG.critical(*args)
sys.exit(1)
class WatchCharset(NamedTuple):
slug_ok: str
slug_fail: str
_ASCII_CHARSET = WatchCharset(_COLOR.green('OK '), _COLOR.red('FAIL'))
_EMOJI_CHARSET = WatchCharset('✔️ ', '💥')
@dataclass(frozen=True)
class BuildCommand:
build_dir: Path
targets: Tuple[str, ...] = ()
def args(self) -> Tuple[str, ...]:
return (str(self.build_dir), *self.targets)
def __str__(self) -> str:
return ' '.join(shlex.quote(arg) for arg in self.args())
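# Behaviour sketch (illustrative, not part of the original module):
#   cmd = BuildCommand(Path('out'), ('host', 'docs'))
#   cmd.args()  -> ('out', 'host', 'docs')
#   str(cmd)    -> 'out host docs'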
def git_ignored(file: Path) -> bool:
"""Returns true if this file is in a Git repo and ignored by that repo.
Returns true for ignored files that were manually added to a repo.
"""
file = file.resolve()
directory = file.parent
# Run the Git command from file's parent so that the correct repo is used.
while True:
try:
returncode = subprocess.run(
['git', 'check-ignore', '--quiet', '--no-index', file],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
cwd=directory).returncode
return returncode in (0, 128)
except FileNotFoundError:
# If the directory no longer exists, try parent directories until
# an existing directory is found or all directories have been
# checked. This approach makes it possible to check if a deleted
# path is ignored in the repo it was originally created in.
if directory == directory.parent:
return False
directory = directory.parent
class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
"""Process filesystem events and launch builds if necessary."""
def __init__(
self,
patterns: Sequence[str] = (),
ignore_patterns: Sequence[str] = (),
build_commands: Sequence[BuildCommand] = (),
charset: WatchCharset = _ASCII_CHARSET,
restart: bool = True,
):
super().__init__()
self.patterns = patterns
self.ignore_patterns = ignore_patterns
self.build_commands = build_commands
self.charset: WatchCharset = charset
self.restart_on_changes = restart
self._current_build: subprocess.Popen
self.debouncer = Debouncer(self)
# Track state of a build. These need to be members instead of locals
# due to the split between dispatch(), run(), and on_complete().
self.matching_path: Optional[Path] = None
self.builds_succeeded: List[bool] = []
self.wait_for_keypress_thread = threading.Thread(
None, self._wait_for_enter)
self.wait_for_keypress_thread.start()
def _wait_for_enter(self) -> NoReturn:
try:
while True:
_ = input()
self._current_build.kill()
self.debouncer.press('Manual build requested...')
# Ctrl-C on Unix generates KeyboardInterrupt
# Ctrl-Z on Windows generates EOFError
except (KeyboardInterrupt, EOFError):
_exit_due_to_interrupt()
def _path_matches(self, path: Path) -> bool:
"""Returns true if path matches according to the watcher patterns"""
return (not any(path.match(x) for x in self.ignore_patterns)
and any(path.match(x) for x in self.patterns))
def dispatch(self, event) -> None:
# There isn't any point in triggering builds on new directory creation.
# It's the creation or modification of files that indicate something
# meaningful enough changed for a build.
if event.is_directory:
return
# Collect paths of interest from the event.
paths: List[str] = []
if hasattr(event, 'dest_path'):
paths.append(os.fsdecode(event.dest_path))
if event.src_path:
paths.append(os.fsdecode(event.src_path))
for raw_path in paths:
_LOG.debug('File event: %s', raw_path)
# Check whether Git cares about any of these paths.
for path in (Path(p).resolve() for p in paths):
if not git_ignored(path) and self._path_matches(path):
self._handle_matched_event(path)
return
def _handle_matched_event(self, matching_path: Path) -> None:
if self.matching_path is None:
self.matching_path = matching_path
self.debouncer.press(
f'File change detected: {os.path.relpath(matching_path)}')
# Implementation of DebouncedFunction.run()
#
# Note: This will run on the timer thread created by the Debouncer, rather
# than on the main thread that's watching file events. This enables the
# watcher to continue receiving file change events during a build.
def run(self) -> None:
"""Run all the builds in serial and capture pass/fail for each."""
# Clear the screen and show a banner indicating the build is starting.
print('\033c', end='') # TODO(pwbug/38): Not Windows compatible.
print(pw_cli.branding.banner())
print(
_COLOR.green(
' Watching for changes. Ctrl-C to exit; enter to rebuild'))
print()
_LOG.info('Change detected: %s', self.matching_path)
self.builds_succeeded = []
num_builds = len(self.build_commands)
_LOG.info('Starting build with %d directories', num_builds)
env = os.environ.copy()
# Force colors in Pigweed subcommands run through the watcher.
env['PW_USE_COLOR'] = '1'
for i, cmd in enumerate(self.build_commands, 1):
_LOG.info('[%d/%d] Starting build: %s', i, num_builds, cmd)
# Run the build. Put a blank before/after for visual separation.
print()
self._current_build = subprocess.Popen(
['ninja', '-C', *cmd.args()], env=env)
returncode = self._current_build.wait()
print()
build_ok = (returncode == 0)
if build_ok:
level = logging.INFO
tag = '(OK)'
else:
level = logging.ERROR
tag = '(FAIL)'
_LOG.log(level, '[%d/%d] Finished build: %s %s', i, num_builds,
cmd, tag)
self.builds_succeeded.append(build_ok)
# Implementation of DebouncedFunction.cancel()
def cancel(self) -> bool:
if self.restart_on_changes:
self._current_build.kill()
return True
return False
# Implementation of DebouncedFunction.run()
def on_complete(self, cancelled: bool = False) -> None:
# First, use the standard logging facilities to report build status.
if cancelled:
_LOG.error('Finished; build was interrupted')
elif all(self.builds_succeeded):
_LOG.info('Finished; all successful')
else:
_LOG.info('Finished; some builds failed')
# Then, show a more distinct colored banner.
if not cancelled:
# Write out build summary table so you can tell which builds passed
# and which builds failed.
print()
print(' .------------------------------------')
print(' |')
for (succeeded, cmd) in zip(self.builds_succeeded,
self.build_commands):
slug = (self.charset.slug_ok
if succeeded else self.charset.slug_fail)
print(f' | {slug} {cmd}')
print(' |')
print(" '------------------------------------")
else:
# Build was interrupted.
print()
print(' .------------------------------------')
print(' |')
print(' | ', self.charset.slug_fail, '- interrupted')
print(' |')
print(" '------------------------------------")
# Show a large color banner so it is obvious what the overall result is.
if all(self.builds_succeeded) and not cancelled:
print(_COLOR.green(_PASS_MESSAGE))
else:
print(_COLOR.red(_FAIL_MESSAGE))
self.matching_path = None
# Implementation of DebouncedFunction.on_keyboard_interrupt()
def on_keyboard_interrupt(self) -> NoReturn:
_exit_due_to_interrupt()
_WATCH_PATTERN_DELIMITER = ','
_WATCH_PATTERNS = (
'*.bloaty',
'*.c',
'*.cc',
'*.css',
'*.cpp',
'*.cmake',
'CMakeLists.txt',
'*.gn',
'*.gni',
'*.go',
'*.h',
'*.hpp',
'*.ld',
'*.md',
'*.options',
'*.proto',
'*.py',
'*.rst',
)
def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
"""Sets up an argument parser for pw watch."""
parser.add_argument('--patterns',
help=(_WATCH_PATTERN_DELIMITER +
'-delimited list of globs to '
'watch to trigger recompile'),
default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
parser.add_argument('--ignore_patterns',
dest='ignore_patterns_string',
help=(_WATCH_PATTERN_DELIMITER +
'-delimited list of globs to '
'ignore events from'))
parser.add_argument('--exclude_list',
nargs='+',
type=Path,
help='directories to ignore during pw watch',
default=[])
parser.add_argument('--no-restart',
dest='restart',
action='store_false',
help='do not restart ongoing builds if files change')
parser.add_argument(
'default_build_targets',
nargs='*',
metavar='target',
default=[],
help=('Automatically locate a build directory and build these '
'targets. For example, `host docs` searches for a Ninja '
'build directory (starting with out/) and builds the '
'`host` and `docs` targets. To specify one or more '
              'directories, use the -C / --build_directory option.'))
parser.add_argument(
'-C',
'--build_directory',
dest='build_directories',
nargs='+',
action='append',
default=[],
metavar=('directory', 'target'),
help=('Specify a build directory and optionally targets to '
'build. `pw watch -C out tgt` is equivalent to `ninja '
'-C out tgt`'))
def _exit(code: int) -> NoReturn:
# Note: The "proper" way to exit is via observer.stop(), then
# running a join. However it's slower, so just exit immediately.
#
# Additionally, since there are several threads in the watcher, the usual
# sys.exit approach doesn't work. Instead, run the low level exit which
# kills all threads.
os._exit(code) # pylint: disable=protected-access
def _exit_due_to_interrupt() -> NoReturn:
# To keep the log lines aligned with each other in the presence of
# a '^C' from the keyboard interrupt, add a newline before the log.
print()
print()
_LOG.info('Got Ctrl-C; exiting...')
_exit(0)
def _exit_due_to_inotify_limit():
# Show information and suggested commands in OSError: inotify limit reached.
    _LOG.error('Inotify limit reached: on Linux, run the command below in '
               'your terminal to temporarily increase the inotify limit. \n')
print(
_COLOR.green(' sudo sysctl fs.inotify.max_user_watches='
'$NEW_LIMIT$\n'))
    print('  Replace $NEW_LIMIT$ with an integer, '
          'e.g., 1000 should be enough.')
_exit(0)
def _exit_due_to_pigweed_not_installed():
# Show information and suggested commands when pigweed environment variable
# not found.
_LOG.error('Environment variable $PW_ROOT not defined or is defined '
'outside the current directory.')
_LOG.error('Did you forget to activate the Pigweed environment? '
'Try source ./activate.sh')
_LOG.error('Did you forget to install the Pigweed environment? '
'Try source ./bootstrap.sh')
_exit(1)
# Go over each directory inside of the current directory.
# If it is not on the path of elements in directories_to_exclude, add
# (directory, True) to subdirectories_to_watch and later recursively call
# Observer() on them.
# Otherwise add (directory, False) to subdirectories_to_watch and later call
# Observer() with recursion=False.
def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
"""Determine which subdirectory to watch recursively"""
try:
to_watch = Path(to_watch)
except TypeError:
assert False, "Please watch one directory at a time."
# Reformat to_exclude.
directories_to_exclude: List[Path] = [
to_watch.joinpath(directory_to_exclude)
for directory_to_exclude in to_exclude
if to_watch.joinpath(directory_to_exclude).is_dir()
]
# Split the relative path of directories_to_exclude (compared to to_watch),
# and generate all parent paths needed to be watched without recursion.
exclude_dir_parents = {to_watch}
for directory_to_exclude in directories_to_exclude:
parts = list(
Path(directory_to_exclude).relative_to(to_watch).parts)[:-1]
dir_tmp = to_watch
for part in parts:
dir_tmp = Path(dir_tmp, part)
exclude_dir_parents.add(dir_tmp)
# Go over all layers of directory. Append those that are the parents of
# directories_to_exclude to the list with recursion==False, and others
# with recursion==True.
for directory in exclude_dir_parents:
dir_path = Path(directory)
yield dir_path, False
for item in Path(directory).iterdir():
if (item.is_dir() and item not in exclude_dir_parents
and item not in directories_to_exclude):
yield item, True
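# Yield sketch for a hypothetical layout where project/ contains src/, docs/
# and out/, called with to_exclude=[Path('out')]:
#   (project, False)      the root itself, watched non-recursively
#   (project/src, True)
#   (project/docs, True)
#   project/out is never yielded, so it is not watched at all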
def get_common_excludes() -> List[Path]:
"""Find commonly excluded directories, and return them as a [Path]"""
exclude_list: List[Path] = []
typical_ignored_directories: List[str] = [
'.environment', # Legacy bootstrap-created CIPD and Python venv.
'.presubmit', # Presubmit-created CIPD and Python venv.
'.git', # Pigweed's git repo.
'.mypy_cache', # Python static analyzer.
'.cargo', # Rust package manager.
'environment', # Bootstrap-created CIPD and Python venv.
'out', # Typical build directory.
]
# Preset exclude list for Pigweed's upstream directories.
pw_root_dir = Path(os.environ['PW_ROOT'])
exclude_list.extend(pw_root_dir / ignored_directory
for ignored_directory in typical_ignored_directories)
# Preset exclude for common downstream project structures.
#
# If watch is invoked outside of the Pigweed root, exclude common
# directories.
pw_project_root_dir = Path(os.environ['PW_PROJECT_ROOT'])
if pw_project_root_dir != pw_root_dir:
exclude_list.extend(
pw_project_root_dir / ignored_directory
for ignored_directory in typical_ignored_directories)
# Check for and warn about legacy directories.
legacy_directories = [
'.cipd', # Legacy CIPD location.
'.python3-venv', # Legacy Python venv location.
]
found_legacy = False
for legacy_directory in legacy_directories:
full_legacy_directory = pw_root_dir / legacy_directory
if full_legacy_directory.is_dir():
_LOG.warning('Legacy environment directory found: %s',
str(full_legacy_directory))
exclude_list.append(full_legacy_directory)
found_legacy = True
if found_legacy:
_LOG.warning('Found legacy environment directory(s); these '
'should be deleted')
return exclude_list
def _find_build_dir(default_build_dir: Path = Path('out')) -> Optional[Path]:
"""Searches for a build directory, returning the first it finds."""
# Give priority to out/, then something under out/.
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent
for path in Path.cwd().glob('**/build.ninja'):
return path.parent
return None
def watch(default_build_targets: List[str], build_directories: List[str],
patterns: str, ignore_patterns_string: str, exclude_list: List[Path],
restart: bool):
"""Watches files and runs Ninja commands when they change."""
_LOG.info('Starting Pigweed build watcher')
# Get pigweed directory information from environment variable PW_ROOT.
    if os.environ.get('PW_ROOT') is None:
_exit_due_to_pigweed_not_installed()
pw_root = Path(os.environ['PW_ROOT']).resolve()
if Path.cwd().resolve() not in [pw_root, *pw_root.parents]:
_exit_due_to_pigweed_not_installed()
# Preset exclude list for pigweed directory.
exclude_list += get_common_excludes()
build_commands = [
BuildCommand(Path(build_dir[0]), tuple(build_dir[1:]))
for build_dir in build_directories
]
# If no build directory was specified, search the tree for a build.ninja.
if default_build_targets or not build_directories:
build_dir = _find_build_dir()
# Make sure we found something; if not, bail.
if build_dir is None:
_die("No build dirs found. Did you forget to run 'gn gen out'?")
build_commands.append(
BuildCommand(build_dir, tuple(default_build_targets)))
# Verify that the build output directories exist.
for i, build_target in enumerate(build_commands, 1):
if not build_target.build_dir.is_dir():
_die("Build directory doesn't exist: %s", build_target)
else:
_LOG.info('Will build [%d/%d]: %s', i, len(build_commands),
build_target)
_LOG.debug('Patterns: %s', patterns)
# Try to make a short display path for the watched directory that has
# "$HOME" instead of the full home directory. This is nice for users
# who have deeply nested home directories.
path_to_log = str(Path().resolve()).replace(str(Path.home()), '$HOME')
# Ignore the user-specified patterns.
ignore_patterns = (ignore_patterns_string.split(_WATCH_PATTERN_DELIMITER)
if ignore_patterns_string else [])
env = pw_cli.env.pigweed_environment()
if env.PW_EMOJI:
charset = _EMOJI_CHARSET
else:
charset = _ASCII_CHARSET
event_handler = PigweedBuildWatcher(
patterns=patterns.split(_WATCH_PATTERN_DELIMITER),
ignore_patterns=ignore_patterns,
build_commands=build_commands,
charset=charset,
restart=restart,
)
try:
        # It can take a while to configure the filesystem watcher, so have the
# message reflect that with the "...". Run inside the try: to
# gracefully handle the user Ctrl-C'ing out during startup.
_LOG.info('Attaching filesystem watcher to %s/...', path_to_log)
# Observe changes for all files in the root directory. Whether the
# directory should be observed recursively or not is determined by the
# second element in subdirectories_to_watch.
observers = []
for path, rec in minimal_watch_directories(Path.cwd(), exclude_list):
observer = Observer()
observer.schedule(
event_handler,
str(path),
recursive=rec,
)
observer.start()
observers.append(observer)
event_handler.debouncer.press('Triggering initial build...')
for observer in observers:
while observer.is_alive():
observer.join(1)
# Ctrl-C on Unix generates KeyboardInterrupt
# Ctrl-Z on Windows generates EOFError
except (KeyboardInterrupt, EOFError):
_exit_due_to_interrupt()
except OSError as err:
if err.args[0] == _ERRNO_INOTIFY_LIMIT_REACHED:
_exit_due_to_inotify_limit()
else:
raise err
_LOG.critical('Should never get here')
observer.join()
def main() -> None:
"""Watch files for changes and rebuild."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
watch(**vars(parser.parse_args()))
if __name__ == '__main__':
main()
| 36.208459 | 80 | 0.617063 | [
"Apache-2.0"
] | isabella232/pigweed | pw_watch/py/pw_watch/watch.py | 24,653 | Python |
import os
import string
import textwrap
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# DMRIInstall
#
class DMRIInstall(ScriptedLoadableModule):
"""
"""
helpText = textwrap.dedent(
"""
The SlicerDMRI extension provides diffusion-related tools including:
<ul>
<li> Diffusion Tensor Estimation</li>
<li>Tractography Display</li>
<li>Tractography Seeding</li>
<li>Fiber Tract Measurement</li>
</ul>
<br>
<br>
For more information, please visit:
<br>
<br>
<a href="http://dmri.slicer.org">http://dmri.slicer.org</a>
<br>
<br>
Questions are welcome on the Slicer forum:
<br>
<br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
""")
errorText = textwrap.dedent(
"""
<h5 style="color:red">The SlicerDMRI extension is currently unavailable.</h5><br>
Please try a manual installation via the Extension Manager,
and contact the Slicer forum at:<br><br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
With the following information:<br>
Slicer version: {builddate}<br>
Slicer revision: {revision}<br>
Platform: {platform}
""").format(builddate=slicer.app.applicationVersion,
revision = slicer.app.repositoryRevision,
platform = slicer.app.platform)
def __init__(self, parent):
# Hide this module if SlicerDMRI is already installed
model = slicer.app.extensionsManagerModel()
if model.isExtensionInstalled("SlicerDMRI"):
parent.hidden = True
ScriptedLoadableModule.__init__(self, parent)
self.parent.categories = ["Diffusion"]
self.parent.title = "Install Slicer Diffusion Tools (SlicerDMRI)"
self.parent.dependencies = []
self.parent.contributors = ["Isaiah Norton (BWH), Lauren O'Donnell (BWH)"]
self.parent.helpText = DMRIInstall.helpText
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = textwrap.dedent(
"""
SlicerDMRI supported by NIH NCI ITCR U01CA199459 (Open Source Diffusion MRI
Technology For Brain Cancer Research), and made possible by NA-MIC, NAC,
BIRN, NCIGT, and the Slicer Community.
""")
class DMRIInstallWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.textBox = ctk.ctkFittedTextBrowser()
self.textBox.setOpenExternalLinks(True) # Open links in default browser
self.textBox.setHtml(DMRIInstall.helpText)
self.parent.layout().addWidget(self.textBox)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Install SlicerDMRI")
self.applyButton.toolTip = 'Installs the "SlicerDMRI" extension from the Diffusion category.'
self.applyButton.icon = qt.QIcon(":/Icons/ExtensionDefaultIcon.png")
self.applyButton.enabled = True
self.applyButton.connect('clicked()', self.onApply)
self.parent.layout().addWidget(self.applyButton)
self.parent.layout().addStretch(1)
def onError(self):
self.applyButton.enabled = False
self.textBox.setHtml(DMRIInstall.errorText)
return
def onApply(self):
emm = slicer.app.extensionsManagerModel()
if emm.isExtensionInstalled("SlicerDMRI"):
self.textBox.setHtml("<h4>SlicerDMRI is already installed.<h4>")
self.applyButton.enabled = False
return
md = emm.retrieveExtensionMetadataByName("SlicerDMRI")
if not md or 'extension_id' not in md:
return self.onError()
if emm.downloadAndInstallExtension(md['extension_id']):
slicer.app.confirmRestart("Restart to complete SlicerDMRI installation?")
else:
self.onError()
| 30.539063 | 97 | 0.712714 | [
"Apache-2.0"
] | forfullstack/slicersources-src | Modules/Scripted/DMRIInstall/DMRIInstall.py | 3,909 | Python |
from django.shortcuts import render,HttpResponse
from game.models import Contact
from django.contrib import messages
# Create your views here.
def index(request):
context={'variable':"This is sent."}
return render(request,'index.html',context)
def about(request):
return render(request,'about.html')
#return HttpResponse("This is about page.")
def products(request):
return render(request,'products.html')
#return HttpResponse("This is products page.")
def contact(request):
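    """Render the contact page; on POST, persist the submitted Contact and flash a success message."""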
if request.method=="POST":
firstname=request.POST.get('firstname')
lastname=request.POST.get('lastname')
phone=request.POST.get('phone')
subject=request.POST.get('subject')
        entry = Contact(firstname=firstname, lastname=lastname, phone=phone, subject=subject)
        entry.save()
messages.success(request, 'Your message has been successfully sent.')
return render(request,'contacts.html')
#return HttpResponse("This is contact page.") | 40.8 | 94 | 0.697059 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | whitedevil4888/riddhisiddhi | views.py | 1,020 | Python |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_config_settings
import account_move
import account_partial_reconcile
import account_tax
import res_company
| 24.666667 | 74 | 0.815315 | ["Apache-2.0"] | gtfarng/Odoo_migrade | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account_tax_cash_basis/models/__init__.py | 222 | Python |
# import libraries
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, lit, rand, when
import pandas as pd
from math import ceil
#################################################
# spark config
#################################################
mtaMaster = "spark://192.168.0.182:7077"
conf = SparkConf()
conf.setMaster(mtaMaster)
conf.set("spark.executor.memory", "24g")
conf.set("spark.driver.memory", "26g")
conf.set("spark.cores.max", 96)
conf.set("spark.driver.cores", 8)
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.kryoserializer.buffer", "256m")
conf.set("spark.kryoserializer.buffer.max", "256m")
conf.set("spark.default.parallelism", 24)
conf.set("spark.eventLog.enabled", "true")
conf.set("spark.eventLog.dir", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.history.fs.logDirectory", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.driver.maxResultSize", "4g")
conf.getAll()
#################################################
# create spark session
#################################################
spark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation').config(conf=conf).getOrCreate()
sc = spark.sparkContext
# check things are working
print(sc)
print(sc.defaultParallelism)
print("SPARK CONTEXT IS RUNNING")
#################################################
# define major topic codes
#################################################
# major topic codes for loop (NO 23 IN THE NYT CORPUS)
majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]
#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]
#################################################
# read result data from round 3
#################################################
df_results = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_classified.parquet").repartition(50)
# verdict to integer for the comparison with majortopic later
df_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))
#################################################
# create table to store sample and validation numbers
#################################################
columns = ["num_classified", "num_sample", "num_non_sample", "num_correct", "num_incorrect", "precision_in_sample", "num_added_to_training"]
df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)
df_numbers = df_numbers.fillna(0)
#################################################
# create table of samples from results
#################################################
# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:
z = 1.96
delta = 0.05
z_delta = z*z*0.5*0.5/(delta*delta)
print("z_delta :", z_delta)
for i in majortopic_codes:
df_classified = df_results.where(col('verdict') == i)
num_classified = df_classified.count()
df_numbers["num_classified"].loc[i] = num_classified
print("MTC:", i, "num_classified: ", num_classified)
if num_classified > 100:
sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))
print("sample_size: ", sample_size)
if sample_size < 100:
sample_size = 100
df_sample = df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')
df_sample_num = df_sample.count()
print("df_sample: ", df_sample_num)
# separate non-sample from sample elements
ids_drop = df_sample.select("doc_id")
df_non_sample = df_classified.join(ids_drop, "doc_id", "left_anti")
df_numbers["num_sample"].loc[i] = df_sample_num
df_numbers["num_non_sample"].loc[i] = df_non_sample.count()
else:
df_numbers["num_sample"].loc[i] = num_classified
df_sample = df_classified
df_non_sample = None
# create table of all samples and add new sample to it
if i == 1:
df_sample_all = df_sample
else:
df_sample_all = df_sample_all.union(df_sample)
#print("MTC:", i, "df_sample_all: ", df_sample_all.count())
# create table of all non-samples and add new non-sample to it
if i == 1:
df_non_sample_all = None
if df_non_sample != None and df_non_sample_all == None:
df_non_sample_all = df_non_sample
elif df_non_sample != None and df_non_sample_all != None:
df_non_sample_all = df_non_sample_all.union(df_non_sample)
#print("MTC:", i, "df_non_sample_all: ", df_non_sample_all.count())
print("MTC:", i)
#################################################
# check precision by majortopic codes
#################################################
# count correctly classified and precision for each majortopic code and write to table of numbers
df_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))
for i in majortopic_codes:
num_correct = df_correctly_classified.where(col('verdict') == i).count()
df_numbers["num_correct"].loc[i] = num_correct
df_numbers["precision_in_sample"].loc[i] = num_correct/df_numbers["num_sample"].loc[i]
# count incorrectly classified for debugging and checking
df_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))
for i in majortopic_codes:
num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()
df_numbers["num_incorrect"].loc[i] = num_incorrect
print(df_numbers)
#################################################
# create tables of elements based on precision
#################################################
# create tables for sorting elements based on precision results
# where precision is equal to or greater than 75%
# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major
# topic code, instead they will be added to the unclassified elements as in rounds 1&2
df_replace_all = None
# where precision is less than 75%
df_non_sample_replace = None
df_correct_replace = None
df_wrong_replace = None
for i in majortopic_codes:
print("create tables MTC:", i)
if df_numbers["precision_in_sample"].loc[i] >= 0.75:
# in this case add all elements from sample and non-sample to the training set with
# new major topic code i, EXCEPT for validated negatives, those are added to back into the
# test set
# first add wrong sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
if df_wrong_replace == None:
df_wrong_replace = df_lemma
else:
df_wrong_replace = df_wrong_replace.union(df_lemma)
# get doc_ids for these elements to remove them from the rest of the elements classified as
# belonging to major topic i
ids_drop = df_lemma.select("doc_id")
# get all elements classified as belonging to major topic code i
df_lemma = df_results.where(col('verdict') == i)
# remove wrongly classified from df_lemma
df_lemma = df_lemma.join(ids_drop, "doc_id", "left_anti")
# add df_lemma to df_replace_all
if df_replace_all == None:
df_replace_all = df_lemma
else:
df_replace_all = df_replace_all.union(df_lemma)
# write numbers to df_numbers
df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
#print("MTC:", i, "df_replace_all: ", df_replace_all.count())
else:
# in this case add only correct elements from sample to training set, the rest go back in
# the test set
# first add non-sample elements to their table, BUT we have to check whether non-sample elements
# exist
if df_non_sample_all != None:
df_lemma = df_non_sample_all.where(col('verdict') == i)
if df_non_sample_replace == None:
df_non_sample_replace = df_lemma
else:
df_non_sample_replace = df_non_sample_replace.union(df_lemma)
else:
df_non_sample_replace = None
#print("MTC:", i, "df_non_sample_replace: ", df_non_sample_replace.count())
# second add correct sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))
if df_correct_replace == None:
df_correct_replace = df_lemma
else:
df_correct_replace = df_correct_replace.union(df_lemma)
df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
#print("MTC:", i, "df_correct_replace: ", df_correct_replace.count())
# finally add wrong sample elements to their table
df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
if df_wrong_replace == None:
df_wrong_replace = df_lemma
else:
df_wrong_replace = df_wrong_replace.union(df_lemma)
#print("MTC:", i, "df_wrong_replace: ", df_wrong_replace.count())
# sometimes there will be no major topic code with precision => 75%
if df_replace_all == None:
df_replace_all = "empty"
# sometimes there will be no non-sample elements
if df_non_sample_replace == None:
df_non_sample_replace = "empty"
# the reason for creating these "empty" values, is because they will persist after we clear the
# cache, and we can use them later in the workflow control
# write all tables to parquet before clearing memory
df_correct_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet", mode="overwrite")
df_wrong_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet", mode="overwrite")
# sometimes there will be no non-sample elements
if df_non_sample_replace != "empty":
df_non_sample_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet", mode="overwrite")
# sometimes there will be no major topic code with precision => 75%
if df_replace_all != "empty":
df_replace_all.write.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet", mode="overwrite")
# write df_numbers to csv
df_numbers.to_csv("ML2_HV_v1_NYT_human_validation_numbers_r5.csv", index=True)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
#################################################
# prepare df_original to add tables to it
#################################################
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_train_and_remaining_NOTclassified.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_original = df_original.withColumnRenamed('majortopic', 'mtc_after_r4')
df_original = df_original.withColumn('majortopic', df_original['mtc_after_r4'])
# finally, create the new train id column
df_original = df_original.withColumn("train_r6", when(df_original["train_r5"] == 1, 1).otherwise(0))
#################################################
# add df_replace_all back to df_original
#################################################
if df_replace_all != "empty":
print("df_replace_all is NOT empty")
df_replace_all = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_after_r4')
df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])
# create the new train id column
df_replace_all = df_replace_all.withColumn("train_r6", lit(1))
# drop the extra columns to be able to add it back to df_original
df_replace_all = df_replace_all.drop('verdict')
# add df_replace_all elements to df_original
df_original = df_original.union(df_replace_all)
else:
print("df_replace_all is empty")
#################################################
# add df_non_sample_replace back to df_original
#################################################
if df_non_sample_replace != "empty":
print("df_non_sample_replace is NOT empty")
df_non_sample_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_after_r4'])
# create the new train id column
df_non_sample_replace = df_non_sample_replace.withColumn("train_r6", lit(0))
# drop the extra columns to be able to add it back to df_original
df_non_sample_replace = df_non_sample_replace.drop('verdict')
# add df_non_sample_replace elements to df_original
df_original = df_original.union(df_non_sample_replace)
else:
print("df_non_sample_replace is empty")
#################################################
# add df_correct_replace back to df_original
#################################################
df_correct_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])
# create the new train id column
df_correct_replace = df_correct_replace.withColumn("train_r6", lit(1))
# drop the extra columns to be able to add it back to df_original
df_correct_replace = df_correct_replace.drop('verdict')
# add df_correct_replace elements to df_original
df_original = df_original.union(df_correct_replace)
#################################################
# add df_wrong_replace back to df_original
#################################################
df_wrong_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_after_r4'])
# create the new train id column
df_wrong_replace = df_wrong_replace.withColumn("train_r6", lit(0))
# drop the extra columns to be able to add it back to df_original
df_wrong_replace = df_wrong_replace.drop('verdict')
# add df_wrong_replace elements to df_original
df_original = df_original.union(df_wrong_replace)
#################################################
# final write operations
#################################################
df_original.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet", mode="overwrite")
df_original.groupBy("train_r6").count().show(n=30)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
# write to pandas and export to csv for debugging
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet").repartition(50)
df_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()
df_original.to_csv("ML2_HV_v1_NYT_round6_starting_table.csv", index=False)
sc.stop()
spark.stop()
| 44.221289 | 142 | 0.668905 | ["MIT"] | poltextlab/nyt_hybrid_classification_workflow | spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py | 15,787 | Python |
"""Extra types understood by apitools.
This file will be replaced by a .proto file when we switch to proto2
from protorpc.
"""
import collections
import json
import numbers
from protorpc import message_types
from protorpc import messages
from protorpc import protojson
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apitools.base.py import util
__all__ = [
'DateTimeMessage',
'JsonArray',
'JsonObject',
'JsonValue',
'JsonProtoEncoder',
'JsonProtoDecoder',
]
# We import from protorpc.
# pylint:disable=invalid-name
DateTimeMessage = message_types.DateTimeMessage
# pylint:enable=invalid-name
def _ValidateJsonValue(json_value):
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value) for f, value in entries if value is not None]
if len(assigned_entries) != 1:
raise exceptions.InvalidDataError('Malformed JsonValue: %s' % json_value)
def _JsonValueToPythonValue(json_value):
"""Convert the given JsonValue to a json string."""
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value) for f, value in entries if value is not None]
field, value = assigned_entries[0]
if not isinstance(field, messages.MessageField):
return value
elif field.message_type is JsonObject:
return _JsonObjectToPythonValue(value)
elif field.message_type is JsonArray:
return _JsonArrayToPythonValue(value)
def _JsonObjectToPythonValue(json_value):
util.Typecheck(json_value, JsonObject)
return dict([(prop.key, _JsonValueToPythonValue(prop.value)) for prop
in json_value.properties])
def _JsonArrayToPythonValue(json_value):
util.Typecheck(json_value, JsonArray)
return [_JsonValueToPythonValue(e) for e in json_value.entries]
# Exclusive int64 bounds: a value fits in an INT64 field iff _MININT64 < value < _MAXINT64.
_MAXINT64 = 1 << 63
_MININT64 = -(1 << 63) - 1
def _PythonValueToJsonValue(py_value):
"""Convert the given python value to a JsonValue."""
if py_value is None:
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
if isinstance(py_value, (int, long)):
if _MININT64 < py_value < _MAXINT64:
return JsonValue(integer_value=py_value)
return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, collections.Iterable):
return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(
'Cannot convert "%s" to JsonValue' % py_value)
def _PythonValueToJsonObject(py_value):
util.Typecheck(py_value, dict)
return JsonObject(
properties=[
JsonObject.Property(key=key, value=_PythonValueToJsonValue(value))
for key, value in py_value.iteritems()])
def _PythonValueToJsonArray(py_value):
return JsonArray(entries=map(_PythonValueToJsonValue, py_value))
class JsonValue(messages.Message):
"""Any valid JSON value."""
# Is this JSON object `null`?
is_null = messages.BooleanField(1, default=False)
# Exactly one of the following is provided if is_null is False; none
# should be provided if is_null is True.
boolean_value = messages.BooleanField(2)
string_value = messages.StringField(3)
# We keep two numeric fields to keep int64 round-trips exact.
double_value = messages.FloatField(4, variant=messages.Variant.DOUBLE)
integer_value = messages.IntegerField(5, variant=messages.Variant.INT64)
# Compound types
object_value = messages.MessageField('JsonObject', 6)
array_value = messages.MessageField('JsonArray', 7)
class JsonObject(messages.Message):
"""A JSON object value.
Messages:
Property: A property of a JsonObject.
Fields:
properties: A list of properties of a JsonObject.
"""
class Property(messages.Message):
"""A property of a JSON object.
Fields:
key: Name of the property.
value: A JsonValue attribute.
"""
key = messages.StringField(1)
value = messages.MessageField(JsonValue, 2)
properties = messages.MessageField(Property, 1, repeated=True)
class JsonArray(messages.Message):
"""A JSON array value."""
entries = messages.MessageField(JsonValue, 1, repeated=True)
_JSON_PROTO_TO_PYTHON_MAP = {
JsonArray: _JsonArrayToPythonValue,
JsonObject: _JsonObjectToPythonValue,
JsonValue: _JsonValueToPythonValue,
}
_JSON_PROTO_TYPES = tuple(_JSON_PROTO_TO_PYTHON_MAP.keys())
def _JsonProtoToPythonValue(json_proto):
util.Typecheck(json_proto, _JSON_PROTO_TYPES)
return _JSON_PROTO_TO_PYTHON_MAP[type(json_proto)](json_proto)
def _PythonValueToJsonProto(py_value):
if isinstance(py_value, dict):
return _PythonValueToJsonObject(py_value)
if (isinstance(py_value, collections.Iterable) and
not isinstance(py_value, basestring)):
return _PythonValueToJsonArray(py_value)
return _PythonValueToJsonValue(py_value)
def _JsonProtoToJson(json_proto, unused_encoder=None):
return json.dumps(_JsonProtoToPythonValue(json_proto))
def _JsonToJsonProto(json_data, unused_decoder=None):
return _PythonValueToJsonProto(json.loads(json_data))
# pylint:disable=invalid-name
JsonProtoEncoder = _JsonProtoToJson
JsonProtoDecoder = _JsonToJsonProto
# pylint:enable=invalid-name
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonValue)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonObject)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonArray)
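# Usage sketch (illustrative; the helpers are assumed from apitools.base.py.encoding):
# with the codecs registered above, a JSON object round-trips losslessly, e.g.
#
#   msg = encoding.JsonToMessage(JsonObject, '{"a": [1, 2.5, null]}')
#   assert _JsonObjectToPythonValue(msg) == {'a': [1, 2.5, None]}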
def _EncodeDateTimeField(field, value):
result = protojson.ProtoJson().encode_field(field, value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeDateTimeField(unused_field, value):
result = protojson.ProtoJson().decode_field(
message_types.DateTimeField(1), value)
return encoding.CodecResult(value=result, complete=True)
encoding.RegisterFieldTypeCodec(_EncodeDateTimeField, _DecodeDateTimeField)(
message_types.DateTimeField)
def _EncodeInt64Field(field, value):
"""Handle the special case of int64 as a string."""
capabilities = [
messages.Variant.INT64,
messages.Variant.UINT64,
]
if field.variant not in capabilities:
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeInt64Field(unused_field, value):
# Don't need to do anything special, they're decoded just fine
return encoding.CodecResult(value=value, complete=False)
encoding.RegisterFieldTypeCodec(_EncodeInt64Field, _DecodeInt64Field)(
messages.IntegerField)
| 30.577586 | 78 | 0.762193 | ["Apache-2.0"] | Technology-Hatchery/google-cloud-sdk | .install/.backup/lib/apitools/base/py/extra_types.py | 7,094 | Python |
import json
import numpy as np
from PIL import Image
def load_json(f):
with open(f, 'r') as fp:
return json.load(fp)
def save_json(obj, f, ensure_ascii=True, indent=None):
with open(f, 'w') as fp:
json.dump(obj, fp, ensure_ascii=ensure_ascii, indent=indent)
def load_image(f, mode='RGB'):
with Image.open(f) as image:
return np.array(image.convert(mode))
| 19.9 | 68 | 0.658291 | ["MIT"] | narumiruna/labelme-utils | labelmeutils/utils/io.py | 398 | Python |
#!/usr/bin/python3
import click
import os
import tempfile
import filecmp
import shutil
import difflib
import sys
import git
import shell_utils
SOURCE_EXTENSIONS = [".cpp", ".c", ".cxx", ".cc", ".h", ".hxx", ".hpp"]
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Symbols:
PASS = u'\u2714'
FAIL = u'\u2718'
# Find all the source files we want to check
def find_files_to_check(modified_files, repo_dir):
if modified_files:
# Check which files have been added or modified by git
changed_files = shell_utils.run_shell_command('git diff-index --diff-filter=ACMR --name-only HEAD')
changed_files = "{}".format(changed_files.decode('utf-8')).split()
sources_to_check = [os.path.join(repo_dir, f) for f in changed_files if
f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
else:
# Recursively walk through the repo and find all the files that meet the extensions criteria
sources_to_check = [os.path.join(d, f)
for d, dirs, files in os.walk(repo_dir)
for f in files if f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
return sources_to_check
# Given a list of files, run clang-format on them. Optionally fix the files in place if desired
def check_files(files, fix_in_place, verbose):
num_failed_files = 0
for file in files:
# format the file with clang-format and save the output to a temporary file
output = shell_utils.run_shell_command("clang-format -style=file -fallback-style=none " + file)
formatted_file = tempfile.NamedTemporaryFile()
formatted_file.write(output)
formatted_file.seek(0)
# check if the formatted file is different from the original
file_changed = not filecmp.cmp(formatted_file.name, file)
# Only need to handle those files that were changed by clang-format. Files that weren't changed are good to go.
if file_changed:
num_failed_files += 1
print(Colors.RED + Symbols.FAIL + Colors.END + " " + str(file))
if verbose:
# get and display the diff between the original and formatted files
original_file = open(file, 'r')
new_file = open(formatted_file.name, 'r')
diff = difflib.unified_diff(original_file.readlines(), new_file.readlines())
print(Colors.CYAN)
for line in diff:
sys.stdout.write(line)
print(Colors.END)
if fix_in_place:
# if we are fixing in place, just replace the original file with the changed contents
print(Colors.YELLOW + "WARNING: Fixing in place. Original file will be changed." + Colors.END)
shutil.move(formatted_file.name, file)
else:
print(Colors.GREEN + Symbols.PASS + Colors.END + " " + str(file))
# clean up
try:
formatted_file.close()
except FileNotFoundError as _:
# Do nothing. We must have moved the file above
pass
return num_failed_files
@click.command()
@click.option('-f', '--fix-in-place', default=False, is_flag=True, help='Fix the issues found.')
@click.option('-m', '--modified-files', default=False, is_flag=True, help='Check modified files (according to git) '
'only.')
@click.option('-v', '--verbose', default=False, is_flag=True, help="Print all the errors found.")
def main(fix_in_place, modified_files, verbose):
# change directory to the root of the git project
repo = git.Repo('.', search_parent_directories=True)
os.chdir(repo.working_tree_dir)
    # Find the source files we want to check
sources_to_check = find_files_to_check(modified_files, repo.working_tree_dir)
# Run clang-format and compare the output to the original files
num_failed_files = check_files(sources_to_check, fix_in_place, verbose)
# Return success or failure
if num_failed_files:
print(
Colors.RED + 3 * Symbols.FAIL + " " + str(num_failed_files) + " files have formatting errors." + Colors.END)
if fix_in_place:
print("The formatting errors have been automatically fixed.")
sys.exit(1)
print(Colors.GREEN + 3 * Symbols.PASS + Colors.END + " All files are properly formatted!")
sys.exit(0)
if __name__ == '__main__':
main()
| 37.02381 | 120 | 0.629582 | ["MIT"] | markcutler/autopilot | tools/run_clang_format.py | 4,665 | Python |
# Fill in the second form on the demo page and validate the displayed answer
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver=webdriver.Chrome("../chromedriver.exe")
driver.get("https://www.seleniumeasy.com/test/basic-first-form-demo.html")
num1 = 2
num2 = 3
# send_keys() and click() return None, so their results are not worth keeping
driver.find_element(By.ID, "sum1").send_keys(str(num1))
driver.find_element(By.ID, "sum2").send_keys(str(num2))
driver.find_element(By.XPATH, "/html/body/div[2]/div/div[2]/div[2]/div[2]/form/button").click()
displayed_sum = driver.find_element(By.ID, "displayvalue").text
if (num1 + num2) == int(displayed_sum):
    print("same")
else:
    print("different")
time.sleep(5)
driver.quit()
| 23.266667 | 102 | 0.7149 | ["MIT"] | araj29011998/Complete-Selenium-Automation | selenium/filling form/form-2.py | 698 | Python |
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
functions of the same variables, with identical coefficient.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
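    For example, assuming a SymPy symbol ``n`` declared nonnegative, a smart Vector
    can decide that ``Vector(n, 0, smart=True) < Vector(0, 0)`` is False, whereas the
    same comparison without ``smart`` raises a TypeError.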
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
# though it can't be assumed anything about `v1 < 0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
integer points that exist between self_i and sink_i.
Examples
--------
| 3 | | 1 | | 2 |
source = | 2 | , sink = | 4 | , distance => | -2 |
| 1 | | 5 | | -4 |
There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.
"""
return self - other
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
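        Examples
        --------
        With two hypothetical Dimensions x and y, transposing
        LabeledVector([(x, 1), (y, 2)]) and LabeledVector([(x, 3), (y, 4)])
        gives ((x, Vector(1, 3)), (y, Vector(2, 4))).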
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
| 34.409471 | 87 | 0.567393 | ["MIT"] | rhodrin/devito | devito/ir/support/vector.py | 12,353 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threathunter_common.geo.phonelocator import *
__author__ = "nebula"
def test_phone():
print check_phone_number("+13482345020", None)
assert check_phone_number("13482345020", 'CN')
assert not check_phone_number("+134823450", None)
print get_carrier("13482121123", 'CN')
print get_carrier("13815430576", 'CN')
print get_carrier("13093705423", 'CN')
print get_geo("13482121123", 'CN')
print get_geo("13815430576", 'CN')
print get_geo("13093705423", 'CN')
print 111, get_geo("020 8366 1177", "GB")
print 111, get_geo("+442083661177")
print phonenumbers.parse("020 8366 1177", "GB")
print phonenumbers.parse("+442083661177")
assert False
| 24.225806 | 53 | 0.679095 | ["Apache-2.0"] | threathunterX/python_lib | threathunter_common_python/test/testphone.py | 751 | Python |
"""
Output formats.
"""
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| 14 | 28 | 0.735714 | ["MIT"] | sebMathieu/code_metrics | metrics/outputs/__init__.py | 140 | Python |
#!/usr/bin/env python3
"""Pre-commit hook to verify that all extras are documented in README.rst"""
import configparser
import re
from pathlib import Path
repo_dir = Path(__file__).parent.parent.parent
config = configparser.ConfigParser(strict=False)
config.read(repo_dir / "setup.cfg")
all_extra = []
extra_to_exclude = {"tests", "mypy", "docs"}
all_extras = set(config["options.extras_require"].keys()) - extra_to_exclude
readme_path = repo_dir / "README.rst"
extra_doc = """
.. list-table::
:header-rows: 1
* - Extra Name
- Installation Command
- Dependencies
"""
for extra in sorted(all_extras):
extra_doc += f"""
* - ``{extra}``
- ``pip install 'astronomer-providers[{extra}]'``
- {extra.replace(".", " ").title()}
"""
with open(readme_path, "r") as readme_file:
readme_contents = readme_file.read()
new_readme_text = re.sub(
r".. EXTRA_DOC_START([\s\S]*).. EXTRA_DOC_END",
f".. EXTRA_DOC_START{extra_doc}\n.. EXTRA_DOC_END",
readme_contents,
flags=re.MULTILINE,
)
if new_readme_text != readme_contents:
with open(readme_path, "w") as readme_file:
readme_file.write(new_readme_text)
| 24.265306 | 76 | 0.666947 | ["Apache-2.0"] | astronomer/astronomer-providers | .circleci/scripts/pre_commit_readme_extra.py | 1,189 | Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
setup(
name="q2-autopepsirf",
version=versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
package_data={},
author="Annabelle Brown",
author_email="[email protected]",
description="Auto-Run q2-pepsirf and q2-ps-plot",
license='Apache-2.0',
url="https://github.com/LadnerLab/q2-autopepsirf",
entry_points={
'qiime2.plugins': ['q2-autopepsirf=q2_autopepsirf.plugin_setup:plugin']
},
zip_safe=False,
)
| 28.904762 | 80 | 0.672158 | ["Apache-2.0"] | Annabelle-Brown/q2-autopepsirf | setup.py | 607 | Python |
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from lda import LDA
def learn_topics(texts, topicnum):
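    """Fit an LDA topic model and return (document-topic matrix, topic-word matrix, vocabulary)."""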
    # Get vocabulary and word counts. Use the top 10,000 most frequent
    # lowercase unigrams (scikit-learn's default tokenizer: tokens of two or
    # more alphanumeric characters, with punctuation treated as a separator).
print("Vectorizing...")
CVzer = CountVectorizer(max_features=10000,
lowercase=True)
doc_vcnts = CVzer.fit_transform(texts)
vocabulary = CVzer.get_feature_names()
# Learn topics. Refresh conrols print frequency.
print("LDA")
lda_model = LDA(topicnum, n_iter=4000, refresh=500)
doc_topic = lda_model.fit_transform(doc_vcnts)
topic_word = lda_model.topic_word_
return doc_topic, topic_word, vocabulary
print("Reading data...")
env = pd.read_csv('../Data/Environmental Discourse/env_processed.csv', index_col=0)
env = env[~env.text_processed.isna()]
doc_topic, topic_word, vocabulary = learn_topics(env.text_processed, 100)
print(doc_topic[0,:])
for i in range(100):
env['topic_{}'.format(i)] = doc_topic[:, i]
env.to_csv('../Data/Environmental Discourse/env_lda.csv')
| 34.514286 | 84 | 0.701987 | ["MIT"] | mikepackard415/Scientific-Environmental-Discourse | Programs/env_lda.py | 1,208 | Python |
import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
def process_song_file(cur, filepath):
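    """Read one song JSON file and insert its song and artist records."""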
# open song file
df = pd.read_json(filepath, lines = True)
# insert song record
song_data = df[["song_id", "title", "artist_id", "year", "duration"]].values[0]
cur.execute(song_table_insert, song_data)
# insert artist record
artist_data = df[["artist_id", "artist_name", "artist_location", "artist_latitude", "artist_longitude",]].values[0]
cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
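    """Read one log JSON file, keep NextSong events, and insert time, user and songplay records."""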
# open log file
df = pd.read_json(filepath, lines = True)
# filter by NextSong action
df = df.query("page=='NextSong'")
# convert timestamp column to datetime
t = pd.to_datetime(df["ts"]/1000, unit = 's')
# insert time data records
time_data = np.transpose(np.array([df["ts"].values, t.dt.hour.values, t.dt.day.values, t.dt.week.values, \
t.dt.month.values, t.dt.year.values, t.dt.weekday.values]))
column_labels = ("timestamp", "hour", "day", "week of year", "month", "year", "weekday")
time_df = pd.DataFrame(data = time_data, columns = column_labels)
for i, row in time_df.iterrows():
cur.execute(time_table_insert, list(row))
# load user table
user_df = df[["userId", "firstName", "lastName", "gender", "level"]]
# insert user records
for i, row in user_df.iterrows():
cur.execute(user_table_insert, row)
# insert songplay records
for index, row in df.iterrows():
# get songid and artistid from song and artist tables
cur.execute(song_select, (row.song, row.artist, row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
# insert songplay record
songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, \
row.location, row.userAgent)
cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
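    """Walk `filepath` for JSON files and apply `func` to each one, committing after every file."""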
# get all files matching extension from directory
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
# get total number of files found
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
# iterate over files and process
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
def main():
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
process_data(cur, conn, filepath='data/song_data', func=process_song_file)
process_data(cur, conn, filepath='data/log_data', func=process_log_file)
conn.close()
if __name__ == "__main__":
    main()
| 32.40625 | 119 | 0.641916 | ["MIT"] | cdiswine/data-engineering-nanodegree | ETL-data-with-postgres/etl.py | 3,111 | Python |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['soccer_trajectories'],
package_dir={'': 'src'},
)
setup(**setup_args)
| 24.230769 | 61 | 0.768254 | ["BSD-3-Clause"] | sadmanca/soccerbot | soccer_trajectories/setup.py | 315 | Python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import argparse
import os
import numpy as np
from src.dataset import load_and_process
def generate_bin():
"""Generate bin files."""
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
args = parser.parse_args()
feature, biases, _, _, _, _, y_test, test_mask = load_and_process(args.data_dir,
args.train_nodes_num,
args.eval_nodes_num,
args.test_nodes_num)
feature_path = os.path.join(args.result_path, '00_data')
biases_path = os.path.join(args.result_path, '01_data')
y_test_path = os.path.join(args.result_path, 'y_test.npy')
test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
os.makedirs(feature_path)
os.makedirs(biases_path)
feature.tofile(os.path.join(feature_path, 'feature.bin'))
biases.tofile(os.path.join(biases_path, 'biases.bin'))
np.save(y_test_path, y_test)
np.save(test_mask_path, test_mask)
if __name__ == "__main__":
generate_bin()
| 44.461538 | 103 | 0.645329 | ["Apache-2.0"] | 233-puchi/mindspore | model_zoo/official/gnn/gat/preprocess.py | 2,312 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'birdview.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_birdview(object):
def setupUi(self, birdview):
birdview.setObjectName("birdview")
birdview.resize(552, 551)
self.verticalLayout = QtWidgets.QVBoxLayout(birdview)
self.verticalLayout.setContentsMargins(5, 5, 5, 5)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnOpenFile = QtWidgets.QPushButton(birdview)
self.btnOpenFile.setObjectName("btnOpenFile")
self.horizontalLayout.addWidget(self.btnOpenFile)
self.lab_file_name = QtWidgets.QLabel(birdview)
self.lab_file_name.setAlignment(QtCore.Qt.AlignCenter)
self.lab_file_name.setObjectName("lab_file_name")
self.horizontalLayout.addWidget(self.lab_file_name)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 4)
self.verticalLayout.addLayout(self.horizontalLayout)
self.vbox_bd = QtWidgets.QVBoxLayout()
self.vbox_bd.setObjectName("vbox_bd")
self.verticalLayout.addLayout(self.vbox_bd)
self.hbox_btn_slider = QtWidgets.QHBoxLayout()
self.hbox_btn_slider.setObjectName("hbox_btn_slider")
self.media_grid = QtWidgets.QGridLayout()
self.media_grid.setObjectName("media_grid")
self.hbox_btn_slider.addLayout(self.media_grid)
self.verticalLayout.addLayout(self.hbox_btn_slider)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 20)
self.verticalLayout.setStretch(2, 1)
self.retranslateUi(birdview)
QtCore.QMetaObject.connectSlotsByName(birdview)
def retranslateUi(self, birdview):
_translate = QtCore.QCoreApplication.translate
birdview.setWindowTitle(_translate("birdview", "BirdView"))
self.btnOpenFile.setText(_translate("birdview", "Open xls"))
self.lab_file_name.setText(_translate("birdview", "xls_name"))
| 43.5 | 75 | 0.71798 | ["BSD-3-Clause"] | AndyYangjd/data_fuse_demo | src/visualization_simulator/src/ui/ui_birdview.py | 2,436 | Python |
from django.utils import timezone
from maestros.models import Unidades
from maestros_generales.models import Empresas
__author__ = 'julian'
from django.contrib.gis.db import models
import datetime
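def _timestamp_server_default():
    # Named callable (name chosen here) for the timestamp_server default below;
    # a lambda cannot be serialized by Django's migration framework.
    return timezone.now() + datetime.timedelta(hours=1)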
class WaspTypeSensor(models.Model):
name = models.CharField(max_length=50)
units = models.ForeignKey(Unidades)
fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
class WaspMote(models.Model):
DeviceName = models.CharField(max_length=30)
Imei = models.BigIntegerField()
fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
empresa = models.ForeignKey(Empresas,null=True, blank=True,verbose_name=('Empresa'),on_delete=models.PROTECT)
class WaspSensor(models.Model):
waspmote = models.ForeignKey(WaspMote, on_delete=models.PROTECT)
probestype = models.ForeignKey(WaspTypeSensor,on_delete=models.PROTECT)
fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
empresa = models.ForeignKey(Empresas,null=True, blank=True,verbose_name=('Empresa'),on_delete=models.PROTECT)
class WaspData(models.Model):
waspsensor = models.ForeignKey(WaspSensor)
timestamp_waspmote = models.DateTimeField()
status = models.CharField(max_length=1)
#loc = models.PointField(srid=4326)
alt = models.FloatField()
lat = models.FloatField()
long = models.FloatField()
speed = models.FloatField()
course = models.FloatField()
voltage = models.IntegerField()
notes = models.TextField()
#objects = models.GeoManager()
valorsensor = models.FloatField()
#timestamp_server = models.DateTimeField()
    timestamp_server = models.DateTimeField(default=_timestamp_server_default, blank=True)
| 45.568627 | 121 | 0.671687 | ["BSD-3-Clause"] | Infinityloopsistemas/SIVA | rest_waspmote/models.py | 2,324 | Python |
"""Test length measured in half bytes (nibbles). Nibbles were added in v2.1"""
import copy
import iso8583
import iso8583.specs
import pytest
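# A nibble is half a byte: a BCD-packed field such as b"\x12\x34" carries the four
# digits "1234", so its length is 2 counted in bytes but 4 counted in nibbles.
# The parametrized cases below exercise both conventions.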
# fmt: off
@pytest.mark.parametrize(
["data_enc", "len_enc", "len_type", "max_len", "len_count", "result", "result_f2_len"],
[
("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
],
)
# fmt: on
def test_encode_nibbles(
data_enc: str,
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = data_enc
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
decoded = {"t": "0200", "2": "1234"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
],
)
# fmt: on
def test_encode_nibbles_odd_left_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["left_pad"] = pad
decoded = {"t": "0200", "2": "123"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
],
)
# fmt: on
def test_encode_nibbles_odd_right_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["right_pad"] = pad
decoded = {"t": "0200", "2": "123"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
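# With neither left_pad nor right_pad configured, an odd number of nibbles cannot be
# packed into whole bytes and encoding is expected to fail.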
def test_encode_nibbles_odd_no_pad() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = "b"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 8
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1"}
with pytest.raises(
iso8583.EncodeError,
match="Failed to encode .*: field 2",
):
iso8583.encode(decoded, spec=spec)
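# Decoding counterpart of the nibble tests: the field 2 length in the incoming message
# is interpreted according to len_count, i.e. counted in nibbles or in bytes.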
# fmt: off
@pytest.mark.parametrize(
["data_enc", "len_enc", "len_type", "max_len", "len_count", "data", "result_f2_len"],
[
("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
],
)
# fmt: on
def test_decode_nibbles(
data_enc: str,
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = data_enc
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "1234"
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
],
)
# fmt: on
def test_decode_nibbles_left_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["left_pad"] = pad
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "123"
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
],
)
# fmt: on
def test_decode_nibbles_right_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["right_pad"] = pad
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "123"
assert encoded["2"]["len"] == result_f2_len
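# Decoding odd-length nibble data with no pad nibble configured must fail: the packed
# bytes hold 4 nibbles while the length field announces only 3.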
def test_decode_nibbles_odd_no_pad() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = "b"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 8
spec["2"]["len_count"] = "nibbles"
data = b"02004000000000000000\x00\x03\x12\x30"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 4 nibbles, expecting 3: field 2 pos 22",
):
iso8583.decode(data, spec=spec)
def test_encode_nibbles_variable_over_max() -> None:
"""Variable field length is over maximum allowed"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1234"}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 8 nibbles, larger than maximum 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_partial() -> None:
"""Fixed field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1"}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 2 nibbles, expecting 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_missing() -> None:
"""Fixed field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": ""}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 0 nibbles, expecting 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_decode_nibbles_variable_over_max() -> None:
"""Variable field length is over maximum allowed"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000081234"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 8 nibbles, larger than maximum 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_variable_partial() -> None:
"""Variable field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000041"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 2 nibbles, expecting 4: field 2 pos 22",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_variable_missing() -> None:
"""Variable field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"0200400000000000000004"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 0 nibbles, expecting 4: field 2 pos 22",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_fixed_partial() -> None:
"""Fixed field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"020040000000000000001"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 2 nibbles, expecting 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_fixed_missing() -> None:
"""Fixed field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 0 nibbles, expecting 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
| 35.397917 | 98 | 0.571126 | [
"MIT"
] | knovichikhin/pyiso8583 | tests/test_nibbles.py | 16,991 | Python |
import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
import random, pdb, math, copy
from tqdm import tqdm
from scipy.spatial.distance import cdist
import pickle
from data_load import mnist, svhn, usps
# inverse_transform = None
# class InverseTransform(torchvision.transforms.Normalize):
# """
# Undoes the normalization and returns the reconstructed images in the input domain.
# """
# def __init__(self, mean, std):
# mean = torch.as_tensor(mean)
# std = torch.as_tensor(std)
# std_inv = 1 / (std + 1e-7)
# mean_inv = -mean * std_inv
# super().__init__(mean=mean_inv, std=std_inv)
# def __call__(self, tensor):
# t = super().__call__(tensor.clone())
# # return transforms.ToPILImage()(t)
# return t
def digit_load(args):
global inverse_transform
train_bs = args.batch_size
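    # Only the test split is needed here; the dataset is selected by flag
    # (s = SVHN, u = USPS, m = MNIST) and normalized to [-1, 1] via Normalize(0.5, 0.5).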
if args.dset == 's':
test_source = svhn.SVHN('./data/svhn/', split='test', download=True,
transform=transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
elif args.dset == 'u':
test_source = usps.USPS('./data/usps/', train=False, download=True,
transform=transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5,), (0.5,))
elif args.dset == 'm':
test_source = mnist.MNIST('./data/mnist/', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5,), (0.5,))
dset_loaders = {}
dset_loaders["test"] = DataLoader(test_source, batch_size=train_bs*2, shuffle=False,
num_workers=args.worker, drop_last=False)
return dset_loaders
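# Runs the loader through netF -> netB -> netC, dumps every input image under
# <output_dir>/inspect/label-<true>/pred-<predicted>/ for visual inspection (relies on
# the module-level `args`), and returns overall accuracy (%) and mean prediction entropy.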
def cal_acc(loader, netF, netB, netC):
k = 0
start_test = True
with torch.no_grad():
iter_test = iter(loader)
for i in range(len(loader)):
            data = next(iter_test)
input_images = []
inputs = data[0]
inputs_clone = inputs.clone()
for j in range(inputs_clone.size(0)):
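                # Normalize((-1,), (2,)) undoes the earlier (0.5)/(0.5) normalization:
                # x -> (x + 1) / 2 maps the tensor from [-1, 1] back to [0, 1] for ToPILImage.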
x = transforms.Normalize((-1,), (2,))(inputs_clone[j])
input_images.append(transforms.ToPILImage()(x))
labels = data[1]
outputs = netC(netB(netF(inputs)))
#
_, predict = torch.max(outputs.float().cpu(), 1)
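            # Save each image into a folder keyed by its true label and predicted class
            # so misclassifications can be browsed directly on disk.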
for j in range(inputs.size(0)):
folder = args.output_dir + '/inspect/label-{}'.format(labels[j])
if not osp.exists(folder):
os.makedirs(folder)
subfolder = folder + '/pred-{}'.format(predict[j])
if not osp.exists(subfolder):
os.makedirs(subfolder)
input_images[j].save(subfolder + '/{}.jpg'.format(k))
k += 1
#
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
_, predict = torch.max(all_output, 1)
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
mean_ent = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item()
return accuracy*100, mean_ent
def test(args):
dset_loaders = digit_load(args)
## set base network
if args.dset == 'u':
netF = network.LeNetBase()#.cuda()
elif args.dset == 'm':
netF = network.LeNetBase()#.cuda()
elif args.dset == 's':
netF = network.DTNBase()#.cuda()
netB = network.feat_bootleneck(type=args.classifier, feature_dim=netF.in_features, bottleneck_dim=args.bottleneck)#.cuda()
netC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck)#.cuda()
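    # Restore the feature extractor (F.pt), bottleneck (B.pt) and classifier (C.pt)
    # weights saved under the output directory, then evaluate in eval mode.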
args.modelpath = args.output_dir + '/F.pt'
netF.load_state_dict(torch.load(args.modelpath))
args.modelpath = args.output_dir + '/B.pt'
netB.load_state_dict(torch.load(args.modelpath))
args.modelpath = args.output_dir + '/C.pt'
netC.load_state_dict(torch.load(args.modelpath))
netF.eval()
netB.eval()
netC.eval()
acc, _ = cal_acc(dset_loaders['test'], netF, netB, netC)
log_str = 'Task: {}, Accuracy = {:.2f}%'.format(args.dset, acc)
try:
args.out_file.write(log_str + '\n')
args.out_file.flush()
except:
pass
print(log_str+'\n')
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SHOT')
parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
parser.add_argument('--s', type=int, default=0, help="source")
parser.add_argument('--t', type=int, default=1, help="target")
parser.add_argument('--max_epoch', type=int, default=30, help="maximum epoch")
parser.add_argument('--batch_size', type=int, default=64, help="batch_size")
parser.add_argument('--worker', type=int, default=4, help="number of workers")
parser.add_argument('--dset', type=str, default='s', choices=['u', 'm','s'])
parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
parser.add_argument('--seed', type=int, default=2020, help="random seed")
parser.add_argument('--bottleneck', type=int, default=256)
parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
parser.add_argument('--output', type=str, default='')
parser.add_argument('--issave', type=bool, default=True)
args = parser.parse_args()
args.class_num = 10
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
SEED = args.seed
torch.manual_seed(SEED)
# torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
# torch.backends.cudnn.deterministic = True
args.output_dir = osp.join(args.output, 'seed' + str(args.seed), args.dset)
test(args)
# python unsupervised_digit.py --dset m --gpu_id 0 --output ckps_unsupervised_digit
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --output ckps_unsupervised_digit_ent
# python unsupervised_digit.py --dset m --gpu_id 0 --gent --output ckps_unsupervised_digit_gent
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --gent --output ckps_unsupervised_digit_ent_gent
# actually there is no way to know which class will come out .. ideally look at t-SNE? or show the classifications first?
# show classification + gradcam (faster version)
"MIT"
] | viniciusarruda/SHOT | experiments/digit/unsupervised_digit_inspect.py | 7,716 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, SIS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Encuesta(Document):
pass
| 22.545455 | 49 | 0.774194 | [
"MIT"
] | ErickLopez76/encuestaapp | encuestaapp/encuestaapp/doctype/encuesta/encuesta.py | 248 | Python |