# File: aries_cloudagent/protocols/out_of_band/v1_0/manager.py
# Repo: Luis-GA/aries-cloudagent-python
"""Classes to manage connections."""
import asyncio
import json
import logging
from typing import Mapping, Sequence, Optional
from ....connections.base_manager import BaseConnectionManager
from ....connections.models.conn_record import ConnRecord
from ....connections.util import mediation_record_if_id
from ....core.error import BaseError
from ....core.profile import ProfileSession
from ....indy.holder import IndyHolder
from ....indy.sdk.models.xform import indy_proof_req_preview2indy_requested_creds
from ....messaging.responder import BaseResponder
from ....messaging.decorators.attach_decorator import AttachDecorator
from ....multitenant.manager import MultitenantManager
from ....storage.error import StorageNotFoundError
from ....transport.inbound.receipt import MessageReceipt
from ....wallet.base import BaseWallet
from ....wallet.util import b64_to_bytes
from ....wallet.key_type import KeyType
from ....did.did_key import DIDKey
from ...coordinate_mediation.v1_0.manager import MediationManager
from ...connections.v1_0.manager import ConnectionManager
from ...connections.v1_0.messages.connection_invitation import ConnectionInvitation
from ...didcomm_prefix import DIDCommPrefix
from ...didexchange.v1_0.manager import DIDXManager
from ...issue_credential.v1_0.models.credential_exchange import V10CredentialExchange
from ...issue_credential.v2_0.models.cred_ex_record import V20CredExRecord
from ...present_proof.v1_0.manager import PresentationManager
from ...present_proof.v1_0.message_types import PRESENTATION_REQUEST
from ...present_proof.v1_0.models.presentation_exchange import V10PresentationExchange
from ...present_proof.v2_0.manager import V20PresManager
from ...present_proof.v2_0.message_types import PRES_20_REQUEST
from ...present_proof.v2_0.messages.pres_format import V20PresFormat
from ...present_proof.v2_0.messages.pres_request import V20PresRequest
from ...present_proof.v2_0.models.pres_exchange import V20PresExRecord
from .messages.invitation import HSProto, InvitationMessage
from .messages.problem_report import OOBProblemReport
from .messages.reuse import HandshakeReuse
from .messages.reuse_accept import HandshakeReuseAccept
from .messages.service import Service as ServiceMessage
from .models.invitation import InvitationRecord
class OutOfBandManagerError(BaseError):
"""Out of band error."""
class OutOfBandManagerNotImplementedError(BaseError):
"""Out of band error for unimplemented functionality."""
class OutOfBandManager(BaseConnectionManager):
"""Class for managing out of band messages."""
def __init__(self, session: ProfileSession):
"""
        Initialize an OutOfBandManager.
Args:
session: The profile session for this out of band manager
"""
self._session = session
self._logger = logging.getLogger(__name__)
super().__init__(self._session)
@property
def session(self) -> ProfileSession:
"""
Accessor for the current profile session.
Returns:
            The profile session for this out-of-band manager
"""
return self._session
async def create_invitation(
self,
my_label: str = None,
my_endpoint: str = None,
auto_accept: bool = None,
public: bool = False,
hs_protos: Sequence[HSProto] = None,
multi_use: bool = False,
alias: str = None,
attachments: Sequence[Mapping] = None,
metadata: dict = None,
mediation_id: str = None,
) -> InvitationRecord:
"""
Generate new connection invitation.
This interaction represents an out-of-band communication channel. In the future
        and in practice, these sorts of invitations will be received over any number of
channels such as SMS, Email, QR Code, NFC, etc.
Args:
my_label: label for this connection
my_endpoint: endpoint where other party can reach me
auto_accept: auto-accept a corresponding connection request
(None to use config)
public: set to create an invitation from the public DID
hs_protos: list of handshake protocols to include
            multi_use: set to True to create an invitation for a multiple-use
                connection
            alias: optional alias to apply to connection for later use
            attachments: list of dicts in form of {"id": ..., "type": ...}
            metadata: additional metadata to store on the connection record
                (not supported for public invitations)
            mediation_id: record identifier for the mediation record to use
Returns:
Invitation record
"""
mediation_mgr = MediationManager(self._session.profile)
mediation_record = await mediation_record_if_id(
self._session,
mediation_id,
or_default=True,
)
keylist_updates = None
if not (hs_protos or attachments):
raise OutOfBandManagerError(
"Invitation must include handshake protocols, "
"request attachments, or both"
)
wallet = self._session.inject(BaseWallet)
# Multitenancy setup
multitenant_mgr = self._session.inject(MultitenantManager, required=False)
wallet_id = self._session.settings.get("wallet.id")
accept = bool(
auto_accept
or (
auto_accept is None
and self._session.settings.get("debug.auto_accept_requests")
)
)
if public:
if multi_use:
raise OutOfBandManagerError(
"Cannot create public invitation with multi_use"
)
if metadata:
raise OutOfBandManagerError(
"Cannot store metadata on public invitations"
)
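        # Resolve each requested attachment from its stored exchange record,
        # trying the v1 record first and falling back to the v2 record when the
        # id is not found in v1 storage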
message_attachments = []
for atch in attachments or []:
a_type = atch.get("type")
a_id = atch.get("id")
if a_type == "credential-offer":
try:
cred_ex_rec = await V10CredentialExchange.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
cred_ex_rec.credential_offer_dict
)
)
except StorageNotFoundError:
cred_ex_rec = await V20CredExRecord.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(cred_ex_rec.cred_offer.offer())
)
elif a_type == "present-proof":
try:
pres_ex_rec = await V10PresentationExchange.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
pres_ex_rec.presentation_request_dict
)
)
except StorageNotFoundError:
pres_ex_rec = await V20PresExRecord.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
pres_ex_rec.pres_request.attachment()
)
)
else:
raise OutOfBandManagerError(f"Unknown attachment type: {a_type}")
handshake_protocols = [
DIDCommPrefix.qualify_current(hsp.name) for hsp in hs_protos or []
] or None
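        # Public invitations advertise the wallet's public DID as the service;
        # otherwise a fresh invitation key and an inline service block are used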
if public:
if not self._session.settings.get("public_invites"):
raise OutOfBandManagerError("Public invitations are not enabled")
public_did = await wallet.get_public_did()
if not public_did:
raise OutOfBandManagerError(
"Cannot create public invitation with no public DID"
)
invi_msg = InvitationMessage( # create invitation message
label=my_label or self._session.settings.get("default_label"),
handshake_protocols=handshake_protocols,
requests_attach=message_attachments,
services=[f"did:sov:{public_did.did}"],
)
keylist_updates = await mediation_mgr.add_key(
public_did.verkey, keylist_updates
)
endpoint, *_ = await self.resolve_invitation(public_did.did)
invi_url = invi_msg.to_url(endpoint)
conn_rec = ConnRecord( # create connection record
invitation_key=public_did.verkey,
invitation_msg_id=invi_msg._id,
their_role=ConnRecord.Role.REQUESTER.rfc23,
state=ConnRecord.State.INVITATION.rfc23,
accept=ConnRecord.ACCEPT_AUTO if accept else ConnRecord.ACCEPT_MANUAL,
alias=alias,
)
await conn_rec.save(self._session, reason="Created new invitation")
await conn_rec.attach_invitation(self._session, invi_msg)
if multitenant_mgr and wallet_id: # add mapping for multitenant relay
await multitenant_mgr.add_key(
wallet_id, public_did.verkey, skip_if_exists=True
)
else:
invitation_mode = (
ConnRecord.INVITATION_MODE_MULTI
if multi_use
else ConnRecord.INVITATION_MODE_ONCE
)
if not my_endpoint:
my_endpoint = self._session.settings.get("default_endpoint")
# Create and store new invitation key
connection_key = await wallet.create_signing_key(KeyType.ED25519)
keylist_updates = await mediation_mgr.add_key(
connection_key.verkey, keylist_updates
)
# Add mapping for multitenant relay
if multitenant_mgr and wallet_id:
await multitenant_mgr.add_key(wallet_id, connection_key.verkey)
# Create connection record
conn_rec = ConnRecord(
invitation_key=connection_key.verkey,
their_role=ConnRecord.Role.REQUESTER.rfc23,
state=ConnRecord.State.INVITATION.rfc23,
accept=ConnRecord.ACCEPT_AUTO if accept else ConnRecord.ACCEPT_MANUAL,
invitation_mode=invitation_mode,
alias=alias,
)
await conn_rec.save(self._session, reason="Created new connection")
routing_keys = []
# The base wallet can act as a mediator for all tenants
if multitenant_mgr and wallet_id:
base_mediation_record = await multitenant_mgr.get_default_mediator()
if base_mediation_record:
routing_keys = base_mediation_record.routing_keys
my_endpoint = base_mediation_record.endpoint
# If we use a mediator for the base wallet we don't
# need to register the key at the subwallet mediator
# because it only needs to know the key of the base mediator
# sub wallet mediator -> base wallet mediator -> agent
keylist_updates = None
if mediation_record:
routing_keys = [*routing_keys, *mediation_record.routing_keys]
my_endpoint = mediation_record.endpoint
# Save that this invitation was created with mediation
await conn_rec.metadata_set(
self._session, "mediation", {"id": mediation_id}
)
if keylist_updates:
responder = self._session.inject(BaseResponder, required=False)
await responder.send(
keylist_updates, connection_id=mediation_record.connection_id
)
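            # Normalize routing keys to did:key form: entries that already look
            # like DIDs are kept, bare base58 keys are converted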
routing_keys = [
key
if len(key.split(":")) == 3
else DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in routing_keys
]
# Create connection invitation message
# Note: Need to split this into two stages to support inbound routing
# of invitations
# Would want to reuse create_did_document and convert the result
invi_msg = InvitationMessage(
label=my_label or self._session.settings.get("default_label"),
handshake_protocols=handshake_protocols,
requests_attach=message_attachments,
services=[
ServiceMessage(
_id="#inline",
_type="did-communication",
recipient_keys=[
DIDKey.from_public_key_b58(
connection_key.verkey, KeyType.ED25519
).did
],
service_endpoint=my_endpoint,
routing_keys=routing_keys,
)
],
)
invi_url = invi_msg.to_url()
# Update connection record
conn_rec.invitation_msg_id = invi_msg._id
await conn_rec.save(self._session, reason="Added Invitation")
await conn_rec.attach_invitation(self._session, invi_msg)
if metadata:
for key, value in metadata.items():
await conn_rec.metadata_set(self._session, key, value)
return InvitationRecord( # for return via admin API, not storage
state=InvitationRecord.STATE_INITIAL,
invi_msg_id=invi_msg._id,
invitation=invi_msg,
invitation_url=invi_url,
)
async def receive_invitation(
self,
invi_msg: InvitationMessage,
use_existing_connection: bool = True,
auto_accept: bool = None,
alias: str = None,
mediation_id: str = None,
) -> dict:
"""
Receive an out of band invitation message.
Args:
invi_msg: invitation message
use_existing_connection: whether to use existing connection if possible
auto_accept: whether to accept the invitation automatically
alias: Alias for connection record
mediation_id: mediation identifier
Returns:
ConnRecord, serialized
"""
if mediation_id:
try:
await mediation_record_if_id(self._session, mediation_id)
except StorageNotFoundError:
mediation_id = None
# There must be exactly 1 service entry
if len(invi_msg.services) != 1:
raise OutOfBandManagerError("service array must have exactly one element")
if not (invi_msg.requests_attach or invi_msg.handshake_protocols):
raise OutOfBandManagerError(
"Invitation must specify handshake_protocols, requests_attach, or both"
)
# Get the single service item
oob_service_item = invi_msg.services[0]
if isinstance(oob_service_item, ServiceMessage):
service = oob_service_item
public_did = None
else:
# If it's in the did format, we need to convert to a full service block
# An existing connection can only be reused based on a public DID
# in an out-of-band message (RFC 0434).
service_did = oob_service_item
# TODO: resolve_invitation should resolve key_info objects
# or something else that includes the key type. We now assume
# ED25519 keys
endpoint, recipient_keys, routing_keys = await self.resolve_invitation(
service_did
)
public_did = service_did.split(":")[-1]
service = ServiceMessage.deserialize(
{
"id": "#inline",
"type": "did-communication",
"recipientKeys": [
DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in recipient_keys
],
"routingKeys": [
DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in routing_keys
],
"serviceEndpoint": endpoint,
}
)
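        # Deduplicate handshake protocols (preserving order) after stripping
        # the DIDComm prefix from each entry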
unq_handshake_protos = [
HSProto.get(hsp)
for hsp in dict.fromkeys(
[
DIDCommPrefix.unqualify(proto)
for proto in invi_msg.handshake_protocols
]
)
]
# Reuse Connection - only if started by an invitation with Public DID
conn_rec = None
if public_did is not None: # invite has public DID: seek existing connection
tag_filter = {}
post_filter = {}
# post_filter["state"] = ConnRecord.State.COMPLETED.rfc160
post_filter["their_public_did"] = public_did
conn_rec = await self.find_existing_connection(
tag_filter=tag_filter, post_filter=post_filter
)
if conn_rec is not None:
num_included_protocols = len(unq_handshake_protos)
num_included_req_attachments = len(invi_msg.requests_attach)
                # Handshake protocol(s) included, no request attachments, and
                # reuse allowed: try to reuse the existing connection
if (
num_included_protocols >= 1
and num_included_req_attachments == 0
and use_existing_connection
):
await self.create_handshake_reuse_message(
invi_msg=invi_msg,
conn_record=conn_rec,
)
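                    # Wait up to 15 seconds for a reuse-accepted or problem-report
                    # response, tracked via the connection record metadata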
try:
await asyncio.wait_for(
self.check_reuse_msg_state(
conn_rec=conn_rec,
),
15,
)
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_id"
)
if (
await conn_rec.metadata_get(self._session, "reuse_msg_state")
== "not_accepted"
):
conn_rec = None
else:
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_state"
)
except asyncio.TimeoutError:
                        # If no reuse_accepted or problem_report message was received
                        # within the 15s timeout, a new connection will be created
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_id"
)
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_state"
)
conn_rec.state = ConnRecord.State.ABANDONED.rfc160
await conn_rec.save(self._session, reason="Sent connection request")
conn_rec = None
# Inverse of the following cases
# Handshake_Protocol not included
# Request_Attachment included
# Use_Existing_Connection Yes
# Handshake_Protocol included
# Request_Attachment included
# Use_Existing_Connection Yes
elif not (
(
num_included_protocols == 0
and num_included_req_attachments >= 1
and use_existing_connection
)
or (
num_included_protocols >= 1
and num_included_req_attachments >= 1
and use_existing_connection
)
):
conn_rec = None
if conn_rec is None:
if not unq_handshake_protos:
raise OutOfBandManagerError(
"No existing connection exists and handshake_protocol is missing"
)
# Create a new connection
for proto in unq_handshake_protos:
if proto is HSProto.RFC23:
didx_mgr = DIDXManager(self._session)
conn_rec = await didx_mgr.receive_invitation(
invitation=invi_msg,
their_public_did=public_did,
auto_accept=auto_accept,
alias=alias,
mediation_id=mediation_id,
)
elif proto is HSProto.RFC160:
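                    # RFC 0160 invitations expect bare base58 keys, so convert
                    # any did:key entries back to their base58 form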
service.recipient_keys = [
DIDKey.from_did(key).public_key_b58
for key in service.recipient_keys or []
]
service.routing_keys = [
DIDKey.from_did(key).public_key_b58
for key in service.routing_keys
] or []
connection_invitation = ConnectionInvitation.deserialize(
{
"@id": invi_msg._id,
"@type": DIDCommPrefix.qualify_current(proto.name),
"label": invi_msg.label,
"recipientKeys": service.recipient_keys,
"serviceEndpoint": service.service_endpoint,
"routingKeys": service.routing_keys,
}
)
conn_mgr = ConnectionManager(self._session)
conn_rec = await conn_mgr.receive_invitation(
invitation=connection_invitation,
their_public_did=public_did,
auto_accept=auto_accept,
alias=alias,
mediation_id=mediation_id,
)
if conn_rec is not None:
break
# Request Attach
if len(invi_msg.requests_attach) >= 1 and conn_rec is not None:
req_attach = invi_msg.requests_attach[0]
if isinstance(req_attach, AttachDecorator):
if req_attach.data is not None:
unq_req_attach_type = DIDCommPrefix.unqualify(
req_attach.content["@type"]
)
if unq_req_attach_type == PRESENTATION_REQUEST:
await self._process_pres_request_v1(
req_attach=req_attach,
service=service,
conn_rec=conn_rec,
trace=(invi_msg._trace is not None),
)
elif unq_req_attach_type == PRES_20_REQUEST:
await self._process_pres_request_v2(
req_attach=req_attach,
service=service,
conn_rec=conn_rec,
trace=(invi_msg._trace is not None),
)
else:
raise OutOfBandManagerError(
(
"Unsupported requests~attach type "
f"{req_attach.content['@type']}: must unqualify to"
f"{PRESENTATION_REQUEST} or {PRES_20_REQUEST}"
)
)
else:
raise OutOfBandManagerError("requests~attach is not properly formatted")
return conn_rec.serialize()
async def _process_pres_request_v1(
self,
req_attach: AttachDecorator,
service: ServiceMessage,
conn_rec: ConnRecord,
trace: bool,
):
"""
Create exchange for v1 pres request attachment, auto-present if configured.
Args:
req_attach: request attachment on invitation
service: service message from invitation
conn_rec: connection record
trace: trace setting for presentation exchange record
"""
pres_mgr = PresentationManager(self._session.profile)
pres_request_msg = req_attach.content
indy_proof_request = json.loads(
b64_to_bytes(
pres_request_msg["request_presentations~attach"][0]["data"]["base64"]
)
)
oob_invi_service = service.serialize()
pres_request_msg["~service"] = {
"recipientKeys": oob_invi_service.get("recipientKeys"),
"routingKeys": oob_invi_service.get("routingKeys"),
"serviceEndpoint": oob_invi_service.get("serviceEndpoint"),
}
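        # Attach the invitation's service block as a ~service decorator so the
        # requester's endpoint and keys travel with the stored request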
pres_ex_record = V10PresentationExchange(
connection_id=conn_rec.connection_id,
thread_id=pres_request_msg["@id"],
initiator=V10PresentationExchange.INITIATOR_EXTERNAL,
role=V10PresentationExchange.ROLE_PROVER,
presentation_request=indy_proof_request,
presentation_request_dict=pres_request_msg,
auto_present=self._session.context.settings.get(
"debug.auto_respond_presentation_request"
),
trace=trace,
)
pres_ex_record = await pres_mgr.receive_request(pres_ex_record)
if pres_ex_record.auto_present:
try:
req_creds = await indy_proof_req_preview2indy_requested_creds(
indy_proof_req=indy_proof_request,
preview=None,
holder=self._session.inject(IndyHolder),
)
except ValueError as err:
self._logger.warning(f"{err}")
raise OutOfBandManagerError(
f"Cannot auto-respond to presentation request attachment: {err}"
)
(pres_ex_record, presentation_message) = await pres_mgr.create_presentation(
presentation_exchange_record=pres_ex_record,
requested_credentials=req_creds,
comment=(
"auto-presented for proof request nonce={}".format(
indy_proof_request["nonce"]
)
),
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=presentation_message,
target_list=await self.fetch_connection_targets(
connection=conn_rec
),
)
else:
raise OutOfBandManagerError(
(
"Configuration sets auto_present false: cannot "
"respond automatically to presentation requests"
)
)
async def _process_pres_request_v2(
self,
req_attach: AttachDecorator,
service: ServiceMessage,
conn_rec: ConnRecord,
trace: bool,
):
"""
Create exchange for v2 pres request attachment, auto-present if configured.
Args:
req_attach: request attachment on invitation
service: service message from invitation
conn_rec: connection record
trace: trace setting for presentation exchange record
"""
pres_mgr = V20PresManager(self._session.profile)
pres_request_msg = req_attach.content
oob_invi_service = service.serialize()
pres_request_msg["~service"] = {
"recipientKeys": oob_invi_service.get("recipientKeys"),
"routingKeys": oob_invi_service.get("routingKeys"),
"serviceEndpoint": oob_invi_service.get("serviceEndpoint"),
}
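        # As in the v1 flow, carry the inviter's service block along with the
        # stored presentation request via the ~service decorator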
pres_ex_record = V20PresExRecord(
connection_id=conn_rec.connection_id,
thread_id=pres_request_msg["@id"],
initiator=V20PresExRecord.INITIATOR_EXTERNAL,
role=V20PresExRecord.ROLE_PROVER,
pres_request=pres_request_msg,
auto_present=self._session.context.settings.get(
"debug.auto_respond_presentation_request"
),
trace=trace,
)
pres_ex_record = await pres_mgr.receive_pres_request(pres_ex_record)
if pres_ex_record.auto_present:
indy_proof_request = V20PresRequest.deserialize(
pres_request_msg
).attachment(
V20PresFormat.Format.INDY
) # assumption will change for DIF
try:
req_creds = await indy_proof_req_preview2indy_requested_creds(
indy_proof_req=indy_proof_request,
preview=None,
holder=self._session.inject(IndyHolder),
)
except ValueError as err:
self._logger.warning(f"{err}")
raise OutOfBandManagerError(
f"Cannot auto-respond to presentation request attachment: {err}"
)
(pres_ex_record, pres_msg) = await pres_mgr.create_pres(
pres_ex_record=pres_ex_record,
requested_credentials=req_creds,
comment=(
"auto-presented for proof request nonce={}".format(
indy_proof_request["nonce"]
)
),
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=pres_msg,
target_list=await self.fetch_connection_targets(
connection=conn_rec
),
)
else:
raise OutOfBandManagerError(
(
"Configuration sets auto_present false: cannot "
"respond automatically to presentation requests"
)
)
async def find_existing_connection(
self,
tag_filter: dict,
post_filter: dict,
) -> Optional[ConnRecord]:
"""
Find existing ConnRecord.
Args:
tag_filter: The filter dictionary to apply
post_filter: Additional value filters to apply matching positively,
with sequence values specifying alternatives to match (hit any)
Returns:
ConnRecord or None
"""
conn_records = await ConnRecord.query(
self._session,
tag_filter=tag_filter,
post_filter_positive=post_filter,
alt=True,
)
if not conn_records:
return None
else:
for conn_rec in conn_records:
if conn_rec.state == "active":
return conn_rec
return None
async def check_reuse_msg_state(
self,
conn_rec: ConnRecord,
):
"""
Check reuse message state from the ConnRecord Metadata.
Args:
conn_rec: The required ConnRecord with updated metadata
Returns:
"""
received = False
while not received:
if (
not await conn_rec.metadata_get(self._session, "reuse_msg_state")
== "initial"
):
received = True
return
async def create_handshake_reuse_message(
self,
invi_msg: InvitationMessage,
conn_record: ConnRecord,
) -> None:
"""
        Create and send a Handshake Reuse message under RFC 0434.
        Args:
            invi_msg: OOB Invitation Message
            conn_record: Connection record of the existing connection to reuse
        Returns:
        Raises:
            OutOfBandManagerError: If there is an issue creating or
                sending the handshake reuse message
"""
try:
# ID of Out-of-Band invitation to use as a pthid
# pthid = invi_msg._id
pthid = conn_record.invitation_msg_id
reuse_msg = HandshakeReuse()
thid = reuse_msg._id
reuse_msg.assign_thread_id(thid=thid, pthid=pthid)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=reuse_msg,
target_list=connection_targets,
)
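            # Record the reuse message id and an "initial" state so the
            # reuse-accept and problem-report handlers can correlate and update it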
await conn_record.metadata_set(
session=self._session, key="reuse_msg_id", value=reuse_msg._id
)
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="initial"
)
except Exception as err:
raise OutOfBandManagerError(
f"Error on creating and sending a handshake reuse message: {err}"
)
async def receive_reuse_message(
self,
reuse_msg: HandshakeReuse,
receipt: MessageReceipt,
) -> None:
"""
Receive and process a HandshakeReuse message under RFC 0434.
Process a `HandshakeReuse` message by looking up
the connection records using the MessageReceipt sender DID.
Args:
reuse_msg: The `HandshakeReuse` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: If the existing connection is not active
                or the connection does not exist
"""
try:
invi_msg_id = reuse_msg._thread.pthid
reuse_msg_id = reuse_msg._thread.thid
tag_filter = {}
post_filter = {}
# post_filter["state"] = "active"
# tag_filter["their_did"] = receipt.sender_did
post_filter["invitation_msg_id"] = invi_msg_id
conn_record = await self.find_existing_connection(
tag_filter=tag_filter, post_filter=post_filter
)
responder = self._session.inject(BaseResponder, required=False)
if conn_record is not None:
# For ConnRecords created using did-exchange
reuse_accept_msg = HandshakeReuseAccept()
reuse_accept_msg.assign_thread_id(thid=reuse_msg_id, pthid=invi_msg_id)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
if responder:
await responder.send(
message=reuse_accept_msg,
target_list=connection_targets,
)
                # This is not required now, as we attach the invitation_msg_id
                # from the original invitation [of the existing connection]
#
# Delete the ConnRecord created; re-use existing connection
# invi_id_post_filter = {}
# invi_id_post_filter["invitation_msg_id"] = invi_msg_id
# conn_rec_to_delete = await self.find_existing_connection(
# tag_filter={},
# post_filter=invi_id_post_filter,
# )
# if conn_rec_to_delete is not None:
# if conn_record.connection_id != conn_rec_to_delete.connection_id:
# await conn_rec_to_delete.delete_record(session=self._session)
else:
conn_record = await self.find_existing_connection(
tag_filter={"their_did": receipt.sender_did}, post_filter={}
)
# Problem Report is redundant in this case as with no active
                # connection, it cannot reach the invitee anyway
if conn_record is not None:
# For ConnRecords created using RFC 0160 connections
reuse_accept_msg = HandshakeReuseAccept()
reuse_accept_msg.assign_thread_id(
thid=reuse_msg_id, pthid=invi_msg_id
)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
if responder:
await responder.send(
message=reuse_accept_msg,
target_list=connection_targets,
)
except StorageNotFoundError:
raise OutOfBandManagerError(
(f"No existing ConnRecord found for OOB Invitee, {receipt.sender_did}"),
)
async def receive_reuse_accepted_message(
self,
reuse_accepted_msg: HandshakeReuseAccept,
receipt: MessageReceipt,
conn_record: ConnRecord,
) -> None:
"""
Receive and process a HandshakeReuseAccept message under RFC 0434.
Process a `HandshakeReuseAccept` message by updating the ConnRecord metadata
state to `accepted`.
Args:
reuse_accepted_msg: The `HandshakeReuseAccept` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: if there is an error in processing the
HandshakeReuseAccept message
"""
try:
invi_msg_id = reuse_accepted_msg._thread.pthid
thread_reuse_msg_id = reuse_accepted_msg._thread.thid
conn_reuse_msg_id = await conn_record.metadata_get(
session=self._session, key="reuse_msg_id"
)
assert thread_reuse_msg_id == conn_reuse_msg_id
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="accepted"
)
except Exception as e:
raise OutOfBandManagerError(
(
(
"Error processing reuse accepted message "
f"for OOB invitation {invi_msg_id}, {e}"
)
)
)
async def receive_problem_report(
self,
problem_report: OOBProblemReport,
receipt: MessageReceipt,
conn_record: ConnRecord,
) -> None:
"""
Receive and process a ProblemReport message from the inviter to invitee.
Process a `ProblemReport` message by updating the ConnRecord metadata
state to `not_accepted`.
Args:
problem_report: The `OOBProblemReport` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: if there is an error in processing the
                OOBProblemReport message
"""
try:
invi_msg_id = problem_report._thread.pthid
thread_reuse_msg_id = problem_report._thread.thid
conn_reuse_msg_id = await conn_record.metadata_get(
session=self._session, key="reuse_msg_id"
)
assert thread_reuse_msg_id == conn_reuse_msg_id
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="not_accepted"
)
except Exception as e:
raise OutOfBandManagerError(
(
(
"Error processing problem report message "
f"for OOB invitation {invi_msg_id}, {e}"
)
)
)
| """Classes to manage connections."""
import asyncio
import json
import logging
from typing import Mapping, Sequence, Optional
from ....connections.base_manager import BaseConnectionManager
from ....connections.models.conn_record import ConnRecord
from ....connections.util import mediation_record_if_id
from ....core.error import BaseError
from ....core.profile import ProfileSession
from ....indy.holder import IndyHolder
from ....indy.sdk.models.xform import indy_proof_req_preview2indy_requested_creds
from ....messaging.responder import BaseResponder
from ....messaging.decorators.attach_decorator import AttachDecorator
from ....multitenant.manager import MultitenantManager
from ....storage.error import StorageNotFoundError
from ....transport.inbound.receipt import MessageReceipt
from ....wallet.base import BaseWallet
from ....wallet.util import b64_to_bytes
from ....wallet.key_type import KeyType
from ....did.did_key import DIDKey
from ...coordinate_mediation.v1_0.manager import MediationManager
from ...connections.v1_0.manager import ConnectionManager
from ...connections.v1_0.messages.connection_invitation import ConnectionInvitation
from ...didcomm_prefix import DIDCommPrefix
from ...didexchange.v1_0.manager import DIDXManager
from ...issue_credential.v1_0.models.credential_exchange import V10CredentialExchange
from ...issue_credential.v2_0.models.cred_ex_record import V20CredExRecord
from ...present_proof.v1_0.manager import PresentationManager
from ...present_proof.v1_0.message_types import PRESENTATION_REQUEST
from ...present_proof.v1_0.models.presentation_exchange import V10PresentationExchange
from ...present_proof.v2_0.manager import V20PresManager
from ...present_proof.v2_0.message_types import PRES_20_REQUEST
from ...present_proof.v2_0.messages.pres_format import V20PresFormat
from ...present_proof.v2_0.messages.pres_request import V20PresRequest
from ...present_proof.v2_0.models.pres_exchange import V20PresExRecord
from .messages.invitation import HSProto, InvitationMessage
from .messages.problem_report import OOBProblemReport
from .messages.reuse import HandshakeReuse
from .messages.reuse_accept import HandshakeReuseAccept
from .messages.service import Service as ServiceMessage
from .models.invitation import InvitationRecord
class OutOfBandManagerError(BaseError):
"""Out of band error."""
class OutOfBandManagerNotImplementedError(BaseError):
"""Out of band error for unimplemented functionality."""
class OutOfBandManager(BaseConnectionManager):
"""Class for managing out of band messages."""
def __init__(self, session: ProfileSession):
"""
Initialize a OutOfBandManager.
Args:
session: The profile session for this out of band manager
"""
self._session = session
self._logger = logging.getLogger(__name__)
super().__init__(self._session)
@property
def session(self) -> ProfileSession:
"""
Accessor for the current profile session.
Returns:
The profile session for this connection manager
"""
return self._session
async def create_invitation(
self,
my_label: str = None,
my_endpoint: str = None,
auto_accept: bool = None,
public: bool = False,
hs_protos: Sequence[HSProto] = None,
multi_use: bool = False,
alias: str = None,
attachments: Sequence[Mapping] = None,
metadata: dict = None,
mediation_id: str = None,
) -> InvitationRecord:
"""
Generate new connection invitation.
This interaction represents an out-of-band communication channel. In the future
and in practice, these sort of invitations will be received over any number of
channels such as SMS, Email, QR Code, NFC, etc.
Args:
my_label: label for this connection
my_endpoint: endpoint where other party can reach me
auto_accept: auto-accept a corresponding connection request
(None to use config)
public: set to create an invitation from the public DID
hs_protos: list of handshake protocols to include
multi_use: set to True to create an invitation for multiple-use connection
alias: optional alias to apply to connection for later use
attachments: list of dicts in form of {"id": ..., "type": ...}
Returns:
Invitation record
"""
mediation_mgr = MediationManager(self._session.profile)
mediation_record = await mediation_record_if_id(
self._session,
mediation_id,
or_default=True,
)
keylist_updates = None
if not (hs_protos or attachments):
raise OutOfBandManagerError(
"Invitation must include handshake protocols, "
"request attachments, or both"
)
wallet = self._session.inject(BaseWallet)
# Multitenancy setup
multitenant_mgr = self._session.inject(MultitenantManager, required=False)
wallet_id = self._session.settings.get("wallet.id")
accept = bool(
auto_accept
or (
auto_accept is None
and self._session.settings.get("debug.auto_accept_requests")
)
)
if public:
if multi_use:
raise OutOfBandManagerError(
"Cannot create public invitation with multi_use"
)
if metadata:
raise OutOfBandManagerError(
"Cannot store metadata on public invitations"
)
message_attachments = []
for atch in attachments or []:
a_type = atch.get("type")
a_id = atch.get("id")
if a_type == "credential-offer":
try:
cred_ex_rec = await V10CredentialExchange.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
cred_ex_rec.credential_offer_dict
)
)
except StorageNotFoundError:
cred_ex_rec = await V20CredExRecord.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(cred_ex_rec.cred_offer.offer())
)
elif a_type == "present-proof":
try:
pres_ex_rec = await V10PresentationExchange.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
pres_ex_rec.presentation_request_dict
)
)
except StorageNotFoundError:
pres_ex_rec = await V20PresExRecord.retrieve_by_id(
self._session,
a_id,
)
message_attachments.append(
InvitationMessage.wrap_message(
pres_ex_rec.pres_request.attachment()
)
)
else:
raise OutOfBandManagerError(f"Unknown attachment type: {a_type}")
handshake_protocols = [
DIDCommPrefix.qualify_current(hsp.name) for hsp in hs_protos or []
] or None
if public:
if not self._session.settings.get("public_invites"):
raise OutOfBandManagerError("Public invitations are not enabled")
public_did = await wallet.get_public_did()
if not public_did:
raise OutOfBandManagerError(
"Cannot create public invitation with no public DID"
)
invi_msg = InvitationMessage( # create invitation message
label=my_label or self._session.settings.get("default_label"),
handshake_protocols=handshake_protocols,
requests_attach=message_attachments,
services=[f"did:sov:{public_did.did}"],
)
keylist_updates = await mediation_mgr.add_key(
public_did.verkey, keylist_updates
)
endpoint, *_ = await self.resolve_invitation(public_did.did)
invi_url = invi_msg.to_url(endpoint)
conn_rec = ConnRecord( # create connection record
invitation_key=public_did.verkey,
invitation_msg_id=invi_msg._id,
their_role=ConnRecord.Role.REQUESTER.rfc23,
state=ConnRecord.State.INVITATION.rfc23,
accept=ConnRecord.ACCEPT_AUTO if accept else ConnRecord.ACCEPT_MANUAL,
alias=alias,
)
await conn_rec.save(self._session, reason="Created new invitation")
await conn_rec.attach_invitation(self._session, invi_msg)
if multitenant_mgr and wallet_id: # add mapping for multitenant relay
await multitenant_mgr.add_key(
wallet_id, public_did.verkey, skip_if_exists=True
)
else:
invitation_mode = (
ConnRecord.INVITATION_MODE_MULTI
if multi_use
else ConnRecord.INVITATION_MODE_ONCE
)
if not my_endpoint:
my_endpoint = self._session.settings.get("default_endpoint")
# Create and store new invitation key
connection_key = await wallet.create_signing_key(KeyType.ED25519)
keylist_updates = await mediation_mgr.add_key(
connection_key.verkey, keylist_updates
)
# Add mapping for multitenant relay
if multitenant_mgr and wallet_id:
await multitenant_mgr.add_key(wallet_id, connection_key.verkey)
# Create connection record
conn_rec = ConnRecord(
invitation_key=connection_key.verkey,
their_role=ConnRecord.Role.REQUESTER.rfc23,
state=ConnRecord.State.INVITATION.rfc23,
accept=ConnRecord.ACCEPT_AUTO if accept else ConnRecord.ACCEPT_MANUAL,
invitation_mode=invitation_mode,
alias=alias,
)
await conn_rec.save(self._session, reason="Created new connection")
routing_keys = []
# The base wallet can act as a mediator for all tenants
if multitenant_mgr and wallet_id:
base_mediation_record = await multitenant_mgr.get_default_mediator()
if base_mediation_record:
routing_keys = base_mediation_record.routing_keys
my_endpoint = base_mediation_record.endpoint
# If we use a mediator for the base wallet we don't
# need to register the key at the subwallet mediator
# because it only needs to know the key of the base mediator
# sub wallet mediator -> base wallet mediator -> agent
keylist_updates = None
if mediation_record:
routing_keys = [*routing_keys, *mediation_record.routing_keys]
my_endpoint = mediation_record.endpoint
# Save that this invitation was created with mediation
await conn_rec.metadata_set(
self._session, "mediation", {"id": mediation_id}
)
if keylist_updates:
responder = self._session.inject(BaseResponder, required=False)
await responder.send(
keylist_updates, connection_id=mediation_record.connection_id
)
routing_keys = [
key
if len(key.split(":")) == 3
else DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in routing_keys
]
# Create connection invitation message
# Note: Need to split this into two stages to support inbound routing
# of invitations
# Would want to reuse create_did_document and convert the result
invi_msg = InvitationMessage(
label=my_label or self._session.settings.get("default_label"),
handshake_protocols=handshake_protocols,
requests_attach=message_attachments,
services=[
ServiceMessage(
_id="#inline",
_type="did-communication",
recipient_keys=[
DIDKey.from_public_key_b58(
connection_key.verkey, KeyType.ED25519
).did
],
service_endpoint=my_endpoint,
routing_keys=routing_keys,
)
],
)
invi_url = invi_msg.to_url()
# Update connection record
conn_rec.invitation_msg_id = invi_msg._id
await conn_rec.save(self._session, reason="Added Invitation")
await conn_rec.attach_invitation(self._session, invi_msg)
if metadata:
for key, value in metadata.items():
await conn_rec.metadata_set(self._session, key, value)
return InvitationRecord( # for return via admin API, not storage
state=InvitationRecord.STATE_INITIAL,
invi_msg_id=invi_msg._id,
invitation=invi_msg,
invitation_url=invi_url,
)
async def receive_invitation(
self,
invi_msg: InvitationMessage,
use_existing_connection: bool = True,
auto_accept: bool = None,
alias: str = None,
mediation_id: str = None,
) -> dict:
"""
Receive an out of band invitation message.
Args:
invi_msg: invitation message
use_existing_connection: whether to use existing connection if possible
auto_accept: whether to accept the invitation automatically
alias: Alias for connection record
mediation_id: mediation identifier
Returns:
ConnRecord, serialized
"""
if mediation_id:
try:
await mediation_record_if_id(self._session, mediation_id)
except StorageNotFoundError:
mediation_id = None
# There must be exactly 1 service entry
if len(invi_msg.services) != 1:
raise OutOfBandManagerError("service array must have exactly one element")
if not (invi_msg.requests_attach or invi_msg.handshake_protocols):
raise OutOfBandManagerError(
"Invitation must specify handshake_protocols, requests_attach, or both"
)
# Get the single service item
oob_service_item = invi_msg.services[0]
if isinstance(oob_service_item, ServiceMessage):
service = oob_service_item
public_did = None
else:
# If it's in the did format, we need to convert to a full service block
# An existing connection can only be reused based on a public DID
# in an out-of-band message (RFC 0434).
service_did = oob_service_item
# TODO: resolve_invitation should resolve key_info objects
# or something else that includes the key type. We now assume
# ED25519 keys
endpoint, recipient_keys, routing_keys = await self.resolve_invitation(
service_did
)
public_did = service_did.split(":")[-1]
service = ServiceMessage.deserialize(
{
"id": "#inline",
"type": "did-communication",
"recipientKeys": [
DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in recipient_keys
],
"routingKeys": [
DIDKey.from_public_key_b58(key, KeyType.ED25519).did
for key in routing_keys
],
"serviceEndpoint": endpoint,
}
)
unq_handshake_protos = [
HSProto.get(hsp)
for hsp in dict.fromkeys(
[
DIDCommPrefix.unqualify(proto)
for proto in invi_msg.handshake_protocols
]
)
]
# Reuse Connection - only if started by an invitation with Public DID
conn_rec = None
if public_did is not None: # invite has public DID: seek existing connection
tag_filter = {}
post_filter = {}
# post_filter["state"] = ConnRecord.State.COMPLETED.rfc160
post_filter["their_public_did"] = public_did
conn_rec = await self.find_existing_connection(
tag_filter=tag_filter, post_filter=post_filter
)
if conn_rec is not None:
num_included_protocols = len(unq_handshake_protos)
num_included_req_attachments = len(invi_msg.requests_attach)
# With handshake protocol, request attachment; use existing connection
if (
num_included_protocols >= 1
and num_included_req_attachments == 0
and use_existing_connection
):
await self.create_handshake_reuse_message(
invi_msg=invi_msg,
conn_record=conn_rec,
)
try:
await asyncio.wait_for(
self.check_reuse_msg_state(
conn_rec=conn_rec,
),
15,
)
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_id"
)
if (
await conn_rec.metadata_get(self._session, "reuse_msg_state")
== "not_accepted"
):
conn_rec = None
else:
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_state"
)
except asyncio.TimeoutError:
# If no reuse_accepted or problem_report message was received within
# the 15s timeout then a new connection to be created
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_id"
)
await conn_rec.metadata_delete(
session=self._session, key="reuse_msg_state"
)
conn_rec.state = ConnRecord.State.ABANDONED.rfc160
await conn_rec.save(self._session, reason="Sent connection request")
conn_rec = None
# Inverse of the following cases
# Handshake_Protocol not included
# Request_Attachment included
# Use_Existing_Connection Yes
# Handshake_Protocol included
# Request_Attachment included
# Use_Existing_Connection Yes
elif not (
(
num_included_protocols == 0
and num_included_req_attachments >= 1
and use_existing_connection
)
or (
num_included_protocols >= 1
and num_included_req_attachments >= 1
and use_existing_connection
)
):
conn_rec = None
if conn_rec is None:
if not unq_handshake_protos:
raise OutOfBandManagerError(
"No existing connection exists and handshake_protocol is missing"
)
# Create a new connection
for proto in unq_handshake_protos:
if proto is HSProto.RFC23:
didx_mgr = DIDXManager(self._session)
conn_rec = await didx_mgr.receive_invitation(
invitation=invi_msg,
their_public_did=public_did,
auto_accept=auto_accept,
alias=alias,
mediation_id=mediation_id,
)
elif proto is HSProto.RFC160:
service.recipient_keys = [
DIDKey.from_did(key).public_key_b58
for key in service.recipient_keys or []
]
service.routing_keys = [
DIDKey.from_did(key).public_key_b58
for key in service.routing_keys
] or []
connection_invitation = ConnectionInvitation.deserialize(
{
"@id": invi_msg._id,
"@type": DIDCommPrefix.qualify_current(proto.name),
"label": invi_msg.label,
"recipientKeys": service.recipient_keys,
"serviceEndpoint": service.service_endpoint,
"routingKeys": service.routing_keys,
}
)
conn_mgr = ConnectionManager(self._session)
conn_rec = await conn_mgr.receive_invitation(
invitation=connection_invitation,
their_public_did=public_did,
auto_accept=auto_accept,
alias=alias,
mediation_id=mediation_id,
)
if conn_rec is not None:
break
# Request Attach
if len(invi_msg.requests_attach) >= 1 and conn_rec is not None:
req_attach = invi_msg.requests_attach[0]
if isinstance(req_attach, AttachDecorator):
if req_attach.data is not None:
unq_req_attach_type = DIDCommPrefix.unqualify(
req_attach.content["@type"]
)
if unq_req_attach_type == PRESENTATION_REQUEST:
await self._process_pres_request_v1(
req_attach=req_attach,
service=service,
conn_rec=conn_rec,
trace=(invi_msg._trace is not None),
)
elif unq_req_attach_type == PRES_20_REQUEST:
await self._process_pres_request_v2(
req_attach=req_attach,
service=service,
conn_rec=conn_rec,
trace=(invi_msg._trace is not None),
)
else:
raise OutOfBandManagerError(
(
"Unsupported requests~attach type "
f"{req_attach.content['@type']}: must unqualify to"
f"{PRESENTATION_REQUEST} or {PRES_20_REQUEST}"
)
)
else:
raise OutOfBandManagerError("requests~attach is not properly formatted")
return conn_rec.serialize()
async def _process_pres_request_v1(
self,
req_attach: AttachDecorator,
service: ServiceMessage,
conn_rec: ConnRecord,
trace: bool,
):
"""
Create exchange for v1 pres request attachment, auto-present if configured.
Args:
req_attach: request attachment on invitation
service: service message from invitation
conn_rec: connection record
trace: trace setting for presentation exchange record
"""
pres_mgr = PresentationManager(self._session.profile)
pres_request_msg = req_attach.content
indy_proof_request = json.loads(
b64_to_bytes(
pres_request_msg["request_presentations~attach"][0]["data"]["base64"]
)
)
oob_invi_service = service.serialize()
pres_request_msg["~service"] = {
"recipientKeys": oob_invi_service.get("recipientKeys"),
"routingKeys": oob_invi_service.get("routingKeys"),
"serviceEndpoint": oob_invi_service.get("serviceEndpoint"),
}
pres_ex_record = V10PresentationExchange(
connection_id=conn_rec.connection_id,
thread_id=pres_request_msg["@id"],
initiator=V10PresentationExchange.INITIATOR_EXTERNAL,
role=V10PresentationExchange.ROLE_PROVER,
presentation_request=indy_proof_request,
presentation_request_dict=pres_request_msg,
auto_present=self._session.context.settings.get(
"debug.auto_respond_presentation_request"
),
trace=trace,
)
pres_ex_record = await pres_mgr.receive_request(pres_ex_record)
if pres_ex_record.auto_present:
try:
req_creds = await indy_proof_req_preview2indy_requested_creds(
indy_proof_req=indy_proof_request,
preview=None,
holder=self._session.inject(IndyHolder),
)
except ValueError as err:
self._logger.warning(f"{err}")
raise OutOfBandManagerError(
f"Cannot auto-respond to presentation request attachment: {err}"
)
(pres_ex_record, presentation_message) = await pres_mgr.create_presentation(
presentation_exchange_record=pres_ex_record,
requested_credentials=req_creds,
comment=(
"auto-presented for proof request nonce={}".format(
indy_proof_request["nonce"]
)
),
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=presentation_message,
target_list=await self.fetch_connection_targets(
connection=conn_rec
),
)
else:
raise OutOfBandManagerError(
(
"Configuration sets auto_present false: cannot "
"respond automatically to presentation requests"
)
)
async def _process_pres_request_v2(
self,
req_attach: AttachDecorator,
service: ServiceMessage,
conn_rec: ConnRecord,
trace: bool,
):
"""
Create exchange for v2 pres request attachment, auto-present if configured.
Args:
req_attach: request attachment on invitation
service: service message from invitation
conn_rec: connection record
trace: trace setting for presentation exchange record
"""
pres_mgr = V20PresManager(self._session.profile)
pres_request_msg = req_attach.content
oob_invi_service = service.serialize()
pres_request_msg["~service"] = {
"recipientKeys": oob_invi_service.get("recipientKeys"),
"routingKeys": oob_invi_service.get("routingKeys"),
"serviceEndpoint": oob_invi_service.get("serviceEndpoint"),
}
pres_ex_record = V20PresExRecord(
connection_id=conn_rec.connection_id,
thread_id=pres_request_msg["@id"],
initiator=V20PresExRecord.INITIATOR_EXTERNAL,
role=V20PresExRecord.ROLE_PROVER,
pres_request=pres_request_msg,
auto_present=self._session.context.settings.get(
"debug.auto_respond_presentation_request"
),
trace=trace,
)
pres_ex_record = await pres_mgr.receive_pres_request(pres_ex_record)
if pres_ex_record.auto_present:
indy_proof_request = V20PresRequest.deserialize(
pres_request_msg
).attachment(
V20PresFormat.Format.INDY
) # assumption will change for DIF
try:
req_creds = await indy_proof_req_preview2indy_requested_creds(
indy_proof_req=indy_proof_request,
preview=None,
holder=self._session.inject(IndyHolder),
)
except ValueError as err:
self._logger.warning(f"{err}")
raise OutOfBandManagerError(
f"Cannot auto-respond to presentation request attachment: {err}"
)
(pres_ex_record, pres_msg) = await pres_mgr.create_pres(
pres_ex_record=pres_ex_record,
requested_credentials=req_creds,
comment=(
"auto-presented for proof request nonce={}".format(
indy_proof_request["nonce"]
)
),
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=pres_msg,
target_list=await self.fetch_connection_targets(
connection=conn_rec
),
)
else:
raise OutOfBandManagerError(
(
"Configuration sets auto_present false: cannot "
"respond automatically to presentation requests"
)
)
async def find_existing_connection(
self,
tag_filter: dict,
post_filter: dict,
) -> Optional[ConnRecord]:
"""
Find existing ConnRecord.
Args:
tag_filter: The filter dictionary to apply
post_filter: Additional value filters to apply matching positively,
with sequence values specifying alternatives to match (hit any)
Returns:
ConnRecord or None
"""
conn_records = await ConnRecord.query(
self._session,
tag_filter=tag_filter,
post_filter_positive=post_filter,
alt=True,
)
if not conn_records:
return None
else:
for conn_rec in conn_records:
if conn_rec.state == "active":
return conn_rec
return None
async def check_reuse_msg_state(
self,
conn_rec: ConnRecord,
):
"""
Check reuse message state from the ConnRecord Metadata.
Args:
conn_rec: The required ConnRecord with updated metadata
Returns:
"""
received = False
while not received:
if (
not await conn_rec.metadata_get(self._session, "reuse_msg_state")
== "initial"
):
received = True
return
async def create_handshake_reuse_message(
self,
invi_msg: InvitationMessage,
conn_record: ConnRecord,
) -> None:
"""
Create and Send a Handshake Reuse message under RFC 0434.
Args:
invi_msg: OOB Invitation Message
service: Service block extracted from the OOB invitation
Returns:
Raises:
OutOfBandManagerError: If there is an issue creating or
sending the OOB invitation
"""
try:
# ID of Out-of-Band invitation to use as a pthid
# pthid = invi_msg._id
pthid = conn_record.invitation_msg_id
reuse_msg = HandshakeReuse()
thid = reuse_msg._id
reuse_msg.assign_thread_id(thid=thid, pthid=pthid)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
responder = self._session.inject(BaseResponder, required=False)
if responder:
await responder.send(
message=reuse_msg,
target_list=connection_targets,
)
await conn_record.metadata_set(
session=self._session, key="reuse_msg_id", value=reuse_msg._id
)
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="initial"
)
except Exception as err:
raise OutOfBandManagerError(
f"Error on creating and sending a handshake reuse message: {err}"
)
async def receive_reuse_message(
self,
reuse_msg: HandshakeReuse,
receipt: MessageReceipt,
) -> None:
"""
Receive and process a HandshakeReuse message under RFC 0434.
Process a `HandshakeReuse` message by looking up
the connection records using the MessageReceipt sender DID.
Args:
reuse_msg: The `HandshakeReuse` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: If the existing connection is not active
or the connection does not exists
"""
try:
invi_msg_id = reuse_msg._thread.pthid
reuse_msg_id = reuse_msg._thread.thid
tag_filter = {}
post_filter = {}
# post_filter["state"] = "active"
# tag_filter["their_did"] = receipt.sender_did
post_filter["invitation_msg_id"] = invi_msg_id
conn_record = await self.find_existing_connection(
tag_filter=tag_filter, post_filter=post_filter
)
responder = self._session.inject(BaseResponder, required=False)
if conn_record is not None:
# For ConnRecords created using did-exchange
reuse_accept_msg = HandshakeReuseAccept()
reuse_accept_msg.assign_thread_id(thid=reuse_msg_id, pthid=invi_msg_id)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
if responder:
await responder.send(
message=reuse_accept_msg,
target_list=connection_targets,
)
# This is not required as now we attaching the invitation_msg_id
# using original invitation [from existing connection]
#
# Delete the ConnRecord created; re-use existing connection
# invi_id_post_filter = {}
# invi_id_post_filter["invitation_msg_id"] = invi_msg_id
# conn_rec_to_delete = await self.find_existing_connection(
# tag_filter={},
# post_filter=invi_id_post_filter,
# )
# if conn_rec_to_delete is not None:
# if conn_record.connection_id != conn_rec_to_delete.connection_id:
# await conn_rec_to_delete.delete_record(session=self._session)
else:
conn_record = await self.find_existing_connection(
tag_filter={"their_did": receipt.sender_did}, post_filter={}
)
# Problem Report is redundant in this case as with no active
# connection, it cannot reach the invitee any way
if conn_record is not None:
# For ConnRecords created using RFC 0160 connections
reuse_accept_msg = HandshakeReuseAccept()
reuse_accept_msg.assign_thread_id(
thid=reuse_msg_id, pthid=invi_msg_id
)
connection_targets = await self.fetch_connection_targets(
connection=conn_record
)
if responder:
await responder.send(
message=reuse_accept_msg,
target_list=connection_targets,
)
except StorageNotFoundError:
raise OutOfBandManagerError(
(f"No existing ConnRecord found for OOB Invitee, {receipt.sender_did}"),
)
async def receive_reuse_accepted_message(
self,
reuse_accepted_msg: HandshakeReuseAccept,
receipt: MessageReceipt,
conn_record: ConnRecord,
) -> None:
"""
Receive and process a HandshakeReuseAccept message under RFC 0434.
Process a `HandshakeReuseAccept` message by updating the ConnRecord metadata
state to `accepted`.
Args:
reuse_accepted_msg: The `HandshakeReuseAccept` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: if there is an error in processing the
HandshakeReuseAccept message
"""
try:
invi_msg_id = reuse_accepted_msg._thread.pthid
thread_reuse_msg_id = reuse_accepted_msg._thread.thid
conn_reuse_msg_id = await conn_record.metadata_get(
session=self._session, key="reuse_msg_id"
)
assert thread_reuse_msg_id == conn_reuse_msg_id
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="accepted"
)
except Exception as e:
raise OutOfBandManagerError(
(
(
"Error processing reuse accepted message "
f"for OOB invitation {invi_msg_id}, {e}"
)
)
)
async def receive_problem_report(
self,
problem_report: OOBProblemReport,
receipt: MessageReceipt,
conn_record: ConnRecord,
) -> None:
"""
Receive and process a ProblemReport message from the inviter to invitee.
Process a `ProblemReport` message by updating the ConnRecord metadata
state to `not_accepted`.
Args:
problem_report: The `OOBProblemReport` to process
receipt: The message receipt
Returns:
Raises:
OutOfBandManagerError: if there is an error in processing the
HandshakeReuseAccept message
"""
try:
invi_msg_id = problem_report._thread.pthid
thread_reuse_msg_id = problem_report._thread.thid
conn_reuse_msg_id = await conn_record.metadata_get(
session=self._session, key="reuse_msg_id"
)
assert thread_reuse_msg_id == conn_reuse_msg_id
await conn_record.metadata_set(
session=self._session, key="reuse_msg_state", value="not_accepted"
)
except Exception as e:
raise OutOfBandManagerError(
(
(
"Error processing problem report message "
f"for OOB invitation {invi_msg_id}, {e}"
)
)
) | en | 0.735382 | Classes to manage connections. Out of band error. Out of band error for unimplemented functionality. Class for managing out of band messages. Initialize a OutOfBandManager. Args: session: The profile session for this out of band manager Accessor for the current profile session. Returns: The profile session for this connection manager Generate new connection invitation. This interaction represents an out-of-band communication channel. In the future and in practice, these sort of invitations will be received over any number of channels such as SMS, Email, QR Code, NFC, etc. Args: my_label: label for this connection my_endpoint: endpoint where other party can reach me auto_accept: auto-accept a corresponding connection request (None to use config) public: set to create an invitation from the public DID hs_protos: list of handshake protocols to include multi_use: set to True to create an invitation for multiple-use connection alias: optional alias to apply to connection for later use attachments: list of dicts in form of {"id": ..., "type": ...} Returns: Invitation record # Multitenancy setup # create invitation message # create connection record # add mapping for multitenant relay # Create and store new invitation key # Add mapping for multitenant relay # Create connection record # The base wallet can act as a mediator for all tenants # If we use a mediator for the base wallet we don't # need to register the key at the subwallet mediator # because it only needs to know the key of the base mediator # sub wallet mediator -> base wallet mediator -> agent # Save that this invitation was created with mediation # Create connection invitation message # Note: Need to split this into two stages to support inbound routing # of invitations # Would want to reuse create_did_document and convert the result # Update connection record # for return via admin API, not storage Receive an out of band invitation message. Args: invi_msg: invitation message use_existing_connection: whether to use existing connection if possible auto_accept: whether to accept the invitation automatically alias: Alias for connection record mediation_id: mediation identifier Returns: ConnRecord, serialized # There must be exactly 1 service entry # Get the single service item # If it's in the did format, we need to convert to a full service block # An existing connection can only be reused based on a public DID # in an out-of-band message (RFC 0434). # TODO: resolve_invitation should resolve key_info objects # or something else that includes the key type. We now assume # ED25519 keys # Reuse Connection - only if started by an invitation with Public DID # invite has public DID: seek existing connection # post_filter["state"] = ConnRecord.State.COMPLETED.rfc160 # With handshake protocol, request attachment; use existing connection # If no reuse_accepted or problem_report message was received within # the 15s timeout then a new connection to be created # Inverse of the following cases # Handshake_Protocol not included # Request_Attachment included # Use_Existing_Connection Yes # Handshake_Protocol included # Request_Attachment included # Use_Existing_Connection Yes # Create a new connection # Request Attach Create exchange for v1 pres request attachment, auto-present if configured. 
Args: req_attach: request attachment on invitation service: service message from invitation conn_rec: connection record trace: trace setting for presentation exchange record Create exchange for v2 pres request attachment, auto-present if configured. Args: req_attach: request attachment on invitation service: service message from invitation conn_rec: connection record trace: trace setting for presentation exchange record # assumption will change for DIF Find existing ConnRecord. Args: tag_filter: The filter dictionary to apply post_filter: Additional value filters to apply matching positively, with sequence values specifying alternatives to match (hit any) Returns: ConnRecord or None Check reuse message state from the ConnRecord Metadata. Args: conn_rec: The required ConnRecord with updated metadata Returns: Create and Send a Handshake Reuse message under RFC 0434. Args: invi_msg: OOB Invitation Message service: Service block extracted from the OOB invitation Returns: Raises: OutOfBandManagerError: If there is an issue creating or sending the OOB invitation # ID of Out-of-Band invitation to use as a pthid # pthid = invi_msg._id Receive and process a HandshakeReuse message under RFC 0434. Process a `HandshakeReuse` message by looking up the connection records using the MessageReceipt sender DID. Args: reuse_msg: The `HandshakeReuse` to process receipt: The message receipt Returns: Raises: OutOfBandManagerError: If the existing connection is not active or the connection does not exists # post_filter["state"] = "active" # tag_filter["their_did"] = receipt.sender_did # For ConnRecords created using did-exchange # This is not required as now we attaching the invitation_msg_id # using original invitation [from existing connection] # # Delete the ConnRecord created; re-use existing connection # invi_id_post_filter = {} # invi_id_post_filter["invitation_msg_id"] = invi_msg_id # conn_rec_to_delete = await self.find_existing_connection( # tag_filter={}, # post_filter=invi_id_post_filter, # ) # if conn_rec_to_delete is not None: # if conn_record.connection_id != conn_rec_to_delete.connection_id: # await conn_rec_to_delete.delete_record(session=self._session) # Problem Report is redundant in this case as with no active # connection, it cannot reach the invitee any way # For ConnRecords created using RFC 0160 connections Receive and process a HandshakeReuseAccept message under RFC 0434. Process a `HandshakeReuseAccept` message by updating the ConnRecord metadata state to `accepted`. Args: reuse_accepted_msg: The `HandshakeReuseAccept` to process receipt: The message receipt Returns: Raises: OutOfBandManagerError: if there is an error in processing the HandshakeReuseAccept message Receive and process a ProblemReport message from the inviter to invitee. Process a `ProblemReport` message by updating the ConnRecord metadata state to `not_accepted`. Args: problem_report: The `OOBProblemReport` to process receipt: The message receipt Returns: Raises: OutOfBandManagerError: if there is an error in processing the HandshakeReuseAccept message | 1.477814 | 1 |
app/core/tests/test_admin.py | PythonDjangoJavascript/advanced_django_rest_api_with_tdd | 0 | 6629952 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTest(TestCase):
"""Test admin side methods working"""
def setUp(self):
"""Setup method will be available to every method"""
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="<EMAIL>",
password="<PASSWORD>"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="<EMAIL>",
name="user",
password="<PASSWORD>"
)
def test_user_listed(self):
"""test created user listed in user list"""
url = reverse("admin:core_user_changelist")
res = self.client.get(url)
self.assertContains(res, self.user.email)
self.assertContains(res, self.user.name)
def test_user_edit_page(self):
"""Test user edit page available"""
# Thsi args value retuns the value and add it at the end of the url
# ie. /admin/core/user/user_id
url = reverse("admin:core_user_change", args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_user_create_page(self):
"""test user creae page available"""
url = reverse("admin:core_user_add")
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTest(TestCase):
"""Test admin side methods working"""
def setUp(self):
"""Setup method will be available to every method"""
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="<EMAIL>",
password="<PASSWORD>"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="<EMAIL>",
name="user",
password="<PASSWORD>"
)
def test_user_listed(self):
"""test created user listed in user list"""
url = reverse("admin:core_user_changelist")
res = self.client.get(url)
self.assertContains(res, self.user.email)
self.assertContains(res, self.user.name)
def test_user_edit_page(self):
"""Test user edit page available"""
# Thsi args value retuns the value and add it at the end of the url
# ie. /admin/core/user/user_id
url = reverse("admin:core_user_change", args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_user_create_page(self):
"""test user creae page available"""
url = reverse("admin:core_user_add")
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| en | 0.733805 | Test admin side methods working Setup method will be available to every method test created user listed in user list Test user edit page available # Thsi args value retuns the value and add it at the end of the url # ie. /admin/core/user/user_id test user creae page available | 2.620917 | 3 |
assesments/urls.py | gotoiot/service-django-rest-api | 0 | 6629953 | <filename>assesments/urls.py
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from assesments import views
app_name = 'assesments'
urlpatterns = [
path('', views.api_root, name='assesments-home'),
path('assesments/', views.AssesmentList.as_view(), name='assesment-list'),
path('assesments/<int:pk>/', views.AssesmentDetail.as_view(), name='assesment-detail'),
path('assesments/<int:pk>/status', views.AssesmentStatus.as_view(), name='assesment-status'),
path('assesments/<int:pk>/create', views.InstanceCreate.as_view(), name='instance-creation'),
path('instances/', views.InstanceList.as_view(), name='instance-list'),
path('instances/<uuid:pk>/', views.InstanceDetail.as_view(), name="instance-detail"),
path('instances/<uuid:pk>/test', views.InstanceTest.as_view(), name="instance-test"),
path('instances/<uuid:pk>/start', views.InstanceStart.as_view(), name="instance-start"),
path('instances/<uuid:pk>/questions/<int:q_id>', views.InstanceQuestionDetail.as_view(), name="instance-question-detail"),
path('instances/<uuid:pk>/answer', views.InstanceAnswer.as_view(), name="instance-answer"),
path('instances/<uuid:pk>/end', views.InstanceEnd.as_view(), name="instance-end"),
path('instances/<uuid:pk>/result', views.InstanceResult.as_view(), name="instance-result"),
path('instances/restore', views.InstanceRestore.as_view(), name="instance-restore"),
path('takers/', views.TakerList.as_view(), name='taker-list'),
path('takers/<int:pk>/', views.TakerDetail.as_view(), name="taker-detail"),
path('takers/me', views.TakerDetailMeAnswer.as_view(), name='taker-detail-me'),
path('questions/', views.QuestionList.as_view(), name='question-list'),
path('questions/<int:pk>/', views.QuestionDetail.as_view(), name='question-detail'),
path('options/', views.OptionList.as_view(), name='option-list'),
path('options/<int:pk>/', views.OptionDetail.as_view(), name='option-detail'),
]
urlpatterns = format_suffix_patterns(urlpatterns) | <filename>assesments/urls.py
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from assesments import views
app_name = 'assesments'
urlpatterns = [
path('', views.api_root, name='assesments-home'),
path('assesments/', views.AssesmentList.as_view(), name='assesment-list'),
path('assesments/<int:pk>/', views.AssesmentDetail.as_view(), name='assesment-detail'),
path('assesments/<int:pk>/status', views.AssesmentStatus.as_view(), name='assesment-status'),
path('assesments/<int:pk>/create', views.InstanceCreate.as_view(), name='instance-creation'),
path('instances/', views.InstanceList.as_view(), name='instance-list'),
path('instances/<uuid:pk>/', views.InstanceDetail.as_view(), name="instance-detail"),
path('instances/<uuid:pk>/test', views.InstanceTest.as_view(), name="instance-test"),
path('instances/<uuid:pk>/start', views.InstanceStart.as_view(), name="instance-start"),
path('instances/<uuid:pk>/questions/<int:q_id>', views.InstanceQuestionDetail.as_view(), name="instance-question-detail"),
path('instances/<uuid:pk>/answer', views.InstanceAnswer.as_view(), name="instance-answer"),
path('instances/<uuid:pk>/end', views.InstanceEnd.as_view(), name="instance-end"),
path('instances/<uuid:pk>/result', views.InstanceResult.as_view(), name="instance-result"),
path('instances/restore', views.InstanceRestore.as_view(), name="instance-restore"),
path('takers/', views.TakerList.as_view(), name='taker-list'),
path('takers/<int:pk>/', views.TakerDetail.as_view(), name="taker-detail"),
path('takers/me', views.TakerDetailMeAnswer.as_view(), name='taker-detail-me'),
path('questions/', views.QuestionList.as_view(), name='question-list'),
path('questions/<int:pk>/', views.QuestionDetail.as_view(), name='question-detail'),
path('options/', views.OptionList.as_view(), name='option-list'),
path('options/<int:pk>/', views.OptionDetail.as_view(), name='option-detail'),
]
urlpatterns = format_suffix_patterns(urlpatterns) | none | 1 | 2.24528 | 2 |
|
runNomadDocker.py | csawyerYumaed/nomad-docker | 11 | 6629954 | <filename>runNomadDocker.py
#!/usr/bin/python -u
"""
nomad docker doesn't allow us to do volumes, which hurts.
This is a work-around. I hope they get their act together soon.
all META keys are capitalized by NOMAD, so be sure to define your _LABELS as uppercase.
required keys:
IMAGE -- name of the docker image to pull.
optional:
REGISTRY_URL = the URL for the regitry to pull from.
REGISTRY_USER = username for registry, defaults None
REGISTRY_PASSWORD = password for registry, defaults None
NETWORK_MODE = "bridge" network mode for docker:
('bridge': creates a new network stack for the container on the Docker bridge, 'none': no networking for this container, 'container:[name|id]': reuses another container network stack, 'host': use the host network stack inside the container or any name that identifies an existing Docker network).
defaults "bridge"
nomad do HOST export networking at all, so you have to specify it special in the env {} config.
NETWORK_LABELS = "" space seperated list of network labels.
NOMAD_PORT_<label> = '' IP port to expose inside the container.
NOMAD_IP_<label> = '' IP ADDRESS to expose.
NOMAD_HOST_PORT_<label> = '' IP port to expose on the host.
nomad doesn't do volumes at all. currently only bind mounts are supported. here is how to do them:
VOLUME_LABELS="" is a space seperated list of volume labels (just like network labels)
SRC_<LABEL>="" the source of the volume.
DST_<LABEL>="" the destination of the volume.
MODE_<LABEL>="" the mode (rw/ro) of the volume. if missing defaults to rw.
"""
from __future__ import print_function
import os
import signal
import sys
try:
from docker import Client
except ImportError:
print("You must install docker-py module, try running: pip install docker-py")
#used for signal, yes globals suck, get over it.
RUNNINGID = 0
DEBUG = True
def getKey(name, default=None):
"""get key or set default from os.environ, which is ""
"""
if os.environ.has_key(name):
ret = os.environ[name]
else:
ret = default
return ret
def main(buildNumber):
"""main code"""
global RUNNINGID
cli = Client(base_url='unix://var/run/docker.sock')
# specify the network mode, port bindings, and volume mounts.
# this is how the docker python client wants these parameters
networkMode = getKey('NOMAD_META_NETWORK_MODE', "bridge")
networkLabels = getKey('NOMAD_META_NETWORK_LABELS', "")
portBindings = {}
for label in networkLabels.split():
port = getKey('NOMAD_PORT_{}'.format(label))
ip = getKey('NOMAD_IP_{}'.format(label))
hostPort = getKey('NOMAD_HOST_PORT_{}'.format(label))
portBindings[port] = (ip, hostPort)
print("exposing container port {} to external ip:port {}:{}".format(
port, ip, hostPort))
volumeLabels = getKey('NOMAD_META_VOLUME_LABELS', "")
volumes = {}
for label in volumeLabels.split():
src = os.environ['NOMAD_META_SRC_{}'.format(label)]
dst = os.environ['NOMAD_META_DST_{}'.format(label)]
mode = getKey('NOMAD_META_MODE_{}'.format(label), "rw")
volumes[src] = {'bind': dst, 'mode': mode}
print("binding volume {} src:dst:mode {}:{}:{}".format(label, src, dst,
mode))
labels = {}
# just move all the nomad stuff into docker labels... why not!
for k in os.environ.keys():
#redefine them all without the NOMAD_META prefix.
if 'NOMAD' in k:
newk = k.replace('NOMAD_META_', '')
labels[newk] = os.environ[k]
hostConfig = cli.create_host_config(
port_bindings=portBindings, binds=volumes, network_mode=networkMode)
serviceName = os.environ['NOMAD_META_IMAGE']
dockerName = "{}-{}".format(serviceName, os.environ['NOMAD_ALLOC_ID'])
registryURL = getKey('NOMAD_META_REGISTRY_URL', "")
registryAuthConfig = {
'username': getKey('NOMAD_META_REGISTRY_USER'),
'password': getKey('NOMAD_META_REGISTRY_PASSWORD')
}
imageTag = buildNumber
registry = '%s%s' % (registryURL, serviceName)
image = "{}:{}".format(registry, imageTag)
print("will download image {}:{}".format(registry, imageTag))
cli.pull(
repository=registry,
tag=imageTag,
stream=False,
auth_config=registryAuthConfig)
containers = cli.containers(all=True, filters={'name': image})
# if container name or image is already around, stop and remove it, since we are about to run it again.
for i in containers:
if i['Image'] == image:
# currently running, we should stop it.
if i['State'] == 'running':
print("stoppping container {} with ID {}".format(i['Image'], i['Id']))
cli.stop(i['Id'])
cli.remove_container(i['Id'])
else:
print('container {} exists, but is not running, removing id {}'.format(i[
'Image'], i['Id']))
cli.remove_container(i['Id'])
if dockerName in i['Names']:
if i['State'] == 'running':
print("stoppping container {} with ID {}".format(i['Image'], i['Id']))
cli.stop(i['Id'])
cli.remove_container(i['Id'])
else:
print('container {} exists, but is not running, removing id {}'.format(i[
'Image'], i['Id']))
cli.remove_container(i['Id'])
container = cli.create_container(
image=image,
detach=True,
name=dockerName,
environment=labels,
labels=labels,
ports=portBindings.keys(),
host_config=hostConfig)
print("created container: {}".format(container))
id = container.get('Id')
RUNNINGID = id
cli.start(container=id)
print('container started..: retrieve and print stdout/err...')
for msg in cli.logs(container=id, stream=True, stdout=True, stderr=True):
print(msg, end="")
def cleanupDocker(signal, frame):
"""stop container"""
cli = Client(base_url='unix://var/run/docker.sock')
if RUNNINGID:
print("stopping container: {}".format(RUNNINGID))
cli.stop(RUNNINGID)
sys.exit(0)
signal.signal(signal.SIGINT, cleanupDocker)
def printEnv(d):
"""for printing os.environ, pprint doesn't do it well *sad face*
"""
for k in d.keys():
print("{}: {}".format(k, d[k]))
if __name__ == '__main__':
try:
buildNumber = sys.argv[1]
except IndexError:
buildNumber = 'latest'
try:
print("nomad-rundocker v0.1")
if DEBUG:
printEnv(os.environ)
main(buildNumber)
except KeyError:
print("UNABLE to find key, current environment is:")
printEnv(os.environ)
raise
| <filename>runNomadDocker.py
#!/usr/bin/python -u
"""
nomad docker doesn't allow us to do volumes, which hurts.
This is a work-around. I hope they get their act together soon.
all META keys are capitalized by NOMAD, so be sure to define your _LABELS as uppercase.
required keys:
IMAGE -- name of the docker image to pull.
optional:
REGISTRY_URL = the URL for the regitry to pull from.
REGISTRY_USER = username for registry, defaults None
REGISTRY_PASSWORD = password for registry, defaults None
NETWORK_MODE = "bridge" network mode for docker:
('bridge': creates a new network stack for the container on the Docker bridge, 'none': no networking for this container, 'container:[name|id]': reuses another container network stack, 'host': use the host network stack inside the container or any name that identifies an existing Docker network).
defaults "bridge"
nomad do HOST export networking at all, so you have to specify it special in the env {} config.
NETWORK_LABELS = "" space seperated list of network labels.
NOMAD_PORT_<label> = '' IP port to expose inside the container.
NOMAD_IP_<label> = '' IP ADDRESS to expose.
NOMAD_HOST_PORT_<label> = '' IP port to expose on the host.
nomad doesn't do volumes at all. currently only bind mounts are supported. here is how to do them:
VOLUME_LABELS="" is a space seperated list of volume labels (just like network labels)
SRC_<LABEL>="" the source of the volume.
DST_<LABEL>="" the destination of the volume.
MODE_<LABEL>="" the mode (rw/ro) of the volume. if missing defaults to rw.
"""
from __future__ import print_function
import os
import signal
import sys
try:
from docker import Client
except ImportError:
print("You must install docker-py module, try running: pip install docker-py")
#used for signal, yes globals suck, get over it.
RUNNINGID = 0
DEBUG = True
def getKey(name, default=None):
"""get key or set default from os.environ, which is ""
"""
if os.environ.has_key(name):
ret = os.environ[name]
else:
ret = default
return ret
def main(buildNumber):
"""main code"""
global RUNNINGID
cli = Client(base_url='unix://var/run/docker.sock')
# specify the network mode, port bindings, and volume mounts.
# this is how the docker python client wants these parameters
networkMode = getKey('NOMAD_META_NETWORK_MODE', "bridge")
networkLabels = getKey('NOMAD_META_NETWORK_LABELS', "")
portBindings = {}
for label in networkLabels.split():
port = getKey('NOMAD_PORT_{}'.format(label))
ip = getKey('NOMAD_IP_{}'.format(label))
hostPort = getKey('NOMAD_HOST_PORT_{}'.format(label))
portBindings[port] = (ip, hostPort)
print("exposing container port {} to external ip:port {}:{}".format(
port, ip, hostPort))
volumeLabels = getKey('NOMAD_META_VOLUME_LABELS', "")
volumes = {}
for label in volumeLabels.split():
src = os.environ['NOMAD_META_SRC_{}'.format(label)]
dst = os.environ['NOMAD_META_DST_{}'.format(label)]
mode = getKey('NOMAD_META_MODE_{}'.format(label), "rw")
volumes[src] = {'bind': dst, 'mode': mode}
print("binding volume {} src:dst:mode {}:{}:{}".format(label, src, dst,
mode))
labels = {}
# just move all the nomad stuff into docker labels... why not!
for k in os.environ.keys():
#redefine them all without the NOMAD_META prefix.
if 'NOMAD' in k:
newk = k.replace('NOMAD_META_', '')
labels[newk] = os.environ[k]
hostConfig = cli.create_host_config(
port_bindings=portBindings, binds=volumes, network_mode=networkMode)
serviceName = os.environ['NOMAD_META_IMAGE']
dockerName = "{}-{}".format(serviceName, os.environ['NOMAD_ALLOC_ID'])
registryURL = getKey('NOMAD_META_REGISTRY_URL', "")
registryAuthConfig = {
'username': getKey('NOMAD_META_REGISTRY_USER'),
'password': getKey('NOMAD_META_REGISTRY_PASSWORD')
}
imageTag = buildNumber
registry = '%s%s' % (registryURL, serviceName)
image = "{}:{}".format(registry, imageTag)
print("will download image {}:{}".format(registry, imageTag))
cli.pull(
repository=registry,
tag=imageTag,
stream=False,
auth_config=registryAuthConfig)
containers = cli.containers(all=True, filters={'name': image})
# if container name or image is already around, stop and remove it, since we are about to run it again.
for i in containers:
if i['Image'] == image:
# currently running, we should stop it.
if i['State'] == 'running':
print("stoppping container {} with ID {}".format(i['Image'], i['Id']))
cli.stop(i['Id'])
cli.remove_container(i['Id'])
else:
print('container {} exists, but is not running, removing id {}'.format(i[
'Image'], i['Id']))
cli.remove_container(i['Id'])
if dockerName in i['Names']:
if i['State'] == 'running':
print("stoppping container {} with ID {}".format(i['Image'], i['Id']))
cli.stop(i['Id'])
cli.remove_container(i['Id'])
else:
print('container {} exists, but is not running, removing id {}'.format(i[
'Image'], i['Id']))
cli.remove_container(i['Id'])
container = cli.create_container(
image=image,
detach=True,
name=dockerName,
environment=labels,
labels=labels,
ports=portBindings.keys(),
host_config=hostConfig)
print("created container: {}".format(container))
id = container.get('Id')
RUNNINGID = id
cli.start(container=id)
print('container started..: retrieve and print stdout/err...')
for msg in cli.logs(container=id, stream=True, stdout=True, stderr=True):
print(msg, end="")
def cleanupDocker(signal, frame):
"""stop container"""
cli = Client(base_url='unix://var/run/docker.sock')
if RUNNINGID:
print("stopping container: {}".format(RUNNINGID))
cli.stop(RUNNINGID)
sys.exit(0)
signal.signal(signal.SIGINT, cleanupDocker)
def printEnv(d):
"""for printing os.environ, pprint doesn't do it well *sad face*
"""
for k in d.keys():
print("{}: {}".format(k, d[k]))
if __name__ == '__main__':
try:
buildNumber = sys.argv[1]
except IndexError:
buildNumber = 'latest'
try:
print("nomad-rundocker v0.1")
if DEBUG:
printEnv(os.environ)
main(buildNumber)
except KeyError:
print("UNABLE to find key, current environment is:")
printEnv(os.environ)
raise
| en | 0.783632 | #!/usr/bin/python -u nomad docker doesn't allow us to do volumes, which hurts. This is a work-around. I hope they get their act together soon. all META keys are capitalized by NOMAD, so be sure to define your _LABELS as uppercase. required keys: IMAGE -- name of the docker image to pull. optional: REGISTRY_URL = the URL for the regitry to pull from. REGISTRY_USER = username for registry, defaults None REGISTRY_PASSWORD = password for registry, defaults None NETWORK_MODE = "bridge" network mode for docker: ('bridge': creates a new network stack for the container on the Docker bridge, 'none': no networking for this container, 'container:[name|id]': reuses another container network stack, 'host': use the host network stack inside the container or any name that identifies an existing Docker network). defaults "bridge" nomad do HOST export networking at all, so you have to specify it special in the env {} config. NETWORK_LABELS = "" space seperated list of network labels. NOMAD_PORT_<label> = '' IP port to expose inside the container. NOMAD_IP_<label> = '' IP ADDRESS to expose. NOMAD_HOST_PORT_<label> = '' IP port to expose on the host. nomad doesn't do volumes at all. currently only bind mounts are supported. here is how to do them: VOLUME_LABELS="" is a space seperated list of volume labels (just like network labels) SRC_<LABEL>="" the source of the volume. DST_<LABEL>="" the destination of the volume. MODE_<LABEL>="" the mode (rw/ro) of the volume. if missing defaults to rw. #used for signal, yes globals suck, get over it. get key or set default from os.environ, which is "" main code # specify the network mode, port bindings, and volume mounts. # this is how the docker python client wants these parameters # just move all the nomad stuff into docker labels... why not! #redefine them all without the NOMAD_META prefix. # if container name or image is already around, stop and remove it, since we are about to run it again. # currently running, we should stop it. stop container for printing os.environ, pprint doesn't do it well *sad face* | 2.327519 | 2 |
.venv/lib/python3.10/site-packages/lunr/pipeline.py | plocandido/docinfrati | 128 | 6629955 | <reponame>plocandido/docinfrati<gh_stars>100-1000
import logging
from typing import Callable, Dict
from lunr.exceptions import BaseLunrException
from lunr.token import Token
log = logging.getLogger(__name__)
class Pipeline:
"""lunr.Pipelines maintain a list of functions to be applied to all tokens
in documents entering the search index and queries ran agains the index.
"""
registered_functions: Dict[str, Callable] = {}
def __init__(self):
self._stack = []
def __len__(self):
return len(self._stack)
def __repr__(self):
return '<Pipeline stack="{}">'.format(",".join(fn.label for fn in self._stack))
# TODO: add iterator methods?
@classmethod
def register_function(cls, fn, label=None):
"""Register a function with the pipeline."""
label = label or fn.__name__
if label in cls.registered_functions:
log.warning("Overwriting existing registered function %s", label)
fn.label = label
cls.registered_functions[fn.label] = fn
@classmethod
def load(cls, serialised):
"""Loads a previously serialised pipeline."""
pipeline = cls()
for fn_name in serialised:
try:
fn = cls.registered_functions[fn_name]
except KeyError:
raise BaseLunrException(
"Cannot load unregistered function {}".format(fn_name)
)
else:
pipeline.add(fn)
return pipeline
def add(self, *args):
"""Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set
"""
for fn in args:
self.warn_if_function_not_registered(fn)
self._stack.append(fn)
def warn_if_function_not_registered(self, fn):
try:
return fn.label in self.registered_functions
except AttributeError:
log.warning(
'Function "{}" is not registered with pipeline. '
"This may cause problems when serialising the index.".format(
getattr(fn, "label", fn)
)
)
def after(self, existing_fn, new_fn):
"""Adds a single function after a function that already exists in the
pipeline."""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index + 1, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def before(self, existing_fn, new_fn):
"""Adds a single function before a function that already exists in the
pipeline.
"""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def remove(self, fn):
"""Removes a function from the pipeline."""
try:
self._stack.remove(fn)
except ValueError:
pass
def run(self, tokens):
"""Runs the current list of functions that make up the pipeline against
the passed tokens."""
for fn in self._stack:
results = []
for i, token in enumerate(tokens):
# JS ignores additional arguments to the functions but we
# force pipeline functions to declare (token, i, tokens)
# or *args
result = fn(token, i, tokens)
if not result:
continue
if isinstance(result, (list, tuple)): # simulate Array.concat
results.extend(result)
else:
results.append(result)
tokens = results
return tokens
def run_string(self, string, metadata=None):
"""Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings."""
token = Token(string, metadata)
return [str(tkn) for tkn in self.run([token])]
def reset(self):
self._stack = []
def serialize(self):
return [fn.label for fn in self._stack]
| import logging
from typing import Callable, Dict
from lunr.exceptions import BaseLunrException
from lunr.token import Token
log = logging.getLogger(__name__)
class Pipeline:
"""lunr.Pipelines maintain a list of functions to be applied to all tokens
in documents entering the search index and queries ran agains the index.
"""
registered_functions: Dict[str, Callable] = {}
def __init__(self):
self._stack = []
def __len__(self):
return len(self._stack)
def __repr__(self):
return '<Pipeline stack="{}">'.format(",".join(fn.label for fn in self._stack))
# TODO: add iterator methods?
@classmethod
def register_function(cls, fn, label=None):
"""Register a function with the pipeline."""
label = label or fn.__name__
if label in cls.registered_functions:
log.warning("Overwriting existing registered function %s", label)
fn.label = label
cls.registered_functions[fn.label] = fn
@classmethod
def load(cls, serialised):
"""Loads a previously serialised pipeline."""
pipeline = cls()
for fn_name in serialised:
try:
fn = cls.registered_functions[fn_name]
except KeyError:
raise BaseLunrException(
"Cannot load unregistered function {}".format(fn_name)
)
else:
pipeline.add(fn)
return pipeline
def add(self, *args):
"""Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set
"""
for fn in args:
self.warn_if_function_not_registered(fn)
self._stack.append(fn)
def warn_if_function_not_registered(self, fn):
try:
return fn.label in self.registered_functions
except AttributeError:
log.warning(
'Function "{}" is not registered with pipeline. '
"This may cause problems when serialising the index.".format(
getattr(fn, "label", fn)
)
)
def after(self, existing_fn, new_fn):
"""Adds a single function after a function that already exists in the
pipeline."""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index + 1, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def before(self, existing_fn, new_fn):
"""Adds a single function before a function that already exists in the
pipeline.
"""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index, new_fn)
except ValueError as e:
raise BaseLunrException("Cannot find existing_fn") from e
def remove(self, fn):
"""Removes a function from the pipeline."""
try:
self._stack.remove(fn)
except ValueError:
pass
def run(self, tokens):
"""Runs the current list of functions that make up the pipeline against
the passed tokens."""
for fn in self._stack:
results = []
for i, token in enumerate(tokens):
# JS ignores additional arguments to the functions but we
# force pipeline functions to declare (token, i, tokens)
# or *args
result = fn(token, i, tokens)
if not result:
continue
if isinstance(result, (list, tuple)): # simulate Array.concat
results.extend(result)
else:
results.append(result)
tokens = results
return tokens
def run_string(self, string, metadata=None):
"""Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings."""
token = Token(string, metadata)
return [str(tkn) for tkn in self.run([token])]
def reset(self):
self._stack = []
def serialize(self):
return [fn.label for fn in self._stack] | en | 0.767862 | lunr.Pipelines maintain a list of functions to be applied to all tokens in documents entering the search index and queries ran agains the index. # TODO: add iterator methods? Register a function with the pipeline. Loads a previously serialised pipeline. Adds new functions to the end of the pipeline. Functions must accept three arguments: - Token: A lunr.Token object which will be updated - i: The index of the token in the set - tokens: A list of tokens representing the set Adds a single function after a function that already exists in the pipeline. Adds a single function before a function that already exists in the pipeline. Removes a function from the pipeline. Runs the current list of functions that make up the pipeline against the passed tokens. # JS ignores additional arguments to the functions but we # force pipeline functions to declare (token, i, tokens) # or *args # simulate Array.concat Convenience method for passing a string through a pipeline and getting strings out. This method takes care of wrapping the passed string in a token and mapping the resulting tokens back to strings. | 2.466331 | 2 |
funpackager/loader/jsonLoader.py | raojinlin/funpackager | 0 | 6629956 | import json
from funpackager.loader.abstractLoader import AbstractLoader
class JSONLoader(AbstractLoader):
def __init__(self, conf_file):
AbstractLoader.__init__(self, conf_file)
self._data = None
def get_data(self):
if not self._data:
self._data = json.load(open(self._config_file, 'rt'))
return self._data
| import json
from funpackager.loader.abstractLoader import AbstractLoader
class JSONLoader(AbstractLoader):
def __init__(self, conf_file):
AbstractLoader.__init__(self, conf_file)
self._data = None
def get_data(self):
if not self._data:
self._data = json.load(open(self._config_file, 'rt'))
return self._data
| none | 1 | 2.687084 | 3 |
|
pages/extensions/internal_links/pod_internal_link_rewriter_test.py | psimyn/amp.dev | 1 | 6629957 | """Tests for the source code exporter."""
import unittest
import sys
import os
from grow.common.urls import Url
from grow.cache.object_cache import ObjectCache
sys.path.extend([os.path.join(os.path.dirname(__file__), '.')])
from pod_internal_link_rewriter import PodInternalLinkRewriter
class PodInternalLinkRewriterTestCase(unittest.TestCase):
def test_a_href_relative(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html">test</a>', result)
def test_a_href_pod_path(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="/content/test/folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html">test</a>', result)
def test_a_href_none_existing(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_multiple_href_none_existing_with_anchors(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
# two times the same url and different anchor to test possible cache problems
content = '<a href="../folder2/page2.md#test">test</a><br>' \
'<a href="../folder2/page2.md#other">test2</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_a_href_with_protocol(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="http://amp.dev/test/folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_a_href_relative_with_anchor(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
# two times the same url and different anchor to test possible cache problems
content = '<a href="../folder2/page2.md#test">test</a><br>' \
'<a href="../folder2/page2.md#other">test2</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html#test">test</a><br>' \
'<a href="/test/folder_2/page2.html#other">test2</a>', result)
def test_a_href_relative_with_params(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md?format=ads">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html?format=ads">test</a>', result)
def test_a_href_relative_with_params_and_anchor(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md?format=ads#test">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html?format=ads#test">test</a>', result)
def test_multiple_relative_href(self):
link_map = {
'/content/test/test.md': '/test/test.html',
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
'/content/test/page3.md': '/test/page3.html',
'/content/page4.md': '/page4.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md')
content = '<p><a class="link" href="folder2/page2.md">test</a><br>' \
'<a href="./page3.md" class="link">page3</a><br>' \
'<a href="../page4.md">page4</a><br>' \
'<a href = "./folder1/page.md">page</a></p>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<p><a class="link" href="/test/folder_2/page2.html">test</a><br>' \
'<a href="/test/page3.html" class="link">page3</a><br>' \
'<a href="/page4.html">page4</a><br>' \
'<a href = "/test/folder_1/page1.html">page</a></p>', result)
def test_cache_should_not_(self):
# Ensure the cache does not
cache = ObjectCache()
content = '<a href="./page.md">page</a><br>'
link_map = {
'/content/test/test.md': '/en/test/test.html',
'/content/test/page.md': '/en/test/page.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md', 'en')
link_rewriter = PodInternalLinkRewriter(doc, cache, None);
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/en/test/page.html">page</a><br>', result)
link_map = {
'/content/test/test.md': '/de/test/test.html',
'/content/test/page.md': '/de/test/page.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md', 'de')
link_rewriter = PodInternalLinkRewriter(doc, cache, None);
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/de/test/page.html">page</a><br>', result)
class MockPod:
def __init__(self, link_map):
self.link_map = link_map
def get_doc(self, path, locale='en'):
site_path = self.link_map.get(path)
return MockDoc(self, path, site_path, locale)
class MockDoc:
def __init__(self, pod, pod_path, site_path, locale):
self.pod = pod
self.pod_path = pod_path
self.url = None
if site_path:
self.url = Url(site_path)
self.locale = locale
@property
def exists(self):
if self.url:
return True
return False
| """Tests for the source code exporter."""
import unittest
import sys
import os
from grow.common.urls import Url
from grow.cache.object_cache import ObjectCache
sys.path.extend([os.path.join(os.path.dirname(__file__), '.')])
from pod_internal_link_rewriter import PodInternalLinkRewriter
class PodInternalLinkRewriterTestCase(unittest.TestCase):
def test_a_href_relative(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html">test</a>', result)
def test_a_href_pod_path(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="/content/test/folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html">test</a>', result)
def test_a_href_none_existing(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_multiple_href_none_existing_with_anchors(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
# two times the same url and different anchor to test possible cache problems
content = '<a href="../folder2/page2.md#test">test</a><br>' \
'<a href="../folder2/page2.md#other">test2</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_a_href_with_protocol(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="http://amp.dev/test/folder2/page2.md">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals(content, result)
def test_a_href_relative_with_anchor(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
# two times the same url and different anchor to test possible cache problems
content = '<a href="../folder2/page2.md#test">test</a><br>' \
'<a href="../folder2/page2.md#other">test2</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html#test">test</a><br>' \
'<a href="/test/folder_2/page2.html#other">test2</a>', result)
def test_a_href_relative_with_params(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md?format=ads">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html?format=ads">test</a>', result)
def test_a_href_relative_with_params_and_anchor(self):
link_map = {
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
}
doc = MockPod(link_map).get_doc('/content/test/folder1/page.md')
content = '<a href="../folder2/page2.md?format=ads#test">test</a>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/test/folder_2/page2.html?format=ads#test">test</a>', result)
def test_multiple_relative_href(self):
link_map = {
'/content/test/test.md': '/test/test.html',
'/content/test/folder1/page.md': '/test/folder_1/page1.html',
'/content/test/folder2/page2.md': '/test/folder_2/page2.html',
'/content/test/page3.md': '/test/page3.html',
'/content/page4.md': '/page4.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md')
content = '<p><a class="link" href="folder2/page2.md">test</a><br>' \
'<a href="./page3.md" class="link">page3</a><br>' \
'<a href="../page4.md">page4</a><br>' \
'<a href = "./folder1/page.md">page</a></p>'
link_rewriter = PodInternalLinkRewriter(doc, ObjectCache(), None)
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<p><a class="link" href="/test/folder_2/page2.html">test</a><br>' \
'<a href="/test/page3.html" class="link">page3</a><br>' \
'<a href="/page4.html">page4</a><br>' \
'<a href = "/test/folder_1/page1.html">page</a></p>', result)
def test_cache_should_not_(self):
# Ensure the cache does not
cache = ObjectCache()
content = '<a href="./page.md">page</a><br>'
link_map = {
'/content/test/test.md': '/en/test/test.html',
'/content/test/page.md': '/en/test/page.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md', 'en')
link_rewriter = PodInternalLinkRewriter(doc, cache, None);
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/en/test/page.html">page</a><br>', result)
link_map = {
'/content/test/test.md': '/de/test/test.html',
'/content/test/page.md': '/de/test/page.html',
}
doc = MockPod(link_map).get_doc('/content/test/test.md', 'de')
link_rewriter = PodInternalLinkRewriter(doc, cache, None);
result = link_rewriter.rewrite_pod_internal_links(content)
self.assertEquals('<a href="/de/test/page.html">page</a><br>', result)
class MockPod:
def __init__(self, link_map):
self.link_map = link_map
def get_doc(self, path, locale='en'):
site_path = self.link_map.get(path)
return MockDoc(self, path, site_path, locale)
class MockDoc:
def __init__(self, pod, pod_path, site_path, locale):
self.pod = pod
self.pod_path = pod_path
self.url = None
if site_path:
self.url = Url(site_path)
self.locale = locale
@property
def exists(self):
if self.url:
return True
return False
| en | 0.3907 | Tests for the source code exporter. # two times the same url and different anchor to test possible cache problems #test">test</a><br>' \ #other">test2</a>' # two times the same url and different anchor to test possible cache problems #test">test</a><br>' \ #other">test2</a>' #test">test</a><br>' \ #other">test2</a>', result) #test">test</a>' #test">test</a>', result) # Ensure the cache does not | 2.435373 | 2 |
examples/11-camera/Camera.py | henkjannl/py-animate | 0 | 6629958 | from Animate import Animate
Animate.Model('Camera.xlsx', 'Main')
| from Animate import Animate
Animate.Model('Camera.xlsx', 'Main')
| none | 1 | 1.181914 | 1 |
|
src/assistants/telegram.py | SmBe19/Todoistant | 0 | 6629959 | from datetime import datetime, timedelta
from utils import parse_task_config, run_every, run_next_in, local_to_utc
INIT_CONFIG = {
'chatid': 0,
'username': '',
'plain_labels': [],
'link_labels': [],
'forward_labels': [],
}
CONFIG_VERSION = 1
CONFIG_WHITELIST = [
'plain_project',
'plain_labels',
'link_project',
'link_labels',
'forward_project',
'forward_labels',
]
CONFIG_LIST = [
'plain_labels',
'link_labels',
'forward_labels',
]
CONFIG_INT = [
'plain_project',
'plain_labels',
'link_project',
'link_labels',
'forward_project',
'forward_labels',
]
def migrate_config(cfg, old_version):
pass
should_run = run_every(timedelta(minutes=15))
handle_update = run_next_in(timedelta(seconds=1), {'item:added', 'item:updated'})
def run(api, timezone, telegram, cfg, tmp):
telegram_label = None
for label in api.state['labels']:
if label['name'] == 'telegram':
telegram_label = label
break
if not telegram_label:
return
now = datetime.utcnow()
last = cfg.get('last_run', now - timedelta(days=2))
next_run = None
for item in api.state['items']:
if item['date_completed']:
continue
if telegram_label['id'] not in item['labels']:
continue
due = local_to_utc(datetime.fromisoformat(item['due']['date']).replace(tzinfo=timezone)) if item['due'] else None
content, config = parse_task_config(item['content'])
if 'telegram-due' in config:
new_due = config['telegram-due']
try:
if 'T' in new_due:
new_due = datetime.fromisoformat(new_due)
if not new_due.tzinfo:
new_due = new_due.replace(tzinfo=timezone)
due = new_due
elif ':' in new_due:
parts = new_due.split(':')
if not due:
due = datetime.now(timezone)
due = due.replace(hour=int(parts[0]), minute=int(parts[1]), second=0, microsecond=0)
due = local_to_utc(due)
except ValueError as e:
telegram('Error with {}: {}.'.format(content, e))
continue
if not due:
continue
if due > now and (not next_run or due < next_run):
next_run = due
if last <= due <= now:
telegram(content)
cfg['next_run'] = next_run
| from datetime import datetime, timedelta
from utils import parse_task_config, run_every, run_next_in, local_to_utc
INIT_CONFIG = {
'chatid': 0,
'username': '',
'plain_labels': [],
'link_labels': [],
'forward_labels': [],
}
CONFIG_VERSION = 1
CONFIG_WHITELIST = [
'plain_project',
'plain_labels',
'link_project',
'link_labels',
'forward_project',
'forward_labels',
]
CONFIG_LIST = [
'plain_labels',
'link_labels',
'forward_labels',
]
CONFIG_INT = [
'plain_project',
'plain_labels',
'link_project',
'link_labels',
'forward_project',
'forward_labels',
]
def migrate_config(cfg, old_version):
pass
should_run = run_every(timedelta(minutes=15))
handle_update = run_next_in(timedelta(seconds=1), {'item:added', 'item:updated'})
def run(api, timezone, telegram, cfg, tmp):
telegram_label = None
for label in api.state['labels']:
if label['name'] == 'telegram':
telegram_label = label
break
if not telegram_label:
return
now = datetime.utcnow()
last = cfg.get('last_run', now - timedelta(days=2))
next_run = None
for item in api.state['items']:
if item['date_completed']:
continue
if telegram_label['id'] not in item['labels']:
continue
due = local_to_utc(datetime.fromisoformat(item['due']['date']).replace(tzinfo=timezone)) if item['due'] else None
content, config = parse_task_config(item['content'])
if 'telegram-due' in config:
new_due = config['telegram-due']
try:
if 'T' in new_due:
new_due = datetime.fromisoformat(new_due)
if not new_due.tzinfo:
new_due = new_due.replace(tzinfo=timezone)
due = new_due
elif ':' in new_due:
parts = new_due.split(':')
if not due:
due = datetime.now(timezone)
due = due.replace(hour=int(parts[0]), minute=int(parts[1]), second=0, microsecond=0)
due = local_to_utc(due)
except ValueError as e:
telegram('Error with {}: {}.'.format(content, e))
continue
if not due:
continue
if due > now and (not next_run or due < next_run):
next_run = due
if last <= due <= now:
telegram(content)
cfg['next_run'] = next_run
| none | 1 | 2.420144 | 2 |
|
docs/generate.py | EgorDm/notionsci | 13 | 6629960 | <gh_stars>10-100
"""Generate virtual files for mkdocs."""
import mkdocs_gen_files
def docs_stub(module_name):
return f"::: notionsci.{module_name}\
\n\trendering:\n\t\tshow_root_heading: true\n\t\tshow_source: true"
virtual_files = {
"index.md": "--8<-- 'README.md'",
# "reference/config.md": docs_stub("config"),
"contributing.md": "--8<-- '.github/CONTRIBUTING.md'",
"license.md": "```text\n--8<-- 'LICENSE'\n```",
"reference/sync.md": docs_stub("sync"),
"reference/connections/notion.md": docs_stub("connections.notion"),
"reference/connections/notion_unofficial.md": docs_stub(
"connections.notion_unofficial"
),
"reference/connections/zotero.md": docs_stub("connections.zotero"),
}
for file_name, content in virtual_files.items():
with mkdocs_gen_files.open(file_name, "w") as file:
print(content, file=file)
| """Generate virtual files for mkdocs."""
import mkdocs_gen_files
def docs_stub(module_name):
return f"::: notionsci.{module_name}\
\n\trendering:\n\t\tshow_root_heading: true\n\t\tshow_source: true"
virtual_files = {
"index.md": "--8<-- 'README.md'",
# "reference/config.md": docs_stub("config"),
"contributing.md": "--8<-- '.github/CONTRIBUTING.md'",
"license.md": "```text\n--8<-- 'LICENSE'\n```",
"reference/sync.md": docs_stub("sync"),
"reference/connections/notion.md": docs_stub("connections.notion"),
"reference/connections/notion_unofficial.md": docs_stub(
"connections.notion_unofficial"
),
"reference/connections/zotero.md": docs_stub("connections.zotero"),
}
for file_name, content in virtual_files.items():
with mkdocs_gen_files.open(file_name, "w") as file:
print(content, file=file) | en | 0.354047 | Generate virtual files for mkdocs. # "reference/config.md": docs_stub("config"), | 2.459982 | 2 |
warehouse_cloud/cloud/warehouse.py | 2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Cloud | 0 | 6629961 | from . import rl
from .models import Inventory, Order
class Warehouse:
def __init__(self, anomaly_aware):
# config
self.cap_conveyor = 5
self.cap_wait = 5
self.reward_order = 30
self.reward_trash = 70
self.reward_wait = 1
self.order_total = 20
self.order_delay = 0
self.anomaly_mtbf = 5
self.anomaly_duration = 10
self.anomaly_wait = 3
self.item_buy = 5
# Warehouse
self.tick = 0
self.anomaly_aware = anomaly_aware
try:
self.rl_model = rl.DQN(path='../model/rl.pth')
self.a_rl_models = [rl.DQN(path='../model/a_rl_0.pth'),
None,
rl.DQN(path='../model/a_rl_2.pth')]
except:
pass
self.c = [0] * 4
self.recent_c = 0
self.recent_s = 0
self.c_waiting = 0
self.c_allow = 3
self.r_allow = [False] * 3
self.s_allow = 3
self.r_wait = [0] * 3
self.s_wait = 0
self.stuck = [False] * 3
self.count = [0] * 3
self.current_anomaly = [-1] * 3
self.reward = 0
self.old_state = None
self.old_decision = None
self.old_reward = 0
def need_decision(self):
if sum(self.c) == 0:
return False
num_true = 0
for ans in self.available():
if ans:
num_true += 1
return num_true > 1
def available(self, i=None):
if i is not None:
inventory_objects = Inventory.objects.filter(stored=i)
ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
return ans
return ans and self.current_anomaly[i] == -1
ans = []
for i in range(3):
inventory_objects = Inventory.objects.filter(stored=i)
single_ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
ans.append(single_ans)
else:
ans.append(single_ans and self.current_anomaly[i] == -1)
return ans
def get_available(self):
available = self.available()
ans = []
for i, avail in enumerate(available):
if avail:
ans.append(i)
return ans
def get_inventory(self, item):
return self.c[item - 1] + len(Inventory.objects.filter(item_type=item, stored__lt=4))
def get_order(self, is_sum=True):
if is_sum:
return len(Order.objects.all())
orders = []
for i in range(4):
orders.append(len(Order.objects.filter(item_type=i + 1)))
return orders
def get_state(self):
def repr_list(conveyor):
ans = 0
for i, item in enumerate(conveyor):
ans += item.item_type * (5 ** (5 - i - 1))
return ans
ans = [self.tick, self.recent_c]
for i in range(4):
ans.append(repr_list(Inventory.objects.filter(stored=i)))
ans.extend(self.get_order(False))
return ans
def anomaly_state(self):
anomaly_number = 0
for i, anomaly in enumerate(self.current_anomaly):
if anomaly != -1:
anomaly_number += (2 ** i)
return anomaly_number
| from . import rl
from .models import Inventory, Order
class Warehouse:
def __init__(self, anomaly_aware):
# config
self.cap_conveyor = 5
self.cap_wait = 5
self.reward_order = 30
self.reward_trash = 70
self.reward_wait = 1
self.order_total = 20
self.order_delay = 0
self.anomaly_mtbf = 5
self.anomaly_duration = 10
self.anomaly_wait = 3
self.item_buy = 5
# Warehouse
self.tick = 0
self.anomaly_aware = anomaly_aware
try:
self.rl_model = rl.DQN(path='../model/rl.pth')
self.a_rl_models = [rl.DQN(path='../model/a_rl_0.pth'),
None,
rl.DQN(path='../model/a_rl_2.pth')]
except:
pass
self.c = [0] * 4
self.recent_c = 0
self.recent_s = 0
self.c_waiting = 0
self.c_allow = 3
self.r_allow = [False] * 3
self.s_allow = 3
self.r_wait = [0] * 3
self.s_wait = 0
self.stuck = [False] * 3
self.count = [0] * 3
self.current_anomaly = [-1] * 3
self.reward = 0
self.old_state = None
self.old_decision = None
self.old_reward = 0
def need_decision(self):
if sum(self.c) == 0:
return False
num_true = 0
for ans in self.available():
if ans:
num_true += 1
return num_true > 1
def available(self, i=None):
if i is not None:
inventory_objects = Inventory.objects.filter(stored=i)
ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
return ans
return ans and self.current_anomaly[i] == -1
ans = []
for i in range(3):
inventory_objects = Inventory.objects.filter(stored=i)
single_ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
ans.append(single_ans)
else:
ans.append(single_ans and self.current_anomaly[i] == -1)
return ans
def get_available(self):
available = self.available()
ans = []
for i, avail in enumerate(available):
if avail:
ans.append(i)
return ans
def get_inventory(self, item):
return self.c[item - 1] + len(Inventory.objects.filter(item_type=item, stored__lt=4))
def get_order(self, is_sum=True):
if is_sum:
return len(Order.objects.all())
orders = []
for i in range(4):
orders.append(len(Order.objects.filter(item_type=i + 1)))
return orders
def get_state(self):
        def repr_list(conveyor):
            # Pack up to five item types into a single base-5 integer
            # (earlier conveyor slots occupy higher digits).
            ans = 0
            for i, item in enumerate(conveyor):
                ans += item.item_type * (5 ** (5 - i - 1))
            return ans
ans = [self.tick, self.recent_c]
for i in range(4):
ans.append(repr_list(Inventory.objects.filter(stored=i)))
ans.extend(self.get_order(False))
return ans
def anomaly_state(self):
anomaly_number = 0
for i, anomaly in enumerate(self.current_anomaly):
if anomaly != -1:
anomaly_number += (2 ** i)
return anomaly_number
| en | 0.453361 | # config # Warehouse | 2.542484 | 3 |
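The Warehouse class above encodes its observation compactly: get_state's repr_list packs up to five conveyor items into one base-5 integer, and anomaly_state packs the three anomaly flags into a bitmask. Below is a minimal standalone sketch of the same arithmetic (plain integers instead of Django model instances, plus an illustrative decoder that is not part of the original class) to make the encoding concrete.

def encode_conveyor(item_types, base=5, slots=5):
    # Mirrors repr_list: earlier slots occupy higher base-5 digits; item types are assumed to be < base.
    value = 0
    for i, item_type in enumerate(item_types):
        value += item_type * (base ** (slots - i - 1))
    return value

def decode_conveyor(value, base=5, slots=5):
    # Illustrative inverse: recover the per-slot item types (0 = empty slot).
    return [(value // (base ** (slots - i - 1))) % base for i in range(slots)]

def encode_anomalies(current_anomaly):
    # Mirrors anomaly_state: bit i is set when conveyor i has an active anomaly (value != -1).
    return sum(2 ** i for i, anomaly in enumerate(current_anomaly) if anomaly != -1)

print(encode_conveyor([2, 1, 3]))     # 2*625 + 1*125 + 3*25 = 1450
print(decode_conveyor(1450))          # [2, 1, 3, 0, 0]
print(encode_anomalies([5, -1, 12]))  # conveyors 0 and 2 anomalous -> 0b101 = 5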
run.py | zjlbzf/MNIST-test | 0 | 6629962 | <gh_stars>0
import threading
import os
def process():
    # os.system (not os.sys) runs a shell command; note that 'conda activate'
    # in its own shell does not affect the next call, so the environment's
    # python.exe is invoked directly by absolute path.
    os.system('conda activate py3.7')
    os.system(r'D:/ProgramData/Anaconda3/envs/py3.7/python.exe g:/code/Projects/AI/Competation/Kaggle/003_MNIST/tf_MNIST.py')
for i in range(3):
    t1 = threading.Thread(target=process, args=[])
    t1.start()
| import threading
import os
def process():
    # os.system (not os.sys) runs a shell command; note that 'conda activate'
    # in its own shell does not affect the next call, so the environment's
    # python.exe is invoked directly by absolute path.
    os.system('conda activate py3.7')
    os.system(r'D:/ProgramData/Anaconda3/envs/py3.7/python.exe g:/code/Projects/AI/Competation/Kaggle/003_MNIST/tf_MNIST.py')
for i in range(3):
    t1 = threading.Thread(target=process, args=[])
    t1.start() | none | 1 | 2.196281 | 2
|
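The launcher above fires three training runs in parallel threads. A slightly more robust variant of the same idea is sketched below: it calls the interpreter through subprocess instead of a shell string and joins the threads so the launcher waits for the runs to finish. The interpreter and script paths are the machine-specific ones from the script above and are assumptions about that environment.

import subprocess
import threading

PYTHON = r'D:/ProgramData/Anaconda3/envs/py3.7/python.exe'
SCRIPT = r'g:/code/Projects/AI/Competation/Kaggle/003_MNIST/tf_MNIST.py'

def process():
    # subprocess.run with an argument list avoids shell quoting problems
    # and blocks until the training run exits.
    subprocess.run([PYTHON, SCRIPT], check=False)

threads = [threading.Thread(target=process) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for all three runs to complete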
helixswarm/adapters/aio.py | pbelskiy/helix-swarm | 2 | 6629963 | <filename>helixswarm/adapters/aio.py
import asyncio
from typing import Any, Callable, Optional, Union
from aiohttp import (
BasicAuth,
ClientError,
ClientResponse,
ClientSession,
ClientTimeout,
)
from helixswarm.swarm import Response, Swarm, SwarmError
class RetryClientSession:
def __init__(self, loop: Optional[asyncio.AbstractEventLoop], options: dict):
self.total = options['total']
self.factor = options.get('factor', 1)
self.statuses = options.get('statuses', [])
self.session = ClientSession(loop=loop)
async def request(self, *args: Any, **kwargs: Any) -> ClientResponse:
for total in range(self.total):
try:
response = await self.session.request(*args, **kwargs)
except (ClientError, asyncio.TimeoutError) as e:
if total + 1 == self.total:
raise SwarmError from e
else:
if response.status not in self.statuses:
break
await asyncio.sleep(self.factor * (2 ** (total - 1)))
return response
async def close(self) -> None:
await self.session.close()
class SwarmAsyncClient(Swarm):
session = None # type: Union[ClientSession, RetryClientSession]
timeout = None
def __init__(self,
url: str,
user: str,
password: str,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
verify: bool = True,
timeout: Optional[float] = None,
retry: Optional[dict] = None
):
"""
Swarm async client class.
* url: ``str``
          URL of the Swarm server; must include the API version.
* user: ``str``
User name, login.
* password: ``str``
Password for user.
* loop: ``AbstractEventLoop`` (optional)
Asyncio current event loop.
* verify: ``bool`` (optional)
Verify SSL (default: true).
        * timeout: ``float`` (optional)
          HTTP request timeout.
        * retry: ``dict`` (optional)
          Retry options to prevent failures if the server is restarting or there
          is a temporary network problem. Disabled by default; use total > 0 to enable.
          - total: ``int`` Total retry count.
          - factor: ``int`` Sleep factor between retries (default 1)
            {factor} * (2 ** ({number of total retries} - 1))
          - statuses: ``List[int]`` HTTP statuses to retry on (default [])
Example:
.. code-block:: python
retry = dict(
total=10,
factor=1,
statuses=[500]
)
        :returns: ``SwarmAsyncClient instance``
:raises: ``SwarmError``
"""
super().__init__()
self.loop = loop or asyncio.get_event_loop()
self.host, self.version = self._get_host_and_api_version(url)
self.auth = BasicAuth(user, password)
if retry:
self._validate_retry_argument(retry)
self.session = RetryClientSession(loop, retry)
else:
self.session = ClientSession(loop=self.loop)
self.verify = verify
if timeout:
self.timeout = ClientTimeout(total=timeout)
async def close(self) -> None: # type: ignore
await self.session.close()
async def request(self, # type: ignore
callback: Callable,
method: str,
path: str,
fcb: Optional[Callable] = None,
**kwargs: Any
) -> dict:
if self.timeout and 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
response = await self.session.request(
method,
'{host}/api/v{version}/{path}'.format(
host=self.host,
version=self.version,
path=path,
),
auth=self.auth,
ssl=self.verify,
**kwargs
)
body = await response.text()
return callback(Response(response.status, body), fcb)
| <filename>helixswarm/adapters/aio.py
import asyncio
from typing import Any, Callable, Optional, Union
from aiohttp import (
BasicAuth,
ClientError,
ClientResponse,
ClientSession,
ClientTimeout,
)
from helixswarm.swarm import Response, Swarm, SwarmError
class RetryClientSession:
def __init__(self, loop: Optional[asyncio.AbstractEventLoop], options: dict):
self.total = options['total']
self.factor = options.get('factor', 1)
self.statuses = options.get('statuses', [])
self.session = ClientSession(loop=loop)
async def request(self, *args: Any, **kwargs: Any) -> ClientResponse:
for total in range(self.total):
try:
response = await self.session.request(*args, **kwargs)
except (ClientError, asyncio.TimeoutError) as e:
if total + 1 == self.total:
raise SwarmError from e
else:
if response.status not in self.statuses:
break
await asyncio.sleep(self.factor * (2 ** (total - 1)))
return response
async def close(self) -> None:
await self.session.close()
class SwarmAsyncClient(Swarm):
session = None # type: Union[ClientSession, RetryClientSession]
timeout = None
def __init__(self,
url: str,
user: str,
password: str,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
verify: bool = True,
timeout: Optional[float] = None,
retry: Optional[dict] = None
):
"""
Swarm async client class.
* url: ``str``
          URL of the Swarm server; must include the API version.
* user: ``str``
User name, login.
* password: ``str``
Password for user.
* loop: ``AbstractEventLoop`` (optional)
Asyncio current event loop.
* verify: ``bool`` (optional)
Verify SSL (default: true).
        * timeout: ``float`` (optional)
          HTTP request timeout.
        * retry: ``dict`` (optional)
          Retry options to prevent failures if the server is restarting or there
          is a temporary network problem. Disabled by default; use total > 0 to enable.
          - total: ``int`` Total retry count.
          - factor: ``int`` Sleep factor between retries (default 1)
            {factor} * (2 ** ({number of total retries} - 1))
          - statuses: ``List[int]`` HTTP statuses to retry on (default [])
Example:
.. code-block:: python
retry = dict(
total=10,
factor=1,
statuses=[500]
)
        :returns: ``SwarmAsyncClient instance``
:raises: ``SwarmError``
"""
super().__init__()
self.loop = loop or asyncio.get_event_loop()
self.host, self.version = self._get_host_and_api_version(url)
self.auth = BasicAuth(user, password)
if retry:
self._validate_retry_argument(retry)
self.session = RetryClientSession(loop, retry)
else:
self.session = ClientSession(loop=self.loop)
self.verify = verify
if timeout:
self.timeout = ClientTimeout(total=timeout)
async def close(self) -> None: # type: ignore
await self.session.close()
async def request(self, # type: ignore
callback: Callable,
method: str,
path: str,
fcb: Optional[Callable] = None,
**kwargs: Any
) -> dict:
if self.timeout and 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
response = await self.session.request(
method,
'{host}/api/v{version}/{path}'.format(
host=self.host,
version=self.version,
path=path,
),
auth=self.auth,
ssl=self.verify,
**kwargs
)
body = await response.text()
return callback(Response(response.status, body), fcb)
| en | 0.499065 | # type: Union[ClientSession, RetryClientSession] Swarm async client class. * url: ``str`` Url of Swarm server, must include API version. * user: ``str`` User name, login. * password: ``str`` Password for user. * loop: ``AbstractEventLoop`` (optional) Asyncio current event loop. * verify: ``bool`` (optional) Verify SSL (default: true). * timeout: ``int``, (optional) HTTP request timeout. * retry: ``dict`` (optional) Retry options to prevent failures if server restarting or temporary network problem. Disabled by default use total > 0 to enable. - total: ``int`` Total retries count. - factor: ``int`` Sleep factor between retries (default 1) {factor} * (2 ** ({number of total retries} - 1)) - statuses: ``List[int]`` HTTP statues retries on. (default []) Example: .. code-block:: python retry = dict( total=10, factor=1, statuses=[500] ) :returns: ``SwarmClient instance`` :raises: ``SwarmError`` # type: ignore # type: ignore | 2.420816 | 2 |
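A minimal usage sketch for the client defined above, assuming the package exposes SwarmAsyncClient at the top level; the host, credentials, timeout, and retry values are placeholders, and the concrete review/comment helpers live elsewhere in the library.

import asyncio

from helixswarm import SwarmAsyncClient  # assumed top-level export

async def main():
    client = SwarmAsyncClient(
        url='http://swarm.example.com/api/v9',  # placeholder host; the URL must include the API version
        user='user',
        password='password',
        timeout=30,
        retry=dict(total=5, factor=1, statuses=[500, 502]),  # retry transient server errors
    )
    try:
        ...  # call the review/comment helpers provided by the Swarm base class
    finally:
        await client.close()

asyncio.run(main())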
setup.py | jd-boyd/HowLong | 17 | 6629964 | from setuptools import setup, find_packages
setup(author='<NAME>',
description='A simple timing utility for long running processes',
name='howlong',
py_modules=[
'HowLong.HowLong',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'howlong = HowLong.HowLong:howlong'
]
},
install_requires=[
'psutil>=5.0.1',
'termcolor>=1.1.0',
'colorama>=0.3.9'
],
url='https://github.com/mattjegan/howlong',
version='0.0.2'
)
| from setuptools import setup, find_packages
setup(author='<NAME>',
description='A simple timing utility for long running processes',
name='howlong',
py_modules=[
'HowLong.HowLong',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'howlong = HowLong.HowLong:howlong'
]
},
install_requires=[
'psutil>=5.0.1',
'termcolor>=1.1.0',
'colorama>=0.3.9'
],
url='https://github.com/mattjegan/howlong',
version='0.0.2'
)
| none | 1 | 1.296488 | 1 |
|
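The entry_points table above wires a `howlong` console command to the `howlong` function in HowLong/HowLong.py. The sketch below is a hypothetical, minimal shape of such an entry function (not the project's actual implementation): it times an arbitrary child command, which is roughly what a timing utility for long-running processes needs to do.

# HowLong/HowLong.py -- hypothetical minimal entry point target
import subprocess
import sys
from datetime import datetime

def howlong():
    if len(sys.argv) < 2:
        print('usage: howlong <command> [args...]')
        return
    start = datetime.now()
    subprocess.run(sys.argv[1:])      # run the wrapped command to completion
    print('Elapsed:', datetime.now() - start)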
api/src/tests/application/functional_tests/housing_units/test_endpoints.py | iliaskaras/housing-units | 0 | 6629965 | <gh_stars>0
import pytest
from fastapi.testclient import TestClient
from tests.application.functional_tests.housing_units.utils import get_cleaned_housing_units_response
from application.main import app
client = TestClient(app)
@pytest.mark.asyncio
async def test_filter_housing_units_get_request(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get("/housing-units", headers={"Authorization": "Bearer {}".format(admin_jwt_token)})
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == len(stub_housing_units)
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2
},
{
'project_id': 'project id 2', 'street_name': 'street name test 2', 'borough': 'Brooklyn', 'postcode': 2,
'reporting_construction_type': 'construction type test 2', 'total_units': 4
},
{
'project_id': 'project id 3', 'street_name': 'street name test 3', 'borough': 'Staten Island',
'postcode': 3, 'reporting_construction_type': 'construction type test 3', 'total_units': 6
},
{
'project_id': 'project id 4', 'street_name': 'street name test 4', 'borough': 'Manhattan', 'postcode': 4,
'reporting_construction_type': 'construction type test 4', 'total_units': 8
},
{
'project_id': 'project id 5', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 10
},
{
'project_id': 'project id 6', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 12
},
{
'project_id': 'project id 7', 'street_name': 'street name test 2', 'borough': 'Brooklyn', 'postcode': 2,
'reporting_construction_type': 'construction type test 2', 'total_units': 14
},
{
'project_id': 'project id 11', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 16
},
{
'project_id': 'project id 8', 'street_name': 'street name test 3', 'borough': 'Staten Island',
'postcode': 3, 'reporting_construction_type': 'construction type test 3', 'total_units': 16
},
{
'project_id': 'project id 9', 'street_name': 'street name test 4', 'borough': 'Manhattan', 'postcode': 4,
'reporting_construction_type': 'construction type test 4', 'total_units': 18
},
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
},
{
'project_id': 'project id 12', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 25
},
{
'project_id': 'project id 12', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 25
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15",
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
)
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == 1
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
)
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == 1
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15"
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_raise_error_when_num_units_min_is_greater_than_max(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15&num_units_max=10",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
)
assert response.status_code == 400
assert response.json() == {
"Detail": "The provided number of maximum units can't be smaller than the number of minimum units",
"Type": "ValidationError"
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {
'uuid': stub_housing_units[0].uuid,
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2, 'project_name': 'project name 1',
'project_start_date': '2018-12-25T11:27:53', 'project_completion_date': None,
'building_id': None, 'house_number': None, 'bbl': None, 'bin': None, 'community_board': 'community board 1',
'council_district': None, 'census_tract': None, 'neighborhood_tabulation_area': None, 'latitude': None,
'longitude': None, 'latitude_internal': None, 'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'extended affordability status 1',
'prevailing_wage_status': 'prevailing wage status 1', 'extremely_low_income_units': 0,
'very_low_income_units': 0, 'low_income_units': 0, 'moderate_income_units': 0, 'middle_income_units': 2,
'other_income_units': 0, 'studio_units': 0, 'one_br_units': 1, 'two_br_units': 0, 'three_br_units': 1,
'four_br_units': 0, 'five_br_units': 0, 'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 1,
'counted_homeownership_units': 1, 'all_counted_units': 2
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {
'uuid': stub_housing_units[0].uuid,
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2, 'project_name': 'project name 1',
'project_start_date': '2018-12-25T11:27:53', 'project_completion_date': None,
'building_id': None, 'house_number': None, 'bbl': None, 'bin': None, 'community_board': 'community board 1',
'council_district': None, 'census_tract': None, 'neighborhood_tabulation_area': None, 'latitude': None,
'longitude': None, 'latitude_internal': None, 'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'extended affordability status 1',
'prevailing_wage_status': 'prevailing wage status 1', 'extremely_low_income_units': 0,
'very_low_income_units': 0, 'low_income_units': 0, 'moderate_income_units': 0, 'middle_income_units': 2,
'other_income_units': 0, 'studio_units': 0, 'one_br_units': 1, 'two_br_units': 0, 'three_br_units': 1,
'four_br_units': 0, 'five_br_units': 0, 'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 1,
'counted_homeownership_units': 1, 'all_counted_units': 2
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_post_housing_unit_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token, full_housing_unit_request_body
):
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == full_housing_unit_request_body
@pytest.mark.asyncio
async def test_post_housing_unit_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token, full_housing_unit_request_body
):
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == full_housing_unit_request_body
@pytest.mark.asyncio
async def test_post_housing_unit_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.post(
"/housing-units/",
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_post_housing_unit_request_raise_unprocessable_entity_errors(
populate_users,
populate_housing_units,
stub_housing_units,
admin_jwt_token,
full_housing_unit_request_body
):
    # Remove project_id, which is required.
    full_housing_unit_request_body.pop('project_id')
    # Set a wrongly formatted date on the date fields.
    full_housing_unit_request_body['project_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['building_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['project_start_date'] = 'not_correct_date'
    # Set strings on the integer and float fields.
full_housing_unit_request_body['postcode'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['total_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['building_id'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bbl'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bin'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['council_district'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['extremely_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['very_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['moderate_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['middle_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['other_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['studio_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['one_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['unknown_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_rental_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_homeownership_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['all_counted_units'] = 'not_correct_integer_or_float'
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 422
assert response.json() == {
'detail': [
{'loc': ['body', 'one_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_id'], 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ['body', 'building_id'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bbl'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bin'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'council_district'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'latitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'latitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'extremely_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'very_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'low_income_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'moderate_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'middle_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'other_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'studio_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'unknown_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'counted_rental_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'counted_homeownership_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'all_counted_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'postcode'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'total_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_start_date'], 'msg': 'invalid datetime format', 'type': 'value_error.datetime'},
{'loc': ['body', 'project_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'},
{'loc': ['body', 'building_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'}
]
}
@pytest.mark.asyncio
async def test_put_housing_unit_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
put_request_body = {
"project_id": "44223",
"street_name": "RALPH AVENUE TEST",
"borough": "Brooklyn",
"postcode": None,
"reporting_construction_type": "New Construction TEST",
"total_units": 10,
"project_name": "ROCHESTER SUYDAM PHASE 1",
"project_start_date": "2021-06-30T00:00:00",
"project_completion_date": None,
"building_id": 977564,
"house_number": "329/331",
"bbl": None,
"bin": None,
"community_board": "BK-03",
"council_district": 977564,
"census_tract": None,
"neighborhood_tabulation_area": None,
"latitude": None,
"longitude": None,
"latitude_internal": None,
"longitude_internal": None,
"building_completion_date": None,
"extended_affordability_status": "No",
"prevailing_wage_status": "Non Prevailing Wage",
"extremely_low_income_units": 0,
"very_low_income_units": 0,
"low_income_units": 0,
"moderate_income_units": 10,
"middle_income_units": 0,
"other_income_units": 0,
"studio_units": 0,
"one_br_units": 3,
"two_br_units": 7,
"three_br_units": 0,
"four_br_units": 0,
"five_br_units": 0,
"six_br_units": 0,
"unknown_br_units": 0,
"counted_rental_units": 0,
"counted_homeownership_units": 10,
"all_counted_units": 10
}
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=put_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == {
'project_id': '44223', 'street_name': 'RALPH AVENUE TEST', 'borough': 'Brooklyn', 'postcode': None,
'reporting_construction_type': 'New Construction TEST', 'total_units': 10,
'project_name': 'ROCHESTER SUYDAM PHASE 1', 'project_start_date': '2021-06-30T00:00:00',
'project_completion_date': None, 'building_id': 977564, 'house_number': '329/331', 'bbl': None,
'bin': None, 'community_board': 'BK-03', 'council_district': 977564, 'census_tract': None,
'neighborhood_tabulation_area': None, 'latitude': None, 'longitude': None, 'latitude_internal': None,
'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'No', 'prevailing_wage_status': 'Non Prevailing Wage',
'extremely_low_income_units': 0, 'very_low_income_units': 0, 'low_income_units': 0,
'moderate_income_units': 10, 'middle_income_units': 0, 'other_income_units': 0, 'studio_units': 0,
'one_br_units': 3, 'two_br_units': 7, 'three_br_units': 0, 'four_br_units': 0, 'five_br_units': 0,
'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 0, 'counted_homeownership_units': 10,
'all_counted_units': 10
}
@pytest.mark.asyncio
async def test_put_housing_unit_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
put_request_body = {
"project_id": "44223",
"street_name": "RALPH AVENUE TEST",
"borough": "Brooklyn",
"postcode": None,
"reporting_construction_type": "New Construction TEST",
"total_units": 10,
"project_name": "ROCHESTER SUYDAM PHASE 1",
"project_start_date": "2021-06-30T00:00:00",
"project_completion_date": None,
"building_id": 977564,
"house_number": "329/331",
"bbl": None,
"bin": None,
"community_board": "BK-03",
"council_district": 977564,
"census_tract": None,
"neighborhood_tabulation_area": None,
"latitude": None,
"longitude": None,
"latitude_internal": None,
"longitude_internal": None,
"building_completion_date": None,
"extended_affordability_status": "No",
"prevailing_wage_status": "Non Prevailing Wage",
"extremely_low_income_units": 0,
"very_low_income_units": 0,
"low_income_units": 0,
"moderate_income_units": 10,
"middle_income_units": 0,
"other_income_units": 0,
"studio_units": 0,
"one_br_units": 3,
"two_br_units": 7,
"three_br_units": 0,
"four_br_units": 0,
"five_br_units": 0,
"six_br_units": 0,
"unknown_br_units": 0,
"counted_rental_units": 0,
"counted_homeownership_units": 10,
"all_counted_units": 10
}
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
json=put_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == {
'project_id': '44223', 'street_name': 'RALPH AVENUE TEST', 'borough': 'Brooklyn', 'postcode': None,
'reporting_construction_type': 'New Construction TEST', 'total_units': 10,
'project_name': 'ROCHESTER SUYDAM PHASE 1', 'project_start_date': '2021-06-30T00:00:00',
'project_completion_date': None, 'building_id': 977564, 'house_number': '329/331', 'bbl': None,
'bin': None, 'community_board': 'BK-03', 'council_district': 977564, 'census_tract': None,
'neighborhood_tabulation_area': None, 'latitude': None, 'longitude': None, 'latitude_internal': None,
'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'No', 'prevailing_wage_status': 'Non Prevailing Wage',
'extremely_low_income_units': 0, 'very_low_income_units': 0, 'low_income_units': 0,
'moderate_income_units': 10, 'middle_income_units': 0, 'other_income_units': 0, 'studio_units': 0,
'one_br_units': 3, 'two_br_units': 7, 'three_br_units': 0, 'four_br_units': 0, 'five_br_units': 0,
'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 0, 'counted_homeownership_units': 10,
'all_counted_units': 10
}
@pytest.mark.asyncio
async def test_put_housing_unit_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_put_housing_unit_request_raise_unprocessable_entity_errors(
populate_users,
populate_housing_units,
stub_housing_units,
admin_jwt_token,
full_housing_unit_request_body
):
    # Remove project_id, which is required.
    full_housing_unit_request_body.pop('project_id')
    # Set a wrongly formatted date on the date fields.
    full_housing_unit_request_body['project_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['building_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['project_start_date'] = 'not_correct_date'
    # Set strings on the integer and float fields.
full_housing_unit_request_body['postcode'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['total_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['building_id'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bbl'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bin'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['council_district'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['extremely_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['very_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['moderate_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['middle_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['other_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['studio_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['one_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['unknown_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_rental_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_homeownership_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['all_counted_units'] = 'not_correct_integer_or_float'
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 422
assert response.json() == {
'detail': [
{'loc': ['body', 'one_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_id'], 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ['body', 'building_id'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bbl'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bin'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'council_district'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'latitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'latitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'extremely_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'very_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'low_income_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'moderate_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'middle_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'other_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'studio_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'unknown_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'counted_rental_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'counted_homeownership_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'all_counted_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'postcode'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'total_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_start_date'], 'msg': 'invalid datetime format', 'type': 'value_error.datetime'},
{'loc': ['body', 'project_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'},
{'loc': ['body', 'building_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'}
]
}
@pytest.mark.asyncio
async def test_delete_housing_unit_get_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.delete(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {'status': 'deleted', 'uuid': stub_housing_units[0].uuid}
@pytest.mark.asyncio
async def test_delete_housing_unit_get_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.delete(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {'status': 'deleted', 'uuid': stub_housing_units[0].uuid}
@pytest.mark.asyncio
async def test_delete_housing_unit_get_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
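The admin and customer variants of the tests above differ only in which JWT fixture they use. As a hedged sketch (assuming the same fixtures and TestClient as in this module, and that the token fixtures resolve synchronously), pytest.mark.parametrize combined with request.getfixturevalue could collapse such pairs into one test; the test name below is illustrative and not part of the original suite.

@pytest.mark.asyncio
@pytest.mark.parametrize('token_fixture', ['admin_jwt_token', 'customer_jwt_token'])
async def test_retrieve_housing_unit_for_any_role(
    request, populate_users, populate_housing_units, stub_housing_units, token_fixture
):
    token = request.getfixturevalue(token_fixture)  # resolve the role-specific JWT fixture by name
    response = client.get(
        "/housing-units/{}".format(stub_housing_units[0].uuid),
        headers={"Authorization": "Bearer {}".format(token)},
    )
    assert response.status_code == 200
    assert response.json()['uuid'] == stub_housing_units[0].uuid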
| import pytest
from fastapi.testclient import TestClient
from tests.application.functional_tests.housing_units.utils import get_cleaned_housing_units_response
from application.main import app
client = TestClient(app)
@pytest.mark.asyncio
async def test_filter_housing_units_get_request(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get("/housing-units", headers={"Authorization": "Bearer {}".format(admin_jwt_token)})
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == len(stub_housing_units)
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2
},
{
'project_id': 'project id 2', 'street_name': 'street name test 2', 'borough': 'Brooklyn', 'postcode': 2,
'reporting_construction_type': 'construction type test 2', 'total_units': 4
},
{
'project_id': 'project id 3', 'street_name': 'street name test 3', 'borough': 'Staten Island',
'postcode': 3, 'reporting_construction_type': 'construction type test 3', 'total_units': 6
},
{
'project_id': 'project id 4', 'street_name': 'street name test 4', 'borough': 'Manhattan', 'postcode': 4,
'reporting_construction_type': 'construction type test 4', 'total_units': 8
},
{
'project_id': 'project id 5', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 10
},
{
'project_id': 'project id 6', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 12
},
{
'project_id': 'project id 7', 'street_name': 'street name test 2', 'borough': 'Brooklyn', 'postcode': 2,
'reporting_construction_type': 'construction type test 2', 'total_units': 14
},
{
'project_id': 'project id 11', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 16
},
{
'project_id': 'project id 8', 'street_name': 'street name test 3', 'borough': 'Staten Island',
'postcode': 3, 'reporting_construction_type': 'construction type test 3', 'total_units': 16
},
{
'project_id': 'project id 9', 'street_name': 'street name test 4', 'borough': 'Manhattan', 'postcode': 4,
'reporting_construction_type': 'construction type test 4', 'total_units': 18
},
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
},
{
'project_id': 'project id 12', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 25
},
{
'project_id': 'project id 12', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 25
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15",
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
)
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == 1
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
)
assert response.status_code == 200
response_json = response.json()
total: int = response_json.get('total')
assert total == 1
housing_units = get_cleaned_housing_units_response(response_json.get('housing_units'))
assert housing_units == [
{
'project_id': 'project id 10', 'street_name': 'street name test 5', 'borough': 'Bronx', 'postcode': 5,
'reporting_construction_type': 'construction type test 5', 'total_units': 20
}
]
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15"
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_filter_housing_units_get_request_raise_error_when_num_units_min_is_greater_than_max(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units?street_name=street name test 5&num_units_min=15&num_units_max=10",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
)
assert response.status_code == 400
assert response.json() == {
"Detail": "The provided number of maximum units can't be smaller than the number of minimum units",
"Type": "ValidationError"
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {
'uuid': stub_housing_units[0].uuid,
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2, 'project_name': 'project name 1',
'project_start_date': '2018-12-25T11:27:53', 'project_completion_date': None,
'building_id': None, 'house_number': None, 'bbl': None, 'bin': None, 'community_board': 'community board 1',
'council_district': None, 'census_tract': None, 'neighborhood_tabulation_area': None, 'latitude': None,
'longitude': None, 'latitude_internal': None, 'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'extended affordability status 1',
'prevailing_wage_status': 'prevailing wage status 1', 'extremely_low_income_units': 0,
'very_low_income_units': 0, 'low_income_units': 0, 'moderate_income_units': 0, 'middle_income_units': 2,
'other_income_units': 0, 'studio_units': 0, 'one_br_units': 1, 'two_br_units': 0, 'three_br_units': 1,
'four_br_units': 0, 'five_br_units': 0, 'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 1,
'counted_homeownership_units': 1, 'all_counted_units': 2
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {
'uuid': stub_housing_units[0].uuid,
'project_id': 'project id 1', 'street_name': 'street name test 1', 'borough': 'Queens', 'postcode': 1,
'reporting_construction_type': 'construction type test 1', 'total_units': 2, 'project_name': 'project name 1',
'project_start_date': '2018-12-25T11:27:53', 'project_completion_date': None,
'building_id': None, 'house_number': None, 'bbl': None, 'bin': None, 'community_board': 'community board 1',
'council_district': None, 'census_tract': None, 'neighborhood_tabulation_area': None, 'latitude': None,
'longitude': None, 'latitude_internal': None, 'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'extended affordability status 1',
'prevailing_wage_status': 'prevailing wage status 1', 'extremely_low_income_units': 0,
'very_low_income_units': 0, 'low_income_units': 0, 'moderate_income_units': 0, 'middle_income_units': 2,
'other_income_units': 0, 'studio_units': 0, 'one_br_units': 1, 'two_br_units': 0, 'three_br_units': 1,
'four_br_units': 0, 'five_br_units': 0, 'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 1,
'counted_homeownership_units': 1, 'all_counted_units': 2
}
@pytest.mark.asyncio
async def test_retrieve_housing_unit_get_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.get(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_post_housing_unit_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token, full_housing_unit_request_body
):
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == full_housing_unit_request_body
@pytest.mark.asyncio
async def test_post_housing_unit_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token, full_housing_unit_request_body
):
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == full_housing_unit_request_body
@pytest.mark.asyncio
async def test_post_housing_unit_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.post(
"/housing-units/",
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_post_housing_unit_request_raise_unprocessable_entity_errors(
populate_users,
populate_housing_units,
stub_housing_units,
admin_jwt_token,
full_housing_unit_request_body
):
    # Remove project_id, which is required.
    full_housing_unit_request_body.pop('project_id')
    # Set a wrongly formatted date on the date fields.
    full_housing_unit_request_body['project_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['building_completion_date'] = 'not_correct_date'
    full_housing_unit_request_body['project_start_date'] = 'not_correct_date'
    # Set strings on the integer and float fields.
full_housing_unit_request_body['postcode'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['total_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['building_id'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bbl'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bin'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['council_district'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['extremely_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['very_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['moderate_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['middle_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['other_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['studio_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['one_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['unknown_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_rental_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_homeownership_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['all_counted_units'] = 'not_correct_integer_or_float'
response = client.post(
"/housing-units/",
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 422
assert response.json() == {
'detail': [
{'loc': ['body', 'one_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_id'], 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ['body', 'building_id'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bbl'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bin'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'council_district'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'latitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'latitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'extremely_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'very_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'low_income_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'moderate_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'middle_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'other_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'studio_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'unknown_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'counted_rental_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'counted_homeownership_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'all_counted_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'postcode'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'total_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_start_date'], 'msg': 'invalid datetime format', 'type': 'value_error.datetime'},
{'loc': ['body', 'project_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'},
{'loc': ['body', 'building_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'}
]
}
@pytest.mark.asyncio
async def test_put_housing_unit_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
put_request_body = {
"project_id": "44223",
"street_name": "RALPH AVENUE TEST",
"borough": "Brooklyn",
"postcode": None,
"reporting_construction_type": "New Construction TEST",
"total_units": 10,
"project_name": "ROCHESTER SUYDAM PHASE 1",
"project_start_date": "2021-06-30T00:00:00",
"project_completion_date": None,
"building_id": 977564,
"house_number": "329/331",
"bbl": None,
"bin": None,
"community_board": "BK-03",
"council_district": 977564,
"census_tract": None,
"neighborhood_tabulation_area": None,
"latitude": None,
"longitude": None,
"latitude_internal": None,
"longitude_internal": None,
"building_completion_date": None,
"extended_affordability_status": "No",
"prevailing_wage_status": "Non Prevailing Wage",
"extremely_low_income_units": 0,
"very_low_income_units": 0,
"low_income_units": 0,
"moderate_income_units": 10,
"middle_income_units": 0,
"other_income_units": 0,
"studio_units": 0,
"one_br_units": 3,
"two_br_units": 7,
"three_br_units": 0,
"four_br_units": 0,
"five_br_units": 0,
"six_br_units": 0,
"unknown_br_units": 0,
"counted_rental_units": 0,
"counted_homeownership_units": 10,
"all_counted_units": 10
}
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=put_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == {
'project_id': '44223', 'street_name': 'RALPH AVENUE TEST', 'borough': 'Brooklyn', 'postcode': None,
'reporting_construction_type': 'New Construction TEST', 'total_units': 10,
'project_name': 'ROCHESTER SUYDAM PHASE 1', 'project_start_date': '2021-06-30T00:00:00',
'project_completion_date': None, 'building_id': 977564, 'house_number': '329/331', 'bbl': None,
'bin': None, 'community_board': 'BK-03', 'council_district': 977564, 'census_tract': None,
'neighborhood_tabulation_area': None, 'latitude': None, 'longitude': None, 'latitude_internal': None,
'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'No', 'prevailing_wage_status': 'Non Prevailing Wage',
'extremely_low_income_units': 0, 'very_low_income_units': 0, 'low_income_units': 0,
'moderate_income_units': 10, 'middle_income_units': 0, 'other_income_units': 0, 'studio_units': 0,
'one_br_units': 3, 'two_br_units': 7, 'three_br_units': 0, 'four_br_units': 0, 'five_br_units': 0,
'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 0, 'counted_homeownership_units': 10,
'all_counted_units': 10
}
@pytest.mark.asyncio
async def test_put_housing_unit_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
put_request_body = {
"project_id": "44223",
"street_name": "RALPH AVENUE TEST",
"borough": "Brooklyn",
"postcode": None,
"reporting_construction_type": "New Construction TEST",
"total_units": 10,
"project_name": "ROCHESTER SUYDAM PHASE 1",
"project_start_date": "2021-06-30T00:00:00",
"project_completion_date": None,
"building_id": 977564,
"house_number": "329/331",
"bbl": None,
"bin": None,
"community_board": "BK-03",
"council_district": 977564,
"census_tract": None,
"neighborhood_tabulation_area": None,
"latitude": None,
"longitude": None,
"latitude_internal": None,
"longitude_internal": None,
"building_completion_date": None,
"extended_affordability_status": "No",
"prevailing_wage_status": "Non Prevailing Wage",
"extremely_low_income_units": 0,
"very_low_income_units": 0,
"low_income_units": 0,
"moderate_income_units": 10,
"middle_income_units": 0,
"other_income_units": 0,
"studio_units": 0,
"one_br_units": 3,
"two_br_units": 7,
"three_br_units": 0,
"four_br_units": 0,
"five_br_units": 0,
"six_br_units": 0,
"unknown_br_units": 0,
"counted_rental_units": 0,
"counted_homeownership_units": 10,
"all_counted_units": 10
}
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)},
json=put_request_body
)
assert response.status_code == 200
response_json = response.json()
assert response_json.get('uuid', None)
response_json.pop('uuid')
assert response_json == {
'project_id': '44223', 'street_name': 'RALPH AVENUE TEST', 'borough': 'Brooklyn', 'postcode': None,
'reporting_construction_type': 'New Construction TEST', 'total_units': 10,
'project_name': 'ROCHESTER SUYDAM PHASE 1', 'project_start_date': '2021-06-30T00:00:00',
'project_completion_date': None, 'building_id': 977564, 'house_number': '329/331', 'bbl': None,
'bin': None, 'community_board': 'BK-03', 'council_district': 977564, 'census_tract': None,
'neighborhood_tabulation_area': None, 'latitude': None, 'longitude': None, 'latitude_internal': None,
'longitude_internal': None, 'building_completion_date': None,
'extended_affordability_status': 'No', 'prevailing_wage_status': 'Non Prevailing Wage',
'extremely_low_income_units': 0, 'very_low_income_units': 0, 'low_income_units': 0,
'moderate_income_units': 10, 'middle_income_units': 0, 'other_income_units': 0, 'studio_units': 0,
'one_br_units': 3, 'two_br_units': 7, 'three_br_units': 0, 'four_br_units': 0, 'five_br_units': 0,
'six_br_units': 0, 'unknown_br_units': 0, 'counted_rental_units': 0, 'counted_homeownership_units': 10,
'all_counted_units': 10
}
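# NOTE (illustrative sketch, not part of the original suite): the two successful PUT tests
# above differ only in which JWT fixture they use, so they could be collapsed by
# parametrizing over the fixture names and resolving them at run time:
#
#   @pytest.mark.asyncio
#   @pytest.mark.parametrize("jwt_fixture", ["admin_jwt_token", "customer_jwt_token"])
#   async def test_put_housing_unit_request_allowed_roles(
#       populate_users, populate_housing_units, stub_housing_units, jwt_fixture, request
#   ):
#       token = request.getfixturevalue(jwt_fixture)  # look up the fixture by name
#       response = client.put(
#           "/housing-units/{}".format(stub_housing_units[0].uuid),
#           headers={"Authorization": "Bearer {}".format(token)},
#           json=put_request_body,  # same body as built in the tests above (inline or via a fixture)
#       )
#       assert response.status_code == 200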
@pytest.mark.asyncio
async def test_put_housing_unit_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'}
@pytest.mark.asyncio
async def test_put_housing_unit_request_raise_unprocessable_entity_errors(
populate_users,
populate_housing_units,
stub_housing_units,
admin_jwt_token,
full_housing_unit_request_body
):
# Remove project_id which is required.
full_housing_unit_request_body.pop('project_id')
# Add a wrong formatted date to date fields.
full_housing_unit_request_body['project_completion_date'] = 'not_correct_date'
full_housing_unit_request_body['building_completion_date'] = 'not_correct_date'
full_housing_unit_request_body['project_start_date'] = 'not_correct_date'
# Add string to integer and float fields.
full_housing_unit_request_body['postcode'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['total_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['building_id'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bbl'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['bin'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['council_district'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['latitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['longitude_internal'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['extremely_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['very_low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['low_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['moderate_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['middle_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['other_income_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['studio_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['one_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['unknown_br_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_rental_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['counted_homeownership_units'] = 'not_correct_integer_or_float'
full_housing_unit_request_body['all_counted_units'] = 'not_correct_integer_or_float'
response = client.put(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)},
json=full_housing_unit_request_body
)
assert response.status_code == 422
assert response.json() == {
'detail': [
{'loc': ['body', 'one_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_id'], 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ['body', 'building_id'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bbl'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'bin'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'council_district'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'latitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'latitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'longitude_internal'], 'msg': 'value is not a valid float', 'type': 'type_error.float'},
{'loc': ['body', 'extremely_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'very_low_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'low_income_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'moderate_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'middle_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'other_income_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'studio_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'unknown_br_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'counted_rental_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'counted_homeownership_units'], 'msg': 'value is not a valid integer',
'type': 'type_error.integer'},
{'loc': ['body', 'all_counted_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'postcode'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'total_units'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer'},
{'loc': ['body', 'project_start_date'], 'msg': 'invalid datetime format', 'type': 'value_error.datetime'},
{'loc': ['body', 'project_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'},
{'loc': ['body', 'building_completion_date'], 'msg': 'invalid datetime format',
'type': 'value_error.datetime'}
]
}
@pytest.mark.asyncio
async def test_delete_housing_unit_request_called_by_customer(
populate_users, populate_housing_units, stub_housing_units, customer_jwt_token
):
response = client.delete(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(customer_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {'status': 'deleted', 'uuid': stub_housing_units[0].uuid}
@pytest.mark.asyncio
async def test_delete_housing_unit_request_called_by_admin(
populate_users, populate_housing_units, stub_housing_units, admin_jwt_token
):
response = client.delete(
"/housing-units/{}".format(stub_housing_units[0].uuid),
headers={"Authorization": "Bearer {}".format(admin_jwt_token)}
)
assert response.status_code == 200
response_json = response.json()
assert response_json == {'status': 'deleted', 'uuid': stub_housing_units[0].uuid}
@pytest.mark.asyncio
async def test_delete_housing_unit_request_raise_authorization_error_when_jwt_not_provided(
populate_users, populate_housing_units, stub_housing_units
):
    response = client.delete(
"/housing-units/{}".format(stub_housing_units[0].uuid),
)
assert response.status_code == 403
assert response.json() == {'detail': 'Not authenticated'} | en | 0.914375 | # Remove project_id which is required. # Add a wrong formatted date to date fields. # Add string to integer and float fields. # Remove project_id which is required. # Add a wrong formatted date to date fields. # Add string to integer and float fields. | 2.445397 | 2 |
Source/PW2_main.py | xaviercucurull/sel-forests | 0 | 6629966 | """
Supervised and Experiential Learning (SEL)
Master in Artificial Intelligence (UPC)
PW2 - Implementation of a Decision Forest and Random Forest
Author: <NAME> <<EMAIL>>
Course: 2020/2021
"""
import sys
import os
sys.path.append(os.path.abspath(r'..'))
import pandas as pd
import time
import math
from sklearn.metrics import classification_report, f1_score, accuracy_score
from sklearn.model_selection import train_test_split
from Data import datasets
from cart import CART
from forest import DecisionForest, RandomForest
def test_small():
# Load Heart Disease database from CSV
# https://www.kaggle.com/ronitf/heart-disease-uci
print('###########################################################')
print('################## Test Small Dataset ####################')
print('################## Heart Disease ####################')
print('###########################################################\n')
x, y = datasets.load_csv(os.path.join('..','DATA', 'heart.csv'))
cart_acc = test_CART(x, y)
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def test_medium():
# Load Mammographic Mass dataset
print('###########################################################')
print('################## Test Medium Dataset ####################')
print('################## Mammographic Mass ####################')
print('###########################################################\n')
x, y = datasets.load_mammographic_mass()
cart_acc = test_CART(x, y)
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def test_large():
# Load Rice dataset
print('###########################################################')
print('#################### Test Large Dataset ###################')
print('######################### Rice #########################')
print('###########################################################\n')
x, y = datasets.load_rice()
cart_acc = test_CART(x, y)
results_rf, results_df = None, None
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def split_dataset(x, y):
""" Split data into 75% train and 25% test
Args:
x (DataFrame): data features
y (array-like): data labels
"""
X_train, X_test, y_train, y_test = train_test_split(x, y, stratify=y, test_size=0.25, random_state=42)
X_train.reset_index(drop=True, inplace=True)
X_test.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)
y_test.reset_index(drop=True, inplace=True)
return X_train, X_test, y_train, y_test
def test_CART(x, y):
""" Test CART
Args:
        x (DataFrame): data features
        y (array-like): data labels
"""
print(' -----------------------')
print('| Test CART |')
print(' -----------------------\n')
# Split data into 75% train and 25% test
X_train, X_test, y_train, y_test = split_dataset(x, y)
print('Data attributes: {}'.format(len(X_train.keys())))
print('Training size: {}'.format(len(y_train)))
print('Test size: {}\n'.format(len(y_test)))
cart = CART(verbose=0)
f1, acc, fit_t, pred_t = evaluate_model(cart, X_train, y_train, X_test, y_test, return_scores=True)
return acc
def test_forest(x, y, NT_values, F_values, forest_classifier):
# Split data into 75% train and 25% test
X_train, X_test, y_train, y_test = split_dataset(x, y)
print('Training size: {}'.format(len(y_train)))
print('Test size: {}\n'.format(len(y_test)))
nt_vals = []
f_vals = []
accuracies = []
fit_times = []
pred_times = []
importances = []
for nt in NT_values:
for f in F_values:
# Instantiate Random Forest and evaluate it
model = forest_classifier(NT=nt, F=f)
model.fit(X_train, y_train)
            f1, acc, fit_t, pred_t = evaluate_model(model, X_train, y_train, X_test, y_test, print_classification_report=False, return_scores=True)
# Save parameters used and results
nt_vals.append(nt)
f_vals.append(f)
accuracies.append(acc)
fit_times.append(fit_t)
pred_times.append(pred_t)
importances.append(model.feature_importances.to_dict())
# Print results
print('NT={} F={} -> Accuracy: {:.2f}%'.format(nt, f, acc*100))
print('Feature importance:\n{}\n'.format(model.feature_importances))
# Save results in a table
results_table = pd.DataFrame({'NT': nt_vals, 'F': f_vals, 'Accuracy': accuracies,
'Fit time': fit_times, 'Prediction times': pred_times,
'Feature importance': importances})
return results_table
def test_random_forest(x, y):
""" Test random forest with proposed hyperparameters
"""
print(' -----------------------')
print('| Test Random Forest |')
print(' -----------------------\n')
M = len(x.columns)
print('Data attributes: {}'.format(M))
# Hyperparameters to test
NT_values = [1, 10, 25, 50, 75, 100]
F_values = [1, 3, int(math.log2(M) + 1), int(math.sqrt(M))]
# Remove duplicates
F_values = set(F_values)
# Evaluate model with all hyperparameter combinations
results_table = test_forest(x, y, NT_values, F_values, RandomForest)
return results_table
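# Illustrative note (not part of the original script): for a dataset with, say, M = 13
# attributes (an assumed value, purely for the arithmetic), the feature-subset sizes
# tried above work out to
#   int(math.log2(13) + 1) == 4   and   int(math.sqrt(13)) == 3
# so after duplicate removal F_values becomes {1, 3, 4}.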
def test_decision_forest(x, y):
""" Test decision forest with proposed hyperparameters
"""
print(' -----------------------')
print('| Test Decision Forest |')
print(' -----------------------\n')
M = len(x.columns)
print('Data attributes: {}'.format(M))
# Hyperparameters to test
NT_values = [1, 10, 25, 50, 75, 100]
F_values = [int(M/4), int(M/2), int(3*M/4), 'Runif(1/M)']
# Remove duplicates
F_values = set(F_values)
# Evaluate model with all hyperparameter combinations
results_table = test_forest(x, y, NT_values, F_values, DecisionForest)
return results_table
def evaluate_model(model, x_train, y_train, x_test, y_test, print_classification_report=True, return_scores=False):
# Train classifier
print('Training model...')
time0 = time.time()
model.fit(x_train, y_train.to_list())
time_fit = time.time() - time0
print('Model trained in {:.1f}s'.format(time_fit))
# Predict test data
time0 = time.time()
y_pred = model.predict(x_test)
time_predict = time.time() - time0
print('Prediction made in {:.1f}s'.format(time_predict))
    if print_classification_report:
print('Classification report:')
print(classification_report(y_test, y_pred))
f1 = f1_score(y_test, y_pred, average='weighted')
acc = accuracy_score(y_test, y_pred)
print('F1-Score: {:.2f}%'.format(f1*100))
print('Accuracy: {:.2f}%\n'.format(acc*100))
if return_scores:
return f1, acc, time_fit, time_predict
if __name__ == '__main__':
small_results_rf, small_results_df, small_cart_acc = test_small()
small_results_rf.to_csv(os.path.join('out', 'small_results_rf.csv'), sep=';')
small_results_df.to_csv(os.path.join('out', 'small_results_df.csv'), sep=';')
medium_results_rf, medium_results_df, medium_cart_acc = test_medium()
medium_results_rf.to_csv(os.path.join('out', 'medium_results_rf.csv'), sep=';')
medium_results_df.to_csv(os.path.join('out', 'medium_results_df.csv'), sep=';')
large_results_rf, large_results_df, large_cart_acc = test_large()
large_results_rf.to_csv(os.path.join('out', 'large_results_rf.csv'), sep=';')
large_results_df.to_csv(os.path.join('out', 'large_results_df.csv'), sep=';') | """
Supervised and Experiential Learning (SEL)
Master in Artificial Intelligence (UPC)
PW2 - Implementation of a Decision Forest and Random Forest
Author: <NAME> <<EMAIL>>
Course: 2020/2021
"""
import sys
import os
sys.path.append(os.path.abspath(r'..'))
import pandas as pd
import time
import math
from sklearn.metrics import classification_report, f1_score, accuracy_score
from sklearn.model_selection import train_test_split
from Data import datasets
from cart import CART
from forest import DecisionForest, RandomForest
def test_small():
# Load Heart Disease database from CSV
# https://www.kaggle.com/ronitf/heart-disease-uci
print('###########################################################')
print('################## Test Small Dataset ####################')
print('################## Heart Disease ####################')
print('###########################################################\n')
x, y = datasets.load_csv(os.path.join('..','DATA', 'heart.csv'))
cart_acc = test_CART(x, y)
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def test_medium():
# Load Mammographic Mass dataset
print('###########################################################')
print('################## Test Medium Dataset ####################')
print('################## Mammographic Mass ####################')
print('###########################################################\n')
x, y = datasets.load_mammographic_mass()
cart_acc = test_CART(x, y)
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def test_large():
# Load Rice dataset
print('###########################################################')
print('#################### Test Large Dataset ###################')
print('######################### Rice #########################')
print('###########################################################\n')
x, y = datasets.load_rice()
cart_acc = test_CART(x, y)
results_rf, results_df = None, None
results_rf = test_random_forest(x, y)
results_df = test_decision_forest(x, y)
return results_rf, results_df, cart_acc
def split_dataset(x, y):
""" Split data into 75% train and 25% test
Args:
x (DataFrame): data features
y (array-like): data labels
"""
X_train, X_test, y_train, y_test = train_test_split(x, y, stratify=y, test_size=0.25, random_state=42)
X_train.reset_index(drop=True, inplace=True)
X_test.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)
y_test.reset_index(drop=True, inplace=True)
return X_train, X_test, y_train, y_test
def test_CART(x, y):
""" Test CART
Args:
        x (DataFrame): data features
        y (array-like): data labels
"""
print(' -----------------------')
print('| Test CART |')
print(' -----------------------\n')
# Split data into 75% train and 25% test
X_train, X_test, y_train, y_test = split_dataset(x, y)
print('Data attributes: {}'.format(len(X_train.keys())))
print('Training size: {}'.format(len(y_train)))
print('Test size: {}\n'.format(len(y_test)))
cart = CART(verbose=0)
f1, acc, fit_t, pred_t = evaluate_model(cart, X_train, y_train, X_test, y_test, return_scores=True)
return acc
def test_forest(x, y, NT_values, F_values, forest_classifier):
# Split data into 75% train and 25% test
X_train, X_test, y_train, y_test = split_dataset(x, y)
print('Training size: {}'.format(len(y_train)))
print('Test size: {}\n'.format(len(y_test)))
nt_vals = []
f_vals = []
accuracies = []
fit_times = []
pred_times = []
importances = []
for nt in NT_values:
for f in F_values:
# Instantiate Random Forest and evaluate it
model = forest_classifier(NT=nt, F=f)
model.fit(X_train, y_train)
            f1, acc, fit_t, pred_t = evaluate_model(model, X_train, y_train, X_test, y_test, print_classification_report=False, return_scores=True)
# Save parameters used and results
nt_vals.append(nt)
f_vals.append(f)
accuracies.append(acc)
fit_times.append(fit_t)
pred_times.append(pred_t)
importances.append(model.feature_importances.to_dict())
# Print results
print('NT={} F={} -> Accuracy: {:.2f}%'.format(nt, f, acc*100))
print('Feature importance:\n{}\n'.format(model.feature_importances))
# Save results in a table
results_table = pd.DataFrame({'NT': nt_vals, 'F': f_vals, 'Accuracy': accuracies,
'Fit time': fit_times, 'Prediction times': pred_times,
'Feature importance': importances})
return results_table
def test_random_forest(x, y):
""" Test random forest with proposed hyperparameters
"""
print(' -----------------------')
print('| Test Random Forest |')
print(' -----------------------\n')
M = len(x.columns)
print('Data attributes: {}'.format(M))
# Hyperparameters to test
NT_values = [1, 10, 25, 50, 75, 100]
F_values = [1, 3, int(math.log2(M) + 1), int(math.sqrt(M))]
# Remove duplicates
F_values = set(F_values)
# Evaluate model with all hyperparameter combinations
results_table = test_forest(x, y, NT_values, F_values, RandomForest)
return results_table
def test_decision_forest(x, y):
""" Test decision forest with proposed hyperparameters
"""
print(' -----------------------')
print('| Test Decision Forest |')
print(' -----------------------\n')
M = len(x.columns)
print('Data attributes: {}'.format(M))
# Hyperparameters to test
NT_values = [1, 10, 25, 50, 75, 100]
F_values = [int(M/4), int(M/2), int(3*M/4), 'Runif(1/M)']
# Remove duplicates
F_values = set(F_values)
# Evaluate model with all hyperparameter combinations
results_table = test_forest(x, y, NT_values, F_values, DecisionForest)
return results_table
def evaluate_model(model, x_train, y_train, x_test, y_test, print_classification_report=True, return_scores=False):
# Train classifier
print('Training model...')
time0 = time.time()
model.fit(x_train, y_train.to_list())
time_fit = time.time() - time0
print('Model trained in {:.1f}s'.format(time_fit))
# Predict test data
time0 = time.time()
y_pred = model.predict(x_test)
time_predict = time.time() - time0
print('Prediction made in {:.1f}s'.format(time_predict))
    if print_classification_report:
print('Classification report:')
print(classification_report(y_test, y_pred))
f1 = f1_score(y_test, y_pred, average='weighted')
acc = accuracy_score(y_test, y_pred)
print('F1-Score: {:.2f}%'.format(f1*100))
print('Accuracy: {:.2f}%\n'.format(acc*100))
if return_scores:
return f1, acc, time_fit, time_predict
if __name__ == '__main__':
small_results_rf, small_results_df, small_cart_acc = test_small()
small_results_rf.to_csv(os.path.join('out', 'small_results_rf.csv'), sep=';')
small_results_df.to_csv(os.path.join('out', 'small_results_df.csv'), sep=';')
medium_results_rf, medium_results_df, medium_cart_acc = test_medium()
medium_results_rf.to_csv(os.path.join('out', 'medium_results_rf.csv'), sep=';')
medium_results_df.to_csv(os.path.join('out', 'medium_results_df.csv'), sep=';')
large_results_rf, large_results_df, large_cart_acc = test_large()
large_results_rf.to_csv(os.path.join('out', 'large_results_rf.csv'), sep=';')
large_results_df.to_csv(os.path.join('out', 'large_results_df.csv'), sep=';') | de | 0.357528 | Supervised and Experiential Learning (SEL) Master in Artificial Intelligence (UPC) PW2 - Implementation of a Decision Forest and Random Forest Author: <NAME> <<EMAIL>> Course: 2020/2021 # Load Heart Disease database from CSV # https://www.kaggle.com/ronitf/heart-disease-uci ##########################################################') ################# Test Small Dataset ####################') ################# Heart Disease ####################') ##########################################################\n') # Load Mammographic Mass dataset ##########################################################') ################# Test Medium Dataset ####################') ################# Mammographic Mass ####################') ##########################################################\n') # Load Rice dataset ##########################################################') ################### Test Large Dataset ###################') ######################## Rice #########################') ##########################################################\n') Split data into 75% train and 25% test Args: x (DataFrame): data features y (array-like): data labels Test CART Args: x ([type]): [description] y ([type]): [description] # Split data into 75% train and 25% test # Split data into 75% train and 25% test # Instantiate Random Forest and evaluate it # Save parameters used and results # Print results # Save results in a table Test random forest with proposed hyperparameters # Hyperparameters to test # Remove duplicates # Evaluate model with all hyperparameter combinations Test decision forest with proposed hyperparameters # Hyperparameters to test # Remove duplicates # Evaluate model with all hyperparameter combinations # Train classifier # Predict test data | 3.318381 | 3 |
maskrcnn_benchmark/structures/image_list.py | pwllr/IDA-3D | 78 | 6629967 | <gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import torch
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
"""
def __init__(self, tensors, image_sizes):
"""
Arguments:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
def to_image_list(tensors, size_divisible=0):
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
if tensors.dim() == 3:
tensors = tensors[None]
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import torch
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
"""
def __init__(self, tensors, image_sizes):
"""
Arguments:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
def to_image_list(tensors, size_divisible=0):
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
if tensors.dim() == 3:
tensors = tensors[None]
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors))) | en | 0.870339 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. Structure that holds a list of images (of possibly varying sizes) as a single tensor. This works by padding the images to the same size, and storing in a field the original sizes of each image Arguments: tensors (tensor) image_sizes (list[tuple[int, int]]) # single tensor shape can be inferred # TODO Ideally, just remove this and let me model handle arbitrary # input sizs | 2.91487 | 3 |
motor_bundle/__init__.py | applauncher-team/motor-bundle | 0 | 6629968 | <reponame>applauncher-team/motor-bundle
from .bundle import MotorBundle
| from .bundle import MotorBundle | none | 1 | 1.184517 | 1 |
|
apple/jwt_apple_signin.py | ranadeepmitra21/WeVoteServer_Ranadeep | 44 | 6629969 | <reponame>ranadeepmitra21/WeVoteServer_Ranadeep
import jwt
from jwt.algorithms import RSAAlgorithm
import requests
from time import time
import json
# import os
from config.base import get_environment_variable
APPLE_PUBLIC_KEY_URL = "https://appleid.apple.com/auth/keys"
APPLE_PUBLIC_KEY = None
APPLE_KEY_CACHE_EXP = 60 * 60 * 24
APPLE_LAST_KEY_FETCH = 0
# https://gist.github.com/davidhariri/b053787aabc9a8a9cc0893244e1549fe
# TODO: Question - could we change the name of this class so it doesn't conflict with the AppleUser in models.py?
class AppleUser(object):
def __init__(self, apple_id, email=None):
self.id = apple_id
self.email = email
self.full_user = False
if email is not None:
self.full_user = True
def __repr__(self):
return "<AppleUser {}>".format(self.id)
def _fetch_apple_public_key():
# Check to see if the public key is unset or is stale before returning
global APPLE_LAST_KEY_FETCH
global APPLE_PUBLIC_KEY
if (APPLE_LAST_KEY_FETCH + APPLE_KEY_CACHE_EXP) < int(time()) or APPLE_PUBLIC_KEY is None:
key_payload = requests.get(APPLE_PUBLIC_KEY_URL).json()
APPLE_PUBLIC_KEY = RSAAlgorithm.from_jwk(json.dumps(key_payload["keys"][0]))
APPLE_LAST_KEY_FETCH = int(time())
return APPLE_PUBLIC_KEY
def _decode_apple_user_token(apple_user_token):
public_key = _fetch_apple_public_key()
try:
# token = jwt.decode(apple_user_token, public_key, audience=os.getenv("APPLE_APP_ID"), algorithm="RS256")
token = jwt.decode(apple_user_token, public_key, audience=get_environment_variable("SOCIAL_AUTH_APPLE_KEY_ID"),
algorithms=["RS256"])
except jwt.exceptions.ExpiredSignatureError as e:
raise Exception("That token has expired")
except jwt.exceptions.InvalidAudienceError as e:
raise Exception("That token's audience did not match")
except Exception as e:
print(e)
raise Exception("An unexpected error occurred")
return token
def retrieve_user(user_token):
token = _decode_apple_user_token(user_token)
apple_user = AppleUser(token["sub"], token.get("email", None))
return apple_user
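# Illustrative usage sketch (not part of the original module). The token below is a
# placeholder: a real identity token comes from the client-side Sign in with Apple flow,
# and verification needs network access to Apple's JWKS endpoint plus the
# SOCIAL_AUTH_APPLE_KEY_ID setting used above as the expected audience.
if __name__ == "__main__":
    example_identity_token = "<identity-token-from-the-client>"  # placeholder only
    try:
        apple_user = retrieve_user(example_identity_token)
        print(apple_user.id, apple_user.email, apple_user.full_user)
    except Exception as err:  # expired/invalid tokens surface here, as raised above
        print("Could not verify token:", err)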
| import jwt
from jwt.algorithms import RSAAlgorithm
import requests
from time import time
import json
# import os
from config.base import get_environment_variable
APPLE_PUBLIC_KEY_URL = "https://appleid.apple.com/auth/keys"
APPLE_PUBLIC_KEY = None
APPLE_KEY_CACHE_EXP = 60 * 60 * 24
APPLE_LAST_KEY_FETCH = 0
# https://gist.github.com/davidhariri/b053787aabc9a8a9cc0893244e1549fe
# TODO: Question - could we change the name of this class so it doesn't conflict with the AppleUser in models.py?
class AppleUser(object):
def __init__(self, apple_id, email=None):
self.id = apple_id
self.email = email
self.full_user = False
if email is not None:
self.full_user = True
def __repr__(self):
return "<AppleUser {}>".format(self.id)
def _fetch_apple_public_key():
# Check to see if the public key is unset or is stale before returning
global APPLE_LAST_KEY_FETCH
global APPLE_PUBLIC_KEY
if (APPLE_LAST_KEY_FETCH + APPLE_KEY_CACHE_EXP) < int(time()) or APPLE_PUBLIC_KEY is None:
key_payload = requests.get(APPLE_PUBLIC_KEY_URL).json()
APPLE_PUBLIC_KEY = RSAAlgorithm.from_jwk(json.dumps(key_payload["keys"][0]))
APPLE_LAST_KEY_FETCH = int(time())
return APPLE_PUBLIC_KEY
def _decode_apple_user_token(apple_user_token):
public_key = _fetch_apple_public_key()
try:
# token = jwt.decode(apple_user_token, public_key, audience=os.getenv("APPLE_APP_ID"), algorithm="RS256")
token = jwt.decode(apple_user_token, public_key, audience=get_environment_variable("SOCIAL_AUTH_APPLE_KEY_ID"),
algorithms=["RS256"])
except jwt.exceptions.ExpiredSignatureError as e:
raise Exception("That token has expired")
except jwt.exceptions.InvalidAudienceError as e:
raise Exception("That token's audience did not match")
except Exception as e:
print(e)
raise Exception("An unexpected error occurred")
return token
def retrieve_user(user_token):
token = _decode_apple_user_token(user_token)
apple_user = AppleUser(token["sub"], token.get("email", None))
return apple_user | en | 0.778512 | # import os # https://gist.github.com/davidhariri/b053787aabc9a8a9cc0893244e1549fe # TODO: Question - could we change the name of this class so it doesn't conflict with the AppleUser in models.py? # Check to see if the public key is unset or is stale before returning # token = jwt.decode(apple_user_token, public_key, audience=os.getenv("APPLE_APP_ID"), algorithm="RS256") | 2.71324 | 3 |
tests/conftest.py | n-wbrown/pytpy | 0 | 6629970 | import logging
import pathlib
import pytest
import pytmc
from pytmc import linter, parser
logger = logging.getLogger(__name__)
TEST_PATH = pathlib.Path(__file__).parent
DBD_FILE = TEST_PATH / 'ads.dbd'
TMC_ROOT = TEST_PATH / 'tmc_files'
TMC_FILES = list(TMC_ROOT.glob('*.tmc'))
INVALID_TMC_FILES = list((TMC_ROOT / 'invalid').glob('*.tmc'))
PROJ_ROOT = TEST_PATH / 'projects'
TSPROJ_PROJECTS = list(str(fn) for fn in TEST_PATH.glob('**/*.tsproj'))
@pytest.fixture(scope='module')
def dbd_file():
return pytmc.linter.DbdFile(DBD_FILE)
@pytest.fixture(params=TMC_FILES,
ids=[f.name for f in TMC_FILES])
def tmc_filename(request):
return request.param
@pytest.fixture(scope='module')
def tmc_xtes_sxr_plc():
"""
generic .tmc file
"""
return TMC_ROOT / "xtes_sxr_plc.tmc"
@pytest.fixture(scope='module')
def tmc_arbiter_plc():
"""
generic .tmc file
"""
return TMC_ROOT / "ArbiterPLC.tmc"
@pytest.fixture(scope='module')
def tmc_pmps_dev_arbiter():
"""
.tmc file containing pinned global variables
"""
path = PROJ_ROOT / "pmps-dev-arbiter/Arbiter/ArbiterPLC/ArbiterPLC.tmc"
return path
@pytest.fixture(params=TSPROJ_PROJECTS)
def project_filename(request):
return request.param
def _generate_project_and_plcs():
for project_filename in TSPROJ_PROJECTS:
project = parser.parse(project_filename)
for plc_name in project.plcs_by_name:
yield project_filename, plc_name
@pytest.fixture(
params=[
pytest.param((project_filename, plc_name), id=f'{project_filename} {plc_name}')
for project_filename, plc_name in _generate_project_and_plcs()
]
)
def project_and_plc(request):
class Item:
project = request.param[0]
plc_name = request.param[1]
return Item
@pytest.fixture(scope='function')
def project(project_filename):
return parser.parse(project_filename)
def lint_record(dbd_file, record):
assert record.valid
linted = linter.lint_db(dbd=dbd_file, db=record.render())
assert not len(linted.errors)
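# Illustrative sketch (not part of the original conftest): a test module in this package
# could combine the fixtures above with lint_record roughly like this (the record-building
# step is elided because the exact pytmc call is an assumption, not taken from this file):
#
#   def test_generated_records_lint_cleanly(project_and_plc, dbd_file):
#       records = ...  # build EPICS records for project_and_plc.plc_name with pytmc
#       for record in records:
#           lint_record(dbd_file, record)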
| import logging
import pathlib
import pytest
import pytmc
from pytmc import linter, parser
logger = logging.getLogger(__name__)
TEST_PATH = pathlib.Path(__file__).parent
DBD_FILE = TEST_PATH / 'ads.dbd'
TMC_ROOT = TEST_PATH / 'tmc_files'
TMC_FILES = list(TMC_ROOT.glob('*.tmc'))
INVALID_TMC_FILES = list((TMC_ROOT / 'invalid').glob('*.tmc'))
PROJ_ROOT = TEST_PATH / 'projects'
TSPROJ_PROJECTS = list(str(fn) for fn in TEST_PATH.glob('**/*.tsproj'))
@pytest.fixture(scope='module')
def dbd_file():
return pytmc.linter.DbdFile(DBD_FILE)
@pytest.fixture(params=TMC_FILES,
ids=[f.name for f in TMC_FILES])
def tmc_filename(request):
return request.param
@pytest.fixture(scope='module')
def tmc_xtes_sxr_plc():
"""
generic .tmc file
"""
return TMC_ROOT / "xtes_sxr_plc.tmc"
@pytest.fixture(scope='module')
def tmc_arbiter_plc():
"""
generic .tmc file
"""
return TMC_ROOT / "ArbiterPLC.tmc"
@pytest.fixture(scope='module')
def tmc_pmps_dev_arbiter():
"""
.tmc file containing pinned global variables
"""
path = PROJ_ROOT / "pmps-dev-arbiter/Arbiter/ArbiterPLC/ArbiterPLC.tmc"
return path
@pytest.fixture(params=TSPROJ_PROJECTS)
def project_filename(request):
return request.param
def _generate_project_and_plcs():
for project_filename in TSPROJ_PROJECTS:
project = parser.parse(project_filename)
for plc_name in project.plcs_by_name:
yield project_filename, plc_name
@pytest.fixture(
params=[
pytest.param((project_filename, plc_name), id=f'{project_filename} {plc_name}')
for project_filename, plc_name in _generate_project_and_plcs()
]
)
def project_and_plc(request):
class Item:
project = request.param[0]
plc_name = request.param[1]
return Item
@pytest.fixture(scope='function')
def project(project_filename):
return parser.parse(project_filename)
def lint_record(dbd_file, record):
assert record.valid
linted = linter.lint_db(dbd=dbd_file, db=record.render())
assert not len(linted.errors)
| en | 0.725882 | generic .tmc file generic .tmc file .tmc file containing pinned global variables | 2.18342 | 2 |
tests/test_reference.py | Nightfurex/build-magic | 10 | 6629971 | """This module hosts unit tests for the reference module."""
import math
import pytest
from build_magic.exc import ValidationError
from build_magic.reference import EnumExt, Parameter
def test_parameter():
"""Verify the Parameter class works correctly."""
class Test(Parameter):
KEY = 'test'
DEFAULT = 3.
OTHER = 'bogus'
param = Test(math.pi)
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == math.pi
assert str(param) == '<Test: test, 3.141592653589793>'
assert param.as_dict() == {'test': math.pi}
assert param.as_tuple() == ('test', math.pi)
assert not param.alias
assert not param.ALIAS
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
assert param.default == 3.
assert param.DEFAULT == 3.
# Make sure that changing KEY doesn't affect the internal key of a Parameter object.
with pytest.raises(AttributeError):
param.KEY = 'other'
def test_parameter_alias():
"""Verify the Parameter class with an alias works correctly."""
class Test(Parameter):
KEY = 'test'
ALIAS = 'pi'
param = Test(math.pi)
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == math.pi
assert str(param) == '<Test: test, pi, 3.141592653589793>'
assert param.as_dict() == {'test': math.pi}
assert param.as_tuple() == ('test', math.pi)
assert param.alias == 'pi'
assert param.ALIAS == 'pi'
assert not param.default
assert not param.DEFAULT
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
with pytest.raises(AttributeError):
param.ALIAS = 'other'
def test_parameter_default():
"""Verify the Parameter class with a default works correctly."""
class Test(Parameter):
KEY = 'test'
DEFAULT = 3.
param = Test()
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == 3.
assert str(param) == '<Test: test, 3.0>'
assert param.as_dict() == {'test': 3.}
assert param.as_tuple() == ('test', 3.)
assert not param.ALIAS
assert not param.alias
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
assert param.default == 3.
assert param.DEFAULT == 3.
with pytest.raises(AttributeError):
param.DEFAULT = 'other'
def test_parameter_enum_value():
"""Verify the Parameter class with an enum value works correctly."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
param = Test(3)
assert param.key == 'test'
assert param.KEY == 'test'
assert param.ENUM == TestEnum
assert param.enum == TestEnum
assert param.value == 3
assert param.as_dict() == {'test': 3}
assert param.as_tuple() == ('test', 3)
assert not param.ALIAS
assert not param.alias
assert not param.pattern
assert not param.PATTERN
def test_parameter_enum_key():
"""Verify the Parameter class with an enum key works correctly."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
param = Test('THREE')
assert param.key == 'test'
assert param.KEY == 'test'
assert param.ENUM == TestEnum
assert param.enum == TestEnum
assert param.value == 3
assert param.as_dict() == {'test': 3}
assert param.as_tuple() == ('test', 3)
assert not param.ALIAS
assert not param.alias
assert not param.pattern
assert not param.PATTERN
def test_parameter_enum_validation_fail():
"""Test the case where the Parameter enum validation fails."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
with pytest.raises(ValidationError, match='Validation failed: Value 7 is not one of'):
Test(7)
def test_parameter_enum_invalid_type():
"""Test the case where the enum attribute is not an Enum."""
class Test(Parameter):
ENUM = 'dummy'
with pytest.raises(TypeError):
Test(3)
def test_parameter_pattern():
"""Verify the Parameter class with a pattern works correctly."""
class Test(Parameter):
KEY = 'Test'
PATTERN = r'solid|liquid|gas'
param = Test('liquid')
assert param.value == 'liquid'
assert not param.default
assert not param.DEFAULT
assert not param.ENUM
assert not param.enum
assert not param.alias
assert not param.ALIAS
assert param.pattern == r'solid|liquid|gas'
assert param.PATTERN == r'solid|liquid|gas'
def test_parameter_pattern_fail():
"""Test the case where value doesn't match PATTERN."""
class Test(Parameter):
KEY = 'Test'
PATTERN = r'solid|liquid|gas'
with pytest.raises(ValidationError, match='Validation failed: Value plasma does not match solid|liquid|gas.'):
Test('plasma')
def test_parameter_pattern_invalid_type():
"""Test the case where PATTERN isn't a string."""
class Test(Parameter):
KEY = 'Test'
PATTERN = 42
with pytest.raises(TypeError):
Test('plasma')
def test_enum_ext():
"""Verify the EnumExt class works correctly."""
class Test(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
assert Test.names() == ('ONE', 'TWO', 'THREE', 'FOUR')
assert Test.values() == (1, 2, 3, 4)
assert Test.available() == (1, 2, 3, 4)
assert Test['THREE'] == Test.THREE
assert Test.THREE.name == 'THREE'
assert Test.THREE.value == 3
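# Illustrative note (not part of the original suite): the same pattern these tests exercise
# is how a concrete parameter would be declared in practice -- a KEY plus one of
# DEFAULT / ALIAS / ENUM / PATTERN, with validation happening at construction time:
#
#   class Phase(Parameter):        # hypothetical example class
#       KEY = 'phase'
#       PATTERN = r'solid|liquid|gas'
#
#   Phase('liquid').as_tuple()     # -> ('phase', 'liquid')
#   Phase('plasma')                # raises ValidationError, as test_parameter_pattern_fail shows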
| """This module hosts unit tests for the reference module."""
import math
import pytest
from build_magic.exc import ValidationError
from build_magic.reference import EnumExt, Parameter
def test_parameter():
"""Verify the Parameter class works correctly."""
class Test(Parameter):
KEY = 'test'
DEFAULT = 3.
OTHER = 'bogus'
param = Test(math.pi)
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == math.pi
assert str(param) == '<Test: test, 3.141592653589793>'
assert param.as_dict() == {'test': math.pi}
assert param.as_tuple() == ('test', math.pi)
assert not param.alias
assert not param.ALIAS
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
assert param.default == 3.
assert param.DEFAULT == 3.
# Make sure that changing KEY doesn't affect the internal key of a Parameter object.
with pytest.raises(AttributeError):
param.KEY = 'other'
def test_parameter_alias():
"""Verify the Parameter class with an alias works correctly."""
class Test(Parameter):
KEY = 'test'
ALIAS = 'pi'
param = Test(math.pi)
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == math.pi
assert str(param) == '<Test: test, pi, 3.141592653589793>'
assert param.as_dict() == {'test': math.pi}
assert param.as_tuple() == ('test', math.pi)
assert param.alias == 'pi'
assert param.ALIAS == 'pi'
assert not param.default
assert not param.DEFAULT
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
with pytest.raises(AttributeError):
param.ALIAS = 'other'
def test_parameter_default():
"""Verify the Parameter class with a default works correctly."""
class Test(Parameter):
KEY = 'test'
DEFAULT = 3.
param = Test()
assert param.KEY == 'test'
assert param.key == 'test'
assert param.value == 3.
assert str(param) == '<Test: test, 3.0>'
assert param.as_dict() == {'test': 3.}
assert param.as_tuple() == ('test', 3.)
assert not param.ALIAS
assert not param.alias
assert not param.ENUM
assert not param.enum
assert not param.pattern
assert not param.PATTERN
assert param.default == 3.
assert param.DEFAULT == 3.
with pytest.raises(AttributeError):
param.DEFAULT = 'other'
def test_parameter_enum_value():
"""Verify the Parameter class with an enum value works correctly."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
param = Test(3)
assert param.key == 'test'
assert param.KEY == 'test'
assert param.ENUM == TestEnum
assert param.enum == TestEnum
assert param.value == 3
assert param.as_dict() == {'test': 3}
assert param.as_tuple() == ('test', 3)
assert not param.ALIAS
assert not param.alias
assert not param.pattern
assert not param.PATTERN
def test_parameter_enum_key():
"""Verify the Parameter class with an enum key works correctly."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
param = Test('THREE')
assert param.key == 'test'
assert param.KEY == 'test'
assert param.ENUM == TestEnum
assert param.enum == TestEnum
assert param.value == 3
assert param.as_dict() == {'test': 3}
assert param.as_tuple() == ('test', 3)
assert not param.ALIAS
assert not param.alias
assert not param.pattern
assert not param.PATTERN
def test_parameter_enum_validation_fail():
"""Test the case where the Parameter enum validation fails."""
class TestEnum(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
class Test(Parameter):
KEY = 'test'
ENUM = TestEnum
with pytest.raises(ValidationError, match='Validation failed: Value 7 is not one of'):
Test(7)
def test_parameter_enum_invalid_type():
"""Test the case where the enum attribute is not an Enum."""
class Test(Parameter):
ENUM = 'dummy'
with pytest.raises(TypeError):
Test(3)
def test_parameter_pattern():
"""Verify the Parameter class with a pattern works correctly."""
class Test(Parameter):
KEY = 'Test'
PATTERN = r'solid|liquid|gas'
param = Test('liquid')
assert param.value == 'liquid'
assert not param.default
assert not param.DEFAULT
assert not param.ENUM
assert not param.enum
assert not param.alias
assert not param.ALIAS
assert param.pattern == r'solid|liquid|gas'
assert param.PATTERN == r'solid|liquid|gas'
def test_parameter_pattern_fail():
"""Test the case where value doesn't match PATTERN."""
class Test(Parameter):
KEY = 'Test'
PATTERN = r'solid|liquid|gas'
with pytest.raises(ValidationError, match='Validation failed: Value plasma does not match solid|liquid|gas.'):
Test('plasma')
def test_parameter_pattern_invalid_type():
"""Test the case where PATTERN isn't a string."""
class Test(Parameter):
KEY = 'Test'
PATTERN = 42
with pytest.raises(TypeError):
Test('plasma')
def test_enum_ext():
"""Verify the EnumExt class works correctly."""
class Test(EnumExt):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
assert Test.names() == ('ONE', 'TWO', 'THREE', 'FOUR')
assert Test.values() == (1, 2, 3, 4)
assert Test.available() == (1, 2, 3, 4)
assert Test['THREE'] == Test.THREE
assert Test.THREE.name == 'THREE'
assert Test.THREE.value == 3
| en | 0.707071 | This module hosts unit tests for the reference module. Verify the Parameter class works correctly. # Make sure that changing KEY doesn't affect the internal key of a Parameter object. Verify the Parameter class with an alias works correctly. Verify the Parameter class with a default works correctly. Verify the Parameter class with an enum value works correctly. Verify the Parameter class with an enum key works correctly. Test the case where the Parameter enum validation fails. Test the case where the enum attribute is not an Enum. Verify the Parameter class with a pattern works correctly. Test the case where value doesn't match PATTERN. Test the case where PATTERN isn't a string. Verify the EnumExt class works correctly. | 2.804728 | 3 |
pyro/distributions/unit.py | ludkinm/pyro | 0 | 6629972 | <gh_stars>0
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from torch.distributions import constraints
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
class Unit(TorchDistribution):
"""
Trivial nonnormalized distribution representing the unit type.
The unit type has a single value with no data, i.e. ``value.numel() == 0``.
This is used for :func:`pyro.factor` statements.
"""
arg_constraints = {'log_factor': constraints.real}
support = constraints.real
def __init__(self, log_factor, validate_args=None):
log_factor = torch.as_tensor(log_factor)
batch_shape = log_factor.shape
event_shape = torch.Size((0,)) # This satisfies .numel() == 0.
self.log_factor = log_factor
super(Unit, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Unit, _instance)
new.log_factor = self.log_factor.expand(batch_shape)
super(Unit, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
return self.log_factor.new_empty(sample_shape + self.shape())
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:-1])
return self.log_factor.expand(shape)
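# Illustrative sketch (not part of the original module): because the event shape is (0,),
# samples carry no data and log_prob simply returns the stored factor, which is what lets
# a pyro.factor("name", log_factor) statement add an arbitrary term to the joint density:
#
#   u = Unit(torch.tensor(-1.5))
#   u.sample().shape        # torch.Size([0]) -- an empty value with no data
#   u.log_prob(u.sample())  # tensor(-1.5000) -- the stored factor itself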
| # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from torch.distributions import constraints
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
class Unit(TorchDistribution):
"""
Trivial nonnormalized distribution representing the unit type.
The unit type has a single value with no data, i.e. ``value.numel() == 0``.
This is used for :func:`pyro.factor` statements.
"""
arg_constraints = {'log_factor': constraints.real}
support = constraints.real
def __init__(self, log_factor, validate_args=None):
log_factor = torch.as_tensor(log_factor)
batch_shape = log_factor.shape
event_shape = torch.Size((0,)) # This satisfies .numel() == 0.
self.log_factor = log_factor
super(Unit, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Unit, _instance)
new.log_factor = self.log_factor.expand(batch_shape)
super(Unit, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
return self.log_factor.new_empty(sample_shape + self.shape())
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:-1])
return self.log_factor.expand(shape) | en | 0.6316 | # Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 Trivial nonnormalized distribution representing the unit type. The unit type has a single value with no data, i.e. ``value.numel() == 0``. This is used for :func:`pyro.factor` statements. # This satisfies .numel() == 0. | 2.389423 | 2 |
cogs/help.py | GamerGG1/Roblox-Discord-Bot | 1 | 6629973 | <gh_stars>1-10
import discord
from discord.ext import commands
from datetime import datetime
def setup(bot):
bot.add_cog(Help(bot))
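# Illustrative note (not part of the original cog): with discord.py 1.x this extension is
# enabled from the bot entry point roughly like so; the default help command must be
# removed (or the bot created with help_command=None) before a custom "help" can register:
#
#   bot = commands.Bot(command_prefix="!", help_command=None)
#   bot.load_extension("cogs.help")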
class Help(commands.Cog):
def __init__(self,bot):
self.bot = bot
def get_commands(self):
helptext = []
for command in self.bot.commands:
            if not command.hidden:
                helptext.append(command)
embed = discord.Embed(colour=self.bot.defaultcolour, title="Help", description="All the commands")
embed.add_field(name="Support",value="For more help, join the official bot support server: https://discord.gg/CTuUKJJ",inline=False)
embed.add_field(name="Commands",value=", ".join([command.name for command in helptext]),inline=False)
return embed
@commands.command(name="help", description="Shows all the bot's command", aliases=["h","commands"],usage="`[command]`")
async def help_command(self, ctx, cmd = None):
if cmd is None:
base_embed = self.get_commands()
await ctx.send(embed=base_embed)
        else:
command = self.bot.get_command(cmd)
if command:
aliases = []
for alias in command.aliases:
aliases.append(alias)
command.description = command.description or "No description provided."
embed = discord.Embed(colour=self.bot.defaultcolour,title="Help",description=f"`{command.name}`: {command.description}", timestamp=datetime.utcnow())
embed.add_field(name="Aliases",value=" ,".join(aliases) if len(aliases) >= 1 else "No aliases",inline=False)
embed.add_field(name="Usage", value=command.usage if command.usage != None else f"`{command.name}`", inline=False)
embed.set_footer(text="<> - Required | [] - Optional")
await ctx.send(embed=embed)
else:
inv_embed = self.get_commands()
await ctx.send("Invalid Command Usage. Activiating Help...")
await ctx.send(embed=inv_embed) | import discord
from discord.ext import commands
from datetime import datetime
def setup(bot):
bot.add_cog(Help(bot))
class Help(commands.Cog):
def __init__(self,bot):
self.bot = bot
def get_commands(self):
helptext = []
for command in self.bot.commands:
            if not command.hidden:
                helptext.append(command)
embed = discord.Embed(colour=self.bot.defaultcolour, title="Help", description="All the commands")
embed.add_field(name="Support",value="For more help, join the official bot support server: https://discord.gg/CTuUKJJ",inline=False)
embed.add_field(name="Commands",value=", ".join([command.name for command in helptext]),inline=False)
return embed
@commands.command(name="help", description="Shows all the bot's command", aliases=["h","commands"],usage="`[command]`")
async def help_command(self, ctx, cmd = None):
if cmd is None:
base_embed = self.get_commands()
await ctx.send(embed=base_embed)
        else:
command = self.bot.get_command(cmd)
if command:
aliases = []
for alias in command.aliases:
aliases.append(alias)
command.description = command.description or "No description provided."
embed = discord.Embed(colour=self.bot.defaultcolour,title="Help",description=f"`{command.name}`: {command.description}", timestamp=datetime.utcnow())
embed.add_field(name="Aliases",value=" ,".join(aliases) if len(aliases) >= 1 else "No aliases",inline=False)
embed.add_field(name="Usage", value=command.usage if command.usage != None else f"`{command.name}`", inline=False)
embed.set_footer(text="<> - Required | [] - Optional")
await ctx.send(embed=embed)
else:
inv_embed = self.get_commands()
await ctx.send("Invalid Command Usage. Activiating Help...")
await ctx.send(embed=inv_embed) | none | 1 | 2.796157 | 3 |
|
process/helpers/__init__.py | ml-boringtao/mlp | 0 | 6629974 | from .csvhdf5dataset import CSVDatasetWriter, CSVDatasetReader | from .csvhdf5dataset import CSVDatasetWriter, CSVDatasetReader | none | 1 | 1.106368 | 1 |
|
masar_optimal/custom/sales_invoice/sales_invoice.py | karamakcsc/masar_optimal | 0 | 6629975 | <filename>masar_optimal/custom/sales_invoice/sales_invoice.py
from __future__ import unicode_literals
import frappe, erpnext
import json
from frappe.utils import flt, cstr, nowdate, comma_and
from frappe import throw, msgprint, _
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from six import iteritems, string_types
@frappe.whitelist()
def get_last_price(item_code):
last_price = frappe.db.sql("""
Select tpii.rate as rate
from `tabSales Invoice Item` tpii
Inner Join `tabSales Invoice` tpi On tpii.parent = tpi.name
where tpii.item_code = '%s' and tpi.docstatus = 1
order By tpii.creation DESC
LIMIT 1""" %(item_code), as_dict=True
)
if last_price:
return last_price[0].rate
else:
return
@frappe.whitelist()
def get_default_location(item_code):
dloc = frappe.db.sql("""
Select item_location
From `tabItem Location` til
Where is_default = 1 and parent = '%s'
LIMIT 1""" %(item_code), as_dict=True)
if dloc:
return dloc[0].item_location
else:
return
| <filename>masar_optimal/custom/sales_invoice/sales_invoice.py
from __future__ import unicode_literals
import frappe, erpnext
import json
from frappe.utils import flt, cstr, nowdate, comma_and
from frappe import throw, msgprint, _
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from six import iteritems, string_types
@frappe.whitelist()
def get_last_price(item_code):
last_price = frappe.db.sql("""
Select tpii.rate as rate
from `tabSales Invoice Item` tpii
Inner Join `tabSales Invoice` tpi On tpii.parent = tpi.name
where tpii.item_code = '%s' and tpi.docstatus = 1
order By tpii.creation DESC
LIMIT 1""" %(item_code), as_dict=True
)
if last_price:
return last_price[0].rate
else:
return
@frappe.whitelist()
def get_default_location(item_code):
dloc = frappe.db.sql("""
Select item_location
From `tabItem Location` til
Where is_default = 1 and parent = '%s'
LIMIT 1""" %(item_code), as_dict=True)
if dloc:
return dloc[0].item_location
else:
return
| en | 0.572892 | Select tpii.rate as rate
from `tabSales Invoice Item` tpii
Inner Join `tabSales Invoice` tpi On tpii.parent = tpi.name
where tpii.item_code = '%s' and tpi.docstatus = 1
order By tpii.creation DESC
LIMIT 1 Select item_location
From `tabItem Location` til
Where is_default = 1 and parent = '%s'
LIMIT 1 | 1.829443 | 2 |
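Both queries above format item_code into the SQL string with the % operator; a sketch of the same lookup using frappe.db.sql's parameter binding instead (a safer, equivalent pattern, shown only as an illustration):
import frappe

@frappe.whitelist()
def get_last_price(item_code):
    # bind item_code as a query parameter rather than interpolating it into the string
    rows = frappe.db.sql(
        """Select tpii.rate as rate
        from `tabSales Invoice Item` tpii
        Inner Join `tabSales Invoice` tpi On tpii.parent = tpi.name
        where tpii.item_code = %s and tpi.docstatus = 1
        order By tpii.creation DESC
        LIMIT 1""",
        (item_code,),
        as_dict=True,
    )
    return rows[0].rate if rows else None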
plugins/wazuh_ossec/komand_wazuh_ossec/actions/agent_restart/action.py | lukaszlaszuk/insightconnect-plugins | 46 | 6629976 | <reponame>lukaszlaszuk/insightconnect-plugins<gh_stars>10-100
import komand
from .schema import AgentRestartInput, AgentRestartOutput
# Custom imports below
import json
import requests
class AgentRestart(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="agent_restart",
description="Restarts all agents, or a specified agent",
input=AgentRestartInput(),
output=AgentRestartOutput(),
)
def run(self, params={}):
if params.get("agent_id"):
api = "/agents/{}/restart".format(params.get("agent_id"))
self.logger.info("Agent %s specified for restart", params.get("agent_id"))
else:
api = "/agents/restart"
self.logger.info("No agent specified, restart for all agents")
url = "{url}{api}".format(url=self.connection.url, api=api)
self.logger.info("Request: %s", url)
try:
resp = requests.put(url, auth=self.connection.creds)
r = resp.json()
self.logger.info("Raw Response: %s", resp.json())
# Rename key to meet spec
if "data" in r:
r["message"] = r.pop("data")
except requests.exceptions.HTTPError:
self.logger.error("Requests: HTTPError: status code %s for %s" % (str(resp.status_code), url))
raise Exception("Requests: Connect: Failed response from server {}".format(url))
self.logger.info("Normalized Response: %s", r)
return r
def test(self):
url = self.connection.url
try:
resp = requests.get(url, auth=self.connection.creds)
r = resp.json()
self.logger.info("Raw Response: %s", r)
except requests.exceptions.HTTPError:
self.logger.error("Requests: HTTPError: status code %s for %s" % (str(resp.status_code), url))
raise Exception("Requests: Connect: Failed response from server {}".format(url))
if r["error"] == 0:
# Example must match spec to succeed due to required's
return {"message": "Restarting all agents", "error": 0}
else:
self.logger.error(r)
raise Exception("Requests: Connect: Failed response from server {}".format(url))
| import komand
from .schema import AgentRestartInput, AgentRestartOutput
# Custom imports below
import json
import requests
class AgentRestart(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="agent_restart",
description="Restarts all agents, or a specified agent",
input=AgentRestartInput(),
output=AgentRestartOutput(),
)
def run(self, params={}):
if params.get("agent_id"):
api = "/agents/{}/restart".format(params.get("agent_id"))
self.logger.info("Agent %s specified for restart", params.get("agent_id"))
else:
api = "/agents/restart"
self.logger.info("No agent specified, restart for all agents")
url = "{url}{api}".format(url=self.connection.url, api=api)
self.logger.info("Request: %s", url)
try:
resp = requests.put(url, auth=self.connection.creds)
r = resp.json()
self.logger.info("Raw Response: %s", resp.json())
# Rename key to meet spec
if "data" in r:
r["message"] = r.pop("data")
except requests.exceptions.HTTPError:
self.logger.error("Requests: HTTPError: status code %s for %s" % (str(resp.status_code), url))
raise Exception("Requests: Connect: Failed response from server {}".format(url))
self.logger.info("Normalized Response: %s", r)
return r
def test(self):
url = self.connection.url
try:
resp = requests.get(url, auth=self.connection.creds)
r = resp.json()
self.logger.info("Raw Response: %s", r)
except requests.exceptions.HTTPError:
self.logger.error("Requests: HTTPError: status code %s for %s" % (str(resp.status_code), url))
raise Exception("Requests: Connect: Failed response from server {}".format(url))
if r["error"] == 0:
# Example must match spec to succeed due to required's
return {"message": "Restarting all agents", "error": 0}
else:
self.logger.error(r)
raise Exception("Requests: Connect: Failed response from server {}".format(url)) | en | 0.854742 | # Custom imports below # Rename key to meet spec # Example must match spec to succeed due to required's | 2.114073 | 2 |
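A standalone sketch of the Wazuh API call the action above wraps, using requests directly; the manager URL, agent id, and credentials are placeholders:
import requests

WAZUH_API = "https://wazuh-manager.example.com:55000"    # placeholder manager endpoint
resp = requests.put(WAZUH_API + "/agents/001/restart",   # or "/agents/restart" for all agents
                    auth=("api_user", "api_password"))   # placeholder credentials
print(resp.json())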
tests/nft20/conftest.py | Amirh24/cross-asset-swap | 1 | 6629977 | <reponame>Amirh24/cross-asset-swap<filename>tests/nft20/conftest.py
import pytest
from brownie import *
@pytest.fixture(scope="module", autouse=True)
def user(deployer):
return TestSwap.deploy(deployer)
@pytest.fixture(scope="module", autouse=True)
def deployer():
return {'from': accounts[1]}
@pytest.fixture(scope="module", autouse=True)
def blockartNft():
return Contract("0xb80fbf6cdb49c33dc6ae4ca11af8ac47b0b4c0f3")
@pytest.fixture(scope="module", autouse=True)
def decentralandNft():
return Contract("0xF87E31492Faf9A91B02Ee0dEAAd50d51d56D5d4d")
@pytest.fixture(scope="module", autouse=True)
def nodeRunnerNft():
return Contract("0x89eE76cC25Fcbf1714ed575FAa6A10202B71c26A")
@pytest.fixture(scope="module", autouse=True)
def hashmaskNft():
return Contract("0xC2C747E0F7004F9E8817Db2ca4997657a7746928")
@pytest.fixture(scope="module", autouse=True)
def dokiNft():
return Contract("0x7CdC0421469398e0F3aA8890693d86c840Ac8931")
@pytest.fixture(scope="module", autouse=True)
def chonkerNft():
return Contract("0xC805658931f959abc01133aa13fF173769133512")
@pytest.fixture(scope="module", autouse=True)
def memeNft():
return Contract("0xe4605d46Fd0B3f8329d936a8b258D69276cBa264")
@pytest.fixture(scope="module", autouse=True)
def dai():
return Contract("0x6B175474E89094C44Da98b954EedeAC495271d0F")
@pytest.fixture(scope="module", autouse=True)
def usdc():
return Contract("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48")
@pytest.fixture(scope="module", autouse=True)
def usdt():
return Contract("0xdAC17F958D2ee523a2206206994597C13D831ec7")
@pytest.fixture(scope="module", autouse=True)
def tusd():
return Contract("0x0000000000085d4780B73119b644AE5ecd22b376")
@pytest.fixture(scope="module", autouse=True)
def eth_bag(accounts):
return accounts.at("0x829BD824B016326A401d083B33D092293333A830", force=True)
@pytest.fixture(scope="module", autouse=True)
def EBA20():
return "0x57c31c042cb2f6a50f3da70ade4fee20c86b7493"
@pytest.fixture(scope="module", autouse=True)
def LAND20():
return "0x1E0CD9506d465937E9d6754e76Cd389A8bD90FBf"
@pytest.fixture(scope="module", autouse=True)
def NDR20():
return "0x303Af77Cf2774AABff12462C110A0CCf971D7DbE"
@pytest.fixture(scope="module", autouse=True)
def DOKI20():
return "0x22C4AD011Cce6a398B15503e0aB64286568933Ed"
@pytest.fixture(scope="module", autouse=True)
def MASK20():
return "0xc2BdE1A2fA26890c8E6AcB10C91CC6D9c11F4a73"
@pytest.fixture(scope="module", autouse=True)
def CHONK20():
return "0xaDBEBbd65a041E3AEb474FE9fe6939577eB2544F"
@pytest.fixture(scope="module", autouse=True)
def MEME20():
return "0x60ACD58d00b2BcC9a8924fdaa54A2F7C0793B3b2"
@pytest.fixture(scope="module", autouse=True)
def ZERO_ADDRESS():
return "0x0000000000000000000000000000000000000000"
@pytest.fixture(scope="module", autouse=True)
def ETH_ADDRESS():
return "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"
@pytest.fixture(scope="module", autouse=True)
def hashmaskIds():
return [3790,3791,3792,3793,3794,3795,3796,3797,3798,3799,3800]
@pytest.fixture(scope="module", autouse=True)
def blockartIds():
return [227, 228]
@pytest.fixture(scope="module", autouse=True)
def swapper(deployer):
return NFT20Swapper.deploy(deployer)
| import pytest
from brownie import *
@pytest.fixture(scope="module", autouse=True)
def user(deployer):
return TestSwap.deploy(deployer)
@pytest.fixture(scope="module", autouse=True)
def deployer():
return {'from': accounts[1]}
@pytest.fixture(scope="module", autouse=True)
def blockartNft():
return Contract("0xb80fbf6cdb49c33dc6ae4ca11af8ac47b0b4c0f3")
@pytest.fixture(scope="module", autouse=True)
def decentralandNft():
return Contract("0xF87E31492Faf9A91B02Ee0dEAAd50d51d56D5d4d")
@pytest.fixture(scope="module", autouse=True)
def nodeRunnerNft():
return Contract("0x89eE76cC25Fcbf1714ed575FAa6A10202B71c26A")
@pytest.fixture(scope="module", autouse=True)
def hashmaskNft():
return Contract("0xC2C747E0F7004F9E8817Db2ca4997657a7746928")
@pytest.fixture(scope="module", autouse=True)
def dokiNft():
return Contract("0x7CdC0421469398e0F3aA8890693d86c840Ac8931")
@pytest.fixture(scope="module", autouse=True)
def chonkerNft():
return Contract("0xC805658931f959abc01133aa13fF173769133512")
@pytest.fixture(scope="module", autouse=True)
def memeNft():
return Contract("0xe4605d46Fd0B3f8329d936a8b258D69276cBa264")
@pytest.fixture(scope="module", autouse=True)
def dai():
return Contract("0x6B175474E89094C44Da98b954EedeAC495271d0F")
@pytest.fixture(scope="module", autouse=True)
def usdc():
return Contract("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48")
@pytest.fixture(scope="module", autouse=True)
def usdt():
return Contract("0xdAC17F958D2ee523a2206206994597C13D831ec7")
@pytest.fixture(scope="module", autouse=True)
def tusd():
return Contract("0x0000000000085d4780B73119b644AE5ecd22b376")
@pytest.fixture(scope="module", autouse=True)
def eth_bag(accounts):
return accounts.at("0x829BD824B016326A401d083B33D092293333A830", force=True)
@pytest.fixture(scope="module", autouse=True)
def EBA20():
return "0x57c31c042cb2f6a50f3da70ade4fee20c86b7493"
@pytest.fixture(scope="module", autouse=True)
def LAND20():
return "0x1E0CD9506d465937E9d6754e76Cd389A8bD90FBf"
@pytest.fixture(scope="module", autouse=True)
def NDR20():
return "0x303Af77Cf2774AABff12462C110A0CCf971D7DbE"
@pytest.fixture(scope="module", autouse=True)
def DOKI20():
return "0x22C4AD011Cce6a398B15503e0aB64286568933Ed"
@pytest.fixture(scope="module", autouse=True)
def MASK20():
return "0xc2BdE1A2fA26890c8E6AcB10C91CC6D9c11F4a73"
@pytest.fixture(scope="module", autouse=True)
def CHONK20():
return "0xaDBEBbd65a041E3AEb474FE9fe6939577eB2544F"
@pytest.fixture(scope="module", autouse=True)
def MEME20():
return "0x60ACD58d00b2BcC9a8924fdaa54A2F7C0793B3b2"
@pytest.fixture(scope="module", autouse=True)
def ZERO_ADDRESS():
return "0x0000000000000000000000000000000000000000"
@pytest.fixture(scope="module", autouse=True)
def ETH_ADDRESS():
return "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"
@pytest.fixture(scope="module", autouse=True)
def hashmaskIds():
return [3790,3791,3792,3793,3794,3795,3796,3797,3798,3799,3800]
@pytest.fixture(scope="module", autouse=True)
def blockartIds():
return [227, 228]
@pytest.fixture(scope="module", autouse=True)
def swapper(deployer):
return NFT20Swapper.deploy(deployer) | none | 1 | 1.905525 | 2 |
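A sketch of a test that would consume the fixtures above on the mainnet fork; pytest injects them by argument name, and ownerOf is the standard ERC-721 call (the specific assertion is illustrative only):
def test_hashmask_has_owner(hashmaskNft, hashmaskIds):
    # the contract handle points at the Hashmasks collection on the forked chain
    owner = hashmaskNft.ownerOf(hashmaskIds[0])
    assert owner != "0x0000000000000000000000000000000000000000"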
|
src/samples/test.py | DanEdens/azure-devops-python-samples | 65 | 6629978 | <reponame>DanEdens/azure-devops-python-samples<gh_stars>10-100
"""
TEST samples
"""
import datetime
import logging
from samples import resource
from utils import emit
logger = logging.getLogger(__name__)
def get_project_names(context):
core_client = context.connection.clients.get_core_client()
return (project.name for project in core_client.get_projects())
@resource("test_plans")
def get_plans(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for plan in test_client.get_plans(project):
emit("Test Plan {}: {} ({})".format(plan.id, plan.name, plan.area.name))
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_suites")
def get_test_suites_for_plan(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for plan in test_client.get_plans(project):
for suite in test_client.get_test_suites_for_plan(project, plan.id):
emit(
"Test Suite {}: {} ({}.{})".format(
suite.id, suite.name, plan.id, plan.name
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_runs")
def get_test_runs(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for run in test_client.get_test_runs(project, top=16):
emit(
"Test Run {}: {} => {} ({})".format(
run.id, run.name, run.state, project
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_results")
def get_test_results(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for run in test_client.get_test_runs(project, top=10):
# Limiting Test Results is not something one shall do!
for res in test_client.get_test_results(project, run.id, top=3):
tc = res.test_case
tester = res.run_by.display_name
emit(
"Test Result {}: {} => {} by {} ({})".format(
run.id, tc.name, res.outcome, tester, project
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
| """
TEST samples
"""
import datetime
import logging
from samples import resource
from utils import emit
logger = logging.getLogger(__name__)
def get_project_names(context):
core_client = context.connection.clients.get_core_client()
return (project.name for project in core_client.get_projects())
@resource("test_plans")
def get_plans(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for plan in test_client.get_plans(project):
emit("Test Plan {}: {} ({})".format(plan.id, plan.name, plan.area.name))
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_suites")
def get_test_suites_for_plan(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for plan in test_client.get_plans(project):
for suite in test_client.get_test_suites_for_plan(project, plan.id):
emit(
"Test Suite {}: {} ({}.{})".format(
suite.id, suite.name, plan.id, plan.name
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_runs")
def get_test_runs(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for run in test_client.get_test_runs(project, top=16):
emit(
"Test Run {}: {} => {} ({})".format(
run.id, run.name, run.state, project
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e))
@resource("test_results")
def get_test_results(context):
test_client = context.connection.clients.get_test_client()
for project in get_project_names(context):
try:
for run in test_client.get_test_runs(project, top=10):
# Limiting Test Results is not something one shall do!
for res in test_client.get_test_results(project, run.id, top=3):
tc = res.test_case
tester = res.run_by.display_name
emit(
"Test Result {}: {} => {} by {} ({})".format(
run.id, tc.name, res.outcome, tester, project
)
)
except Exception as e:
emit("Project '{}' raised error: {}".format(project, e)) | en | 0.825337 | TEST samples # Limiting Test Results is not something one shall do! | 2.328908 | 2 |
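The samples above expect a context object exposing an authenticated Connection; a minimal sketch of building one with the azure-devops SDK (organization URL and token value are placeholders):
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

class Context:
    """Bare holder mirroring what the samples expect."""

context = Context()
context.connection = Connection(
    base_url="https://dev.azure.com/your-organization",
    creds=BasicAuthentication("", "PERSONAL_ACCESS_TOKEN"),
)
# get_plans(context) or get_test_runs(context) can then be called as defined above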
tests/test_distribution.py | josephsnyder/CastXML-python-distributions | 0 | 6629979 | <reponame>josephsnyder/CastXML-python-distributions
import os
import pytest
from path import Path
DIST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../dist'))
def _check_castxml_install(virtualenv, tmpdir):
expected_version = "0.3.4"
for executable_name in ["castxml"]:
output = virtualenv.run(
"%s --version" % executable_name, capture=True).splitlines()[0]
assert output == "%s version %s" % (executable_name, expected_version)
@pytest.mark.skipif(not Path(DIST_DIR).exists(), reason="dist directory does not exist")
def test_wheel(virtualenv, tmpdir):
wheels = Path(DIST_DIR).files(match="*.whl")
if not wheels:
pytest.skip("no wheel available")
assert len(wheels) == 1
print(wheels)
virtualenv.run("pip install %s" % wheels[0])
_check_castxml_install(virtualenv, tmpdir)
| import os
import pytest
from path import Path
DIST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../dist'))
def _check_castxml_install(virtualenv, tmpdir):
expected_version = "0.3.4"
for executable_name in ["castxml"]:
output = virtualenv.run(
"%s --version" % executable_name, capture=True).splitlines()[0]
assert output == "%s version %s" % (executable_name, expected_version)
@pytest.mark.skipif(not Path(DIST_DIR).exists(), reason="dist directory does not exist")
def test_wheel(virtualenv, tmpdir):
wheels = Path(DIST_DIR).files(match="*.whl")
if not wheels:
pytest.skip("no wheel available")
assert len(wheels) == 1
print(wheels)
virtualenv.run("pip install %s" % wheels[0])
_check_castxml_install(virtualenv, tmpdir) | none | 1 | 2.324274 | 2 |
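The install check reduces to one subprocess call; a standalone sketch of the same probe, with the expected version string taken from the test above:
import subprocess

out = subprocess.run(["castxml", "--version"], capture_output=True, text=True)
assert out.stdout.splitlines()[0] == "castxml version 0.3.4"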
|
static_count/run.py | JiangYee/key_phrase_extract | 1 | 6629980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import time
from pandas.core.frame import DataFrame
from nltk import ngrams
from nltk.text import TextCollection
from static_count import preprocess, count, tf_idf, draw
json_file = '../data/test_json'
# json_file = '../data/all_title_abstract_keyword_clean.json'
json_obj = preprocess.load_json(json_file)
abstract_list, keyword_list, _ = preprocess.get_info(json_obj)
# print(keyword_list[144])
# print(len(keyword_list[144]))
# print(len(keyword_list))
# # Count whether each keyword is in or not in the abstract
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=False, isAnd=False)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(in_num_list)
# print(out_num_list)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ff1.xlsx')
# print('count_ff1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=True)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ft1.xlsx')
# print('count_ft1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=False)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_tff1.xlsx')
# print('count_tf1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=True)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ttf1.xlsx')
# print('count_tt1 over!')
# Compute the count_in_out percentage for each article
# in_out_persent = count.in_out_persents('./4_25/count_ff1.xlsx')
# in_persent = in_out_persent[0]
# out_persent = in_out_persent[1]
# print(in_persent[:10])
# print(out_persent[:10])
# in_persent_persent = count.get_percentage(in_persent[0:10])
# print(in_persent_persent)
# # Compute keyword lengths
# # kw_len= count.count_kw_len(keyword_list[4])
# # print(keyword_list[4])
# # print(np.average(kw_len))
# #
# # print('Computing keyword lengths......')
# n_kw_len = count.count_n_kw_len(keyword_list)
# # print('exp_num', exp_num)
# flatten =count.flatten_len(n_kw_len)
# # print(len(flatten))
# preprocess.save(flatten,'flatten_len_tokenize_new')
# # data=DataFrame(flatten)
# # DataFrame(data).to_excel('flatten_len.xlsx')
# Compute percentages
# print('Computing percentages...')
# flatten_len_tokenize = preprocess.read('flatten_len_tokenize').tolist()
# persents_dict = count.get_percentage(flatten_len_tokenize)
# print(persents_dict)
# preprocess.save(persents_dict, 'persents_len_tokenize')
# data=DataFrame({'length':list(persents_dict.keys()), 'percent':list(persents_dict.values())})
# DataFrame(data).to_excel('persents_len_tokenize.xlsx')
# preprocess.save(n_kw_len,'len')
# data=DataFrame({'keyword': keyword_list, 'len':n_kw_len})
# data=DataFrame(n_kw_len)
# DataFrame(data).to_excel('len0.xlsx')
# print(n_kw_len)
# avgs = [np.average(kw_len) for kw_len in n_kw_len]
# print(avgs)
# print(np.average(avgs))
# len_data = DataFrame(n_kw_len)
# DataFrame(len_data).to_excel('len.xlsx')
# n_grams = count.n_gram(abstract_list[0],2)
# for gram in n_grams:
# if 'application' in gram: # a word of the keyword occurs in the n_gram
# print(gram)
# Calculate tf-idf
# using word as the unit:
print("Computing tf-idf")
keyword_list = preprocess.stemming_all_keyword_list(keyword_list)
abstract_list = [preprocess.stemming_list(abs) for abs in abstract_list]
start_time = time.time()
corpus1 = TextCollection(abstract_list)
preprocess.save(corpus1,'corpus1')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus1 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw(kw_list_stem,corpus1) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf1')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('1 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all(abstract_list, corpus1)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf1')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('1 abstract tfidf computed, time elapsed:',str(time_used))
# print('kw_tfidf_dict_list: ', kw_tfidf_dict_list)
# print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1])
rank_list1 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list1,'tfidf_rank1')
print('rank_list1: ',rank_list1)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('1 keyword rank computed, time elapsed:',str(time_used))
print('1 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,2)
corpus2 = TextCollection(n_gram_lists)
preprocess.save(corpus2,'corpus2')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus2 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus2) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf2')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('2 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus2)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf2')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('2 abstract tfidf computed, time elapsed:',str(time_used))
# print('kw_tfidf_dict_list[1]: ', kw_tfidf_dict_list[1])
# print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1])
rank_list2 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
# # deduplicate the tf-idf values of the words in the abstract
# tf_idf_abs = list(set(tf_idf_abs_list[1]))
# # sort the tf-idf values of the words in the abstract in descending order
# tf_idf_abs.sort(reverse=True)
# print('tf_idf_abs_list[1]: ',tf_idf_abs)
preprocess.save(rank_list2,'tfidf_rank2')
print('rank_list2: ',rank_list2)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('2 keyword rank computed, time elapsed:',str(time_used))
print('2 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,3)
corpus3 = TextCollection(n_gram_lists)
preprocess.save(corpus3,'corpus3')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus3 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus3) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf3')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('3 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus3)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf3')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('3 abstract tfidf computed, time elapsed:',str(time_used))
rank_list3 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list3,'tfidf_rank3')
print('rank_list3: ',rank_list3)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('3 keyword rank computed, time elapsed:',str(time_used))
print('3 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,4)
corpus4 = TextCollection(n_gram_lists)
preprocess.save(corpus4,'corpus4')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus4 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus4) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf4')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('4 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus4)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf4')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('4 abstract tfidf computed, time elapsed:',str(time_used))
rank_list4 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list4,'tfidf_rank4')
print('rank_list4: ',rank_list4)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('4 keyword rank computed, time elapsed:',str(time_used))
print('4 over======================================')
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,5)
corpus5 = TextCollection(n_gram_lists)
preprocess.save(corpus5,'corpus5')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus5 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus5) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf5s')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('5 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus5)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf5')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('5 abstract tfidf computed, time elapsed:',str(time_used))
rank_list5 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list5,'tfidf_rank5')
print('rank_list5: ',rank_list5)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('5 keyword rank computed, time elapsed:',str(time_used))
print('5 over======================================')
# tf_idf1 = tf_idf.tf_idf_abs(abstract_list[0], corpus1)
# print('abstract tf-idf computed')
# keyword_list1 = [preprocess.stemming_str(keyword) for keyword in keyword_list[0]] #keyword_list already stemmed
# print(keyword_list1)
# print(abstract_list[0])
# kw_tf_idf1 = tf_idf.tf_idf_kw(keyword_list1,corpus1)
# tf_idf1.sort(reverse=True)
# # print(tf_idf1)
# print(kw_tf_idf1)
# for keyword in kw_tf_idf1:
# print(keyword, tf_idf1.index(kw_tf_idf1.get(keyword)))
# corpus1 = tf_idf.get_corpus_word(abstract_list)
# all_tf_idf1 = tf_idf.tf_idf_abs_all(abstract_list,corpus1)
# data_tf_idf = DataFrame(all_tf_idf)
# data_tf_idf = DataFrame(np.array(all_tf_idf)[:,1])
# DataFrame(data_tf_idf).to_excel('tf-idf_test.xlsx')
# tf-idf of the keywords
# tf_idf_kw = count.tf_idf_kw(keyword_list[0], corpus0)
# print(tf_idf_kw)
# using n_gram as the unit:
# n_grams = tf_idf.n_gram(abstract_list[0],2)
# abs_n_gram_lsit = tf_idf.get_n_gram_list(abstract_list,2)
# tfidf1 = tf_idf.tf_idf_abs_n_gram(n_grams,abs_n_gram_lsit)
# data_tf_idf = DataFrame({'2-gram': n_grams, 'tf-idf':tfidf1})
# DataFrame(data_tf_idf).to_excel('tf-idf_2gram.xlsx')
# print(tfidf1) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import time
from pandas.core.frame import DataFrame
from nltk import ngrams
from nltk.text import TextCollection
from static_count import preprocess, count, tf_idf, draw
json_file = '../data/test_json'
# json_file = '../data/all_title_abstract_keyword_clean.json'
json_obj = preprocess.load_json(json_file)
abstract_list, keyword_list, _ = preprocess.get_info(json_obj)
# print(keyword_list[144])
# print(len(keyword_list[144]))
# print(len(keyword_list))
# # Count whether each keyword is in or not in the abstract
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=False, isAnd=False)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(in_num_list)
# print(out_num_list)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ff1.xlsx')
# print('count_ff1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=True)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ft1.xlsx')
# print('count_ft1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=False)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_tff1.xlsx')
# print('count_tf1 over!')
#
# count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=True)
# in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results)
# count_dict = {'in':in_num_list,'out':out_num_list}
# print(count_results)
# print(avg_in,avg_out)
# data=DataFrame(count_dict) # convert the dict into a DataFrame
# DataFrame(data).to_excel('count_ttf1.xlsx')
# print('count_tt1 over!')
# Compute the count_in_out percentage for each article
# in_out_persent = count.in_out_persents('./4_25/count_ff1.xlsx')
# in_persent = in_out_persent[0]
# out_persent = in_out_persent[1]
# print(in_persent[:10])
# print(out_persent[:10])
# in_persent_persent = count.get_percentage(in_persent[0:10])
# print(in_persent_persent)
# # Compute keyword lengths
# # kw_len= count.count_kw_len(keyword_list[4])
# # print(keyword_list[4])
# # print(np.average(kw_len))
# #
# # print('Computing keyword lengths......')
# n_kw_len = count.count_n_kw_len(keyword_list)
# # print('exp_num', exp_num)
# flatten =count.flatten_len(n_kw_len)
# # print(len(flatten))
# preprocess.save(flatten,'flatten_len_tokenize_new')
# # data=DataFrame(flatten)
# # DataFrame(data).to_excel('flatten_len.xlsx')
# Compute percentages
# print('Computing percentages...')
# flatten_len_tokenize = preprocess.read('flatten_len_tokenize').tolist()
# persents_dict = count.get_percentage(flatten_len_tokenize)
# print(persents_dict)
# preprocess.save(persents_dict, 'persents_len_tokenize')
# data=DataFrame({'length':list(persents_dict.keys()), 'percent':list(persents_dict.values())})
# DataFrame(data).to_excel('persents_len_tokenize.xlsx')
# preprocess.save(n_kw_len,'len')
# data=DataFrame({'keyword': keyword_list, 'len':n_kw_len})
# data=DataFrame(n_kw_len)
# DataFrame(data).to_excel('len0.xlsx')
# print(n_kw_len)
# avgs = [np.average(kw_len) for kw_len in n_kw_len]
# print(avgs)
# print(np.average(avgs))
# len_data = DataFrame(n_kw_len)
# DataFrame(len_data).to_excel('len.xlsx')
# n_grams = count.n_gram(abstract_list[0],2)
# for gram in n_grams:
# if 'application' in gram: # a word of the keyword occurs in the n_gram
# print(gram)
# Calculate tf-idf
# using word as the unit:
print("Computing tf-idf")
keyword_list = preprocess.stemming_all_keyword_list(keyword_list)
abstract_list = [preprocess.stemming_list(abs) for abs in abstract_list]
start_time = time.time()
corpus1 = TextCollection(abstract_list)
preprocess.save(corpus1,'corpus1')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus1 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw(kw_list_stem,corpus1) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf1')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('1 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all(abstract_list, corpus1)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf1')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('1 abstract tfidf computed, time elapsed:',str(time_used))
# print('kw_tfidf_dict_list: ', kw_tfidf_dict_list)
# print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1])
rank_list1 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list1,'tfidf_rank1')
print('rank_list1: ',rank_list1)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('1 keyword rank computed, time elapsed:',str(time_used))
print('1 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,2)
corpus2 = TextCollection(n_gram_lists)
preprocess.save(corpus2,'corpus2')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus2 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus2) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf2')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('2 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus2)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf2')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('2 abstract tfidf computed, time elapsed:',str(time_used))
# print('kw_tfidf_dict_list[1]: ', kw_tfidf_dict_list[1])
# print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1])
rank_list2 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
# # deduplicate the tf-idf values of the words in the abstract
# tf_idf_abs = list(set(tf_idf_abs_list[1]))
# # sort the tf-idf values of the words in the abstract in descending order
# tf_idf_abs.sort(reverse=True)
# print('tf_idf_abs_list[1]: ',tf_idf_abs)
preprocess.save(rank_list2,'tfidf_rank2')
print('rank_list2: ',rank_list2)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('2 keyword rank computed, time elapsed:',str(time_used))
print('2 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,3)
corpus3 = TextCollection(n_gram_lists)
preprocess.save(corpus3,'corpus3')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus3 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus3) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf3')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('3 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus3)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf3')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('3 abstract tfidf computed, time elapsed:',str(time_used))
rank_list3 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list3,'tfidf_rank3')
print('rank_list3: ',rank_list3)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('3 keyword rank computed, time elapsed:',str(time_used))
print('3 over======================================')
start_time = time.time()
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,4)
corpus4 = TextCollection(n_gram_lists)
preprocess.save(corpus4,'corpus4')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus4 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus4) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf4')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('4 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus4)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf4')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('4 abstract tfidf computed, time elapsed:',str(time_used))
rank_list4 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list4,'tfidf_rank4')
print('rank_list4: ',rank_list4)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('4 keyword rank computed, time elapsed:',str(time_used))
print('4 over======================================')
n_gram_lists = tf_idf.get_n_gram_list(abstract_list,5)
corpus5 = TextCollection(n_gram_lists)
preprocess.save(corpus5,'corpus5')
end_time1 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time1 - start_time)))
print('corpus5 built, time elapsed:',str(time_used))
kw_tfidf_dict_list = [tf_idf.tf_idf_kw_n_gram(kw_list_stem,corpus5) for kw_list_stem in keyword_list]
preprocess.save(kw_tfidf_dict_list,'all_kw_tfidf5s')
end_time2 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time2 - end_time1)))
print('5 keyword tfidf computed, time elapsed:',str(time_used))
tf_idf_abs_list = tf_idf.tf_idf_abs_all_n_gram(n_gram_lists, corpus5)
preprocess.save(tf_idf_abs_list, 'all_abs_tfidf5')
end_time3 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time3 - end_time2)))
print('5 abstract tfidf computed, time elapsed:',str(time_used))
rank_list5 = tf_idf.get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list)
preprocess.save(rank_list5,'tfidf_rank5')
print('rank_list5: ',rank_list5)
end_time4 = time.time()
time_used = datetime.timedelta(seconds=int(round(end_time4 - end_time3)))
print('5 keyword rank computed, time elapsed:',str(time_used))
print('5 over======================================')
# tf_idf1 = tf_idf.tf_idf_abs(abstract_list[0], corpus1)
# print('abstract tf-idf computed')
# keyword_list1 = [preprocess.stemming_str(keyword) for keyword in keyword_list[0]] #keyword_list already stemmed
# print(keyword_list1)
# print(abstract_list[0])
# kw_tf_idf1 = tf_idf.tf_idf_kw(keyword_list1,corpus1)
# tf_idf1.sort(reverse=True)
# # print(tf_idf1)
# print(kw_tf_idf1)
# for keyword in kw_tf_idf1:
# print(keyword, tf_idf1.index(kw_tf_idf1.get(keyword)))
# corpus1 = tf_idf.get_corpus_word(abstract_list)
# all_tf_idf1 = tf_idf.tf_idf_abs_all(abstract_list,corpus1)
# data_tf_idf = DataFrame(all_tf_idf)
# data_tf_idf = DataFrame(np.array(all_tf_idf)[:,1])
# DataFrame(data_tf_idf).to_excel('tf-idf_test.xlsx')
# tf-idf of the keywords
# tf_idf_kw = count.tf_idf_kw(keyword_list[0], corpus0)
# print(tf_idf_kw)
# using n_gram as the unit:
# n_grams = tf_idf.n_gram(abstract_list[0],2)
# abs_n_gram_lsit = tf_idf.get_n_gram_list(abstract_list,2)
# tfidf1 = tf_idf.tf_idf_abs_n_gram(n_grams,abs_n_gram_lsit)
# data_tf_idf = DataFrame({'2-gram': n_grams, 'tf-idf':tfidf1})
# DataFrame(data_tf_idf).to_excel('tf-idf_2gram.xlsx')
# print(tfidf1) | en | 0.230682 | #!/usr/bin/env python # -*- coding: utf-8 -*- # json_file = '../data/all_title_abstract_keyword_clean.json' # print(keyword_list[144]) # print(len(keyword_list[144])) # print(len(keyword_list)) # # ็ป่ฎกๅ
ณ้ฎ่ฏin or not in # count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=False, isAnd=False) # in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results) # count_dict = {'in':in_num_list,'out':out_num_list} # print(count_results) # print(in_num_list) # print(out_num_list) # print(avg_in,avg_out) # data=DataFrame(count_dict) #ๅฐๅญๅ
ธ่ฝฌๆขๆไธบๆฐๆฎๆก # DataFrame(data).to_excel('count_ff1.xlsx') # print('count_ff1 over!') # # count_results = count.count_in_all(abstract_list, keyword_list, isPart=False,isStem=True) # in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results) # count_dict = {'in':in_num_list,'out':out_num_list} # print(count_results) # print(avg_in,avg_out) # data=DataFrame(count_dict) #ๅฐๅญๅ
ธ่ฝฌๆขๆไธบๆฐๆฎๆก # DataFrame(data).to_excel('count_ft1.xlsx') # print('count_ft1 over!') # # count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=False) # in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results) # count_dict = {'in':in_num_list,'out':out_num_list} # print(count_results) # print(avg_in,avg_out) # data=DataFrame(count_dict) #ๅฐๅญๅ
ธ่ฝฌๆขๆไธบๆฐๆฎๆก # DataFrame(data).to_excel('count_tff1.xlsx') # print('count_tf1 over!') # # count_results = count.count_in_all(abstract_list, keyword_list, isPart=True,isStem=True) # in_num_list, out_num_list, avg_in, avg_out = count.cal_in_out_avg(count_results) # count_dict = {'in':in_num_list,'out':out_num_list} # print(count_results) # print(avg_in,avg_out) # data=DataFrame(count_dict) #ๅฐๅญๅ
ธ่ฝฌๆขๆไธบๆฐๆฎๆก # DataFrame(data).to_excel('count_ttf1.xlsx') # print('count_tt1 over!') # ็ป่ฎกๆฏ็ฏๆ็ซ ็count_in_out็พๅๆฏ # in_out_persent = count.in_out_persents('./4_25/count_ff1.xlsx') # in_persent = in_out_persent[0] # out_persent = in_out_persent[1] # print(in_persent[:10]) # print(out_persent[:10]) # in_persent_persent = count.get_percentage(in_persent[0:10]) # print(in_persent_persent) # # ็ป่ฎกๅ
ณ้ฎ่ฏ้ฟๅบฆ # # kw_len= count.count_kw_len(keyword_list[4]) # # print(keyword_list[4]) # # print(np.average(kw_len)) # # # # print('็ป่ฎกๅ
ณ้ฎ่ฏ้ฟๅบฆ......') # n_kw_len = count.count_n_kw_len(keyword_list) # # print('exp_num', exp_num) # flatten =count.flatten_len(n_kw_len) # # print(len(flatten)) # preprocess.save(flatten,'flatten_len_tokenize_new') # # data=DataFrame(flatten) # # DataFrame(data).to_excel('flatten_len.xlsx') # ็ป่ฎก็พๅๆฏ # print('็ป่ฎก็พๅๆฏ...') # flatten_len_tokenize = preprocess.read('flatten_len_tokenize').tolist() # persents_dict = count.get_percentage(flatten_len_tokenize) # print(persents_dict) # preprocess.save(persents_dict, 'persents_len_tokenize') # data=DataFrame({'length':list(persents_dict.keys()), 'percent':list(persents_dict.values())}) # DataFrame(data).to_excel('persents_len_tokenize.xlsx') # preprocess.save(n_kw_len,'len') # data=DataFrame({'keyword': keyword_list, 'len':n_kw_len}) # data=DataFrame(n_kw_len) # DataFrame(data).to_excel('len0.xlsx') # print(n_kw_len) # avgs = [np.average(kw_len) for kw_len in n_kw_len] # print(avgs) # print(np.average(avgs)) # len_data = DataFrame(n_kw_len) # DataFrame(len_data).to_excel('len.xlsx') # n_grams = count.n_gram(abstract_list[0],2) # for gram in n_grams: # if 'application' in gram: # keywordไธญ็wordๅญๅจไบn_gramไธญ # print(gram) # ่ฎก็ฎtf-idf # wordไธบๅไฝ๏ผ # print('kw_tfidf_dict_list: ', kw_tfidf_dict_list) # print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1]) # print('kw_tfidf_dict_list[1]: ', kw_tfidf_dict_list[1]) # print('tf_idf_abs_list[1]: ',tf_idf_abs_list[1]) # # abstractไธญ่ฏ็tf - idfๅป้ # tf_idf_abs = list(set(tf_idf_abs_list[1])) # # abstractไธญ่ฏ็tf-idfๅผ้ๅบๆๅบ # tf_idf_abs.sort(reverse=True) # print('tf_idf_abs_list[1]: ',tf_idf_abs) # tf_idf1 = tf_idf.tf_idf_abs(abstract_list[0], corpus1) # print('abstract็tf-ifd่ฎก็ฎๅฎๆฏ') # keyword_list1 = [preprocess.stemming_str(keyword) for keyword in keyword_list[0]] #keyword_listๅทฒ็ปstemming # print(keyword_list1) # print(abstract_list[0]) # kw_tf_idf1 = tf_idf.tf_idf_kw(keyword_list1,corpus1) # tf_idf1.sort(reverse=True) # # print(tf_idf1) # print(kw_tf_idf1) # for keyword in kw_tf_idf1: # print(keyword, tf_idf1.index(kw_tf_idf1.get(keyword))) # corpus1 = tf_idf.get_corpus_word(abstract_list) # all_tf_idf1 = tf_idf.tf_idf_abs_all(abstract_list,corpus1) # data_tf_idf = DataFrame(all_tf_idf) # data_tf_idf = DataFrame(np.array(all_tf_idf)[:,1]) # DataFrame(data_tf_idf).to_excel('tf-idf_test.xlsx') # ๅ
ณ้ฎ่ฏ็tf-idf # tf_idf_kw = count.tf_idf_kw(keyword_list[0], corpus0) # print(tf_idf_kw) # ไปฅn_gramไธบๅไฝ๏ผ # n_grams = tf_idf.n_gram(abstract_list[0],2) # abs_n_gram_lsit = tf_idf.get_n_gram_list(abstract_list,2) # tfidf1 = tf_idf.tf_idf_abs_n_gram(n_grams,abs_n_gram_lsit) # data_tf_idf = DataFrame({'2-gram': n_grams, 'tf-idf':tfidf1}) # DataFrame(data_tf_idf).to_excel('tf-idf_2gram.xlsx') # print(tfidf1) | 2.557335 | 3 |
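The script above relies entirely on nltk's TextCollection for its tf-idf scores; a minimal illustration of that API on invented toy documents:
from nltk.text import TextCollection

docs = [["keyword", "extraction", "from", "paper", "abstracts"],
        ["tf", "idf", "ranks", "candidate", "keyword", "phrases"]]
corpus = TextCollection(docs)
# tf_idf(term, text) scores a term inside one document against the whole collection
print(corpus.tf_idf("keyword", docs[0]))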
Hapi/calibration.py | juancotrino/Hapi | 0 | 6629981 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Calibration
calibration contains functions to connect the parameter spatial distribution
function with both components of the spatial representation of the hydrological
process (conceptual model & spatial routing) and to calculate the performance of the
predicted runoff at known locations based on a given performance function
@author: Mostafa
"""
#%links
#%library
import os
import numpy as np
import gdal
from Oasis.optimization import Optimization
from Oasis.hsapi import HSapi
# from Oasis.optimizer import Optimizer
# functions
import Hapi.raster as raster
#import DistParameters as Dp
#import PerformanceCriteria as PC
import Hapi.wrapper as wrapper
def RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun, SpatialVarArgs,
OF, OF_args, Q_obs, OptimizationArgs, printError=None):
"""
=======================================================================
RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun, SpatialVarArgs, OF, OF_args, Q_obs, OptimizationArgs, printError=None):
=======================================================================
this function runs the calibration algorithm for the conceptual distributed
hydrological model
Inputs:
----------
1-ConceptualModel:
[function] conceptual model and it should contain a function called simulate
1-Paths:
1-PrecPath:
[String] path to the Folder contains precipitation rasters
2-Evap_Path:
[String] path to the Folder contains Evapotranspiration rasters
3-TempPath:
[String] path to the Folder contains Temperature rasters
4-FlowAccPath:
[String] path to the Flow Accumulation raster of the catchment (it should
include the raster name and extension)
5-FlowDPath:
[String] path to the Flow Direction raster of the catchment (it should
include the raster name and extension)
2-Basic_inputs:
1-p2:
[List] list of unoptimized parameters
p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
p2[1] = catchment area in km2
2-init_st:
[list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
3-UB:
[Numeric] upper bound of the values of the parameters
4-LB:
[Numeric] Lower bound of the values of the parameters
3-Q_obs:
[Numeric] Observed values of discharge
6-lumpedParNo:
[int] number of lumped parameters, you have to enter the value of
the lumped parameter at the end of the list, default is 0 (no lumped parameters)
7-lumpedParPos:
[List] list of order or position of the lumped parameter among all
the parameters of the lumped model (order starts from 0 to the length
of the model parameters), default is [] (empty), the following order
of parameters is used for the lumped HBV model used
[ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
8-objective_function:
[function] objective function to calculate the performance of the model
and to be used in the calibration
9-*args:
other arguments needed by the objective function
Outputs:
----------
1- st:
[4D array] state variables
2- q_out:
[1D array] calculated Discharge at the outlet of the catchment
3- q_uz:
[3D array] Distributed discharge for each cell
Example:
----------
PrecPath = prec_path="meteodata/4000/calib/prec"
Evap_Path = evap_path="meteodata/4000/calib/evap"
TempPath = temp_path="meteodata/4000/calib/temp"
FlowAccPath = "GIS/4000/acc4000.tif"
FlowDPath = "GIS/4000/fd4000.tif"
ParPath = "meteodata/4000/"+"parameters.txt"
p2=[1, 227.31]
st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath,
FlowAccPath,FlowDPath,ParPath,p2)
"""
### inputs validation
# data type
assert len(Paths) == 5, "Paths should include 5 folder paths, only " +str(len(Paths))+" paths are provided"
PrecPath=Paths[0]
Evap_Path=Paths[1]
TempPath=Paths[2]
# DemPath=Paths[3]
FlowAccPath=Paths[3]
FlowDPath=Paths[4]
assert type(PrecPath)== str, "PrecPath input should be string type"
assert type(Evap_Path)== str, "Evap_Path input should be string type"
assert type(TempPath)== str, "TempPath input should be string type"
# assert type(DemPath)== str, "DemPath input should be string type"
assert type(FlowAccPath)== str, "FlowAccPath input should be string type"
assert type(FlowDPath)== str, "FlowDPath input should be string type"
# input values
# dem_ext=DemPath[-4:]
# assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input"
acc_ext=FlowAccPath[-4:]
assert acc_ext == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
fd_ext=FlowDPath[-4:]
assert fd_ext == ".tif", "please add the extension at the end of the Flow Direction path input"
# check whether the path exists or not
assert os.path.exists(PrecPath), PrecPath + " you have provided does not exist"
assert os.path.exists(Evap_Path), Evap_Path+" path you have provided does not exist"
assert os.path.exists(TempPath), TempPath+" path you have provided does not exist"
# assert os.path.exists(DemPath), DemPath+ " you have provided does not exist"
assert os.path.exists(FlowAccPath), FlowAccPath + " you have provided does not exist"
assert os.path.exists(FlowDPath), FlowDPath+ " you have provided does not exist"
# check whether the folder has the rasters or not
assert len(os.listdir(PrecPath)) > 0, PrecPath+" folder you have provided is empty"
assert len(os.listdir(Evap_Path)) > 0, Evap_Path+" folder you have provided is empty"
assert len(os.listdir(TempPath)) > 0, TempPath+" folder you have provided is empty"
# basic inputs
# check if all inputs are included
assert all(["p2","init_st","UB","LB","snow "][i] in Basic_inputs.keys() for i in range(4)), "Basic_inputs should contain ['p2','init_st','UB','LB'] "
p2 = Basic_inputs['p2']
init_st = Basic_inputs["init_st"]
UB = Basic_inputs['UB']
LB = Basic_inputs['LB']
snow = Basic_inputs['snow']
assert len(UB)==len(LB), "length of UB should be the same like LB"
# check objective_function
assert callable(OF) , "second argument should be a function"
if OF_args== None :
OF_args=[]
# read data
### meteorological data
prec=raster.ReadRastersFolder(PrecPath)
evap=raster.ReadRastersFolder(Evap_Path)
temp=raster.ReadRastersFolder(TempPath)
print("meteorological data are read successfully")
#### GIS data
# dem= gdal.Open(DemPath)
acc=gdal.Open(FlowAccPath)
fd=gdal.Open(FlowDPath)
print("GIS data are read successfully")
### optimization
# get arguments
ApiObjArgs = OptimizationArgs[0]
pll_type = OptimizationArgs[1]
ApiSolveArgs = OptimizationArgs[2]
# check optimization arguments
assert type(ApiObjArgs) == dict, "ApiObjArgs should be a dictionary"
assert type(ApiSolveArgs) == dict, "ApiSolveArgs should be a dictionary"
print('Calibration starts')
### calculate the objective function
def opt_fun(par):
try:
# parameters
klb=float(par[-2])
kub=float(par[-1])
par=par[:-2]
par_dist=SpatialVarFun(par,*SpatialVarArgs,kub=kub,klb=klb)
#run the model
_, q_out, q_uz_routed, q_lz_trans = wrapper.HapiModel(ConceptualModel,
acc, fd, prec, evap,
temp, par_dist, p2,
snow , init_st)
# calculate performance of the model
try:
error=OF(Q_obs,q_out,q_uz_routed,q_lz_trans,*OF_args)
except TypeError: # if no of inputs less than what the function needs
assert 1==5, "the objective function you have entered needs more inputs please enter them in a list as *args"
# print error
if printError != 0:
print(error)
print(par)
fail = 0
except:
error = np.nan
fail = 1
return error, [], fail
### define the optimization components
opt_prob = Optimization('HBV Calibration', opt_fun)
for i in range(len(LB)):
opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i])
print(opt_prob)
opt_engine = HSapi(pll_type=pll_type , options=ApiObjArgs)
store_sol = ApiSolveArgs['store_sol']
display_opts = ApiSolveArgs['display_opts']
store_hst = ApiSolveArgs['store_hst']
hot_start = ApiSolveArgs['hot_start']
res = opt_engine(opt_prob, store_sol=store_sol, display_opts=display_opts,
store_hst=store_hst, hot_start=hot_start)
return res
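# A hedged usage sketch of RunCalibration (the names HBV, DistributeParameters,
# SpatialArgs, RMSE and Qobs below are placeholders, not part of this module):
# Basic_inputs = {"p2": [1, 227.31], "init_st": [0, 5, 5, 5, 0], "UB": UB, "LB": LB, "snow": 0}
# OptimizationArgs = [ApiObjArgs, pll_type, ApiSolveArgs]
# res = RunCalibration(HBV, Paths, Basic_inputs, DistributeParameters, SpatialArgs,
#                      RMSE, [], Qobs, OptimizationArgs, printError=1)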
def LumpedCalibration(ConceptualModel, data, Basic_inputs, OF, OF_args, Q_obs,
OptimizationArgs, printError=None):
"""
=======================================================================
LumpedCalibration(ConceptualModel, data, Basic_inputs, OF, OF_args, Q_obs, OptimizationArgs, printError=None):
=======================================================================
this function runs the calibration algorithm for the Lumped conceptual hydrological model
Inputs:
----------
1-ConceptualModel:
[function] conceptual model and it should contain a function called simulate
2-data:
[numpy array] meteorological data as array with the first column as precipitation
second as evapotranspiration, third as temperature and forth column as
long term average temperature
2-Basic_inputs:
1-p2:
[List] list of unoptimized parameters
p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
p2[1] = catchment area in km2
2-init_st:
[list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
3-UB:
[Numeric] upper bound of the values of the parameters
4-LB:
[Numeric] Lower bound of the values of the parameters
3-Q_obs:
[Numeric] Observed values of discharge
6-lumpedParNo:
[int] number of lumped parameters, you have to enter the value of
the lumped parameter at the end of the list, default is 0 (no lumped parameters)
7-lumpedParPos:
[List] list of order or position of the lumped parameter among all
the parameters of the lumped model (order starts from 0 to the length
of the model parameters), default is [] (empty), the following order
of parameters is used for the lumped HBV model used
[ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
8-objective_function:
[function] objective function to calculate the performance of the model
and to be used in the calibration
9-*args:
other arguments needed by the objective function
Outputs:
----------
1- st:
[4D array] state variables
2- q_out:
[1D array] calculated Discharge at the outlet of the catchment
3- q_uz:
[3D array] Distributed discharge for each cell
Example:
----------
PrecPath = prec_path="meteodata/4000/calib/prec"
Evap_Path = evap_path="meteodata/4000/calib/evap"
TempPath = temp_path="meteodata/4000/calib/temp"
FlowAccPath = "GIS/4000/acc4000.tif"
FlowDPath = "GIS/4000/fd4000.tif"
ParPath = "meteodata/4000/"+"parameters.txt"
p2=[1, 227.31]
st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath,
FlowAccPath,FlowDPath,ParPath,p2)
"""
### inputs validation
# data type
# input values
# basic inputs
    # check if all the required inputs are included
    required_keys = ["p2", "init_st", "UB", "LB", "snow", "Routing", "RoutingFn"]
    assert all(key in Basic_inputs.keys() for key in required_keys), "Basic_inputs should contain " + str(required_keys)
p2 = Basic_inputs['p2']
init_st = Basic_inputs["init_st"]
UB = Basic_inputs['UB']
LB = Basic_inputs['LB']
snow = Basic_inputs['snow']
Routing = Basic_inputs["Routing"]
RoutingFn = Basic_inputs["RoutingFn"]
    if 'InitialValues' in Basic_inputs.keys():
        InitialValues = Basic_inputs['InitialValues']
    else:
        # fall back to the midpoint of the bounds so that addVar always receives a start value
        InitialValues = [(LB[i] + UB[i]) / 2 for i in range(len(LB))]
assert len(UB)==len(LB), "length of UB should be the same like LB"
# check objective_function
    assert callable(OF), "the objective function (OF) should be a callable"
    if OF_args is None:
        OF_args = []
### optimization
# get arguments
ApiObjArgs = OptimizationArgs[0]
pll_type = OptimizationArgs[1]
ApiSolveArgs = OptimizationArgs[2]
    # check the optimization arguments
    assert type(ApiObjArgs) == dict, "ApiObjArgs should be a dictionary of solver options"
    assert type(ApiSolveArgs) == dict, "ApiSolveArgs should be a dictionary of solve arguments"
# assert history_fname[-4:] == ".txt", "history_fname should be txt file please change extension or add .txt ad the end of the history_fname"
print('Calibration starts')
### calculate the objective function
def opt_fun(par):
try:
# parameters
#run the model
_, q_out = wrapper.Lumped(ConceptualModel,data,par,p2,init_st,
snow,Routing, RoutingFn)
# calculate performance of the model
try:
error=OF(Q_obs,q_out,*OF_args)
            except TypeError: # if the number of inputs is less than what the objective function needs
                raise ValueError("the objective function you have entered needs more inputs; please enter them in a list as *args")
# print error
if printError != 0:
print(error)
# print(par)
fail = 0
        except Exception:
            # any failure during the model run is reported to the optimizer as NaN
            error = np.nan
            fail = 1
return error, [], fail
### define the optimization components
opt_prob = Optimization('HBV Calibration', opt_fun)
for i in range(len(LB)):
opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i], value=InitialValues[i])
print(opt_prob)
opt_engine = HSapi(pll_type=pll_type , options=ApiObjArgs)
# parse the ApiSolveArgs inputs
# availablekeys = ['store_sol',"display_opts","store_hst","hot_start"]
store_sol = ApiSolveArgs['store_sol']
display_opts = ApiSolveArgs['display_opts']
store_hst = ApiSolveArgs['store_hst']
hot_start = ApiSolveArgs['hot_start']
# for i in range(len(availablekeys)):
# if availablekeys[i] in ApiSolveArgs.keys():
# exec(availablekeys[i] + "=" + str(ApiSolveArgs[availablekeys[i]]))
# print(availablekeys[i] + " = " + str(ApiSolveArgs[availablekeys[i]]))
res = opt_engine(opt_prob, store_sol=store_sol, display_opts=display_opts,
store_hst=store_hst, hot_start=hot_start)
    return res
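# A minimal, hedged usage sketch for LumpedCalibration. The module name
# "HBVLumped", the RMSE objective function and every numeric value below are
# illustrative assumptions, not part of the API shown above.
# import numpy as np
# import HBVLumped as ConceptualModel        # assumed lumped conceptual model module
# data = np.loadtxt("meteo_lumped.txt")      # columns: prec, evap, temp, long-term temp
# Q_obs = np.loadtxt("discharge.txt")
# def RMSE(q_obs, q_sim):
#     return np.sqrt(np.mean((q_obs - q_sim) ** 2))
# Basic_inputs = {"p2": [24, 227.31], "init_st": [0, 10, 10, 10, 0],
#                 "UB": [1.0] * 12, "LB": [0.0] * 12,
#                 "snow": 0, "Routing": 0, "RoutingFn": None}
# OptimizationArgs = [{}, None, {"store_sol": True, "display_opts": True,
#                                "store_hst": False, "hot_start": False}]
# res = LumpedCalibration(ConceptualModel, data, Basic_inputs, RMSE, [],
#                         Q_obs, OptimizationArgs, printError=0)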
"""
Calibration
calibration contains functions to to connect the parameter spatial distribution
function with the with both component of the spatial representation of the hydrological
process (conceptual model & spatial routing) to calculate the performance of predicted
runoff at known locations based on given performance function
@author: Mostafa
"""
#%links
#%library
import os
import numpy as np
import gdal
from Oasis.optimization import Optimization
from Oasis.hsapi import HSapi
# from Oasis.optimizer import Optimizer
# functions
import Hapi.raster as raster
#import DistParameters as Dp
#import PerformanceCriteria as PC
import Hapi.wrapper as wrapper
def RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun, SpatialVarArgs,
OF, OF_args, Q_obs, OptimizationArgs, printError=None):
"""
=======================================================================
RunCalibration(ConceptualModel, Paths, p2, Q_obs, UB, LB, SpatialVarFun, lumpedParNo, lumpedParPos, objective_function, printError=None, *args):
=======================================================================
this function runs the calibration algorithm for the conceptual distributed
hydrological model
Inputs:
----------
1-ConceptualModel:
[function] conceptual model and it should contain a function called simulate
1-Paths:
1-PrecPath:
[String] path to the Folder contains precipitation rasters
2-Evap_Path:
[String] path to the Folder contains Evapotranspiration rasters
3-TempPath:
[String] path to the Folder contains Temperature rasters
4-FlowAccPath:
[String] path to the Flow Accumulation raster of the catchment (it should
include the raster name and extension)
5-FlowDPath:
[String] path to the Flow Direction raster of the catchment (it should
include the raster name and extension)
2-Basic_inputs:
1-p2:
[List] list of unoptimized parameters
p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
p2[1] = catchment area in km2
2-init_st:
[list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
3-UB:
[Numeric] upper bound of the values of the parameters
4-LB:
[Numeric] Lower bound of the values of the parameters
3-Q_obs:
[Numeric] Observed values of discharge
6-lumpedParNo:
[int] nomber of lumped parameters, you have to enter the value of
the lumped parameter at the end of the list, default is 0 (no lumped parameters)
7-lumpedParPos:
[List] list of order or position of the lumped parameter among all
the parameters of the lumped model (order starts from 0 to the length
of the model parameters), default is [] (empty), the following order
of parameters is used for the lumped HBV model used
[ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
8-objective_function:
[function] objective function to calculate the performance of the model
and to be used in the calibration
9-*args:
other arguments needed on the objective function
Outputs:
----------
1- st:
[4D array] state variables
2- q_out:
[1D array] calculated Discharge at the outlet of the catchment
3- q_uz:
[3D array] Distributed discharge for each cell
Example:
----------
PrecPath = prec_path="meteodata/4000/calib/prec"
Evap_Path = evap_path="meteodata/4000/calib/evap"
TempPath = temp_path="meteodata/4000/calib/temp"
FlowAccPath = "GIS/4000/acc4000.tif"
FlowDPath = "GIS/4000/fd4000.tif"
ParPath = "meteodata/4000/"+"parameters.txt"
p2=[1, 227.31]
st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath,
FlowAccPath,FlowDPath,ParPath,p2)
"""
### inputs validation
# data type
assert len(Paths) == 5, "Paths should include 5 folder pathes " +str(len(Paths))+" paths are only provided"
PrecPath=Paths[0]
Evap_Path=Paths[1]
TempPath=Paths[2]
# DemPath=Paths[3]
FlowAccPath=Paths[3]
FlowDPath=Paths[4]
assert type(PrecPath)== str, "PrecPath input should be string type"
assert type(Evap_Path)== str, "Evap_Path input should be string type"
assert type(TempPath)== str, "TempPath input should be string type"
# assert type(DemPath)== str, "DemPath input should be string type"
assert type(FlowAccPath)== str, "FlowAccPath input should be string type"
assert type(FlowDPath)== str, "FlowDPath input should be string type"
# input values
# dem_ext=DemPath[-4:]
# assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input"
acc_ext=FlowAccPath[-4:]
assert acc_ext == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
fd_ext=FlowDPath[-4:]
assert fd_ext == ".tif", "please add the extension at the end of the Flow Direction path input"
# check wether the path exists or not
assert os.path.exists(PrecPath), PrecPath + " you have provided does not exist"
assert os.path.exists(Evap_Path), Evap_Path+" path you have provided does not exist"
assert os.path.exists(TempPath), TempPath+" path you have provided does not exist"
# assert os.path.exists(DemPath), DemPath+ " you have provided does not exist"
assert os.path.exists(FlowAccPath), FlowAccPath + " you have provided does not exist"
assert os.path.exists(FlowDPath), FlowDPath+ " you have provided does not exist"
# check wether the folder has the rasters or not
assert len(os.listdir(PrecPath)) > 0, PrecPath+" folder you have provided is empty"
assert len(os.listdir(Evap_Path)) > 0, Evap_Path+" folder you have provided is empty"
assert len(os.listdir(TempPath)) > 0, TempPath+" folder you have provided is empty"
# basic inputs
# check if all inputs are included
assert all(["p2","init_st","UB","LB","snow "][i] in Basic_inputs.keys() for i in range(4)), "Basic_inputs should contain ['p2','init_st','UB','LB'] "
p2 = Basic_inputs['p2']
init_st = Basic_inputs["init_st"]
UB = Basic_inputs['UB']
LB = Basic_inputs['LB']
snow = Basic_inputs['snow']
assert len(UB)==len(LB), "length of UB should be the same like LB"
# check objective_function
assert callable(OF) , "second argument should be a function"
if OF_args== None :
OF_args=[]
# read data
### meteorological data
prec=raster.ReadRastersFolder(PrecPath)
evap=raster.ReadRastersFolder(Evap_Path)
temp=raster.ReadRastersFolder(TempPath)
print("meteorological data are read successfully")
#### GIS data
# dem= gdal.Open(DemPath)
acc=gdal.Open(FlowAccPath)
fd=gdal.Open(FlowDPath)
print("GIS data are read successfully")
### optimization
# get arguments
ApiObjArgs = OptimizationArgs[0]
pll_type = OptimizationArgs[1]
ApiSolveArgs = OptimizationArgs[2]
# check optimization arguement
assert type(ApiObjArgs) == dict, "store_history should be 0 or 1"
assert type(ApiSolveArgs) == dict, "history_fname should be of type string "
print('Calibration starts')
### calculate the objective function
def opt_fun(par):
try:
# parameters
klb=float(par[-2])
kub=float(par[-1])
par=par[:-2]
par_dist=SpatialVarFun(par,*SpatialVarArgs,kub=kub,klb=klb)
#run the model
_, q_out, q_uz_routed, q_lz_trans = wrapper.HapiModel(ConceptualModel,
acc, fd, prec, evap,
temp, par_dist, p2,
snow , init_st)
# calculate performance of the model
try:
error=OF(Q_obs,q_out,q_uz_routed,q_lz_trans,*OF_args)
except TypeError: # if no of inputs less than what the function needs
assert 1==5, "the objective function you have entered needs more inputs please enter then in a list as *args"
# print error
if printError != 0:
print(error)
print(par)
fail = 0
except:
error = np.nan
fail = 1
return error, [], fail
### define the optimization components
opt_prob = Optimization('HBV Calibration', opt_fun)
for i in range(len(LB)):
opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i])
print(opt_prob)
opt_engine = HSapi(pll_type=pll_type , options=ApiObjArgs)
store_sol = ApiSolveArgs['store_sol']
display_opts = ApiSolveArgs['display_opts']
store_hst = ApiSolveArgs['store_hst']
hot_start = ApiSolveArgs['hot_start']
res = opt_engine(opt_prob, store_sol=store_sol, display_opts=display_opts,
store_hst=store_hst, hot_start=hot_start)
return res
def LumpedCalibration(ConceptualModel, data, Basic_inputs, OF, OF_args, Q_obs,
OptimizationArgs, printError=None):
"""
=======================================================================
RunCalibration(ConceptualModel, data,parameters, p2, init_st, snow, Routing=0, RoutingFn=[], objective_function, printError=None, *args):
=======================================================================
this function runs the calibration algorithm for the Lumped conceptual hydrological model
Inputs:
----------
1-ConceptualModel:
[function] conceptual model and it should contain a function called simulate
2-data:
[numpy array] meteorological data as array with the first column as precipitation
second as evapotranspiration, third as temperature and forth column as
long term average temperature
2-Basic_inputs:
1-p2:
[List] list of unoptimized parameters
p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
p2[1] = catchment area in km2
2-init_st:
[list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
3-UB:
[Numeric] upper bound of the values of the parameters
4-LB:
[Numeric] Lower bound of the values of the parameters
3-Q_obs:
[Numeric] Observed values of discharge
6-lumpedParNo:
[int] nomber of lumped parameters, you have to enter the value of
the lumped parameter at the end of the list, default is 0 (no lumped parameters)
7-lumpedParPos:
[List] list of order or position of the lumped parameter among all
the parameters of the lumped model (order starts from 0 to the length
of the model parameters), default is [] (empty), the following order
of parameters is used for the lumped HBV model used
[ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
8-objective_function:
[function] objective function to calculate the performance of the model
and to be used in the calibration
9-*args:
other arguments needed on the objective function
Outputs:
----------
1- st:
[4D array] state variables
2- q_out:
[1D array] calculated Discharge at the outlet of the catchment
3- q_uz:
[3D array] Distributed discharge for each cell
Example:
----------
PrecPath = prec_path="meteodata/4000/calib/prec"
Evap_Path = evap_path="meteodata/4000/calib/evap"
TempPath = temp_path="meteodata/4000/calib/temp"
FlowAccPath = "GIS/4000/acc4000.tif"
FlowDPath = "GIS/4000/fd4000.tif"
ParPath = "meteodata/4000/"+"parameters.txt"
p2=[1, 227.31]
st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath,
FlowAccPath,FlowDPath,ParPath,p2)
"""
### inputs validation
# data type
# input values
# basic inputs
# check if all inputs are included
assert all(["p2","init_st","UB","LB","snow","Routing","RoutingFn"][i] in Basic_inputs.keys() for i in range(4)), "Basic_inputs should contain ['p2','init_st','UB','LB'] "
p2 = Basic_inputs['p2']
init_st = Basic_inputs["init_st"]
UB = Basic_inputs['UB']
LB = Basic_inputs['LB']
snow = Basic_inputs['snow']
Routing = Basic_inputs["Routing"]
RoutingFn = Basic_inputs["RoutingFn"]
if 'InitialValues' in Basic_inputs.keys():
InitialValues = Basic_inputs['InitialValues']
assert len(UB)==len(LB), "length of UB should be the same like LB"
# check objective_function
assert callable(OF) , "second argument should be a function"
if OF_args== None :
OF_args=[]
### optimization
# get arguments
ApiObjArgs = OptimizationArgs[0]
pll_type = OptimizationArgs[1]
ApiSolveArgs = OptimizationArgs[2]
# check optimization arguement
assert type(ApiObjArgs) == dict, "store_history should be 0 or 1"
assert type(ApiSolveArgs) == dict, "history_fname should be of type string "
# assert history_fname[-4:] == ".txt", "history_fname should be txt file please change extension or add .txt ad the end of the history_fname"
print('Calibration starts')
### calculate the objective function
def opt_fun(par):
try:
# parameters
#run the model
_, q_out = wrapper.Lumped(ConceptualModel,data,par,p2,init_st,
snow,Routing, RoutingFn)
# calculate performance of the model
try:
error=OF(Q_obs,q_out,*OF_args)
except TypeError: # if no of inputs less than what the function needs
assert 1==5, "the objective function you have entered needs more inputs please enter then in a list as *args"
# print error
if printError != 0:
print(error)
# print(par)
fail = 0
except:
error = np.nan
fail = 1
return error, [], fail
### define the optimization components
opt_prob = Optimization('HBV Calibration', opt_fun)
for i in range(len(LB)):
opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i], value=InitialValues[i])
print(opt_prob)
opt_engine = HSapi(pll_type=pll_type , options=ApiObjArgs)
# parse the ApiSolveArgs inputs
# availablekeys = ['store_sol',"display_opts","store_hst","hot_start"]
store_sol = ApiSolveArgs['store_sol']
display_opts = ApiSolveArgs['display_opts']
store_hst = ApiSolveArgs['store_hst']
hot_start = ApiSolveArgs['hot_start']
# for i in range(len(availablekeys)):
# if availablekeys[i] in ApiSolveArgs.keys():
# exec(availablekeys[i] + "=" + str(ApiSolveArgs[availablekeys[i]]))
# print(availablekeys[i] + " = " + str(ApiSolveArgs[availablekeys[i]]))
res = opt_engine(opt_prob, store_sol=store_sol, display_opts=display_opts,
store_hst=store_hst, hot_start=hot_start)
return res | en | 0.60746 | # -*- coding: utf-8 -*- Calibration calibration contains functions to to connect the parameter spatial distribution function with the with both component of the spatial representation of the hydrological process (conceptual model & spatial routing) to calculate the performance of predicted runoff at known locations based on given performance function @author: Mostafa #%links #%library # from Oasis.optimizer import Optimizer # functions #import DistParameters as Dp #import PerformanceCriteria as PC ======================================================================= RunCalibration(ConceptualModel, Paths, p2, Q_obs, UB, LB, SpatialVarFun, lumpedParNo, lumpedParPos, objective_function, printError=None, *args): ======================================================================= this function runs the calibration algorithm for the conceptual distributed hydrological model Inputs: ---------- 1-ConceptualModel: [function] conceptual model and it should contain a function called simulate 1-Paths: 1-PrecPath: [String] path to the Folder contains precipitation rasters 2-Evap_Path: [String] path to the Folder contains Evapotranspiration rasters 3-TempPath: [String] path to the Folder contains Temperature rasters 4-FlowAccPath: [String] path to the Flow Accumulation raster of the catchment (it should include the raster name and extension) 5-FlowDPath: [String] path to the Flow Direction raster of the catchment (it should include the raster name and extension) 2-Basic_inputs: 1-p2: [List] list of unoptimized parameters p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step p2[1] = catchment area in km2 2-init_st: [list] initial values for the state variables [sp,sm,uz,lz,wc] in mm 3-UB: [Numeric] upper bound of the values of the parameters 4-LB: [Numeric] Lower bound of the values of the parameters 3-Q_obs: [Numeric] Observed values of discharge 6-lumpedParNo: [int] nomber of lumped parameters, you have to enter the value of the lumped parameter at the end of the list, default is 0 (no lumped parameters) 7-lumpedParPos: [List] list of order or position of the lumped parameter among all the parameters of the lumped model (order starts from 0 to the length of the model parameters), default is [] (empty), the following order of parameters is used for the lumped HBV model used [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp, c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum] 8-objective_function: [function] objective function to calculate the performance of the model and to be used in the calibration 9-*args: other arguments needed on the objective function Outputs: ---------- 1- st: [4D array] state variables 2- q_out: [1D array] calculated Discharge at the outlet of the catchment 3- q_uz: [3D array] Distributed discharge for each cell Example: ---------- PrecPath = prec_path="meteodata/4000/calib/prec" Evap_Path = evap_path="meteodata/4000/calib/evap" TempPath = temp_path="meteodata/4000/calib/temp" FlowAccPath = "GIS/4000/acc4000.tif" FlowDPath = "GIS/4000/fd4000.tif" ParPath = "meteodata/4000/"+"parameters.txt" p2=[1, 227.31] st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, FlowAccPath,FlowDPath,ParPath,p2) ### inputs validation # data type # DemPath=Paths[3] # assert type(DemPath)== str, "DemPath input should be string type" # input values # dem_ext=DemPath[-4:] # assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input" # check wether the path exists or 
not # assert os.path.exists(DemPath), DemPath+ " you have provided does not exist" # check wether the folder has the rasters or not # basic inputs # check if all inputs are included # check objective_function # read data ### meteorological data #### GIS data # dem= gdal.Open(DemPath) ### optimization # get arguments # check optimization arguement ### calculate the objective function # parameters #run the model # calculate performance of the model # if no of inputs less than what the function needs # print error ### define the optimization components ======================================================================= RunCalibration(ConceptualModel, data,parameters, p2, init_st, snow, Routing=0, RoutingFn=[], objective_function, printError=None, *args): ======================================================================= this function runs the calibration algorithm for the Lumped conceptual hydrological model Inputs: ---------- 1-ConceptualModel: [function] conceptual model and it should contain a function called simulate 2-data: [numpy array] meteorological data as array with the first column as precipitation second as evapotranspiration, third as temperature and forth column as long term average temperature 2-Basic_inputs: 1-p2: [List] list of unoptimized parameters p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step p2[1] = catchment area in km2 2-init_st: [list] initial values for the state variables [sp,sm,uz,lz,wc] in mm 3-UB: [Numeric] upper bound of the values of the parameters 4-LB: [Numeric] Lower bound of the values of the parameters 3-Q_obs: [Numeric] Observed values of discharge 6-lumpedParNo: [int] nomber of lumped parameters, you have to enter the value of the lumped parameter at the end of the list, default is 0 (no lumped parameters) 7-lumpedParPos: [List] list of order or position of the lumped parameter among all the parameters of the lumped model (order starts from 0 to the length of the model parameters), default is [] (empty), the following order of parameters is used for the lumped HBV model used [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp, c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum] 8-objective_function: [function] objective function to calculate the performance of the model and to be used in the calibration 9-*args: other arguments needed on the objective function Outputs: ---------- 1- st: [4D array] state variables 2- q_out: [1D array] calculated Discharge at the outlet of the catchment 3- q_uz: [3D array] Distributed discharge for each cell Example: ---------- PrecPath = prec_path="meteodata/4000/calib/prec" Evap_Path = evap_path="meteodata/4000/calib/evap" TempPath = temp_path="meteodata/4000/calib/temp" FlowAccPath = "GIS/4000/acc4000.tif" FlowDPath = "GIS/4000/fd4000.tif" ParPath = "meteodata/4000/"+"parameters.txt" p2=[1, 227.31] st, q_out, q_uz_routed = RunModel(PrecPath,Evap_Path,TempPath,DemPath, FlowAccPath,FlowDPath,ParPath,p2) ### inputs validation # data type # input values # basic inputs # check if all inputs are included # check objective_function ### optimization # get arguments # check optimization arguement # assert history_fname[-4:] == ".txt", "history_fname should be txt file please change extension or add .txt ad the end of the history_fname" ### calculate the objective function # parameters #run the model # calculate performance of the model # if no of inputs less than what the function needs # print error # print(par) ### define the optimization components # parse 
the ApiSolveArgs inputs # availablekeys = ['store_sol',"display_opts","store_hst","hot_start"] # for i in range(len(availablekeys)): # if availablekeys[i] in ApiSolveArgs.keys(): # exec(availablekeys[i] + "=" + str(ApiSolveArgs[availablekeys[i]])) # print(availablekeys[i] + " = " + str(ApiSolveArgs[availablekeys[i]])) | 2.80424 | 3 |
# Source: python/blazingdb/protocol/io/__init__.py (HubBucket-Team/blazingdb-protocol, 1 star, id 6629982)
import flatbuffers
import copy
import numpy
import blazingdb.protocol.transport as transport
from blazingdb.messages.blazingdb.protocol.io \
import FileSystemRegisterRequest, FileSystemDeregisterRequest, HDFS, S3, POSIX, CsvFile, ParquetFile
from blazingdb.messages.blazingdb.protocol.io import DriverType, EncryptionType, FileSystemConnection, FileSchemaType
from blazingdb.messages.blazingdb.protocol.io import FileSystemDMLRequest, FileSystemTableGroup, FileSystemBlazingTable
from blazingdb.messages.blazingdb.protocol \
import BlazingTable
from blazingdb.protocol.gdf import gdf_columnSchema
DriverType = DriverType.DriverType
EncryptionType = EncryptionType.EncryptionType
FileSystemType = FileSystemConnection.FileSystemConnection
FileSchemaType = FileSchemaType.FileSchemaType
# TODO: create mappers for unions, see union_segment
class FileSystemRegisterRequestSchema:
def __init__(self, authority, root, type, params):
self.authority = authority
self.root = root
self.params = params
self.type = type
    # TODO: create mappers for unions
def ToBuffer(self):
builder = flatbuffers.Builder(1024)
authority = builder.CreateString(self.authority)
root = builder.CreateString(self.root)
if self.type == FileSystemType.HDFS:
fileSystemConnection, fileSystemConnectionType = MakeHdfsFileSystemConnection(builder, self.params)
elif self.type == FileSystemType.S3:
fileSystemConnection, fileSystemConnectionType = MakeS3FileSystemRegisterRequest(builder, self.params)
else:
fileSystemConnection, fileSystemConnectionType = MakePosixFileSystemConnection(builder, self.params)
FileSystemRegisterRequest.FileSystemRegisterRequestStart(builder)
FileSystemRegisterRequest.FileSystemRegisterRequestAddAuthority(builder, authority)
FileSystemRegisterRequest.FileSystemRegisterRequestAddRoot(builder, root)
FileSystemRegisterRequest.FileSystemRegisterRequestAddFileSystemConnectionType(builder,
fileSystemConnectionType)
FileSystemRegisterRequest.FileSystemRegisterRequestAddFileSystemConnection(builder, fileSystemConnection)
fs = FileSystemRegisterRequest.FileSystemRegisterRequestEnd(builder)
builder.Finish(fs)
return builder.Output()
# TODO: create mappers for unions, see union_segment
class FileSystemDeregisterRequestSchema:
def __init__(self, authority):
self.authority = authority
def ToBuffer(self):
builder = flatbuffers.Builder(1024)
authority = builder.CreateString(self.authority)
FileSystemDeregisterRequest.FileSystemDeregisterRequestStart(builder)
FileSystemDeregisterRequest.FileSystemDeregisterRequestAddAuthority(builder, authority)
fs = FileSystemDeregisterRequest.FileSystemDeregisterRequestEnd(builder)
builder.Finish(fs)
return builder.Output()
def MakePosixFileSystemConnection(builder, params):
return 0, FileSystemType.POSIX
def MakeHdfsFileSystemConnection(builder, params):
host = builder.CreateString(params.host)
user = builder.CreateString(params.user)
ticket = builder.CreateString(params.kerberosTicket)
HDFS.HDFSStart(builder)
HDFS.HDFSAddHost(builder, host)
HDFS.HDFSAddPort(builder, params.port)
HDFS.HDFSAddUser(builder, user)
HDFS.HDFSAddDriverType(builder, params.driverType) # check if it is enum
HDFS.HDFSAddKerberosTicket(builder, ticket)
paramObj = HDFS.HDFSEnd(builder)
return paramObj, FileSystemType.HDFS
def MakeS3FileSystemRegisterRequest(builder, params):
bucketName = builder.CreateString(params.bucketName)
kmsKeyAmazonResourceName = builder.CreateString(params.kmsKeyAmazonResourceName)
accessKeyId = builder.CreateString(params.accessKeyId)
secretKey = builder.CreateString(params.secretKey)
sessionToken = builder.CreateString(params.sessionToken)
S3.S3Start(builder)
S3.S3AddBucketName(builder, bucketName)
S3.S3AddEncryptionType(builder, params.encryptionType) # check if it is enum
S3.S3AddKmsKeyAmazonResourceName(builder, kmsKeyAmazonResourceName)
S3.S3AddAccessKeyId(builder, accessKeyId)
S3.S3AddSecretKey(builder, secretKey)
S3.S3AddSessionToken(builder, sessionToken)
paramObj = S3.S3End(builder)
return paramObj, FileSystemType.S3
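# Hedged usage sketch: building a register-request flatbuffer for an S3 connection.
# The namedtuple and all field values are illustrative assumptions; only the field
# names mirror the attributes read by MakeS3FileSystemRegisterRequest above.
# from collections import namedtuple
# S3Params = namedtuple("S3Params", ["bucketName", "encryptionType",
#                                    "kmsKeyAmazonResourceName", "accessKeyId",
#                                    "secretKey", "sessionToken"])
# params = S3Params("my-bucket", 0,            # 0 = assumed EncryptionType value
#                   "", "AKIA...", "secret", "")
# request = FileSystemRegisterRequestSchema(authority="tpch_s3", root="/",
#                                           type=FileSystemType.S3, params=params)
# buffer = request.ToBuffer()                  # bytes ready to send to the server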
class CsvFileSchema(transport.schema(CsvFile)):
path = transport.StringSegment()
delimiter = transport.StringSegment()
lineTerminator = transport.StringSegment()
skipRows = transport.NumberSegment()
names = transport.VectorStringSegment(transport.StringSegment)
dtypes = transport.VectorSegment(transport.NumberSegment)
class ParquetFileSchema(transport.schema(ParquetFile)):
path = transport.StringSegment()
rowGroupIndices = transport.VectorSegment(transport.NumberSegment)
columnIndices = transport.VectorSegment(transport.NumberSegment)
class GdfSchema(transport.schema(BlazingTable)):
columns = transport.VectorSchemaSegment(gdf_columnSchema)
columnTokens = transport.VectorSegment(transport.NumberSegment)
resultToken = transport.NumberSegment()
class FileSystemBlazingTableSchema(transport.schema(FileSystemBlazingTable)):
name = transport.StringSegment()
schemaType = transport.NumberSegment()
csv = transport.SchemaSegment(CsvFileSchema)
parquet = transport.SchemaSegment(ParquetFileSchema)
gdf = transport.SchemaSegment(GdfSchema)
files = transport.VectorStringSegment(transport.StringSegment)
columnNames = transport.VectorStringSegment(transport.StringSegment)
class FileSystemTableGroupSchema(transport.schema(FileSystemTableGroup)):
tables = transport.VectorSchemaSegment(FileSystemBlazingTableSchema)
name = transport.StringSegment()
class FileSystemDMLRequestSchema(transport.schema(FileSystemDMLRequest)):
statement = transport.StringSegment()
tableGroup = transport.SchemaSegment(FileSystemTableGroupSchema)
def _GetParquetSchema(kwargs):
path = kwargs.get('path', '')
rowGroupIndices = kwargs.get('rowGroupIndices', [])
columnIndices = kwargs.get('columnIndices', [])
return ParquetFileSchema(path=path, rowGroupIndices=rowGroupIndices, columnIndices=columnIndices)
def _GetCsvSchema(kwargs):
path = kwargs.get('path', '')
delimiter = kwargs.get('delimiter', '')
lineTerminator = kwargs.get('lineTerminator', '')
skipRows = kwargs.get('skipRows', 0)
names = kwargs.get('names', [])
dtypes = kwargs.get('dtypes', [])
return CsvFileSchema(path=path, delimiter=delimiter, lineTerminator=lineTerminator, skipRows=skipRows, names=names,
dtypes=dtypes)
def _GetGdfSchema(kwargs):
columns = kwargs.get('columns', [])
columnTokens = kwargs.get('columnTokens', [])
resultToken = kwargs.get('resultToken', 0)
return GdfSchema(columns=columns, columnTokens=columnTokens, resultToken=resultToken)
def BuildFileSystemDMLRequestSchema(statement, tableGroupDto):
tableGroupName = tableGroupDto['name']
tables = []
for index, t in enumerate(tableGroupDto['tables']):
tableName = t['name']
columnNames = t['columnNames']
files = t['files']
schemaType = t['schemaType']
if schemaType == FileSchemaType.PARQUET:
parquet = _GetParquetSchema(t['parquet'])
csv = _GetCsvSchema({})
gdf = _GetGdfSchema({})
elif schemaType == FileSchemaType.CSV:
csv = _GetCsvSchema(t['csv'])
parquet = _GetParquetSchema({})
gdf = _GetGdfSchema({})
else:
csv = _GetCsvSchema({})
parquet = _GetParquetSchema({})
gdf = _GetGdfSchema(t['gdf'])
table = FileSystemBlazingTableSchema(name=tableName, schemaType=schemaType, parquet=parquet, csv=csv, gdf=gdf,
files=files, columnNames=columnNames)
tables.append(table)
tableGroup = FileSystemTableGroupSchema(tables=tables, name=tableGroupName)
return FileSystemDMLRequestSchema(statement=statement, tableGroup=tableGroup)
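# Hedged usage sketch for BuildFileSystemDMLRequestSchema. Table, column and file
# names are made up; the dictionary keys mirror the ones read by the function above
# and by _GetCsvSchema.
# csv_table = {
#     "name": "main.orders",
#     "columnNames": ["o_orderkey", "o_totalprice"],
#     "files": ["/data/orders_0.csv"],
#     "schemaType": FileSchemaType.CSV,
#     "csv": {"path": "/data/orders_0.csv", "delimiter": "|",
#             "lineTerminator": "\n", "skipRows": 0,
#             "names": ["o_orderkey", "o_totalprice"],
#             "dtypes": [3, 5]},                 # assumed gdf dtype codes
# }
# tableGroupDto = {"name": "main", "tables": [csv_table]}
# schema = BuildFileSystemDMLRequestSchema("select * from main.orders", tableGroupDto)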
# Source: test/rules/provider_test.bzl (liuliu/rules_swift, 215 stars, id 6629983)
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for testing the providers of a target under test."""
load("@build_bazel_rules_swift//swift:swift.bzl", "SwiftInfo")
load("@bazel_skylib//lib:types.bzl", "types")
load(
"@bazel_skylib//lib:unittest.bzl",
"analysistest",
"asserts",
"unittest",
)
_EVALUATE_FIELD_FAILED = provider(
doc = """
A sentinel value returned by `_evaluate_field` when a `None` value is
encountered during the evaluation of a dotted path on any component other than
the last component. This allows the caller to distinguish between a legitimate
`None` value being returned by the entire path vs. an unexpected `None` in an
earlier component.
A `provider` is used here because it is a simple way of getting a known unique
object from Bazel that cannot be equal to any other object.
""",
fields = [],
)
def _evaluate_field(env, source, field):
"""Evaluates a field or field path on an object and returns its value.
This function projects across collections. That is, if the result of
evaluating a field along the path is a depset or a list, then the result
will be normalized into a list and remaining fields in the path will be
evaluated on every item in that list, not on the list itself.
If a field path component in a projected collection is followed by an
exclamation point, then this indicates that any `None` values produced at
that stage of evaluation should be removed from the list before continuing.
If evaluating the path fails because a `None` value is encountered anywhere
before the last component and they are not filtered out, then an assertion
failure is logged and the special value `EVALUATE_FIELD_FAILED` is returned.
This value lets the caller short-circuit additional test logic that may not
be relevant if evaluation is known to have failed.
Args:
env: The analysis test environment.
source: The source object on which to evaluate the field or field path.
field: The field or field path to evaluate. This can be a simple field
name or a dotted path.
Returns:
The result of evaluating the field or field path on the source object.
If a `None` value was encountered during evaluation of a field path
component that was not the final component, then the special value
`_EVALUATE_FIELD_FAILED` is returned.
"""
components = field.split(".")
for component in components:
source = _normalize_collection(source)
filter_nones = component.endswith("!")
if filter_nones:
component = component[:-1]
if types.is_list(source):
if any([item == None for item in source]):
unittest.fail(
env,
"Got 'None' evaluating '{}' on an element in '{}'.".format(
component,
field,
),
)
return _EVALUATE_FIELD_FAILED
# If the elements are lists or depsets, flatten the whole thing into
# a single list.
flattened = []
for item in source:
item = _normalize_collection(item)
if types.is_list(item):
flattened.extend(item)
else:
flattened.append(item)
source = [getattr(item, component, None) for item in flattened]
if filter_nones:
source = [item for item in source if item != None]
else:
if source == None:
unittest.fail(
env,
"Got 'None' evaluating '{}' in '{}'.".format(
component,
field,
),
)
return _EVALUATE_FIELD_FAILED
source = getattr(source, component, None)
if filter_nones:
source = _normalize_collection(source)
if types.is_list(source):
source = [item for item in source if item != None]
else:
unittest.fail(
env,
("Expected to filter 'None' values evaluating '{}' " +
"on an element in '{}', but the result was not a " +
"collection.").format(component, field),
)
return _EVALUATE_FIELD_FAILED
return source
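# Illustrative (hypothetical) field paths, to make the projection and `!`-filter
# semantics above concrete; the provider fields named here are assumptions, not
# guarantees about any particular provider:
#   "files"                             -> a depset is flattened into a list of Files
#   "direct_modules.swift!"             -> take `.swift` from each module, drop Nones
#   "direct_modules.swift!.swiftmodule" -> then collect each `.swiftmodule` File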
def _lookup_provider_by_name(env, target, provider_name):
"""Returns a provider on a target, given its name.
The `provider_test` rule needs to be able to specify which provider a field
should be looked up on, but it can't take provider objects directly as
attribute values, so we have to use strings and a fixed lookup table to find
them.
If the provider is not recognized or is not propagated by the target, then
an assertion failure is logged and `None` is returned. This lets the caller
short-circuit additional test logic that may not be relevant if the provider
is not present.
Args:
env: The analysis test environment.
target: The target whose provider should be looked up.
provider_name: The name of the provider to return.
Returns:
The provider value, or `None` if it was not propagated by the target.
"""
provider = None
if provider_name == "CcInfo":
provider = CcInfo
elif provider_name == "DefaultInfo":
provider = DefaultInfo
elif provider_name == "OutputGroupInfo":
provider = OutputGroupInfo
elif provider_name == "SwiftInfo":
provider = SwiftInfo
elif provider_name == "apple_common.Objc":
provider = apple_common.Objc
if not provider:
unittest.fail(
env,
"Provider '{}' is not supported.".format(provider_name),
)
return None
if provider in target:
return target[provider]
unittest.fail(
env,
"Target '{}' did not provide '{}'.".format(target.label, provider_name),
)
return None
def _field_access_description(target, provider, field):
"""Returns a string denoting field access to a provider on a target.
This function is used to generate a pretty string that can be used in
assertion failure messages, of the form
`<//package:target>[ProviderInfo].some.field.path`.
Args:
target: The target whose provider is being accessed.
provider: The name of the provider being accessed.
field: The field name or dotted field path being accessed.
Returns:
A string describing the field access that can be used in assertion
failure messages.
"""
return "<{}>[{}].{}".format(target.label, provider, field)
def _prettify(object):
"""Returns a prettified version of the given value for failure messages.
If the object is a list, it will be formatted as a multiline string;
otherwise, it will simply be the `repr` of the value.
Args:
object: The object to prettify.
Returns:
A string that can be used to display the value in a failure message.
"""
object = _normalize_collection(object)
if types.is_list(object):
return ("[\n " +
",\n ".join([repr(item) for item in object]) +
"\n]")
else:
return repr(object)
def _normalize_collection(object):
"""Returns object as a list if it is a collection, otherwise returns itself.
Args:
object: The object to normalize. If it is a list or a depset, it will be
returned as a list. Otherwise, it will be returned unchanged.
Returns:
A list containing the same items in `object` if it is a collection,
otherwise the original object is returned.
"""
if types.is_depset(object):
return object.to_list()
else:
return object
def _compare_expected_files(env, access_description, expected, actual):
"""Implements the `expected_files` comparison.
This compares a set of files retrieved from a provider field against a list
of expected strings that are equal to or suffixes of the paths to those
files, as well as excluded files and a wildcard. See the documentation of
the `expected_files` attribute on the rule definition below for specifics.
Args:
env: The analysis test environment.
access_description: A target/provider/field access description string
printed in assertion failure messages.
expected: The list of expected file path inclusions/exclusions.
actual: The collection of files obtained from the provider.
"""
actual = _normalize_collection(actual)
if (
not types.is_list(actual) or
any([type(item) != "File" for item in actual])
):
unittest.fail(
env,
("Expected '{}' to be a collection of files, " +
"but got a {}: {}.").format(
access_description,
type(actual),
_prettify(actual),
),
)
return
remaining = list(actual)
expected_is_subset = "*" in expected
expected_include = [
s
for s in expected
if not s.startswith("-") and s != "*"
]
expected_exclude = [s[1:] for s in expected if s.startswith("-")]
# For every expected file, pick off the first actual that we find that has
# the expected string as a suffix.
failed = False
for suffix in expected_include:
if not remaining:
# It's a failure if we are still expecting files but there are no
# more actual files.
failed = True
break
found_expected_file = False
for i in range(len(remaining)):
actual_path = remaining[i].path
if actual_path.endswith(suffix):
found_expected_file = True
remaining.pop(i)
break
# It's a failure if we never found a file we expected.
if not found_expected_file:
failed = True
break
# For every file expected to *not* be present, check the list of remaining
# files and fail if we find a match.
for suffix in expected_exclude:
for f in remaining:
if f.path.endswith(suffix):
failed = True
break
# If we found all the expected files, the remaining list should be empty.
# Fail if the list is not empty and we're not looking for a subset.
if not expected_is_subset and remaining:
failed = True
asserts.false(
env,
failed,
"Expected '{}' to match {}, but got {}.".format(
access_description,
_prettify(expected),
_prettify([
f.path if type(f) == "File" else repr(f)
for f in actual
]),
),
)
def _provider_test_impl(ctx):
env = analysistest.begin(ctx)
target_under_test = ctx.attr.target_under_test
# If configuration settings were provided, then we have a transition and
# target_under_test will be a list. In that case, get the actual target by
# pulling the first one out.
if types.is_list(target_under_test):
target_under_test = target_under_test[0]
provider_name = ctx.attr.provider
provider = _lookup_provider_by_name(env, target_under_test, provider_name)
if not provider:
return analysistest.end(env)
field = ctx.attr.field
actual = _evaluate_field(env, provider, field)
if actual == _EVALUATE_FIELD_FAILED:
return analysistest.end(env)
access_description = _field_access_description(
target_under_test,
provider_name,
field,
)
# TODO(allevato): Support other comparisons as they become needed.
if ctx.attr.expected_files:
_compare_expected_files(
env,
access_description,
ctx.attr.expected_files,
actual,
)
return analysistest.end(env)
def make_provider_test_rule(config_settings = {}):
"""Returns a new `provider_test`-like rule with custom config settings.
Args:
config_settings: A dictionary of configuration settings and their values
that should be applied during tests.
Returns:
A rule returned by `analysistest.make` that has the `provider_test`
interface and the given config settings.
"""
return analysistest.make(
_provider_test_impl,
attrs = {
"expected_files": attr.string_list(
mandatory = False,
doc = """\
The expected list of files when evaluating the given provider's field.
This list can contain three types of strings:
* A path suffix (`foo/bar/baz.ext`), denoting that a file whose path has the
given suffix must be present.
* A negated path suffix (`-foo/bar/baz.ext`), denoting that a file whose path
has the given suffix must *not* be present.
* A wildcard (`*`), denoting that the expected list of files can be a *subset*
of the actual list. If the wildcard is omitted, the expected list of files
must match the actual list completely; unmatched files will result in a test
failure.
The use of path suffixes allows the test to be unconcerned about specific
configuration details, such as output directories for generated files.
""",
),
"field": attr.string(
mandatory = True,
doc = """\
The field name or dotted field path of the provider that should be tested.
Evaluation of field path components is projected across collections. That is, if
the result of evaluating a field along the path is a depset or a list, then the
result will be normalized into a list and remaining fields in the path will be
evaluated on every item in that list, not on the list itself. Likewise, if such
a field path component is followed by `!`, then any `None` elements that may
have resulted during evaluation will be removed from the list before evaluating
the next component.
""",
),
"provider": attr.string(
mandatory = True,
doc = """\
The name of the provider expected to be propagated by the target under test, and
on which the field will be checked.
Currently, only the following providers are recognized:
* `CcInfo`
* `DefaultInfo`
* `OutputGroupInfo`
* `SwiftInfo`
* `apple_common.Objc`
""",
),
},
config_settings = config_settings,
)
# A default instantiation of the rule when no custom config settings are needed.
provider_test = make_provider_test_rule()
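# Hedged BUILD-file sketch for the default `provider_test` instantiation; target
# names, paths and the field value are illustrative assumptions:
#
#   load("//test/rules:provider_test.bzl", "provider_test")
#
#   provider_test(
#       name = "my_library_swiftmodule_test",
#       expected_files = [
#           "*",
#           "MyLibrary.swiftmodule",
#       ],
#       field = "direct_modules.swift!.swiftmodule",
#       provider = "SwiftInfo",
#       target_under_test = "//examples:MyLibrary",
#   )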
| # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for testing the providers of a target under test."""
load("@build_bazel_rules_swift//swift:swift.bzl", "SwiftInfo")
load("@bazel_skylib//lib:types.bzl", "types")
load(
"@bazel_skylib//lib:unittest.bzl",
"analysistest",
"asserts",
"unittest",
)
_EVALUATE_FIELD_FAILED = provider(
doc = """
A sentinel value returned by `_evaluate_field` when a `None` value is
encountered during the evaluation of a dotted path on any component other than
the last component. This allows the caller to distinguish between a legitimate
`None` value being returned by the entire path vs. an unexpected `None` in an
earlier component.
A `provider` is used here because it is a simple way of getting a known unique
object from Bazel that cannot be equal to any other object.
""",
fields = [],
)
def _evaluate_field(env, source, field):
"""Evaluates a field or field path on an object and returns its value.
This function projects across collections. That is, if the result of
evaluating a field along the path is a depset or a list, then the result
will be normalized into a list and remaining fields in the path will be
evaluated on every item in that list, not on the list itself.
If a field path component in a projected collection is followed by an
exclamation point, then this indicates that any `None` values produced at
that stage of evaluation should be removed from the list before continuing.
If evaluating the path fails because a `None` value is encountered anywhere
before the last component and they are not filtered out, then an assertion
failure is logged and the special value `EVALUATE_FIELD_FAILED` is returned.
This value lets the caller short-circuit additional test logic that may not
be relevant if evaluation is known to have failed.
Args:
env: The analysis test environment.
source: The source object on which to evaluate the field or field path.
field: The field or field path to evaluate. This can be a simple field
name or a dotted path.
Returns:
The result of evaluating the field or field path on the source object.
If a `None` value was encountered during evaluation of a field path
component that was not the final component, then the special value
`_EVALUATE_FIELD_FAILED` is returned.
"""
components = field.split(".")
for component in components:
source = _normalize_collection(source)
filter_nones = component.endswith("!")
if filter_nones:
component = component[:-1]
if types.is_list(source):
if any([item == None for item in source]):
unittest.fail(
env,
"Got 'None' evaluating '{}' on an element in '{}'.".format(
component,
field,
),
)
return _EVALUATE_FIELD_FAILED
# If the elements are lists or depsets, flatten the whole thing into
# a single list.
flattened = []
for item in source:
item = _normalize_collection(item)
if types.is_list(item):
flattened.extend(item)
else:
flattened.append(item)
source = [getattr(item, component, None) for item in flattened]
if filter_nones:
source = [item for item in source if item != None]
else:
if source == None:
unittest.fail(
env,
"Got 'None' evaluating '{}' in '{}'.".format(
component,
field,
),
)
return _EVALUATE_FIELD_FAILED
source = getattr(source, component, None)
if filter_nones:
source = _normalize_collection(source)
if types.is_list(source):
source = [item for item in source if item != None]
else:
unittest.fail(
env,
("Expected to filter 'None' values evaluating '{}' " +
"on an element in '{}', but the result was not a " +
"collection.").format(component, field),
)
return _EVALUATE_FIELD_FAILED
return source
def _lookup_provider_by_name(env, target, provider_name):
"""Returns a provider on a target, given its name.
The `provider_test` rule needs to be able to specify which provider a field
should be looked up on, but it can't take provider objects directly as
attribute values, so we have to use strings and a fixed lookup table to find
them.
If the provider is not recognized or is not propagated by the target, then
an assertion failure is logged and `None` is returned. This lets the caller
short-circuit additional test logic that may not be relevant if the provider
is not present.
Args:
env: The analysis test environment.
target: The target whose provider should be looked up.
provider_name: The name of the provider to return.
Returns:
The provider value, or `None` if it was not propagated by the target.
"""
provider = None
if provider_name == "CcInfo":
provider = CcInfo
elif provider_name == "DefaultInfo":
provider = DefaultInfo
elif provider_name == "OutputGroupInfo":
provider = OutputGroupInfo
elif provider_name == "SwiftInfo":
provider = SwiftInfo
elif provider_name == "apple_common.Objc":
provider = apple_common.Objc
if not provider:
unittest.fail(
env,
"Provider '{}' is not supported.".format(provider_name),
)
return None
if provider in target:
return target[provider]
unittest.fail(
env,
"Target '{}' did not provide '{}'.".format(target.label, provider_name),
)
return None
def _field_access_description(target, provider, field):
"""Returns a string denoting field access to a provider on a target.
This function is used to generate a pretty string that can be used in
assertion failure messages, of the form
`<//package:target>[ProviderInfo].some.field.path`.
Args:
target: The target whose provider is being accessed.
provider: The name of the provider being accessed.
field: The field name or dotted field path being accessed.
Returns:
A string describing the field access that can be used in assertion
failure messages.
"""
return "<{}>[{}].{}".format(target.label, provider, field)
def _prettify(object):
"""Returns a prettified version of the given value for failure messages.
If the object is a list, it will be formatted as a multiline string;
otherwise, it will simply be the `repr` of the value.
Args:
object: The object to prettify.
Returns:
A string that can be used to display the value in a failure message.
"""
object = _normalize_collection(object)
if types.is_list(object):
return ("[\n " +
",\n ".join([repr(item) for item in object]) +
"\n]")
else:
return repr(object)
def _normalize_collection(object):
"""Returns object as a list if it is a collection, otherwise returns itself.
Args:
object: The object to normalize. If it is a list or a depset, it will be
returned as a list. Otherwise, it will be returned unchanged.
Returns:
A list containing the same items in `object` if it is a collection,
otherwise the original object is returned.
"""
if types.is_depset(object):
return object.to_list()
else:
return object
def _compare_expected_files(env, access_description, expected, actual):
"""Implements the `expected_files` comparison.
This compares a set of files retrieved from a provider field against a list
of expected strings that are equal to or suffixes of the paths to those
files, as well as excluded files and a wildcard. See the documentation of
the `expected_files` attribute on the rule definition below for specifics.
Args:
env: The analysis test environment.
access_description: A target/provider/field access description string
printed in assertion failure messages.
expected: The list of expected file path inclusions/exclusions.
actual: The collection of files obtained from the provider.
"""
actual = _normalize_collection(actual)
if (
not types.is_list(actual) or
any([type(item) != "File" for item in actual])
):
unittest.fail(
env,
("Expected '{}' to be a collection of files, " +
"but got a {}: {}.").format(
access_description,
type(actual),
_prettify(actual),
),
)
return
remaining = list(actual)
expected_is_subset = "*" in expected
expected_include = [
s
for s in expected
if not s.startswith("-") and s != "*"
]
expected_exclude = [s[1:] for s in expected if s.startswith("-")]
# For every expected file, pick off the first actual that we find that has
# the expected string as a suffix.
failed = False
for suffix in expected_include:
if not remaining:
# It's a failure if we are still expecting files but there are no
# more actual files.
failed = True
break
found_expected_file = False
for i in range(len(remaining)):
actual_path = remaining[i].path
if actual_path.endswith(suffix):
found_expected_file = True
remaining.pop(i)
break
# It's a failure if we never found a file we expected.
if not found_expected_file:
failed = True
break
# For every file expected to *not* be present, check the list of remaining
# files and fail if we find a match.
for suffix in expected_exclude:
for f in remaining:
if f.path.endswith(suffix):
failed = True
break
# If we found all the expected files, the remaining list should be empty.
# Fail if the list is not empty and we're not looking for a subset.
if not expected_is_subset and remaining:
failed = True
asserts.false(
env,
failed,
"Expected '{}' to match {}, but got {}.".format(
access_description,
_prettify(expected),
_prettify([
f.path if type(f) == "File" else repr(f)
for f in actual
]),
),
)
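# Worked example (illustrative only): with
#
#     expected = ["*", "pkg/lib.swiftmodule", "-lib.swiftdoc"]
#
# and actual files whose paths end in "pkg/lib.swiftmodule" and "pkg/lib.a",
# the include suffix matches and removes the first file, no remaining file
# ends in "lib.swiftdoc", and the "*" wildcard lets "pkg/lib.a" stay
# unmatched, so the assertion passes. Dropping the "*" would make the same
# inputs fail because "pkg/lib.a" would be left over.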
def _provider_test_impl(ctx):
env = analysistest.begin(ctx)
target_under_test = ctx.attr.target_under_test
# If configuration settings were provided, then we have a transition and
# target_under_test will be a list. In that case, get the actual target by
# pulling the first one out.
if types.is_list(target_under_test):
target_under_test = target_under_test[0]
provider_name = ctx.attr.provider
provider = _lookup_provider_by_name(env, target_under_test, provider_name)
if not provider:
return analysistest.end(env)
field = ctx.attr.field
actual = _evaluate_field(env, provider, field)
if actual == _EVALUATE_FIELD_FAILED:
return analysistest.end(env)
access_description = _field_access_description(
target_under_test,
provider_name,
field,
)
# TODO(allevato): Support other comparisons as they become needed.
if ctx.attr.expected_files:
_compare_expected_files(
env,
access_description,
ctx.attr.expected_files,
actual,
)
return analysistest.end(env)
def make_provider_test_rule(config_settings = {}):
"""Returns a new `provider_test`-like rule with custom config settings.
Args:
config_settings: A dictionary of configuration settings and their values
that should be applied during tests.
Returns:
A rule returned by `analysistest.make` that has the `provider_test`
interface and the given config settings.
"""
return analysistest.make(
_provider_test_impl,
attrs = {
"expected_files": attr.string_list(
mandatory = False,
doc = """\
The expected list of files when evaluating the given provider's field.
This list can contain three types of strings:
* A path suffix (`foo/bar/baz.ext`), denoting that a file whose path has the
given suffix must be present.
* A negated path suffix (`-foo/bar/baz.ext`), denoting that a file whose path
has the given suffix must *not* be present.
* A wildcard (`*`), denoting that the expected list of files can be a *subset*
of the actual list. If the wildcard is omitted, the expected list of files
must match the actual list completely; unmatched files will result in a test
failure.
The use of path suffixes allows the test to be unconcerned about specific
configuration details, such as output directories for generated files.
""",
),
"field": attr.string(
mandatory = True,
doc = """\
The field name or dotted field path of the provider that should be tested.
Evaluation of field path components is projected across collections. That is, if
the result of evaluating a field along the path is a depset or a list, then the
result will be normalized into a list and remaining fields in the path will be
evaluated on every item in that list, not on the list itself. Likewise, if such
a field path component is followed by `!`, then any `None` elements that may
have resulted during evaluation will be removed from the list before evaluating
the next component.
""",
),
"provider": attr.string(
mandatory = True,
doc = """\
The name of the provider expected to be propagated by the target under test, and
on which the field will be checked.
Currently, only the following providers are recognized:
* `CcInfo`
* `DefaultInfo`
* `OutputGroupInfo`
* `SwiftInfo`
* `apple_common.Objc`
""",
),
},
config_settings = config_settings,
)
# A default instantiation of the rule when no custom config settings are needed.
provider_test = make_provider_test_rule()
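# Example BUILD usage (a minimal sketch; the load path and target names below
# are hypothetical and not taken from this file):
#
#     load(":provider_test.bzl", "provider_test")
#
#     provider_test(
#         name = "my_library_default_outputs_test",
#         target_under_test = "//pkg:my_library",
#         provider = "DefaultInfo",
#         field = "files",
#         expected_files = [
#             "*",
#             "pkg/my_library.swiftmodule",
#         ],
#     )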
| en | 0.882727 | # Copyright 2019 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Rules for testing the providers of a target under test. A sentinel value returned by `_evaluate_field` when a `None` value is encountered during the evaluation of a dotted path on any component other than the last component. This allows the caller to distinguish between a legitimate `None` value being returned by the entire path vs. an unexpected `None` in an earlier component. A `provider` is used here because it is a simple way of getting a known unique object from Bazel that cannot be equal to any other object. Evaluates a field or field path on an object and returns its value. This function projects across collections. That is, if the result of evaluating a field along the path is a depset or a list, then the result will be normalized into a list and remaining fields in the path will be evaluated on every item in that list, not on the list itself. If a field path component in a projected collection is followed by an exclamation point, then this indicates that any `None` values produced at that stage of evaluation should be removed from the list before continuing. If evaluating the path fails because a `None` value is encountered anywhere before the last component and they are not filtered out, then an assertion failure is logged and the special value `EVALUATE_FIELD_FAILED` is returned. This value lets the caller short-circuit additional test logic that may not be relevant if evaluation is known to have failed. Args: env: The analysis test environment. source: The source object on which to evaluate the field or field path. field: The field or field path to evaluate. This can be a simple field name or a dotted path. Returns: The result of evaluating the field or field path on the source object. If a `None` value was encountered during evaluation of a field path component that was not the final component, then the special value `_EVALUATE_FIELD_FAILED` is returned. # If the elements are lists or depsets, flatten the whole thing into # a single list. Returns a provider on a target, given its name. The `provider_test` rule needs to be able to specify which provider a field should be looked up on, but it can't take provider objects directly as attribute values, so we have to use strings and a fixed lookup table to find them. If the provider is not recognized or is not propagated by the target, then an assertion failure is logged and `None` is returned. This lets the caller short-circuit additional test logic that may not be relevant if the provider is not present. Args: env: The analysis test environment. target: The target whose provider should be looked up. provider_name: The name of the provider to return. Returns: The provider value, or `None` if it was not propagated by the target. Returns a string denoting field access to a provider on a target. 
This function is used to generate a pretty string that can be used in assertion failure messages, of the form `<//package:target>[ProviderInfo].some.field.path`. Args: target: The target whose provider is being accessed. provider: The name of the provider being accessed. field: The field name or dotted field path being accessed. Returns: A string describing the field access that can be used in assertion failure messages. Returns a prettified version of the given value for failure messages. If the object is a list, it will be formatted as a multiline string; otherwise, it will simply be the `repr` of the value. Args: object: The object to prettify. Returns: A string that can be used to display the value in a failure message. Returns object as a list if it is a collection, otherwise returns itself. Args: object: The object to normalize. If it is a list or a depset, it will be returned as a list. Otherwise, it will be returned unchanged. Returns: A list containing the same items in `object` if it is a collection, otherwise the original object is returned. Implements the `expected_files` comparison. This compares a set of files retrieved from a provider field against a list of expected strings that are equal to or suffixes of the paths to those files, as well as excluded files and a wildcard. See the documentation of the `expected_files` attribute on the rule definition below for specifics. Args: env: The analysis test environment. access_description: A target/provider/field access description string printed in assertion failure messages. expected: The list of expected file path inclusions/exclusions. actual: The collection of files obtained from the provider. # For every expected file, pick off the first actual that we find that has # the expected string as a suffix. # It's a failure if we are still expecting files but there are no # more actual files. # It's a failure if we never found a file we expected. # For every file expected to *not* be present, check the list of remaining # files and fail if we find a match. # If we found all the expected files, the remaining list should be empty. # Fail if the list is not empty and we're not looking for a subset. # If configuration settings were provided, then we have a transition and # target_under_test will be a list. In that case, get the actual target by # pulling the first one out. # TODO(allevato): Support other comparisons as they become needed. Returns a new `provider_test`-like rule with custom config settings. Args: config_settings: A dictionary of configuration settings and their values that should be applied during tests. Returns: A rule returned by `analysistest.make` that has the `provider_test` interface and the given config settings. \ The expected list of files when evaluating the given provider's field. This list can contain three types of strings: * A path suffix (`foo/bar/baz.ext`), denoting that a file whose path has the given suffix must be present. * A negated path suffix (`-foo/bar/baz.ext`), denoting that a file whose path has the given suffix must *not* be present. * A wildcard (`*`), denoting that the expected list of files can be a *subset* of the actual list. If the wildcard is omitted, the expected list of files must match the actual list completely; unmatched files will result in a test failure. The use of path suffixes allows the test to be unconcerned about specific configuration details, such as output directories for generated files. \ The field name or dotted field path of the provider that should be tested. 
Evaluation of field path components is projected across collections. That is, if the result of evaluating a field along the path is a depset or a list, then the result will be normalized into a list and remaining fields in the path will be evaluated on every item in that list, not on the list itself. Likewise, if such a field path component is followed by `!`, then any `None` elements that may have resulted during evaluation will be removed from the list before evaluating the next component. \ The name of the provider expected to be propagated by the target under test, and on which the field will be checked. Currently, only the following providers are recognized: * `CcInfo` * `DefaultInfo` * `OutputGroupInfo` * `SwiftInfo` * `apple_common.Objc` # A default instantiation of the rule when no custom config settings are needed. | 1.861793 | 2 |
DataPrev/joinAll.py | TRBaldim/Spark | 0 | 6629984 | #Version 0.0.0
def getEvents (a, b):
    try:
        # list.append returns None, so append first and return the list itself
        a.append(b[0])
        return a
    except:
        return a
def filterByEvent (line):
try:
if str(line[2]).strip() != '':
return (str(line[1]).strip(), str(line[2]).strip() + ";" +str(line[0]).strip())
elif str(line[3]).strip() != '':
return (str(line[1]).strip(), str(line[3]).strip() + ";" +str(line[0]).strip())
else:
return (str(line[1]).strip(), "Inc." + ";" + str(line[0]).strip())
except:
return (str(line[1]).strip(), "ERRO")
def filterDates(x):
splitedInfo = x[1].split("|")
incVal = 0
dateBase = '1800-01-01'
newestId = 'Inc.'
for i in splitedInfo:
if 'Inc.' in i:
incVal = 1
newestId = i.split(";")[1]
dateBase = 0
else:
DateInfo = i.split(";")
if DateInfo[0] > dateBase:
newestId = DateInfo[1]
dateBase = DateInfo[0]
incVal = 0
else:
newestId = 'Inc.'
incVal = 1
dateBase = 0
return (newestId, (x[0], dateBase, incVal))
def filterDivorceDates(x):
dateBase = '2999-01-01'
result = ''
for i in x[1][1]:
if i == '' and result == '':
result = 'NC'
else:
if i < dateBase and i != '' and i != 'NC':
result = i
dateBase = result
return (x[0], result)
def getNotInc (x):
if x[1][0] == 'Inc.':
return 'NONE'
else:
return (x[1][0], 1)
def finalFiltering (x):
#ID_PF, Data_Alteracao, Data_Divorcio, Inconcistencia
return (x[1][1][0], (x[1][1][1], x[1][0], x[1][1][2]))
def getNewstEnd(x):
lEnderecos = x[1].split('|')
baseDate = '1800-01-01'
incon = 0
for i in lEnderecos:
endElement = i.split(';')
while(len(endElement[3]) < 8):
endElement[3] = endElement[3] + '0'
        if endElement[1] == '':  # empty date field: mark as inconsistent but keep the address
incon = 1
idEnd = endElement[0]
idMuni = endElement[2]
CEP = endElement[3]
elif endElement[1] > baseDate:
baseDate = endElement[1]
idMuni = endElement[2]
CEP = endElement[3]
else:
incon = 1
idMuni = 'NaN'
CEP = 'NaN'
return (x[0], (idMuni, CEP, CEP[:4], incon))
def finalFormat(x):
try:
return (x[0], (x[1][0], x[1][1][0], x[1][1][1]))
except:
return None
def finalAggFormat(x):
try:
retorno = (x[0], (x[1][0], x[1][1][0], x[1][1][1], x[1][1][2]))
if retorno[1][0] == None:
return None
elif retorno[1][2] == None:
return None
elif retorno[1][2][1] == '00000000':
return None
return retorno
except:
return None
def calculateRelacoes(x):
relations = x[1]
active = False
inc = 0
contribDays = 0
baseDate = '2999-01-01'
remTotal = 0
rem13Total = 0
remFGTSTotal = 0
for i in relations:
if i[1] == '':
active = True
elif int(i[1]) < 0:
inc = 1
else:
contribDays = contribDays + int(i[1])
baseDate = i[0] if i[0] < baseDate and i[0] != '' else baseDate
try:
remTotal = remTotal + float(i[2])
except:
remTotal = remTotal + 0
try:
rem13Total = rem13Total + float(i[3])
except:
rem13Total = rem13Total + 0
try:
remFGTSTotal = remFGTSTotal + int(float(i[4]))
except:
remFGTSTotal = remFGTSTotal + 0
return (x[0], (contribDays, baseDate if active else 0, '%.2f'%remTotal, '%.2f'%rem13Total, '%.2f'%remFGTSTotal, inc))
def filtering(x):
try:
return x[1][2][1]
except:
return 0
fEnd = sc.textFile("/u01/data/base/ENDERECOS")
fPF = sc.textFile("/u01/data/base/PESSOAS_FISICAS")
fCert = sc.textFile("/u01/data/base/CERTIDOES_CIVIS")
fRes = sc.textFile("/u01/data/base/RESUMO_RELACOES_TRABALHISTAS")
'''
CERTIDOES_CIVIS_PART
'''
#Retorno da Linha: [[id_certidao_civil, id_pessoa_fisica, dt_evento, dt_emissao, dt_separacao, dt_divorcio]]
rddCertidoes = fCert.map(lambda line: line.split("|"))
#Retorno [('1027040844', 'Inc.'), ('1665214649', '1976-12-08;155259490')] -> (PF_ID, 'data mais recente entre dt_evento e dt_emissão; id_certidao_civil')
rddDates = rddCertidoes.map(filterByEvent).reduceByKey(lambda a, b: a + ("|" + b))
#Retorno [('113927770', ('1229477534', '2006-07-15', 0))] -> (CERT_ID, (PF_ID, Data_Mais_Recente, Check_Inconsistencia (0-OK, 1-Inc)))
rddInfo = rddDates.map(filterDates)
#Retorno [('13047457', ('', ''))] -> (CERT_ID, (dt_separacao, dt_divorcio))
rddCertIds = rddCertidoes.map(lambda x: (str(x[0]).strip(), (str(x[4]).strip(), str(x[5]).strip())))
#Retorno [('14733170', 'NC')] -> (CERT_ID, Data Mais Antiga separacao e divorcio (Caso não conste data retorna NC))
rddDivorcio = rddInfo.map(lambda x: (x[0], 1)).join(rddCertIds).map(filterDivorceDates)
#Retorno [('1116310950', 0, 'NC', 1)] -> (PF_ID, dt_atualização mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia)
fullJoined = rddDivorcio.join(rddInfo).map(finalFiltering)
'''
ENDERECOS PART
'''
rddEnderecos = fEnd.map(lambda line: line.split('|'))
#('2090398103', '310267648;2015-05-29|310267648;2015-05-29')
rddEndByPF = rddEnderecos.map(lambda x: (str(x[1]).strip(), (str(x[0]).strip() + ";" + str(x[4]).strip() + ";" + str(x[2]).strip() + ";" + str(x[3]).strip()))).reduceByKey(lambda a, b: a + ("|" + b))
#(ID_PF, (ID_MUNICIPIO, CEP, CEP[:4], 0))
rddPFbyRecentEnd = rddEndByPF.map(getNewstEnd)
partialResult = rddPFbyRecentEnd.fullOuterJoin(fullJoined)
'''
PESSOAS FISICAS
'''
rddPF = fPF.map(lambda line: line.split('|')).map(lambda x: (str(x[0]).strip(), (str(x[1]).strip(), str(x[2]).strip(), str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip(), str(x[8]).strip())))
rddResult = rddPF.fullOuterJoin(partialResult).map(finalFormat).filter(lambda line: line != None)
'''
RELACOES TRABALHISTAS
'''
rddRelacoes = fRes.map(lambda line: line.split("|")).map(lambda x: (str(x[1]).strip(), ((str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip()),)))
rddAgg = rddRelacoes.reduceByKey(lambda a, b: a + b).map(calculateRelacoes)
#('1238510985', ((58, 0, '6762958.48', '772990.68', '0.00', 0), (None, None, ('1999-06-14', 'NC', 0))))
finalResult = rddAgg.fullOuterJoin(rddResult).map(finalAggFormat).filter(lambda line: line != None)
#id_pessoa_fisica|id_pessoa_fisica_dv|dt_nascimento|dt_obito|cs_etnia|cs_estado_civil|cs_grau_instrucao|cs_nacionalidade|cs_sexo
#(id_pessoa_fisica,((id_pessoa_fisica_dv,dt_nascimento,dt_obito,cs_etnia,cs_estado_civil,cs_grau_instrucao,cs_nacionalidade,cs_sexo),(dt_atualização mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia),(ID_MUNICIPIO, CEP, distrito, id_inconcistencia)))
finalResult.sortBy(filtering).saveAsTextFile("/u01/data/Resultados/")
# Scratch notes: sample shapes of the final records.
# (id_pessoa_fisica: '1283983077',
#  arr_objetos:
#  (
#   vida_trabalhista:
#   (15, 0, '128.51', '0.00', '0.00', 0),
#   dados_PF:
#   ('0', '2067-06-13', '', '', '', '7', '10', '3'),
#   dados_endereco:
#   ('354850', '11020150', '1102', 0),
#   dados_relacao_civil
#   (0, 'NC', 1)
#  )
# )
# ('1281289112', ((486, 0, '3133.34', '260.00', '0.00', 0), ('4', '1980-08-21', '', '', '', '', '10', '3'), ('Jesus', '11020130', '1102', 0), ('1996-04-29', 'NC', 0)))
# a = {
#     'ID_PF': x[0],
#     'OBJETOS': {
#         'VIDA_TRABALHISTA': {
#             'DIAS_CONTR': x[1][0][0],
#             'DATA_FIRST_TRAB': x[1][0][1],
#         }
#     }
# }
# ('1214281582', ((647, 0, '463519.16', '8963.99', '0.00', 0), ('2', '2067-06-28', '', '', '4', '9', '10', '3'), ('354850', '11020080', '1102', 0), ('1991-09-06', '2005-12-21', 0)))
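# Minimal sketch (not part of the original script) showing how one of the final
# tuples could be reshaped into the nested dict outlined in the notes above.
# The key names follow those notes; the exact naming is an assumption.
def to_result_dict(x):
    vida, pf, endereco, civil = x[1]
    return {
        'ID_PF': x[0],
        'OBJETOS': {
            'VIDA_TRABALHISTA': {
                'DIAS_CONTR': vida[0],
                'DATA_FIRST_TRAB': vida[1],
                'REM_TOTAL': vida[2],
                'REM_13_TOTAL': vida[3],
                'REM_FGTS_TOTAL': vida[4],
                'INCONSISTENCIA': vida[5],
            },
            'DADOS_PF': pf,
            'DADOS_ENDERECO': endereco,
            'DADOS_RELACAO_CIVIL': civil,
        },
    }
# Usage (sketch): finalResult.map(to_result_dict) yields one dict per pessoa fisica.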
| #Version 0.0.0
def getEvents (a, b):
    try:
        # list.append returns None, so append first and return the list itself
        a.append(b[0])
        return a
    except:
        return a
def filterByEvent (line):
try:
if str(line[2]).strip() != '':
return (str(line[1]).strip(), str(line[2]).strip() + ";" +str(line[0]).strip())
elif str(line[3]).strip() != '':
return (str(line[1]).strip(), str(line[3]).strip() + ";" +str(line[0]).strip())
else:
return (str(line[1]).strip(), "Inc." + ";" + str(line[0]).strip())
except:
return (str(line[1]).strip(), "ERRO")
def filterDates(x):
splitedInfo = x[1].split("|")
incVal = 0
dateBase = '1800-01-01'
newestId = 'Inc.'
for i in splitedInfo:
if 'Inc.' in i:
incVal = 1
newestId = i.split(";")[1]
dateBase = 0
else:
DateInfo = i.split(";")
if DateInfo[0] > dateBase:
newestId = DateInfo[1]
dateBase = DateInfo[0]
incVal = 0
else:
newestId = 'Inc.'
incVal = 1
dateBase = 0
return (newestId, (x[0], dateBase, incVal))
def filterDivorceDates(x):
dateBase = '2999-01-01'
result = ''
for i in x[1][1]:
if i == '' and result == '':
result = 'NC'
else:
if i < dateBase and i != '' and i != 'NC':
result = i
dateBase = result
return (x[0], result)
def getNotInc (x):
if x[1][0] == 'Inc.':
return 'NONE'
else:
return (x[1][0], 1)
def finalFiltering (x):
#ID_PF, Data_Alteracao, Data_Divorcio, Inconcistencia
return (x[1][1][0], (x[1][1][1], x[1][0], x[1][1][2]))
def getNewstEnd(x):
lEnderecos = x[1].split('|')
baseDate = '1800-01-01'
incon = 0
for i in lEnderecos:
endElement = i.split(';')
while(len(endElement[3]) < 8):
endElement[3] = endElement[3] + '0'
        if endElement[1] == '':  # empty date field: mark as inconsistent but keep the address
incon = 1
idEnd = endElement[0]
idMuni = endElement[2]
CEP = endElement[3]
elif endElement[1] > baseDate:
baseDate = endElement[1]
idMuni = endElement[2]
CEP = endElement[3]
else:
incon = 1
idMuni = 'NaN'
CEP = 'NaN'
return (x[0], (idMuni, CEP, CEP[:4], incon))
def finalFormat(x):
try:
return (x[0], (x[1][0], x[1][1][0], x[1][1][1]))
except:
return None
def finalAggFormat(x):
try:
retorno = (x[0], (x[1][0], x[1][1][0], x[1][1][1], x[1][1][2]))
if retorno[1][0] == None:
return None
elif retorno[1][2] == None:
return None
elif retorno[1][2][1] == '00000000':
return None
return retorno
except:
return None
def calculateRelacoes(x):
relations = x[1]
active = False
inc = 0
contribDays = 0
baseDate = '2999-01-01'
remTotal = 0
rem13Total = 0
remFGTSTotal = 0
for i in relations:
if i[1] == '':
active = True
elif int(i[1]) < 0:
inc = 1
else:
contribDays = contribDays + int(i[1])
baseDate = i[0] if i[0] < baseDate and i[0] != '' else baseDate
try:
remTotal = remTotal + float(i[2])
except:
remTotal = remTotal + 0
try:
rem13Total = rem13Total + float(i[3])
except:
rem13Total = rem13Total + 0
try:
remFGTSTotal = remFGTSTotal + int(float(i[4]))
except:
remFGTSTotal = remFGTSTotal + 0
return (x[0], (contribDays, baseDate if active else 0, '%.2f'%remTotal, '%.2f'%rem13Total, '%.2f'%remFGTSTotal, inc))
def filtering(x):
try:
return x[1][2][1]
except:
return 0
fEnd = sc.textFile("/u01/data/base/ENDERECOS")
fPF = sc.textFile("/u01/data/base/PESSOAS_FISICAS")
fCert = sc.textFile("/u01/data/base/CERTIDOES_CIVIS")
fRes = sc.textFile("/u01/data/base/RESUMO_RELACOES_TRABALHISTAS")
'''
CERTIDOES_CIVIS_PART
'''
#Retorno da Linha: [[id_certidao_civil, id_pessoa_fisica, dt_evento, dt_emissao, dt_separacao, dt_divorcio]]
rddCertidoes = fCert.map(lambda line: line.split("|"))
#Retorno [('1027040844', 'Inc.'), ('1665214649', '1976-12-08;155259490')] -> (PF_ID, 'data mais recente entre dt_evento e dt_emissão; id_certidao_civil')
rddDates = rddCertidoes.map(filterByEvent).reduceByKey(lambda a, b: a + ("|" + b))
#Retorno [('113927770', ('1229477534', '2006-07-15', 0))] -> (CERT_ID, (PF_ID, Data_Mais_Recente, Check_Inconsistencia (0-OK, 1-Inc)))
rddInfo = rddDates.map(filterDates)
#Retorno [('13047457', ('', ''))] -> (CERT_ID, (dt_separacao, dt_divorcio))
rddCertIds = rddCertidoes.map(lambda x: (str(x[0]).strip(), (str(x[4]).strip(), str(x[5]).strip())))
#Retorno [('14733170', 'NC')] -> (CERT_ID, Data Mais Antiga separacao e divorcio (Caso não conste data retorna NC))
rddDivorcio = rddInfo.map(lambda x: (x[0], 1)).join(rddCertIds).map(filterDivorceDates)
#Retorno [('1116310950', 0, 'NC', 1)] -> (PF_ID, dt_atualização mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia)
fullJoined = rddDivorcio.join(rddInfo).map(finalFiltering)
'''
ENDERECOS PART
'''
rddEnderecos = fEnd.map(lambda line: line.split('|'))
#('2090398103', '310267648;2015-05-29|310267648;2015-05-29')
rddEndByPF = rddEnderecos.map(lambda x: (str(x[1]).strip(), (str(x[0]).strip() + ";" + str(x[4]).strip() + ";" + str(x[2]).strip() + ";" + str(x[3]).strip()))).reduceByKey(lambda a, b: a + ("|" + b))
#(ID_PF, (ID_MUNICIPIO, CEP, CEP[:4], 0))
rddPFbyRecentEnd = rddEndByPF.map(getNewstEnd)
partialResult = rddPFbyRecentEnd.fullOuterJoin(fullJoined)
'''
PESSOAS FISICAS
'''
rddPF = fPF.map(lambda line: line.split('|')).map(lambda x: (str(x[0]).strip(), (str(x[1]).strip(), str(x[2]).strip(), str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip(), str(x[8]).strip())))
rddResult = rddPF.fullOuterJoin(partialResult).map(finalFormat).filter(lambda line: line != None)
'''
RELACOES TRABALHISTAS
'''
rddRelacoes = fRes.map(lambda line: line.split("|")).map(lambda x: (str(x[1]).strip(), ((str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip()),)))
rddAgg = rddRelacoes.reduceByKey(lambda a, b: a + b).map(calculateRelacoes)
#('1238510985', ((58, 0, '6762958.48', '772990.68', '0.00', 0), (None, None, ('1999-06-14', 'NC', 0))))
finalResult = rddAgg.fullOuterJoin(rddResult).map(finalAggFormat).filter(lambda line: line != None)
#id_pessoa_fisica|id_pessoa_fisica_dv|dt_nascimento|dt_obito|cs_etnia|cs_estado_civil|cs_grau_instrucao|cs_nacionalidade|cs_sexo
#(id_pessoa_fisica,((id_pessoa_fisica_dv,dt_nascimento,dt_obito,cs_etnia,cs_estado_civil,cs_grau_instrucao,cs_nacionalidade,cs_sexo),(dt_atualização mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia),(ID_MUNICIPIO, CEP, distrito, id_inconcistencia)))
finalResult.sortBy(filtering).saveAsTextFile("/u01/data/Resultados/")
# Scratch notes: sample shapes of the final records.
# (id_pessoa_fisica: '1283983077',
#  arr_objetos:
#  (
#   vida_trabalhista:
#   (15, 0, '128.51', '0.00', '0.00', 0),
#   dados_PF:
#   ('0', '2067-06-13', '', '', '', '7', '10', '3'),
#   dados_endereco:
#   ('354850', '11020150', '1102', 0),
#   dados_relacao_civil
#   (0, 'NC', 1)
#  )
# )
# ('1281289112', ((486, 0, '3133.34', '260.00', '0.00', 0), ('4', '1980-08-21', '', '', '', '', '10', '3'), ('Jesus', '11020130', '1102', 0), ('1996-04-29', 'NC', 0)))
# a = {
#     'ID_PF': x[0],
#     'OBJETOS': {
#         'VIDA_TRABALHISTA': {
#             'DIAS_CONTR': x[1][0][0],
#             'DATA_FIRST_TRAB': x[1][0][1],
#         }
#     }
# }
# ('1214281582', ((647, 0, '463519.16', '8963.99', '0.00', 0), ('2', '2067-06-28', '', '', '4', '9', '10', '3'), ('354850', '11020080', '1102', 0), ('1991-09-06', '2005-12-21', 0)))
| pt | 0.603539 | #Version 0.0.0 #ID_PF, Data_Alteracao, Data_Divorcio, Inconcistencia CERTIDOES_CIVIS_PART #Retorno da Linha: [[id_certidao_civil, id_pessoa_fisica, dt_evento, dt_emissao, dt_separacao, dt_divorcio]] #Retorno [('1027040844', 'Inc.'), ('1665214649', '1976-12-08;155259490')] -> (PF_ID, 'data mais recente entre dt_evento e dt_emissรฃo; id_certidao_civil') #Retorno [('113927770', ('1229477534', '2006-07-15', 0))] -> (CERT_ID, (PF_ID, Data_Mais_Recente, Check_Inconsistencia (0-OK, 1-Inc))) #Retorno [('13047457', ('', ''))] -> (CERT_ID, (dt_separacao, dt_divorcio)) #Retorno [('14733170', 'NC')] -> (CERT_ID, Data Mais Antiga separacao e divorcio (Caso nรฃo conste data retorna NC)) #Retorno [('1116310950', 0, 'NC', 1)] -> (PF_ID, dt_atualizaรงรฃo mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia) ENDERECOS PART #('2090398103', '310267648;2015-05-29|310267648;2015-05-29') #(ID_PF, (ID_MUNICIPIO, CEP, CEP[:4], 0)) PESSOAS FISICAS RELACOES TRABALHISTAS #('1238510985', ((58, 0, '6762958.48', '772990.68', '0.00', 0), (None, None, ('1999-06-14', 'NC', 0)))) #id_pessoa_fisica|id_pessoa_fisica_dv|dt_nascimento|dt_obito|cs_etnia|cs_estado_civil|cs_grau_instrucao|cs_nacionalidade|cs_sexo #(id_pessoa_fisica,((id_pessoa_fisica_dv,dt_nascimento,dt_obito,cs_etnia,cs_estado_civil,cs_grau_instrucao,cs_nacionalidade,cs_sexo),(dt_atualizaรงรฃo mais nova, data de divorcio ou separacao mais antiga, check_inconsistencia),(ID_MUNICIPIO, CEP, distrito, id_inconcistencia))) | 2.796461 | 3 |
react/reactLexer.py | caos21/test-ode | 1 | 6629985 | # Generated from react.g4 by ANTLR 4.6
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\16")
buf.write("k\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\3\2\3\2")
buf.write("\7\2(\n\2\f\2\16\2+\13\2\3\3\3\3\3\4\3\4\7\4\61\n\4\f")
buf.write("\4\16\4\64\13\4\3\5\3\5\3\5\5\59\n\5\3\5\3\5\5\5=\n\5")
buf.write("\3\6\3\6\3\6\5\6B\n\6\5\6D\n\6\3\6\3\6\3\6\5\6I\n\6\3")
buf.write("\7\6\7L\n\7\r\7\16\7M\3\b\3\b\3\t\3\t\5\tT\n\t\3\n\3\n")
buf.write("\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17")
buf.write("\3\20\3\20\3\21\3\21\3\22\3\22\3\22\3\22\2\2\23\3\3\5")
buf.write("\4\7\5\t\6\13\2\r\2\17\2\21\2\23\2\25\7\27\b\31\t\33\n")
buf.write("\35\13\37\f!\r#\16\3\2\7\4\2\f\f\17\17\4\2C\\c|\t\2%%")
buf.write("((,-/;B\\aac|\4\2GGgg\5\2\13\f\17\17\"\"n\2\3\3\2\2\2")
buf.write("\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\25\3\2\2\2\2\27")
buf.write("\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3")
buf.write("\2\2\2\2!\3\2\2\2\2#\3\2\2\2\3%\3\2\2\2\5,\3\2\2\2\7.")
buf.write("\3\2\2\2\t\65\3\2\2\2\13H\3\2\2\2\rK\3\2\2\2\17O\3\2\2")
buf.write("\2\21S\3\2\2\2\23U\3\2\2\2\25W\3\2\2\2\27Y\3\2\2\2\31")
buf.write("[\3\2\2\2\33^\3\2\2\2\35`\3\2\2\2\37c\3\2\2\2!e\3\2\2")
buf.write("\2#g\3\2\2\2%)\7%\2\2&(\n\2\2\2\'&\3\2\2\2(+\3\2\2\2)")
buf.write("\'\3\2\2\2)*\3\2\2\2*\4\3\2\2\2+)\3\2\2\2,-\7=\2\2-\6")
buf.write("\3\2\2\2.\62\t\3\2\2/\61\t\4\2\2\60/\3\2\2\2\61\64\3\2")
buf.write("\2\2\62\60\3\2\2\2\62\63\3\2\2\2\63\b\3\2\2\2\64\62\3")
buf.write("\2\2\2\65<\5\13\6\2\668\5\17\b\2\679\5\21\t\28\67\3\2")
buf.write("\2\289\3\2\2\29:\3\2\2\2:;\5\r\7\2;=\3\2\2\2<\66\3\2\2")
buf.write("\2<=\3\2\2\2=\n\3\2\2\2>C\5\r\7\2?A\5!\21\2@B\5\r\7\2")
buf.write("A@\3\2\2\2AB\3\2\2\2BD\3\2\2\2C?\3\2\2\2CD\3\2\2\2DI\3")
buf.write("\2\2\2EF\5!\21\2FG\5\r\7\2GI\3\2\2\2H>\3\2\2\2HE\3\2\2")
buf.write("\2I\f\3\2\2\2JL\5\23\n\2KJ\3\2\2\2LM\3\2\2\2MK\3\2\2\2")
buf.write("MN\3\2\2\2N\16\3\2\2\2OP\t\5\2\2P\20\3\2\2\2QT\5\25\13")
buf.write("\2RT\5\27\f\2SQ\3\2\2\2SR\3\2\2\2T\22\3\2\2\2UV\4\62;")
buf.write("\2V\24\3\2\2\2WX\7-\2\2X\26\3\2\2\2YZ\7/\2\2Z\30\3\2\2")
buf.write("\2[\\\7F\2\2\\]\7]\2\2]\32\3\2\2\2^_\7_\2\2_\34\3\2\2")
buf.write("\2`a\7/\2\2ab\7@\2\2b\36\3\2\2\2cd\7?\2\2d \3\2\2\2ef")
buf.write("\7\60\2\2f\"\3\2\2\2gh\t\6\2\2hi\3\2\2\2ij\b\22\2\2j$")
buf.write("\3\2\2\2\f\2)\628<ACHMS\3\b\2\2")
return buf.getvalue()
class reactLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
COMMENT = 1
SEMICOLON = 2
SYMBOL = 3
SCIENTIFIC_NUMBER = 4
PLUS = 5
MINUS = 6
LDIFFBRACKET = 7
RBRACKET = 8
PRODUCE = 9
EQUAL = 10
POINT = 11
WHITESPACE = 12
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"';'", "'+'", "'-'", "'D['", "']'", "'->'", "'='", "'.'" ]
symbolicNames = [ "<INVALID>",
"COMMENT", "SEMICOLON", "SYMBOL", "SCIENTIFIC_NUMBER", "PLUS",
"MINUS", "LDIFFBRACKET", "RBRACKET", "PRODUCE", "EQUAL", "POINT",
"WHITESPACE" ]
ruleNames = [ "COMMENT", "SEMICOLON", "SYMBOL", "SCIENTIFIC_NUMBER",
"NUMBER", "INTEGER", "E", "SIGN", "DIGIT", "PLUS", "MINUS",
"LDIFFBRACKET", "RBRACKET", "PRODUCE", "EQUAL", "POINT",
"WHITESPACE" ]
grammarFileName = "react.g4"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
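# Minimal usage sketch (not part of the generated file): tokenizing a string
# with this lexer through the antlr4 Python runtime. The sample input is only
# an assumption about what the react.g4 grammar accepts.
if __name__ == '__main__':
    from antlr4 import InputStream, CommonTokenStream
    lexer = reactLexer(InputStream("A + B -> C;"))
    token_stream = CommonTokenStream(lexer)
    token_stream.fill()
    for token in token_stream.tokens:
        # Token.EOF is -1, so guard before indexing symbolicNames.
        name = reactLexer.symbolicNames[token.type] if token.type > 0 else 'EOF'
        print(name, repr(token.text))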
| # Generated from react.g4 by ANTLR 4.6
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\16")
buf.write("k\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\3\2\3\2")
buf.write("\7\2(\n\2\f\2\16\2+\13\2\3\3\3\3\3\4\3\4\7\4\61\n\4\f")
buf.write("\4\16\4\64\13\4\3\5\3\5\3\5\5\59\n\5\3\5\3\5\5\5=\n\5")
buf.write("\3\6\3\6\3\6\5\6B\n\6\5\6D\n\6\3\6\3\6\3\6\5\6I\n\6\3")
buf.write("\7\6\7L\n\7\r\7\16\7M\3\b\3\b\3\t\3\t\5\tT\n\t\3\n\3\n")
buf.write("\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17")
buf.write("\3\20\3\20\3\21\3\21\3\22\3\22\3\22\3\22\2\2\23\3\3\5")
buf.write("\4\7\5\t\6\13\2\r\2\17\2\21\2\23\2\25\7\27\b\31\t\33\n")
buf.write("\35\13\37\f!\r#\16\3\2\7\4\2\f\f\17\17\4\2C\\c|\t\2%%")
buf.write("((,-/;B\\aac|\4\2GGgg\5\2\13\f\17\17\"\"n\2\3\3\2\2\2")
buf.write("\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\25\3\2\2\2\2\27")
buf.write("\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3")
buf.write("\2\2\2\2!\3\2\2\2\2#\3\2\2\2\3%\3\2\2\2\5,\3\2\2\2\7.")
buf.write("\3\2\2\2\t\65\3\2\2\2\13H\3\2\2\2\rK\3\2\2\2\17O\3\2\2")
buf.write("\2\21S\3\2\2\2\23U\3\2\2\2\25W\3\2\2\2\27Y\3\2\2\2\31")
buf.write("[\3\2\2\2\33^\3\2\2\2\35`\3\2\2\2\37c\3\2\2\2!e\3\2\2")
buf.write("\2#g\3\2\2\2%)\7%\2\2&(\n\2\2\2\'&\3\2\2\2(+\3\2\2\2)")
buf.write("\'\3\2\2\2)*\3\2\2\2*\4\3\2\2\2+)\3\2\2\2,-\7=\2\2-\6")
buf.write("\3\2\2\2.\62\t\3\2\2/\61\t\4\2\2\60/\3\2\2\2\61\64\3\2")
buf.write("\2\2\62\60\3\2\2\2\62\63\3\2\2\2\63\b\3\2\2\2\64\62\3")
buf.write("\2\2\2\65<\5\13\6\2\668\5\17\b\2\679\5\21\t\28\67\3\2")
buf.write("\2\289\3\2\2\29:\3\2\2\2:;\5\r\7\2;=\3\2\2\2<\66\3\2\2")
buf.write("\2<=\3\2\2\2=\n\3\2\2\2>C\5\r\7\2?A\5!\21\2@B\5\r\7\2")
buf.write("A@\3\2\2\2AB\3\2\2\2BD\3\2\2\2C?\3\2\2\2CD\3\2\2\2DI\3")
buf.write("\2\2\2EF\5!\21\2FG\5\r\7\2GI\3\2\2\2H>\3\2\2\2HE\3\2\2")
buf.write("\2I\f\3\2\2\2JL\5\23\n\2KJ\3\2\2\2LM\3\2\2\2MK\3\2\2\2")
buf.write("MN\3\2\2\2N\16\3\2\2\2OP\t\5\2\2P\20\3\2\2\2QT\5\25\13")
buf.write("\2RT\5\27\f\2SQ\3\2\2\2SR\3\2\2\2T\22\3\2\2\2UV\4\62;")
buf.write("\2V\24\3\2\2\2WX\7-\2\2X\26\3\2\2\2YZ\7/\2\2Z\30\3\2\2")
buf.write("\2[\\\7F\2\2\\]\7]\2\2]\32\3\2\2\2^_\7_\2\2_\34\3\2\2")
buf.write("\2`a\7/\2\2ab\7@\2\2b\36\3\2\2\2cd\7?\2\2d \3\2\2\2ef")
buf.write("\7\60\2\2f\"\3\2\2\2gh\t\6\2\2hi\3\2\2\2ij\b\22\2\2j$")
buf.write("\3\2\2\2\f\2)\628<ACHMS\3\b\2\2")
return buf.getvalue()
class reactLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
COMMENT = 1
SEMICOLON = 2
SYMBOL = 3
SCIENTIFIC_NUMBER = 4
PLUS = 5
MINUS = 6
LDIFFBRACKET = 7
RBRACKET = 8
PRODUCE = 9
EQUAL = 10
POINT = 11
WHITESPACE = 12
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"';'", "'+'", "'-'", "'D['", "']'", "'->'", "'='", "'.'" ]
symbolicNames = [ "<INVALID>",
"COMMENT", "SEMICOLON", "SYMBOL", "SCIENTIFIC_NUMBER", "PLUS",
"MINUS", "LDIFFBRACKET", "RBRACKET", "PRODUCE", "EQUAL", "POINT",
"WHITESPACE" ]
ruleNames = [ "COMMENT", "SEMICOLON", "SYMBOL", "SCIENTIFIC_NUMBER",
"NUMBER", "INTEGER", "E", "SIGN", "DIGIT", "PLUS", "MINUS",
"LDIFFBRACKET", "RBRACKET", "PRODUCE", "EQUAL", "POINT",
"WHITESPACE" ]
grammarFileName = "react.g4"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| es | 0.246638 | # Generated from react.g4 by ANTLR 4.6 #\16\3\2\7\4\2\f\f\17\17\4\2C\\c|\t\2%%") #\3\2\2\2\3%\3\2\2\2\5,\3\2\2\2\7.") #g\3\2\2\2%)\7%\2\2&(\n\2\2\2\'&\3\2\2\2(+\3\2\2\2)") | 1.288764 | 1 |
corehq/motech/repeaters/views/repeaters.py | akyogi/commcare-hq | 0 | 6629986 | <filename>corehq/motech/repeaters/views/repeaters.py
import json
from collections import namedtuple
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.http import require_POST
from memoized import memoized
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from corehq.apps.accounting.decorators import requires_privilege_with_fallback
from dimagi.utils.post import simple_post
from corehq import toggles, privileges
from corehq.apps.domain.decorators import domain_admin_required
from corehq.apps.domain.views.settings import (
BaseAdminProjectSettingsView,
BaseProjectSettingsView,
)
from corehq.apps.users.decorators import (
require_can_edit_web_users,
require_permission,
)
from corehq.apps.users.models import Permissions
from corehq.motech.const import ALGO_AES, PASSWORD_PLACEHOLDER
from corehq.motech.repeaters.forms import (
CaseRepeaterForm,
FormRepeaterForm,
GenericRepeaterForm,
OpenmrsRepeaterForm,
)
from corehq.motech.repeaters.models import (
BASIC_AUTH,
DIGEST_AUTH,
Repeater,
RepeatRecord,
)
from corehq.motech.repeaters.repeater_generators import RegisterGenerator
from corehq.motech.repeaters.utils import get_all_repeater_types
from corehq.motech.utils import b64_aes_encrypt
RepeaterTypeInfo = namedtuple('RepeaterTypeInfo', 'class_name friendly_name has_config instances')
class DomainForwardingOptionsView(BaseAdminProjectSettingsView):
urlname = 'domain_forwarding'
page_title = ugettext_lazy("Data Forwarding")
template_name = 'repeaters/repeaters.html'
@method_decorator(require_permission(Permissions.edit_motech))
@method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING))
def dispatch(self, request, *args, **kwargs):
return super(BaseProjectSettingsView, self).dispatch(request, *args, **kwargs)
@property
def repeater_types_info(self):
return [
RepeaterTypeInfo(r.__name__, r.friendly_name, r._has_config, r.by_domain(self.domain))
for r in get_all_repeater_types().values() if r.available_for_domain(self.domain)
]
@property
def page_context(self):
return {
'repeater_types_info': self.repeater_types_info,
'pending_record_count': RepeatRecord.count(self.domain),
'user_can_configure': (
self.request.couch_user.is_superuser or
self.request.couch_user.can_edit_motech() or
toggles.IS_CONTRACTOR.enabled(self.request.couch_user.username)
)
}
class BaseRepeaterView(BaseAdminProjectSettingsView):
page_title = ugettext_lazy("Forward Data")
repeater_form_class = GenericRepeaterForm
template_name = 'repeaters/add_form_repeater.html'
@method_decorator(require_permission(Permissions.edit_motech))
@method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING))
def dispatch(self, request, *args, **kwargs):
return super(BaseRepeaterView, self).dispatch(request, *args, **kwargs)
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.repeater_type])
@property
def parent_pages(self):
return [{
'title': DomainForwardingOptionsView.page_title,
'url': reverse(DomainForwardingOptionsView.urlname, args=[self.domain]),
}]
@property
def repeater_type(self):
return self.kwargs['repeater_type']
@property
def page_name(self):
return self.repeater_class.friendly_name
@property
@memoized
def repeater_class(self):
try:
return get_all_repeater_types()[self.repeater_type]
except KeyError:
raise Http404(
"No such repeater {}. Valid types: {}".format(
self.repeater_type, list(get_all_repeater_types())
)
)
@property
def add_repeater_form(self):
return None
@property
def page_context(self):
return {
'form': self.add_repeater_form,
'repeater_type': self.repeater_type,
}
def initialize_repeater(self):
raise NotImplementedError
def make_repeater(self):
repeater = self.initialize_repeater()
return self.set_repeater_attr(repeater, self.add_repeater_form.cleaned_data)
def set_repeater_attr(self, repeater, cleaned_data):
repeater.domain = self.domain
repeater.url = cleaned_data['url']
repeater.auth_type = cleaned_data['auth_type'] or None
repeater.username = cleaned_data['username']
if cleaned_data['password'] != PASSWORD_PLACEHOLDER:
            repeater.password = '${algo}${ciphertext}'.format(
                algo=ALGO_AES,
                ciphertext=b64_aes_encrypt(cleaned_data['password'])
            )
repeater.format = cleaned_data['format']
repeater.notify_addresses_str = cleaned_data['notify_addresses_str']
repeater.skip_cert_verify = cleaned_data['skip_cert_verify']
return repeater
def post_save(self, request, repeater):
pass
def post(self, request, *args, **kwargs):
if self.add_repeater_form.is_valid():
repeater = self.make_repeater()
repeater.save()
return self.post_save(request, repeater)
return self.get(request, *args, **kwargs)
class AddRepeaterView(BaseRepeaterView):
urlname = 'add_repeater'
@property
@memoized
def add_repeater_form(self):
if self.request.method == 'POST':
return self.repeater_form_class(
self.request.POST,
domain=self.domain,
repeater_class=self.repeater_class
)
return self.repeater_form_class(
domain=self.domain,
repeater_class=self.repeater_class
)
def initialize_repeater(self):
return self.repeater_class()
def post_save(self, request, repeater):
messages.success(request, _("Forwarding set up to %s" % repeater.url))
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[self.domain]))
class AddFormRepeaterView(AddRepeaterView):
urlname = 'add_form_repeater'
repeater_form_class = FormRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddFormRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.include_app_id_param = self.add_repeater_form.cleaned_data['include_app_id_param']
return repeater
class AddCaseRepeaterView(AddRepeaterView):
urlname = 'add_case_repeater'
repeater_form_class = CaseRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddCaseRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.white_listed_case_types = self.add_repeater_form.cleaned_data['white_listed_case_types']
repeater.black_listed_users = self.add_repeater_form.cleaned_data['black_listed_users']
return repeater
class AddOpenmrsRepeaterView(AddCaseRepeaterView):
urlname = 'new_openmrs_repeater$'
repeater_form_class = OpenmrsRepeaterForm
page_title = ugettext_lazy("Forward to OpenMRS")
page_name = ugettext_lazy("Forward to OpenMRS")
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddOpenmrsRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.location_id = self.add_repeater_form.cleaned_data['location_id']
repeater.atom_feed_enabled = self.add_repeater_form.cleaned_data['atom_feed_enabled']
return repeater
class AddDhis2RepeaterView(AddRepeaterView):
urlname = 'new_dhis2_repeater$'
repeater_form_class = GenericRepeaterForm
page_title = ugettext_lazy("Forward Forms to DHIS2 as Anonymous Events")
page_name = ugettext_lazy("Forward Forms to DHIS2 as Anonymous Events")
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
class EditRepeaterView(BaseRepeaterView):
urlname = 'edit_repeater'
template_name = 'repeaters/add_form_repeater.html'
@property
def repeater_id(self):
return self.kwargs['repeater_id']
@property
def page_url(self):
# The EditRepeaterView url routes to the correct edit form for its subclasses. It does this with
# `repeater_type` in r'^forwarding/(?P<repeater_type>\w+)/edit/(?P<repeater_id>\w+)/$'
# See corehq/apps/domain/urls.py for details.
return reverse(EditRepeaterView.urlname, args=[self.domain, self.repeater_type, self.repeater_id])
@property
@memoized
def add_repeater_form(self):
if self.request.method == 'POST':
return self.repeater_form_class(
self.request.POST,
domain=self.domain,
repeater_class=self.repeater_class
)
else:
repeater_id = self.kwargs['repeater_id']
repeater = Repeater.get(repeater_id)
data = repeater.to_json()
            data['password'] = PASSWORD_PLACEHOLDER
return self.repeater_form_class(
domain=self.domain,
repeater_class=self.repeater_class,
data=data,
submit_btn_text=_("Update Repeater"),
)
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
if self.request.GET.get('repeater_type'):
self.kwargs['repeater_type'] = self.request.GET['repeater_type']
return super(EditRepeaterView, self).dispatch(request, *args, **kwargs)
def initialize_repeater(self):
return Repeater.get(self.kwargs['repeater_id'])
def post_save(self, request, repeater):
messages.success(request, _("Repeater Successfully Updated"))
if self.request.GET.get('repeater_type'):
return HttpResponseRedirect(
(reverse(self.urlname, args=[self.domain, repeater.get_id]) +
'?repeater_type=' + self.kwargs['repeater_type'])
)
else:
return HttpResponseRedirect(reverse(self.urlname, args=[self.domain, repeater.get_id]))
class EditCaseRepeaterView(EditRepeaterView, AddCaseRepeaterView):
urlname = 'edit_case_repeater'
page_title = ugettext_lazy("Edit Case Repeater")
@property
def page_url(self):
return reverse(AddCaseRepeaterView.urlname, args=[self.domain])
class EditFormRepeaterView(EditRepeaterView, AddFormRepeaterView):
urlname = 'edit_form_repeater'
page_title = ugettext_lazy("Edit Form Repeater")
@property
def page_url(self):
return reverse(AddFormRepeaterView.urlname, args=[self.domain])
class EditOpenmrsRepeaterView(EditRepeaterView, AddOpenmrsRepeaterView):
urlname = 'edit_openmrs_repeater'
page_title = ugettext_lazy("Edit OpenMRS Repeater")
class EditDhis2RepeaterView(EditRepeaterView, AddDhis2RepeaterView):
urlname = 'edit_dhis2_repeater'
page_title = ugettext_lazy("Edit DHIS2 Anonymous Event Repeater")
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def drop_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.retire()
messages.success(request, "Forwarding stopped!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def pause_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.pause()
messages.success(request, "Forwarding paused!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def resume_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.resume()
messages.success(request, "Forwarding resumed!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def test_repeater(request, domain):
url = request.POST["url"]
repeater_type = request.POST['repeater_type']
format = request.POST.get('format', None)
repeater_class = get_all_repeater_types()[repeater_type]
auth_type = request.POST.get('auth_type')
form = GenericRepeaterForm(
{"url": url, "format": format},
domain=domain,
repeater_class=repeater_class
)
if form.is_valid():
url = form.cleaned_data["url"]
format = format or RegisterGenerator.default_format_by_repeater(repeater_class)
generator_class = RegisterGenerator.generator_class_by_repeater_format(repeater_class, format)
generator = generator_class(repeater_class())
fake_post = generator.get_test_payload(domain)
headers = generator.get_headers()
username = request.POST.get('username')
password = request.POST.get('password')
verify = not request.POST.get('skip_cert_verify') == 'true'
if auth_type == BASIC_AUTH:
auth = HTTPBasicAuth(username, password)
elif auth_type == DIGEST_AUTH:
auth = HTTPDigestAuth(username, password)
else:
auth = None
try:
resp = simple_post(fake_post, url, headers=headers, auth=auth, verify=verify)
if 200 <= resp.status_code < 300:
return HttpResponse(json.dumps({"success": True,
"response": resp.text,
"status": resp.status_code}))
else:
return HttpResponse(json.dumps({"success": False,
"response": resp.text,
"status": resp.status_code}))
except Exception as e:
errors = str(e)
return HttpResponse(json.dumps({"success": False, "response": errors}))
else:
return HttpResponse(json.dumps({"success": False, "response": "Please enter a valid url."}))
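# Illustrative sketch (not part of this module): a new case-forwarding service
# would typically get its own view by subclassing AddCaseRepeaterView, in the
# same way AddOpenmrsRepeaterView does above. The class, form, and attribute
# names below are hypothetical.
#
#     class AddMyServiceRepeaterView(AddCaseRepeaterView):
#         urlname = 'add_my_service_repeater'
#         repeater_form_class = MyServiceRepeaterForm
#         page_title = ugettext_lazy("Forward Cases to My Service")
#         page_name = ugettext_lazy("Forward Cases to My Service")
#
#         def set_repeater_attr(self, repeater, cleaned_data):
#             repeater = super(AddMyServiceRepeaterView, self).set_repeater_attr(
#                 repeater, cleaned_data)
#             repeater.my_setting = cleaned_data['my_setting']
#             return repeater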
| <filename>corehq/motech/repeaters/views/repeaters.py
import json
from collections import namedtuple
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.http import require_POST
from memoized import memoized
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from corehq.apps.accounting.decorators import requires_privilege_with_fallback
from dimagi.utils.post import simple_post
from corehq import toggles, privileges
from corehq.apps.domain.decorators import domain_admin_required
from corehq.apps.domain.views.settings import (
BaseAdminProjectSettingsView,
BaseProjectSettingsView,
)
from corehq.apps.users.decorators import (
require_can_edit_web_users,
require_permission,
)
from corehq.apps.users.models import Permissions
from corehq.motech.const import ALGO_AES, PASSWORD_PLACEHOLDER
from corehq.motech.repeaters.forms import (
CaseRepeaterForm,
FormRepeaterForm,
GenericRepeaterForm,
OpenmrsRepeaterForm,
)
from corehq.motech.repeaters.models import (
BASIC_AUTH,
DIGEST_AUTH,
Repeater,
RepeatRecord,
)
from corehq.motech.repeaters.repeater_generators import RegisterGenerator
from corehq.motech.repeaters.utils import get_all_repeater_types
from corehq.motech.utils import b64_aes_encrypt
RepeaterTypeInfo = namedtuple('RepeaterTypeInfo', 'class_name friendly_name has_config instances')
class DomainForwardingOptionsView(BaseAdminProjectSettingsView):
urlname = 'domain_forwarding'
page_title = ugettext_lazy("Data Forwarding")
template_name = 'repeaters/repeaters.html'
@method_decorator(require_permission(Permissions.edit_motech))
@method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING))
def dispatch(self, request, *args, **kwargs):
return super(BaseProjectSettingsView, self).dispatch(request, *args, **kwargs)
@property
def repeater_types_info(self):
return [
RepeaterTypeInfo(r.__name__, r.friendly_name, r._has_config, r.by_domain(self.domain))
for r in get_all_repeater_types().values() if r.available_for_domain(self.domain)
]
@property
def page_context(self):
return {
'repeater_types_info': self.repeater_types_info,
'pending_record_count': RepeatRecord.count(self.domain),
'user_can_configure': (
self.request.couch_user.is_superuser or
self.request.couch_user.can_edit_motech() or
toggles.IS_CONTRACTOR.enabled(self.request.couch_user.username)
)
}
class BaseRepeaterView(BaseAdminProjectSettingsView):
page_title = ugettext_lazy("Forward Data")
repeater_form_class = GenericRepeaterForm
template_name = 'repeaters/add_form_repeater.html'
@method_decorator(require_permission(Permissions.edit_motech))
@method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING))
def dispatch(self, request, *args, **kwargs):
return super(BaseRepeaterView, self).dispatch(request, *args, **kwargs)
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.repeater_type])
@property
def parent_pages(self):
return [{
'title': DomainForwardingOptionsView.page_title,
'url': reverse(DomainForwardingOptionsView.urlname, args=[self.domain]),
}]
@property
def repeater_type(self):
return self.kwargs['repeater_type']
@property
def page_name(self):
return self.repeater_class.friendly_name
@property
@memoized
def repeater_class(self):
try:
return get_all_repeater_types()[self.repeater_type]
except KeyError:
raise Http404(
"No such repeater {}. Valid types: {}".format(
self.repeater_type, list(get_all_repeater_types())
)
)
@property
def add_repeater_form(self):
return None
@property
def page_context(self):
return {
'form': self.add_repeater_form,
'repeater_type': self.repeater_type,
}
def initialize_repeater(self):
raise NotImplementedError
def make_repeater(self):
repeater = self.initialize_repeater()
return self.set_repeater_attr(repeater, self.add_repeater_form.cleaned_data)
def set_repeater_attr(self, repeater, cleaned_data):
repeater.domain = self.domain
repeater.url = cleaned_data['url']
repeater.auth_type = cleaned_data['auth_type'] or None
repeater.username = cleaned_data['username']
if cleaned_data['password'] != PASSWORD_PLACEHOLDER:
            repeater.password = '${algo}${ciphertext}'.format(
                algo=ALGO_AES,
                ciphertext=b64_aes_encrypt(cleaned_data['password'])
            )
repeater.format = cleaned_data['format']
repeater.notify_addresses_str = cleaned_data['notify_addresses_str']
repeater.skip_cert_verify = cleaned_data['skip_cert_verify']
return repeater
def post_save(self, request, repeater):
pass
def post(self, request, *args, **kwargs):
if self.add_repeater_form.is_valid():
repeater = self.make_repeater()
repeater.save()
return self.post_save(request, repeater)
return self.get(request, *args, **kwargs)
class AddRepeaterView(BaseRepeaterView):
urlname = 'add_repeater'
@property
@memoized
def add_repeater_form(self):
if self.request.method == 'POST':
return self.repeater_form_class(
self.request.POST,
domain=self.domain,
repeater_class=self.repeater_class
)
return self.repeater_form_class(
domain=self.domain,
repeater_class=self.repeater_class
)
def initialize_repeater(self):
return self.repeater_class()
def post_save(self, request, repeater):
messages.success(request, _("Forwarding set up to %s" % repeater.url))
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[self.domain]))
class AddFormRepeaterView(AddRepeaterView):
urlname = 'add_form_repeater'
repeater_form_class = FormRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddFormRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.include_app_id_param = self.add_repeater_form.cleaned_data['include_app_id_param']
return repeater
class AddCaseRepeaterView(AddRepeaterView):
urlname = 'add_case_repeater'
repeater_form_class = CaseRepeaterForm
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddCaseRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.white_listed_case_types = self.add_repeater_form.cleaned_data['white_listed_case_types']
repeater.black_listed_users = self.add_repeater_form.cleaned_data['black_listed_users']
return repeater
class AddOpenmrsRepeaterView(AddCaseRepeaterView):
urlname = 'new_openmrs_repeater$'
repeater_form_class = OpenmrsRepeaterForm
page_title = ugettext_lazy("Forward to OpenMRS")
page_name = ugettext_lazy("Forward to OpenMRS")
def set_repeater_attr(self, repeater, cleaned_data):
repeater = super(AddOpenmrsRepeaterView, self).set_repeater_attr(repeater, cleaned_data)
repeater.location_id = self.add_repeater_form.cleaned_data['location_id']
repeater.atom_feed_enabled = self.add_repeater_form.cleaned_data['atom_feed_enabled']
return repeater
class AddDhis2RepeaterView(AddRepeaterView):
urlname = 'new_dhis2_repeater$'
repeater_form_class = GenericRepeaterForm
page_title = ugettext_lazy("Forward Forms to DHIS2 as Anonymous Events")
page_name = ugettext_lazy("Forward Forms to DHIS2 as Anonymous Events")
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
class EditRepeaterView(BaseRepeaterView):
urlname = 'edit_repeater'
template_name = 'repeaters/add_form_repeater.html'
@property
def repeater_id(self):
return self.kwargs['repeater_id']
@property
def page_url(self):
# The EditRepeaterView url routes to the correct edit form for its subclasses. It does this with
# `repeater_type` in r'^forwarding/(?P<repeater_type>\w+)/edit/(?P<repeater_id>\w+)/$'
# See corehq/apps/domain/urls.py for details.
return reverse(EditRepeaterView.urlname, args=[self.domain, self.repeater_type, self.repeater_id])
@property
@memoized
def add_repeater_form(self):
if self.request.method == 'POST':
return self.repeater_form_class(
self.request.POST,
domain=self.domain,
repeater_class=self.repeater_class
)
else:
repeater_id = self.kwargs['repeater_id']
repeater = Repeater.get(repeater_id)
data = repeater.to_json()
data['password'] = <PASSWORD>
return self.repeater_form_class(
domain=self.domain,
repeater_class=self.repeater_class,
data=data,
submit_btn_text=_("Update Repeater"),
)
@method_decorator(domain_admin_required)
def dispatch(self, request, *args, **kwargs):
if self.request.GET.get('repeater_type'):
self.kwargs['repeater_type'] = self.request.GET['repeater_type']
return super(EditRepeaterView, self).dispatch(request, *args, **kwargs)
def initialize_repeater(self):
return Repeater.get(self.kwargs['repeater_id'])
def post_save(self, request, repeater):
messages.success(request, _("Repeater Successfully Updated"))
if self.request.GET.get('repeater_type'):
return HttpResponseRedirect(
(reverse(self.urlname, args=[self.domain, repeater.get_id]) +
'?repeater_type=' + self.kwargs['repeater_type'])
)
else:
return HttpResponseRedirect(reverse(self.urlname, args=[self.domain, repeater.get_id]))
class EditCaseRepeaterView(EditRepeaterView, AddCaseRepeaterView):
urlname = 'edit_case_repeater'
page_title = ugettext_lazy("Edit Case Repeater")
@property
def page_url(self):
return reverse(AddCaseRepeaterView.urlname, args=[self.domain])
class EditFormRepeaterView(EditRepeaterView, AddFormRepeaterView):
urlname = 'edit_form_repeater'
page_title = ugettext_lazy("Edit Form Repeater")
@property
def page_url(self):
return reverse(AddFormRepeaterView.urlname, args=[self.domain])
class EditOpenmrsRepeaterView(EditRepeaterView, AddOpenmrsRepeaterView):
urlname = 'edit_openmrs_repeater'
page_title = ugettext_lazy("Edit OpenMRS Repeater")
class EditDhis2RepeaterView(EditRepeaterView, AddDhis2RepeaterView):
urlname = 'edit_dhis2_repeater'
page_title = ugettext_lazy("Edit DHIS2 Anonymous Event Repeater")
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def drop_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.retire()
messages.success(request, "Forwarding stopped!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def pause_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.pause()
messages.success(request, "Forwarding paused!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def resume_repeater(request, domain, repeater_id):
rep = Repeater.get(repeater_id)
rep.resume()
messages.success(request, "Forwarding resumed!")
return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
@requires_privilege_with_fallback(privileges.DATA_FORWARDING)
def test_repeater(request, domain):
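    # Build a temporary form for the chosen repeater type, generate a test payload,
    # POST it to the supplied URL with the requested auth, and report the result as JSON.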
url = request.POST["url"]
repeater_type = request.POST['repeater_type']
format = request.POST.get('format', None)
repeater_class = get_all_repeater_types()[repeater_type]
auth_type = request.POST.get('auth_type')
form = GenericRepeaterForm(
{"url": url, "format": format},
domain=domain,
repeater_class=repeater_class
)
if form.is_valid():
url = form.cleaned_data["url"]
format = format or RegisterGenerator.default_format_by_repeater(repeater_class)
generator_class = RegisterGenerator.generator_class_by_repeater_format(repeater_class, format)
generator = generator_class(repeater_class())
fake_post = generator.get_test_payload(domain)
headers = generator.get_headers()
username = request.POST.get('username')
password = request.POST.get('password')
verify = not request.POST.get('skip_cert_verify') == 'true'
if auth_type == BASIC_AUTH:
auth = HTTPBasicAuth(username, password)
elif auth_type == DIGEST_AUTH:
auth = HTTPDigestAuth(username, password)
else:
auth = None
try:
resp = simple_post(fake_post, url, headers=headers, auth=auth, verify=verify)
if 200 <= resp.status_code < 300:
return HttpResponse(json.dumps({"success": True,
"response": resp.text,
"status": resp.status_code}))
else:
return HttpResponse(json.dumps({"success": False,
"response": resp.text,
"status": resp.status_code}))
except Exception as e:
errors = str(e)
return HttpResponse(json.dumps({"success": False, "response": errors}))
else:
return HttpResponse(json.dumps({"success": False, "response": "Please enter a valid url."}))
| en | 0.754077 | # The EditRepeaterView url routes to the correct edit form for its subclasses. It does this with # `repeater_type` in r'^forwarding/(?P<repeater_type>\w+)/edit/(?P<repeater_id>\w+)/$' # See corehq/apps/domain/urls.py for details. | 1.668187 | 2 |
Code(2021)/20210116_Thor_NAMA.py | std-freejia/hymni.study | 1 | 6629987 | import sys
import math
import numpy as np
light_x, light_y, initial_tx, initial_ty = [int(i) for i in input().split()]
# Determine which quadrant the light is in relative to the current position
remain_x = light_x - initial_tx
remain_y = light_y - initial_ty
dir_arr = ['N','S','E','W','']
val_arr = [[0,1], [0,-1], [1,0], [-1,0], [0,0]]
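# Adjustment values (indexed like dir_arr) used to update the remaining x/y distance after each one-step move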
# game loop
while True:
remaining_turns = int(input()) # The remaining amount of turns Thor can move. Do not remove this line.
condition_arr = [(remain_y < 0), (remain_y > 0), \
(remain_x > 0), (remain_x < 0), \
(remain_x == 0 or remain_y == 0)]
ch_dir = ''
    # Find only the conditions that are True and get their indices
for cond_idx in np.where(condition_arr)[0]:
ch_dir += str(dir_arr[cond_idx])
remain_x += -1 * val_arr[cond_idx][0]
remain_y += val_arr[cond_idx][1]
print(ch_dir)
| import sys
import math
import numpy as np
light_x, light_y, initial_tx, initial_ty = [int(i) for i in input().split()]
# Determine which quadrant the light is in relative to the current position
remain_x = light_x - initial_tx
remain_y = light_y - initial_ty
dir_arr = ['N','S','E','W','']
val_arr = [[0,1], [0,-1], [1,0], [-1,0], [0,0]]
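# Adjustment values (indexed like dir_arr) used to update the remaining x/y distance after each one-step move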
# game loop
while True:
remaining_turns = int(input()) # The remaining amount of turns Thor can move. Do not remove this line.
condition_arr = [(remain_y < 0), (remain_y > 0), \
(remain_x > 0), (remain_x < 0), \
(remain_x == 0 or remain_y == 0)]
ch_dir = ''
    # Find only the conditions that are True and get their indices
for cond_idx in np.where(condition_arr)[0]:
ch_dir += str(dir_arr[cond_idx])
remain_x += -1 * val_arr[cond_idx][0]
remain_y += val_arr[cond_idx][1]
print(ch_dir)
| ko | 0.949588 | # Determine which quadrant the light is in relative to the current position # game loop # The remaining amount of turns Thor can move. Do not remove this line. # Find only the conditions that are True and get their indices | 2.951753 | 3 |
picmodels/migrations/0051_healthcareserviceexpertise.py | bbcawodu/careadvisors-backend | 0 | 6629988 | <filename>picmodels/migrations/0051_healthcareserviceexpertise.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('picmodels', '0050_auto_20180306_1502'),
]
operations = [
migrations.CreateModel(
name='HealthcareServiceExpertise',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(unique=True, max_length=1000)),
],
),
]
| <filename>picmodels/migrations/0051_healthcareserviceexpertise.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('picmodels', '0050_auto_20180306_1502'),
]
operations = [
migrations.CreateModel(
name='HealthcareServiceExpertise',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(unique=True, max_length=1000)),
],
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.625984 | 2 |
trying_curses.py | Adel-Charef/scripts | 0 | 6629989 | import curses
import time
screen = curses.initscr()
screen.addstr(4, 10, "Hello from (4, 10)!")
screen.refresh()
time.sleep(2)
curses.endwin()
| import curses
import time
screen = curses.initscr()
screen.addstr(4, 10, "Hello from (4, 10)!")
screen.refresh()
time.sleep(2)
curses.endwin()
| none | 1 | 2.409385 | 2 |
|
login.py | nslm/Api-Portal-do-Aluno | 0 | 6629990 | import re
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from robobrowser import RoboBrowser
def Login(matricula,senha):
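    # Submit the portal login form with the given credentials and, on success,
    # scrape the student's name from the returned page.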
url = 'https://portalence.ibge.gov.br/gcad-aluno/'
br = RoboBrowser()
br.open(url)
form = br.get_form()
form['login-form:matricula-aluno'] = matricula
form['login-form:j_idt22'] = senha
br.submit_form(form)
page = str(br.parsed())
loged = False
name = ''
if '<!-- L O G I N & N O M E-->' in page:
loged = True
start = 'Nome: '
end = ' '
name = re.search('%s(.*)%s' % (start, end), page).group(1)
name = name.lower()
return { 'status':loged, 'name':name} | import re
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from robobrowser import RoboBrowser
def Login(matricula,senha):
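    # Submit the portal login form with the given credentials and, on success,
    # scrape the student's name from the returned page.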
url = 'https://portalence.ibge.gov.br/gcad-aluno/'
br = RoboBrowser()
br.open(url)
form = br.get_form()
form['login-form:matricula-aluno'] = matricula
form['login-form:j_idt22'] = senha
br.submit_form(form)
page = str(br.parsed())
loged = False
name = ''
if '<!-- L O G I N & N O M E-->' in page:
loged = True
start = 'Nome: '
end = ' '
name = re.search('%s(.*)%s' % (start, end), page).group(1)
name = name.lower()
return { 'status':loged, 'name':name} | none | 1 | 2.423797 | 2 |
|
ansible/my_env/lib/python2.7/site-packages/ansible/plugins/callback/dense.py | otus-devops-2019-02/yyashkin_infra | 1 | 6629991 | <gh_stars>1-10
# (c) 2016, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: dense
type: stdout
short_description: minimal stdout output
extends_documentation_fragment:
- default_callback
description:
- When in verbose mode it will act the same as the default callback
author:
- <NAME> (@dagwieers)
version_added: "2.3"
requirements:
      - set as stdout in configuration
'''
from collections import MutableMapping, MutableSequence
HAS_OD = False
try:
from collections import OrderedDict
HAS_OD = True
except ImportError:
pass
from ansible.module_utils.six import binary_type, text_type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
import sys
# Design goals:
#
# + On screen there should only be relevant stuff
# - How far are we ? (during run, last line)
# - What issues occurred
# - What changes occurred
# - Diff output (in diff-mode)
#
# + If verbosity increases, act as default output
# So that users can easily switch to default for troubleshooting
#
# + Rewrite the output during processing
# - We use the cursor to indicate where in the task we are.
# Output after the prompt is the output of the previous task.
# - If we would clear the line at the start of a task, there would often
# be no information at all, so we leave it until it gets updated
#
# + Use the same color-conventions of Ansible
#
# + Ensure the verbose output (-v) is also dense.
# Remove information that is not essential (eg. timestamps, status)
# TODO:
#
# + Properly test for terminal capabilities, and fall back to default
# + Modify Ansible mechanism so we don't need to use sys.stdout directly
# + Find an elegant solution for progress bar line wrapping
# FIXME: Importing constants as C simply does not work, beats me :-/
# from ansible import constants as C
class C:
COLOR_HIGHLIGHT = 'white'
COLOR_VERBOSE = 'blue'
COLOR_WARN = 'bright purple'
COLOR_ERROR = 'red'
COLOR_DEBUG = 'dark gray'
COLOR_DEPRECATE = 'purple'
COLOR_SKIP = 'cyan'
COLOR_UNREACHABLE = 'bright red'
COLOR_OK = 'green'
COLOR_CHANGED = 'yellow'
# Taken from Dstat
class vt100:
black = '\033[0;30m'
darkred = '\033[0;31m'
darkgreen = '\033[0;32m'
darkyellow = '\033[0;33m'
darkblue = '\033[0;34m'
darkmagenta = '\033[0;35m'
darkcyan = '\033[0;36m'
gray = '\033[0;37m'
darkgray = '\033[1;30m'
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
blue = '\033[1;34m'
magenta = '\033[1;35m'
cyan = '\033[1;36m'
white = '\033[1;37m'
blackbg = '\033[40m'
redbg = '\033[41m'
greenbg = '\033[42m'
yellowbg = '\033[43m'
bluebg = '\033[44m'
magentabg = '\033[45m'
cyanbg = '\033[46m'
whitebg = '\033[47m'
reset = '\033[0;0m'
bold = '\033[1m'
reverse = '\033[2m'
underline = '\033[4m'
clear = '\033[2J'
# clearline = '\033[K'
clearline = '\033[2K'
save = '\033[s'
restore = '\033[u'
save_all = '\0337'
restore_all = '\0338'
linewrap = '\033[7h'
nolinewrap = '\033[7l'
up = '\033[1A'
down = '\033[1B'
right = '\033[1C'
left = '\033[1D'
colors = dict(
ok=vt100.darkgreen,
changed=vt100.darkyellow,
skipped=vt100.darkcyan,
ignored=vt100.cyanbg + vt100.red,
failed=vt100.darkred,
unreachable=vt100.red,
)
states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
class CallbackModule_dense(CallbackModule_default):
'''
This is the dense callback interface, where screen estate is still valued.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'dense'
def __init__(self):
# From CallbackModule
self._display = display
if HAS_OD:
self.disabled = False
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
# Attributes to remove from results for more density
self.removed_attributes = (
# 'changed',
'delta',
# 'diff',
'end',
'failed',
'failed_when_result',
'invocation',
'start',
'stdout_lines',
)
# Initiate data structures
self.hosts = OrderedDict()
self.keep = False
self.shown_title = False
self.count = dict(play=0, handler=0, task=0)
self.type = 'foo'
# Start immediately on the first line
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
self.disabled = True
def __del__(self):
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
def _add_host(self, result, status):
name = result._host.get_name()
# Add a new status in case a failed task is ignored
if status == 'failed' and result._task.ignore_errors:
status = 'ignored'
# Check if we have to update an existing state (when looping over items)
if name not in self.hosts:
self.hosts[name] = dict(state=status)
elif states.index(self.hosts[name]['state']) < states.index(status):
self.hosts[name]['state'] = status
# Store delegated hostname, if needed
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self.hosts[name]['delegate'] = delegated_vars['ansible_host']
# Print progress bar
self._display_progress(result)
# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)):
# Ensure that tasks with changes/failures stay on-screen
if status in ['changed', 'failed', 'unreachable']:
self.keep = True
if self._display.verbosity == 1:
# Print task title, if needed
self._display_task_banner()
self._display_results(result, status)
def _clean_results(self, result):
        # Remove non-essential attributes
for attr in self.removed_attributes:
if attr in result:
del(result[attr])
# Remove empty attributes (list, dict, str)
for attr in result.copy():
if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
if not result[attr]:
del(result[attr])
def _handle_exceptions(self, result):
if 'exception' in result:
# Remove the exception from the result so it's not shown every time
del result['exception']
if self._display.verbosity == 1:
return "An exception occurred during task execution. To see the full traceback, use -vvv."
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
# Print out each host in its own status-color
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
sys.stdout.write(self.hosts[name]['delegate'] + '>')
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
# if result._result.get('diff', False):
# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
# self.keep = True
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
self._clean_results(result._result)
dump = ''
if result._task.action == 'include':
return
elif status == 'ok':
return
elif status == 'ignored':
dump = self._handle_exceptions(result._result)
elif status == 'failed':
dump = self._handle_exceptions(result._result)
elif status == 'unreachable':
dump = result._result['msg']
if not dump:
dump = self._dump_results(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
sys.stdout.write(colors[status] + status + ': ')
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
else:
sys.stdout.write(result._host.get_name())
sys.stdout.write(': ' + dump + '\n')
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
if status == 'changed':
self._handle_warnings(result._result)
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
# Reset at the start of each play
self.keep = False
self.count.update(dict(handler=0, task=0))
self.count['play'] += 1
self.play = play
# Write the next play on screen IN UPPERCASE, and make it permanent
name = play.get_name().strip()
if not name:
name = 'unnamed'
sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
# Reset at the start of each task
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'task'
# Enumerate task if not setup (task names are too long for dense output)
if task.get_name() != 'setup':
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
# Reset at the start of each handler
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'handler'
# Enumerate handler if not setup (handler names may be too long for dense output)
if task.get_name() != 'setup':
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_cleanup_task_start(self, task):
# TBD
sys.stdout.write('cleanup.')
sys.stdout.flush()
def v2_runner_on_failed(self, result, ignore_errors=False):
self._add_host(result, 'failed')
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
def v2_runner_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_runner_on_unreachable(self, result):
self._add_host(result, 'unreachable')
def v2_runner_on_include(self, included_file):
pass
def v2_runner_on_file_diff(self, result, diff):
sys.stdout.write(vt100.bold)
self.super_ref.v2_runner_on_file_diff(result, diff)
sys.stdout.write(vt100.reset)
def v2_on_file_diff(self, result):
sys.stdout.write(vt100.bold)
self.super_ref.v2_on_file_diff(result)
sys.stdout.write(vt100.reset)
# Old definition in v2.0
def v2_playbook_item_on_ok(self, result):
self.v2_runner_item_on_ok(result)
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
# Old definition in v2.0
def v2_playbook_item_on_failed(self, result):
self.v2_runner_item_on_failed(result)
def v2_runner_item_on_failed(self, result):
self._add_host(result, 'failed')
# Old definition in v2.0
def v2_playbook_item_on_skipped(self, result):
self.v2_runner_item_on_skipped(result)
def v2_runner_item_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
pass
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
# In normal mode screen output should be sufficient, summary is redundant
if self._display.verbosity == 0:
return
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
# When using -vv or higher, simply do the default action
if display.verbosity >= 2 or not HAS_OD:
CallbackModule = CallbackModule_default
else:
CallbackModule = CallbackModule_dense
| # (c) 2016, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: dense
type: stdout
short_description: minimal stdout output
extends_documentation_fragment:
- default_callback
description:
- When in verbose mode it will act the same as the default callback
author:
- <NAME> (@dagwieers)
version_added: "2.3"
requirements:
      - set as stdout in configuration
'''
from collections import MutableMapping, MutableSequence
HAS_OD = False
try:
from collections import OrderedDict
HAS_OD = True
except ImportError:
pass
from ansible.module_utils.six import binary_type, text_type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
import sys
# Design goals:
#
# + On screen there should only be relevant stuff
# - How far are we ? (during run, last line)
# - What issues occurred
# - What changes occurred
# - Diff output (in diff-mode)
#
# + If verbosity increases, act as default output
# So that users can easily switch to default for troubleshooting
#
# + Rewrite the output during processing
# - We use the cursor to indicate where in the task we are.
# Output after the prompt is the output of the previous task.
# - If we would clear the line at the start of a task, there would often
# be no information at all, so we leave it until it gets updated
#
# + Use the same color-conventions of Ansible
#
# + Ensure the verbose output (-v) is also dense.
# Remove information that is not essential (eg. timestamps, status)
# TODO:
#
# + Properly test for terminal capabilities, and fall back to default
# + Modify Ansible mechanism so we don't need to use sys.stdout directly
# + Find an elegant solution for progress bar line wrapping
# FIXME: Importing constants as C simply does not work, beats me :-/
# from ansible import constants as C
class C:
COLOR_HIGHLIGHT = 'white'
COLOR_VERBOSE = 'blue'
COLOR_WARN = 'bright purple'
COLOR_ERROR = 'red'
COLOR_DEBUG = 'dark gray'
COLOR_DEPRECATE = 'purple'
COLOR_SKIP = 'cyan'
COLOR_UNREACHABLE = 'bright red'
COLOR_OK = 'green'
COLOR_CHANGED = 'yellow'
# Taken from Dstat
class vt100:
black = '\033[0;30m'
darkred = '\033[0;31m'
darkgreen = '\033[0;32m'
darkyellow = '\033[0;33m'
darkblue = '\033[0;34m'
darkmagenta = '\033[0;35m'
darkcyan = '\033[0;36m'
gray = '\033[0;37m'
darkgray = '\033[1;30m'
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
blue = '\033[1;34m'
magenta = '\033[1;35m'
cyan = '\033[1;36m'
white = '\033[1;37m'
blackbg = '\033[40m'
redbg = '\033[41m'
greenbg = '\033[42m'
yellowbg = '\033[43m'
bluebg = '\033[44m'
magentabg = '\033[45m'
cyanbg = '\033[46m'
whitebg = '\033[47m'
reset = '\033[0;0m'
bold = '\033[1m'
reverse = '\033[2m'
underline = '\033[4m'
clear = '\033[2J'
# clearline = '\033[K'
clearline = '\033[2K'
save = '\033[s'
restore = '\033[u'
save_all = '\0337'
restore_all = '\0338'
linewrap = '\033[7h'
nolinewrap = '\033[7l'
up = '\033[1A'
down = '\033[1B'
right = '\033[1C'
left = '\033[1D'
colors = dict(
ok=vt100.darkgreen,
changed=vt100.darkyellow,
skipped=vt100.darkcyan,
ignored=vt100.cyanbg + vt100.red,
failed=vt100.darkred,
unreachable=vt100.red,
)
states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
class CallbackModule_dense(CallbackModule_default):
'''
This is the dense callback interface, where screen estate is still valued.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'dense'
def __init__(self):
# From CallbackModule
self._display = display
if HAS_OD:
self.disabled = False
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
# Attributes to remove from results for more density
self.removed_attributes = (
# 'changed',
'delta',
# 'diff',
'end',
'failed',
'failed_when_result',
'invocation',
'start',
'stdout_lines',
)
# Initiate data structures
self.hosts = OrderedDict()
self.keep = False
self.shown_title = False
self.count = dict(play=0, handler=0, task=0)
self.type = 'foo'
# Start immediately on the first line
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
self.disabled = True
def __del__(self):
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
def _add_host(self, result, status):
name = result._host.get_name()
# Add a new status in case a failed task is ignored
if status == 'failed' and result._task.ignore_errors:
status = 'ignored'
# Check if we have to update an existing state (when looping over items)
if name not in self.hosts:
self.hosts[name] = dict(state=status)
elif states.index(self.hosts[name]['state']) < states.index(status):
self.hosts[name]['state'] = status
# Store delegated hostname, if needed
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self.hosts[name]['delegate'] = delegated_vars['ansible_host']
# Print progress bar
self._display_progress(result)
# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)):
# Ensure that tasks with changes/failures stay on-screen
if status in ['changed', 'failed', 'unreachable']:
self.keep = True
if self._display.verbosity == 1:
# Print task title, if needed
self._display_task_banner()
self._display_results(result, status)
def _clean_results(self, result):
        # Remove non-essential attributes
for attr in self.removed_attributes:
if attr in result:
del(result[attr])
# Remove empty attributes (list, dict, str)
for attr in result.copy():
if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
if not result[attr]:
del(result[attr])
def _handle_exceptions(self, result):
if 'exception' in result:
# Remove the exception from the result so it's not shown every time
del result['exception']
if self._display.verbosity == 1:
return "An exception occurred during task execution. To see the full traceback, use -vvv."
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
# Print out each host in its own status-color
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
sys.stdout.write(self.hosts[name]['delegate'] + '>')
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
# if result._result.get('diff', False):
# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
# self.keep = True
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
self._clean_results(result._result)
dump = ''
if result._task.action == 'include':
return
elif status == 'ok':
return
elif status == 'ignored':
dump = self._handle_exceptions(result._result)
elif status == 'failed':
dump = self._handle_exceptions(result._result)
elif status == 'unreachable':
dump = result._result['msg']
if not dump:
dump = self._dump_results(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
sys.stdout.write(colors[status] + status + ': ')
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
else:
sys.stdout.write(result._host.get_name())
sys.stdout.write(': ' + dump + '\n')
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
if status == 'changed':
self._handle_warnings(result._result)
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
# Reset at the start of each play
self.keep = False
self.count.update(dict(handler=0, task=0))
self.count['play'] += 1
self.play = play
# Write the next play on screen IN UPPERCASE, and make it permanent
name = play.get_name().strip()
if not name:
name = 'unnamed'
sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
# Reset at the start of each task
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'task'
# Enumerate task if not setup (task names are too long for dense output)
if task.get_name() != 'setup':
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
# Reset at the start of each handler
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'handler'
# Enumerate handler if not setup (handler names may be too long for dense output)
if task.get_name() != 'setup':
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_cleanup_task_start(self, task):
# TBD
sys.stdout.write('cleanup.')
sys.stdout.flush()
def v2_runner_on_failed(self, result, ignore_errors=False):
self._add_host(result, 'failed')
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
def v2_runner_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_runner_on_unreachable(self, result):
self._add_host(result, 'unreachable')
def v2_runner_on_include(self, included_file):
pass
def v2_runner_on_file_diff(self, result, diff):
sys.stdout.write(vt100.bold)
self.super_ref.v2_runner_on_file_diff(result, diff)
sys.stdout.write(vt100.reset)
def v2_on_file_diff(self, result):
sys.stdout.write(vt100.bold)
self.super_ref.v2_on_file_diff(result)
sys.stdout.write(vt100.reset)
# Old definition in v2.0
def v2_playbook_item_on_ok(self, result):
self.v2_runner_item_on_ok(result)
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
# Old definition in v2.0
def v2_playbook_item_on_failed(self, result):
self.v2_runner_item_on_failed(result)
def v2_runner_item_on_failed(self, result):
self._add_host(result, 'failed')
# Old definition in v2.0
def v2_playbook_item_on_skipped(self, result):
self.v2_runner_item_on_skipped(result)
def v2_runner_item_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
pass
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
# In normal mode screen output should be sufficient, summary is redundant
if self._display.verbosity == 0:
return
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
# When using -vv or higher, simply do the default action
if display.verbosity >= 2 or not HAS_OD:
CallbackModule = CallbackModule_default
else:
CallbackModule = CallbackModule_dense | en | 0.813758 | # (c) 2016, <NAME> <<EMAIL>> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) callback: dense type: stdout short_description: minimal stdout output extends_documentation_fragment: - default_callback description: - When in verbose mode it will act the same as the default callback author: - <NAME> (@dagwieers) version_added: "2.3" requirements: - set as stdout in configuation # Design goals: # # + On screen there should only be relevant stuff # - How far are we ? (during run, last line) # - What issues occurred # - What changes occurred # - Diff output (in diff-mode) # # + If verbosity increases, act as default output # So that users can easily switch to default for troubleshooting # # + Rewrite the output during processing # - We use the cursor to indicate where in the task we are. # Output after the prompt is the output of the previous task. # - If we would clear the line at the start of a task, there would often # be no information at all, so we leave it until it gets updated # # + Use the same color-conventions of Ansible # # + Ensure the verbose output (-v) is also dense. # Remove information that is not essential (eg. timestamps, status) # TODO: # # + Properly test for terminal capabilities, and fall back to default # + Modify Ansible mechanism so we don't need to use sys.stdout directly # + Find an elegant solution for progress bar line wrapping # FIXME: Importing constants as C simply does not work, beats me :-/ # from ansible import constants as C # Taken from Dstat # clearline = '\033[K' This is the dense callback interface, where screen estate is still valued. # From CallbackModule # Attributes to remove from results for more density # 'changed', # 'diff', # Initiate data structures # Start immediately on the first line # Add a new status in case a failed task is ignored # Check if we have to update an existing state (when looping over items) # Store delegated hostname, if needed # Print progress bar # # Ensure that tasks with changes/failures stay on-screen, and during diff-mode # if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)): # Ensure that tasks with changes/failures stay on-screen # Print task title, if needed # Remove non-essential atributes # Remove empty attributes (list, dict, str) # Remove the exception from the result so it's not shown every time # Always rewrite the complete line # Print out each host in its own status-color # if result._result.get('diff', False): # sys.stdout.write('\n' + vt100.linewrap) # self.keep = True # Leave the previous task on screen (as it has changes/errors) # Leave the previous task on screen (as it has changes/errors) # Reset at the start of each play # Write the next play on screen IN UPPERCASE, and make it permanent # Leave the previous task on screen (as it has changes/errors) # Do not clear line, since we want to retain the previous output # Reset at the start of each task # Enumerate task if not setup (task names are too long for dense output) # Write the next task on screen (behind the prompt is the previous output) # Leave the previous task on screen (as it has changes/errors) # Reset at the start of each handler # Enumerate handler if not setup (handler names may be too long for dense output) # Write the next task on screen (behind the prompt is the previous output) # TBD # Old definition in v2.0 # Old definition in v2.0 # Old 
definition in v2.0 # In normal mode screen output should be sufficient, summary is redundant # When using -vv or higher, simply do the default action | 1.78408 | 2 |
kitsune/users/migrations/0019_auto_20190917_0422.py | AndrewDVXI/kitsune | 929 | 6629992 | <reponame>AndrewDVXI/kitsune
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-09-17 04:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0004_product_codename'),
('users', '0018_change_bn_BD_and_bn_IN_to_bn'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='has_subscriptions',
),
migrations.AddField(
model_name='profile',
name='products',
field=models.ManyToManyField(related_name='subscribed_users', to='products.Product'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-09-17 04:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0004_product_codename'),
('users', '0018_change_bn_BD_and_bn_IN_to_bn'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='has_subscriptions',
),
migrations.AddField(
model_name='profile',
name='products',
field=models.ManyToManyField(related_name='subscribed_users', to='products.Product'),
),
] | en | 0.689663 | # -*- coding: utf-8 -*- # Generated by Django 1.11.22 on 2019-09-17 04:22 | 1.542094 | 2 |
src/generate_answer.py | EmmanuelMess/SNLI-NLP-ECI-2019 | 2 | 6629993 | #!/usr/bin/env python
import argparse
import json
import csv
def generate():
"Junta el archivo con las oraciones de test (jsonl)"
" y los resultados de la clasificaciรณn de tu algoritmo (en tu formato)"
" en un archivo csv compatible con el formato de Kaggle"
sentences_filename = ".data/snli_1.0_test_filtered.jsonl"
labels_filename = ".data/test_cls.txt"
output_filename = "result.csv"
with open(output_filename, 'w') as fout:
csv_writer = csv.writer(fout)
csv_writer.writerow(['pairID', 'gold_label'])
for pairID, label in it_ID_label_pairs(sentences_filename, labels_filename):
formatted_label = format_label(label)
csv_writer.writerow([pairID, formatted_label])
def format_label(label):
return label[len("__label__"):]
def it_ID_label_pairs(sentences_filename, labels_filename):
sentence_data = open(sentences_filename, 'r')
labels_data = open(labels_filename, 'r')
for pairID, label in zip(it_ID(sentence_data), it_labels(labels_data)):
yield pairID, label
def it_ID(sentence_data):
for line in sentence_data:
example = json.loads(line)
yield example['pairID']
def it_labels(label_data):
for label in label_data:
        label = label.rstrip('\n')  # strip the trailing newline
yield label
| #!/usr/bin/env python
import argparse
import json
import csv
def generate():
"Junta el archivo con las oraciones de test (jsonl)"
" y los resultados de la clasificaciรณn de tu algoritmo (en tu formato)"
" en un archivo csv compatible con el formato de Kaggle"
sentences_filename = ".data/snli_1.0_test_filtered.jsonl"
labels_filename = ".data/test_cls.txt"
output_filename = "result.csv"
with open(output_filename, 'w') as fout:
csv_writer = csv.writer(fout)
csv_writer.writerow(['pairID', 'gold_label'])
for pairID, label in it_ID_label_pairs(sentences_filename, labels_filename):
formatted_label = format_label(label)
csv_writer.writerow([pairID, formatted_label])
def format_label(label):
return label[len("__label__"):]
def it_ID_label_pairs(sentences_filename, labels_filename):
sentence_data = open(sentences_filename, 'r')
labels_data = open(labels_filename, 'r')
for pairID, label in zip(it_ID(sentence_data), it_labels(labels_data)):
yield pairID, label
def it_ID(sentence_data):
for line in sentence_data:
example = json.loads(line)
yield example['pairID']
def it_labels(label_data):
for label in label_data:
        label = label.rstrip('\n')  # strip the trailing newline
yield label
| es | 0.398079 | #!/usr/bin/env python # strip the trailing newline | 3.131674 | 3 |
src/orders/services/order_shipper.py | vaibhavantil2/education-backend | 151 | 6629994 | <gh_stars>100-1000
from django.conf import settings
from django.utils import timezone
from app.tasks import send_happiness_message, send_mail
from orders.models import Order
class OrderShipper:
"""Ship the order (actualy calls item ship() method)"""
def __init__(self, order: Order, silent: bool = False):
self.order = order
self.silent = silent
def __call__(self):
if self.ship():
self.mark_order_as_shipped()
if not self.order.notification_to_giver_is_sent:
self.send_notification_to_giver()
if not self.silent:
self.send_happiness_message()
def ship(self) -> bool:
"""Ship the order. Returns true if order is shipped"""
desired_date = self.order.desired_shipment_date
if desired_date is None or desired_date <= timezone.now():
self.order.item.ship(to=self.order.user, order=self.order)
return True
return False
def mark_order_as_shipped(self):
self.order.shipped = timezone.now()
self.order.save()
def send_happiness_message(self):
if not settings.HAPPINESS_MESSAGES_CHAT_ID:
return
sum = str(self.order.price).replace('.00', '')
        reason = str(self.order.item) if self.order.giver is None else f'{self.order.item} (подарок)'
        send_happiness_message.delay(text=f'💰+{sum} ₽, {self.order.user}, {reason}')
def send_notification_to_giver(self):
if self.order.giver is None:
return
if self.order.desired_shipment_date is None:
return
send_mail.delay(
to=self.order.giver.email,
template_id='gift-notification-for-giver', # postmark
disable_antispam=True,
ctx={
'item_name': self.order.item.full_name,
'receiver_name': str(self.order.user),
'receiver_email': self.order.user.email,
'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),
},
)
self.order.notification_to_giver_is_sent = True
self.order.save()
| from django.conf import settings
from django.utils import timezone
from app.tasks import send_happiness_message, send_mail
from orders.models import Order
class OrderShipper:
"""Ship the order (actualy calls item ship() method)"""
def __init__(self, order: Order, silent: bool = False):
self.order = order
self.silent = silent
def __call__(self):
if self.ship():
self.mark_order_as_shipped()
if not self.order.notification_to_giver_is_sent:
self.send_notification_to_giver()
if not self.silent:
self.send_happiness_message()
def ship(self) -> bool:
"""Ship the order. Returns true if order is shipped"""
desired_date = self.order.desired_shipment_date
if desired_date is None or desired_date <= timezone.now():
self.order.item.ship(to=self.order.user, order=self.order)
return True
return False
def mark_order_as_shipped(self):
self.order.shipped = timezone.now()
self.order.save()
def send_happiness_message(self):
if not settings.HAPPINESS_MESSAGES_CHAT_ID:
return
sum = str(self.order.price).replace('.00', '')
        reason = str(self.order.item) if self.order.giver is None else f'{self.order.item} (подарок)'
        send_happiness_message.delay(text=f'💰+{sum} ₽, {self.order.user}, {reason}')
def send_notification_to_giver(self):
if self.order.giver is None:
return
if self.order.desired_shipment_date is None:
return
send_mail.delay(
to=self.order.giver.email,
template_id='gift-notification-for-giver', # postmark
disable_antispam=True,
ctx={
'item_name': self.order.item.full_name,
'receiver_name': str(self.order.user),
'receiver_email': self.order.user.email,
'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),
},
)
self.order.notification_to_giver_is_sent = True
self.order.save() | en | 0.725985 | Ship the order (actualy calls item ship() method) Ship the order. Returns true if order is shipped # postmark | 2.45441 | 2 |
setup.py | okcashpro/okshop | 3 | 6629995 | <filename>setup.py<gh_stars>1-10
import os
from setuptools import find_packages, setup
from pip.req import parse_requirements
# TODO: Fix nasty hack ;_;
install_reqs = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt'), session='hack')
reqs = [str(ir.req) for ir in install_reqs]
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-okshop',
version='0.1',
packages=find_packages(),
install_requires=reqs,
include_package_data=True,
license='MIT License',
description='An ebay-like marketplace for okcash',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'License :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]) | <filename>setup.py<gh_stars>1-10
import os
from setuptools import find_packages, setup
from pip.req import parse_requirements
# TODO: Fix nasty hack ;_;
install_reqs = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt'), session='hack')
reqs = [str(ir.req) for ir in install_reqs]
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-okshop',
version='0.1',
packages=find_packages(),
install_requires=reqs,
include_package_data=True,
license='MIT License',
description='An ebay-like marketplace for okcash',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'License :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]) | es | 0.238519 | # TODO: Fix nasty hack ;_; | 1.803246 | 2 |
verif/metric_type.py | yma042/verif | 77 | 6629996 | import sys
import inspect
class MetricType(object):
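    """Base class for metric categories; two instances compare equal when they are of the same class."""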
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def name(cls):
name = cls.__name__
return name
class Deterministic(MetricType):
description = "Deterministic"
pass
class Probabilistic(MetricType):
description = "Probabilistic"
pass
class Threshold(MetricType):
description = "Threshold"
pass
class Diagram(MetricType):
description = "Special diagrams"
pass
| import sys
import inspect
class MetricType(object):
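    """Base class for metric categories; two instances compare equal when they are of the same class."""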
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def name(cls):
name = cls.__name__
return name
class Deterministic(MetricType):
description = "Deterministic"
pass
class Probabilistic(MetricType):
description = "Probabilistic"
pass
class Threshold(MetricType):
description = "Threshold"
pass
class Diagram(MetricType):
description = "Special diagrams"
pass
| none | 1 | 2.931132 | 3 |
|
setup.py | jaketreacher/host_open | 0 | 6629997 | from setuptools import setup, Command
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
setup(
name='hostopen',
version='0.1.1',
description='Open files/directories in a vagrant synced folder on the host',
long_description=long_description,
url='https://github.com/jaketreacher/hostopen',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities'
],
python_requires='>=3',
packages=['hostopen', ],
entry_points={
'console_scripts': [
'hostopen = hostopen.client:main',
'hostopen-server = hostopen.server:main'
],
},
)
| from setuptools import setup, Command
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
setup(
name='hostopen',
version='0.1.1',
description='Open files/directories in a vagrant synced folder on the host',
long_description=long_description,
url='https://github.com/jaketreacher/hostopen',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities'
],
python_requires='>=3',
packages=['hostopen', ],
entry_points={
'console_scripts': [
'hostopen = hostopen.client:main',
'hostopen-server = hostopen.server:main'
],
},
)
| none | 1 | 1.584973 | 2 |
|
util/chplenv/chpl_wide_pointers.py | TomsyPaul/tomsychapel | 1 | 6629998 | <reponame>TomsyPaul/tomsychapel
#!/usr/bin/env python
import optparse
import os
import re
import sys
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
import overrides
from utils import memoize
@memoize
def get(flag='wide'):
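    """Return the CHPL_WIDE_POINTERS value (flag='wide') or the matching compiler define string (flag='define')."""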
wide_val = overrides.get('CHPL_WIDE_POINTERS', 'struct')
define = ''
if wide_val == 'struct':
define = '-DCHPL_WIDE_POINTER_STRUCT'
else:
match = re.match(r'node(\d+)', wide_val)
if match:
node_bits = int(match.group(1))
if node_bits < 2 or node_bits > 60:
sys.stderr.write("Error: Bad wide pointer node bit width: {0}\n".format(node_bits))
else:
define = "-DCHPL_WIDE_POINTER_PACKED " \
"-DCHPL_WIDE_POINTER_NODE_BITS={0}".format(node_bits)
else:
sys.stderr.write("Error: Unknown wide pointer format: {0}\n".format(wide_val))
if flag == 'wide':
return wide_val
elif flag == 'define':
return define
else:
raise ValueError("Invalid flag: '{0}'".format(flag))
def _main():
parser = optparse.OptionParser(usage='usage: %prog [--wide|define])')
parser.add_option('--wide', dest='flag', action='store_const',
const='wide', default='wide')
parser.add_option('--define', dest='flag', action='store_const',
const='define')
(options, args) = parser.parse_args()
wide_val = get(options.flag)
sys.stdout.write("{0}\n".format(wide_val))
if __name__ == '__main__':
_main()
| #!/usr/bin/env python
import optparse
import os
import re
import sys
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
import overrides
from utils import memoize
@memoize
def get(flag='wide'):
wide_val = overrides.get('CHPL_WIDE_POINTERS', 'struct')
define = ''
if wide_val == 'struct':
define = '-DCHPL_WIDE_POINTER_STRUCT'
else:
match = re.match(r'node(\d+)', wide_val)
if match:
node_bits = int(match.group(1))
if node_bits < 2 or node_bits > 60:
sys.stderr.write("Error: Bad wide pointer node bit width: {0}\n".format(node_bits))
else:
define = "-DCHPL_WIDE_POINTER_PACKED " \
"-DCHPL_WIDE_POINTER_NODE_BITS={0}".format(node_bits)
else:
sys.stderr.write("Error: Unknown wide pointer format: {0}\n".format(wide_val))
if flag == 'wide':
return wide_val
elif flag == 'define':
return define
else:
raise ValueError("Invalid flag: '{0}'".format(flag))
def _main():
parser = optparse.OptionParser(usage='usage: %prog [--wide|define])')
parser.add_option('--wide', dest='flag', action='store_const',
const='wide', default='wide')
parser.add_option('--define', dest='flag', action='store_const',
const='define')
(options, args) = parser.parse_args()
wide_val = get(options.flag)
sys.stdout.write("{0}\n".format(wide_val))
if __name__ == '__main__':
_main() | ru | 0.26433 | #!/usr/bin/env python | 2.302363 | 2 |
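A hedged invocation sketch for the chpl_wide_pointers script above, assuming overrides.get() falls back to the process environment (that module is not shown here); the expected output is read off the node(\d+) branch in get().
# Assumes overrides.get() reads CHPL_WIDE_POINTERS from the environment; the script
# path is relative to a Chapel checkout and may need adjusting.
import os
import subprocess

env = dict(os.environ, CHPL_WIDE_POINTERS='node16')
out = subprocess.check_output(
    ['python', 'util/chplenv/chpl_wide_pointers.py', '--define'], env=env)
print(out.decode().strip())
# Expected, per the node(\d+) branch above (2 <= bits <= 60):
# -DCHPL_WIDE_POINTER_PACKED -DCHPL_WIDE_POINTER_NODE_BITS=16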
setup.py | nwtti/rainbowstream | 1 | 6629999 | from setuptools import setup, find_packages
import os
import os.path
# Bumped version
version = '1.2.3'
# Require
install_requires = [
"python-dateutil",
"arrow",
"requests",
"pyfiglet",
"twitter",
"Pillow",
"PySocks"
]
# Copy default config if not exists
default = os.path.expanduser("~") + os.sep + '.rainbow_config.json'
if not os.path.isfile(default):
cmd = 'cp rainbowstream/colorset/config ' + default
os.system(cmd)
cmd = 'chmod 777 ' + default
os.system(cmd)
# Setup
setup(name='rainbowstream',
version=version,
description="A smart and nice Twitter client on terminal.",
long_description=open("./README.rst", "r").read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
keywords='twitter, command-line tools, stream API',
author='<NAME>',
author_email='<EMAIL>',
url='http://www.rainbowstream.org/',
license='MIT License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
rainbowstream=rainbowstream.rainbow:fly
""",
)
| from setuptools import setup, find_packages
import os
import os.path
# Bumped version
version = '1.2.3'
# Require
install_requires = [
"python-dateutil",
"arrow",
"requests",
"pyfiglet",
"twitter",
"Pillow",
"PySocks"
]
# Copy default config if not exists
default = os.path.expanduser("~") + os.sep + '.rainbow_config.json'
if not os.path.isfile(default):
cmd = 'cp rainbowstream/colorset/config ' + default
os.system(cmd)
cmd = 'chmod 777 ' + default
os.system(cmd)
# Setup
setup(name='rainbowstream',
version=version,
description="A smart and nice Twitter client on terminal.",
long_description=open("./README.rst", "r").read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
keywords='twitter, command-line tools, stream API',
author='<NAME>',
author_email='<EMAIL>',
url='http://www.rainbowstream.org/',
license='MIT License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
rainbowstream=rainbowstream.rainbow:fly
""",
)
| en | 0.413156 | # Bumped version # Require # Copy default config if not exists # Setup # -*- Entry points: -*- [console_scripts] rainbowstream=rainbowstream.rainbow:fly | 1.817553 | 2 |
plantcv/plantcv/hyperspectral/extract_index.py | JamesChooWK/plantcv | 0 | 6630000 | <filename>plantcv/plantcv/hyperspectral/extract_index.py
# Extract one of the predefined indices from a hyperspectral datacube
import os
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import Spectral_data
from plantcv.plantcv.hyperspectral import _find_closest
def extract_index(array, index="NDVI", distance=20):
"""Pull out indices of interest from a hyperspectral datacube.
Inputs:
array = hyperspectral data instance
index = index of interest, either "ndvi", "gdvi", or "savi"
distance = how lenient to be if the required wavelengths are not available
Returns:
index_array = Index data as a Spectral_data instance
:param array: __main__.Spectral_data
:param index: str
:param distance: int
:return index_array: __main__.Spectral_data
"""
params.device += 1
# Min and max available wavelength will be used to determine if an index can be extracted
max_wavelength = float(array.max_wavelength)
min_wavelength = float(array.min_wavelength)
# Dictionary of wavelength and it's index in the list
wavelength_dict = array.wavelength_dict.copy()
array_data = array.array_data.copy()
if index.upper() == "NDVI":
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 670:
# Obtain index that best represents NIR and red bands
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 670)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = (nir - red) / (nir + red)
else:
fatal_error("Available wavelengths are not suitable for calculating NDVI. Try increasing fudge factor.")
elif index.upper() == "GDVI":
# Green Difference Vegetation Index [Sripada et al. (2006)]
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 680:
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 680)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = nir - red
else:
fatal_error("Available wavelengths are not suitable for calculating GDVI. Try increasing fudge factor.")
elif index.upper() == "SAVI":
# Soil Adjusted Vegetation Index [Huete et al. (1988)]
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 680:
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 680)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = (1.5 * (nir - red)) / (red + nir + 0.5)
else:
fatal_error("Available wavelengths are not suitable for calculating SAVI. Try increasing fudge factor.")
else:
fatal_error(index + " is not one of the currently available indices for this function.")
# Reshape array into hyperspectral datacube shape
index_array_raw = np.transpose(np.transpose(index_array_raw)[0])
# Resulting array is float 32 from -1 to 1, transform into uint8 for plotting
all_positive = np.add(index_array_raw, np.ones(np.shape(index_array_raw)))
data = all_positive.astype(np.float64) / 2 # normalize the data to 0 - 1
index_array = (255 * data).astype(np.uint8) # scale to 255
index_array = Spectral_data(array_data=index_array, max_wavelength=0,
min_wavelength=0, d_type=np.uint8,
wavelength_dict={}, samples=array.samples,
lines=array.lines, interleave=array.interleave,
wavelength_units=array.wavelength_units, array_type="index_" + index.lower(),
pseudo_rgb=None, filename=array.filename, default_bands=None)
if params.debug == "plot":
plot_image(index_array.array_data)
elif params.debug == "print":
print_image(index_array.array_data,
os.path.join(params.debug_outdir, str(params.device) + index + "_index.png"))
return index_array
| <filename>plantcv/plantcv/hyperspectral/extract_index.py
# Extract one of the predefined indices from a hyperspectral datacube
import os
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import Spectral_data
from plantcv.plantcv.hyperspectral import _find_closest
def extract_index(array, index="NDVI", distance=20):
"""Pull out indices of interest from a hyperspectral datacube.
Inputs:
array = hyperspectral data instance
index = index of interest, either "ndvi", "gdvi", or "savi"
distance = how lenient to be if the required wavelengths are not available
Returns:
index_array = Index data as a Spectral_data instance
:param array: __main__.Spectral_data
:param index: str
:param distance: int
:return index_array: __main__.Spectral_data
"""
params.device += 1
# Min and max available wavelength will be used to determine if an index can be extracted
max_wavelength = float(array.max_wavelength)
min_wavelength = float(array.min_wavelength)
# Dictionary of wavelength and it's index in the list
wavelength_dict = array.wavelength_dict.copy()
array_data = array.array_data.copy()
if index.upper() == "NDVI":
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 670:
# Obtain index that best represents NIR and red bands
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 670)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = (nir - red) / (nir + red)
else:
fatal_error("Available wavelengths are not suitable for calculating NDVI. Try increasing fudge factor.")
elif index.upper() == "GDVI":
# Green Difference Vegetation Index [Sripada et al. (2006)]
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 680:
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 680)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = nir - red
else:
fatal_error("Available wavelengths are not suitable for calculating GDVI. Try increasing fudge factor.")
elif index.upper() == "SAVI":
# Soil Adjusted Vegetation Index [Huete et al. (1988)]
if (max_wavelength + distance) >= 800 and (min_wavelength - distance) <= 680:
nir_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 800)
red_index = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 680)
nir = (array_data[:, :, [nir_index]])
red = (array_data[:, :, [red_index]])
index_array_raw = (1.5 * (nir - red)) / (red + nir + 0.5)
else:
fatal_error("Available wavelengths are not suitable for calculating SAVI. Try increasing fudge factor.")
else:
fatal_error(index + " is not one of the currently available indices for this function.")
# Reshape array into hyperspectral datacube shape
index_array_raw = np.transpose(np.transpose(index_array_raw)[0])
# Resulting array is float 32 from -1 to 1, transform into uint8 for plotting
all_positive = np.add(index_array_raw, np.ones(np.shape(index_array_raw)))
data = all_positive.astype(np.float64) / 2 # normalize the data to 0 - 1
index_array = (255 * data).astype(np.uint8) # scale to 255
index_array = Spectral_data(array_data=index_array, max_wavelength=0,
min_wavelength=0, d_type=np.uint8,
wavelength_dict={}, samples=array.samples,
lines=array.lines, interleave=array.interleave,
wavelength_units=array.wavelength_units, array_type="index_" + index.lower(),
pseudo_rgb=None, filename=array.filename, default_bands=None)
if params.debug == "plot":
plot_image(index_array.array_data)
elif params.debug == "print":
print_image(index_array.array_data,
os.path.join(params.debug_outdir, str(params.device) + index + "_index.png"))
return index_array
| en | 0.709746 | # Extract one of the predefined indices from a hyperspectral datacube Pull out indices of interest from a hyperspectral datacube. Inputs: array = hyperspectral data instance index = index of interest, either "ndvi", "gdvi", or "savi" distance = how lenient to be if the required wavelengths are not available Returns: index_array = Index data as a Spectral_data instance :param array: __main__.Spectral_data :param index: str :param distance: int :return index_array: __main__.Spectral_data # Min and max available wavelength will be used to determine if an index can be extracted # Dictionary of wavelength and it's index in the list # Obtain index that best represents NIR and red bands # Green Difference Vegetation Index [Sripada et al. (2006)] # Soil Adjusted Vegetation Index [Huete et al. (1988)] # Reshape array into hyperspectral datacube shape # Resulting array is float 32 from -1 to 1, transform into uint8 for plotting # normalize the data to 0 - 1 # scale to 255 | 2.998292 | 3 |
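To make the band arithmetic in extract_index above concrete, here is a standalone NumPy sketch of the NDVI branch and the final uint8 rescaling, using invented reflectance values rather than a real datacube.
# Standalone sketch of the NDVI branch above; reflectance values are invented.
import numpy as np

nir = np.array([[0.62, 0.55], [0.70, 0.48]])  # band closest to 800 nm
red = np.array([[0.10, 0.12], [0.08, 0.20]])  # band closest to 670 nm

ndvi = (nir - red) / (nir + red)              # raw index in [-1, 1]

# Same rescaling as the function: shift to [0, 2], normalise to [0, 1], scale to 255.
scaled = (255 * ((ndvi + 1.0) / 2.0)).astype(np.uint8)

print(np.round(ndvi, 3))   # e.g. 0.722 for the first pixel
print(scaled)              # e.g. 219 for the first pixel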
modality/util/ClustData.py | tompollard/modality | 16 | 6630001 | <reponame>tompollard/modality<filename>modality/util/ClustData.py
from __future__ import unicode_literals
from collections import Counter
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
from . import MC_error_check
class ClustData(object):
'''
Object for handling clusters in data.
Can find closest clusters in data set using
Bhattacharyya distance.
'''
def __init__(self, data, labels, excludelab=None):
if excludelab is None:
excludelab = []
self.label_names = [lab for lab in np.unique(labels) if not lab in excludelab]
self.labels = labels
self.K = len(self.label_names)
self.data = data
self.n, self.d = data.shape
self._clusters = {}
self.bhattacharyya_measure = bhattacharyya_coefficient
def __iter__(self):
return iter(self.label_names)
def add_consensus_labelings(self, labels):
consensus_labels = self.labels*0
for k in range(self.K):
cc = ConsensusCluster(self.labels, k)
for lab in labels:
cc.add_labeling(lab)
consensus_labels[cc.in_cluster] = k
self.labels = consensus_labels
def cluster(self, k):
try:
return self._clusters[k]
except KeyError:
self._clusters[k] = self.data[self.labels == k, :]
return self._clusters[k]
def in_cluster(self, ks):
incl = np.zeros((self.n, len(ks)), dtype='bool')
for i, k in enumerate(ks):
incl[self.labels == k] = 1
return np.any(incl, axis=1)
def get_closest(self, n):
ind_1d = np.argpartition(-self.bhattacharyya_coefficient_toother, n,
axis=None)[:n]
ind_1d = ind_1d[self.bhattacharyya_coefficient_toother.ravel()[ind_1d] > 0]
ind = np.unravel_index(ind_1d, (self.K, self.K))
return zip([self.label_names[i] for i in ind[0]],
[self.label_names[i] for i in ind[1]])[:n]
def most_discriminating_dim(self, k1, k2):
bhd_1d = np.zeros(self.d)
for dd in range(self.d):
bhd_1d[dd] = self.bhattacharyya_measure(
self.cluster(k1)[:, dd], self.cluster(k2)[:, dd])
return np.argmin(bhd_1d)
def split_in_other_labelings(self, k1, k2, labelings):
'''
Check if most common label in k1 (by other labelings) is
different from most common label in k2.
'''
diffs = 0
for label in labelings:
most_common_k1 = Counter(label[self.labels == k1]).most_common()[0][0]
most_common_k2 = Counter(label[self.labels == k2]).most_common()[0][0]
if most_common_k1 != most_common_k2:
diffs += 1
return diffs
def scatterplot_most_discriminating_dim(self, k1, k2, axs):
dim = self.most_discriminating_dim(k1, k2)
for d, ax in enumerate(axs):
self.scatterplot([k1, k2], [d, dim], ax)
def hist2d_most_discriminating_dim(self, k1, k2, axs, **figargs):
dim = self.most_discriminating_dim(k1, k2)
for d, ax in enumerate(axs):
self.hist2d([k1, k2], [d, dim], ax, **figargs)
def scatterplot(self, ks, dim, ax):
cmap = plt.get_cmap('gist_rainbow')
K = len(ks)
colors = [cmap((0.2+k)*1./(K-1)) for k in range(K)]
for k, color in zip(ks, colors):
ax.scatter(self.cluster(k)[:, dim[0]], self.cluster(k)[:, dim[1]],
color=color, marker='+')
def hist2d(self, ks, dim, ax, **figargs):
data = np.vstack([self.cluster(k) for k in ks])
ax.hist2d(data[:, dim[0]], data[:, dim[1]], **figargs)
def boxplot_closest(self, n):
closest = self.get_closest(n)
n = len(closest)
fig, axs = plt.subplots(n, squeeze=False, figsize=(4, (n-1)*1.3+1))
for ax, ind in zip(axs.ravel(), closest):
for k in ind:
ax.boxplot(np.hsplit(self.cluster(k), self.d))
ax.set_title('Cluster {} and {}'.format(*ind))
def hist_closest(self, n):
closest = self.get_closest(n)
n = len(closest)
fig, axs = plt.subplots(n, self.d, squeeze=False, figsize=(4+(self.d-1)*2, (n-1)*1.3+1))
for ax_c, ind in zip(axs, closest):
ranges = zip(np.minimum(np.min(self.cluster(ind[0]), axis=0), np.min(self.cluster(ind[1]), axis=0)),
np.maximum(np.max(self.cluster(ind[0]), axis=0), np.max(self.cluster(ind[1]), axis=0)))
for dd, (ax, range_) in enumerate(zip(ax_c, ranges)):
for color, k in zip(['blue', 'red'], ind):
ax.hist(self.cluster(k)[:, dd], bins=20, range=range_, color=color, alpha=0.6)
#ax.set_ylim(0, 200)
ax.set_title('Cluster {} and {}'.format(*ind))
@property
def bhattacharyya_coefficient_toother(self):
try:
return self._bhattacharyya_coefficient_toother
except AttributeError:
bdb = np.zeros((self.K, self.K))
for i, k in enumerate(self):
for j, kk in enumerate(self):
if j <= i:
continue
bdb[i, j] = self.bhattacharyya_measure(
self.cluster(k), self.cluster(kk))
self._bhattacharyya_coefficient_toother = bdb
return bdb
@property
def bhattacharyya_coefficient_toself(self):
try:
return self._bhattacharyya_coefficient_toself
except AttributeError:
bdw = np.zeros(self.K)
for i, k in enumerate(self):
bdw[i] = self.bhattacharyya_measure(
self.cluster(k), self.cluster(k))
self._bhattacharyya_coefficient_toself = bdw
return bdw
@property
def bhattacharyya_distances(self):
bhattacharyya_coefficients = (
self.bhattacharyya_coefficient_toother +
self.bhattacharyya_coefficient_toother.T +
np.diag(self.bhattacharyya_coefficient_toself))
return -np.log(bhattacharyya_coefficients)
def plot_bhattacharrya(self):
plt.matshow(self.bhattacharyya_distances)
class ConsensusCluster(object):
'''
For finding a cluster that is common across a number of
labelings.
'''
def __init__(self, labels, k):
self.in_cluster = labels == k
@property
def size(self):
return np.sum(self.in_cluster)
def add_labeling(self, labels):
k = Counter(labels[self.in_cluster]).most_common(1)[0][0]
self.in_cluster *= labels == k
return k
def select_data(self, data):
return data[self.in_cluster, :]
def hist(self, data, bounds=(-np.inf, np.inf), fig=None):
d = data.shape[1]
data_cc = self.select_data(data)
if fig is None:
fig = plt.figure()
for dd in range(d):
ax = fig.add_subplot(1, d, dd+1)
data_cc_d = data_cc[:, dd]
ax.hist(data_cc_d[(data_cc_d > bounds[0])*(data_cc_d < bounds[1])], bins=100)
for ax in fig.axes:
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
def hist2d(self, data, fig=None):
d = data.shape[1]
data_cc = self.select_data(data)
if fig is None:
fig = plt.figure()
for dd in range(d):
for ddd in range(dd+1, d):
ax = fig.add_subplot(d, d, dd*d+ddd+1)
ax.hist2d(data_cc[:, dd], data_cc[:, ddd], bins=30,
norm=colors.LogNorm(), vmin=1)
ax.set_xlim(np.min(data[:, dd]), np.max(data[:, dd]))
ax.set_ylim(np.min(data[:, ddd]), np.max(data[:, ddd]))
def scatter_data(self, data):
d = data.shape[1]
data_cc = self.select_data(data)
fig = plt.figure()
for dd in range(d):
for ddd in range(dd+1, d):
ax = fig.add_subplot(d, d, dd*d+ddd+1)
ax.scatter(data_cc[:, dd], data_cc[:, ddd], marker='+')
ax.set_xlim(np.min(data[:, dd]), np.max(data[:, dd]))
ax.set_ylim(np.min(data[:, ddd]), np.max(data[:, ddd]))
def bhattacharyya_coefficient_discrete(data1, data2, bins=10):
'''
Computing Bhattacharyya coefficient using (multidimensional)
histograms.
'''
hist_range = zip(np.minimum(np.min(data1, axis=0), np.min(data2, axis=0)),
np.maximum(np.max(data1, axis=0), np.max(data2, axis=0)))
bins_total_volume = np.prod([ma-mi for mi, ma in hist_range])
hist1, _ = np.histogramdd(data1, bins=bins, range=hist_range, normed=True)
hist2, _ = np.histogramdd(data2, bins=bins, range=hist_range, normed=True)
return np.mean(np.sqrt(hist1*hist2))*bins_total_volume
@MC_error_check
def bhattacharyya_coefficient(data1, data2, N=1000):
'''
Computing Bhattacharyya coefficient (using MC sampling)
    between kernel density estimates of data with bandwidth
selection by Scott's rule.
'''
try:
d = data1.shape[1]
except IndexError:
d = 1
if data1.shape[0] < d or data2.shape[0] < d:
return 0
try:
kde1 = gaussian_kde(data1.T)
kde2 = gaussian_kde(data2.T)
except np.linalg.linalg.LinAlgError:
return 0
samp1 = kde1.resample(N/2)
samp2 = kde2.resample(N/2)
return (np.mean(np.sqrt(kde2(samp1)/kde1(samp1))) +
np.mean(np.sqrt(kde1(samp2)/kde2(samp2))))/2 | from __future__ import unicode_literals
from collections import Counter
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
from . import MC_error_check
class ClustData(object):
'''
Object for handling clusters in data.
Can find closest clusters in data set using
Bhattacharyya distance.
'''
def __init__(self, data, labels, excludelab=None):
if excludelab is None:
excludelab = []
self.label_names = [lab for lab in np.unique(labels) if not lab in excludelab]
self.labels = labels
self.K = len(self.label_names)
self.data = data
self.n, self.d = data.shape
self._clusters = {}
self.bhattacharyya_measure = bhattacharyya_coefficient
def __iter__(self):
return iter(self.label_names)
def add_consensus_labelings(self, labels):
consensus_labels = self.labels*0
for k in range(self.K):
cc = ConsensusCluster(self.labels, k)
for lab in labels:
cc.add_labeling(lab)
consensus_labels[cc.in_cluster] = k
self.labels = consensus_labels
def cluster(self, k):
try:
return self._clusters[k]
except KeyError:
self._clusters[k] = self.data[self.labels == k, :]
return self._clusters[k]
def in_cluster(self, ks):
incl = np.zeros((self.n, len(ks)), dtype='bool')
for i, k in enumerate(ks):
incl[self.labels == k] = 1
return np.any(incl, axis=1)
def get_closest(self, n):
ind_1d = np.argpartition(-self.bhattacharyya_coefficient_toother, n,
axis=None)[:n]
ind_1d = ind_1d[self.bhattacharyya_coefficient_toother.ravel()[ind_1d] > 0]
ind = np.unravel_index(ind_1d, (self.K, self.K))
return zip([self.label_names[i] for i in ind[0]],
[self.label_names[i] for i in ind[1]])[:n]
def most_discriminating_dim(self, k1, k2):
bhd_1d = np.zeros(self.d)
for dd in range(self.d):
bhd_1d[dd] = self.bhattacharyya_measure(
self.cluster(k1)[:, dd], self.cluster(k2)[:, dd])
return np.argmin(bhd_1d)
def split_in_other_labelings(self, k1, k2, labelings):
'''
Check if most common label in k1 (by other labelings) is
different from most common label in k2.
'''
diffs = 0
for label in labelings:
most_common_k1 = Counter(label[self.labels == k1]).most_common()[0][0]
most_common_k2 = Counter(label[self.labels == k2]).most_common()[0][0]
if most_common_k1 != most_common_k2:
diffs += 1
return diffs
def scatterplot_most_discriminating_dim(self, k1, k2, axs):
dim = self.most_discriminating_dim(k1, k2)
for d, ax in enumerate(axs):
self.scatterplot([k1, k2], [d, dim], ax)
def hist2d_most_discriminating_dim(self, k1, k2, axs, **figargs):
dim = self.most_discriminating_dim(k1, k2)
for d, ax in enumerate(axs):
self.hist2d([k1, k2], [d, dim], ax, **figargs)
def scatterplot(self, ks, dim, ax):
cmap = plt.get_cmap('gist_rainbow')
K = len(ks)
colors = [cmap((0.2+k)*1./(K-1)) for k in range(K)]
for k, color in zip(ks, colors):
ax.scatter(self.cluster(k)[:, dim[0]], self.cluster(k)[:, dim[1]],
color=color, marker='+')
def hist2d(self, ks, dim, ax, **figargs):
data = np.vstack([self.cluster(k) for k in ks])
ax.hist2d(data[:, dim[0]], data[:, dim[1]], **figargs)
def boxplot_closest(self, n):
closest = self.get_closest(n)
n = len(closest)
fig, axs = plt.subplots(n, squeeze=False, figsize=(4, (n-1)*1.3+1))
for ax, ind in zip(axs.ravel(), closest):
for k in ind:
ax.boxplot(np.hsplit(self.cluster(k), self.d))
ax.set_title('Cluster {} and {}'.format(*ind))
def hist_closest(self, n):
closest = self.get_closest(n)
n = len(closest)
fig, axs = plt.subplots(n, self.d, squeeze=False, figsize=(4+(self.d-1)*2, (n-1)*1.3+1))
for ax_c, ind in zip(axs, closest):
ranges = zip(np.minimum(np.min(self.cluster(ind[0]), axis=0), np.min(self.cluster(ind[1]), axis=0)),
np.maximum(np.max(self.cluster(ind[0]), axis=0), np.max(self.cluster(ind[1]), axis=0)))
for dd, (ax, range_) in enumerate(zip(ax_c, ranges)):
for color, k in zip(['blue', 'red'], ind):
ax.hist(self.cluster(k)[:, dd], bins=20, range=range_, color=color, alpha=0.6)
#ax.set_ylim(0, 200)
ax.set_title('Cluster {} and {}'.format(*ind))
@property
def bhattacharyya_coefficient_toother(self):
try:
return self._bhattacharyya_coefficient_toother
except AttributeError:
bdb = np.zeros((self.K, self.K))
for i, k in enumerate(self):
for j, kk in enumerate(self):
if j <= i:
continue
bdb[i, j] = self.bhattacharyya_measure(
self.cluster(k), self.cluster(kk))
self._bhattacharyya_coefficient_toother = bdb
return bdb
@property
def bhattacharyya_coefficient_toself(self):
try:
return self._bhattacharyya_coefficient_toself
except AttributeError:
bdw = np.zeros(self.K)
for i, k in enumerate(self):
bdw[i] = self.bhattacharyya_measure(
self.cluster(k), self.cluster(k))
self._bhattacharyya_coefficient_toself = bdw
return bdw
@property
def bhattacharyya_distances(self):
bhattacharyya_coefficients = (
self.bhattacharyya_coefficient_toother +
self.bhattacharyya_coefficient_toother.T +
np.diag(self.bhattacharyya_coefficient_toself))
return -np.log(bhattacharyya_coefficients)
def plot_bhattacharrya(self):
plt.matshow(self.bhattacharyya_distances)
class ConsensusCluster(object):
'''
For finding a cluster that is common across a number of
labelings.
'''
def __init__(self, labels, k):
self.in_cluster = labels == k
@property
def size(self):
return np.sum(self.in_cluster)
def add_labeling(self, labels):
k = Counter(labels[self.in_cluster]).most_common(1)[0][0]
self.in_cluster *= labels == k
return k
def select_data(self, data):
return data[self.in_cluster, :]
def hist(self, data, bounds=(-np.inf, np.inf), fig=None):
d = data.shape[1]
data_cc = self.select_data(data)
if fig is None:
fig = plt.figure()
for dd in range(d):
ax = fig.add_subplot(1, d, dd+1)
data_cc_d = data_cc[:, dd]
ax.hist(data_cc_d[(data_cc_d > bounds[0])*(data_cc_d < bounds[1])], bins=100)
for ax in fig.axes:
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
def hist2d(self, data, fig=None):
d = data.shape[1]
data_cc = self.select_data(data)
if fig is None:
fig = plt.figure()
for dd in range(d):
for ddd in range(dd+1, d):
ax = fig.add_subplot(d, d, dd*d+ddd+1)
ax.hist2d(data_cc[:, dd], data_cc[:, ddd], bins=30,
norm=colors.LogNorm(), vmin=1)
ax.set_xlim(np.min(data[:, dd]), np.max(data[:, dd]))
ax.set_ylim(np.min(data[:, ddd]), np.max(data[:, ddd]))
def scatter_data(self, data):
d = data.shape[1]
data_cc = self.select_data(data)
fig = plt.figure()
for dd in range(d):
for ddd in range(dd+1, d):
ax = fig.add_subplot(d, d, dd*d+ddd+1)
ax.scatter(data_cc[:, dd], data_cc[:, ddd], marker='+')
ax.set_xlim(np.min(data[:, dd]), np.max(data[:, dd]))
ax.set_ylim(np.min(data[:, ddd]), np.max(data[:, ddd]))
def bhattacharyya_coefficient_discrete(data1, data2, bins=10):
'''
Computing Bhattacharyya coefficient using (multidimensional)
histograms.
'''
hist_range = zip(np.minimum(np.min(data1, axis=0), np.min(data2, axis=0)),
np.maximum(np.max(data1, axis=0), np.max(data2, axis=0)))
bins_total_volume = np.prod([ma-mi for mi, ma in hist_range])
hist1, _ = np.histogramdd(data1, bins=bins, range=hist_range, normed=True)
hist2, _ = np.histogramdd(data2, bins=bins, range=hist_range, normed=True)
return np.mean(np.sqrt(hist1*hist2))*bins_total_volume
@MC_error_check
def bhattacharyya_coefficient(data1, data2, N=1000):
'''
Computing Bhattacharyya coefficient (using MC sampling)
    between kernel density estimates of data with bandwidth
selection by Scott's rule.
'''
try:
d = data1.shape[1]
except IndexError:
d = 1
if data1.shape[0] < d or data2.shape[0] < d:
return 0
try:
kde1 = gaussian_kde(data1.T)
kde2 = gaussian_kde(data2.T)
except np.linalg.linalg.LinAlgError:
return 0
samp1 = kde1.resample(N/2)
samp2 = kde2.resample(N/2)
return (np.mean(np.sqrt(kde2(samp1)/kde1(samp1))) +
np.mean(np.sqrt(kde1(samp2)/kde2(samp2))))/2 | en | 0.853537 | Object for handling clusters in data. Can find closest clusters in data set using Bhattacharyya distance. Check if most common label in k1 (by other labelings) is different from most common label in k2. #ax.set_ylim(0, 200) For finding a cluster that is common across a number of labelings. Computing Bhattacharyya coefficient using (multidimensional) histograms. Computing Bhattacharyya coefficient (using MC sampling) between kernel density estimates of data with bandwith selection by Scott's rule. | 2.586525 | 3 |
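A standalone Python 3 sketch of the Monte Carlo Bhattacharyya estimate used in the ClustData row above (the class itself targets Python 2, e.g. the sliced zip() and N/2 resample calls); the two samples are synthetic Gaussians.
# Python 3 re-sketch of the MC Bhattacharyya coefficient above; synthetic 1-D data.
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
data1 = rng.normal(0.0, 1.0, 500)
data2 = rng.normal(1.5, 1.0, 500)

kde1, kde2 = gaussian_kde(data1), gaussian_kde(data2)
samp1 = kde1.resample(500)   # N/2 draws from each KDE, as in the decorated function
samp2 = kde2.resample(500)

# Average sqrt(p2/p1) under p1 and sqrt(p1/p2) under p2, then take the mean of both.
bc = (np.mean(np.sqrt(kde2(samp1) / kde1(samp1))) +
      np.mean(np.sqrt(kde1(samp2) / kde2(samp2)))) / 2
print('Bhattacharyya coefficient ~ %.3f' % bc)
print('Bhattacharyya distance    ~ %.3f' % -np.log(bc))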
django_drf_filepond/uploaders.py | MarketingCannon/django-drf-filepond | 0 | 6630002 | import logging
import os
from django.core.files.uploadedfile import UploadedFile, InMemoryUploadedFile
from rest_framework import status
from rest_framework.exceptions import ParseError, MethodNotAllowed
from rest_framework.response import Response
from django_drf_filepond.models import TemporaryUpload, storage,\
TemporaryUploadChunked
from io import BytesIO, StringIO
from django_drf_filepond.utils import _get_user
from six import text_type, binary_type
# There's no built in FileNotFoundError in Python 2
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
LOG = logging.getLogger(__name__)
class FilepondFileUploader(object):
@classmethod
def get_uploader(cls, request):
# Process the request to identify if it's a standard upload request
# or a request that is related to a chunked upload. Return the right
# kind of uploader to handle this.
if request.method == 'PATCH':
return FilepondChunkedFileUploader()
if request.method == 'HEAD':
return FilepondChunkedFileUploader()
elif request.method == 'POST':
file_obj = cls._get_file_obj(request)
if (file_obj == '{}' and
request.META.get('HTTP_UPLOAD_LENGTH', None)):
LOG.debug('Returning CHUNKED uploader to handle '
'upload request... ')
return FilepondChunkedFileUploader()
else:
raise MethodNotAllowed('%s is an invalid method type'
% (request.method))
# If we didn't identify the need for a chunked uploader in any of the
# above tests, treat this as a standard upload
LOG.debug('Returning STANDARD uploader to handle upload request... ')
return FilepondStandardFileUploader()
@classmethod
def _get_file_obj(cls, request):
# By default the upload element name is expected to be "filepond"
# As raised in issue #4, there are cases where there may be more
# than one filepond instance on a page, or the developer has opted
# not to use the name "filepond" for the filepond instance.
# Using the example from #4, this provides support these cases.
upload_field_name = 'filepond'
if 'fp_upload_field' in request.data:
upload_field_name = request.data['fp_upload_field']
if upload_field_name not in request.data:
raise ParseError('Invalid request data has been provided.')
# The content of the upload field is a django.http.QueryDict.
# The dict may have multiple values for a given field name.
# When accessing the QueryDict by field name, e.g.
# request.data['filepond'], if there is more than one value provided
# for the requested field name, the last one is returned, to get all
# values we use getlist(). For the file upload being handled here, we
# expect the file field to contain two values, the first is the file
# metadata, the second is the file object.
upload_fields = request.data.getlist(upload_field_name)
if len(upload_fields) == 1:
file_obj = upload_fields[0]
elif len(upload_fields) == 2:
# file_metadata = upload_fields[0]
file_obj = upload_fields[1]
else:
raise ParseError('Invalid number of fields in request data.')
return file_obj
# The file ID and upload ID are generated by _get_file_id in
# django_drf_filepond.views. The TemporaryUpload model should validate
# that the values provided are within spec but in some cases, e.g. when
# using SQLite, that doesn't happen. We therefore provide these two
# methods for doing local validation of these values since they are
# passed in as parameters to handle_upload.
@classmethod
def _file_id_valid(cls, file_id):
if isinstance(file_id, text_type) and (len(file_id) == 22):
return True
return False
@classmethod
def _upload_id_valid(cls, upload_id):
if isinstance(upload_id, text_type) and (len(upload_id) == 22):
return True
return False
class FilepondStandardFileUploader(FilepondFileUploader):
def handle_upload(self, request, upload_id, file_id):
# Since the upload_id and file_id are being provided here as
# parameters, we check that they are valid. This should be done by
# the DB and an error would be generated in the tu.save() call below
# however SQLite doesn't handle field length validation so this won't
# be picked up when using SQLite.
if ((not self._file_id_valid(file_id)) or
(not self._upload_id_valid(upload_id))):
return Response('Invalid ID for handling upload.',
content_type='text/plain',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
file_obj = self._get_file_obj(request)
# The type of parsed data should be a descendant of an UploadedFile
# type.
if not isinstance(file_obj, UploadedFile):
raise ParseError('Invalid data type has been parsed.')
# Save original file name and set name of saved file to the unique ID
upload_filename = file_obj.name
file_obj.name = file_id
# Before we attempt to save the file, make sure that the upload
# directory we're going to save to exists.
# *** It's not necessary to explicitly create the directory since
# *** the FileSystemStorage object creates the directory on save
# if not os.path.exists(storage.location):
# LOG.debug('Filepond app: Creating file upload directory '
# '<%s>...' % storage.location)
# os.makedirs(storage.location, mode=0o700)
LOG.debug('About to store uploaded temp file with filename: %s'
% (upload_filename))
# We now need to create the temporary upload object and store the
# file and metadata.
tu = TemporaryUpload(upload_id=upload_id, file_id=file_id,
file=file_obj, upload_name=upload_filename,
upload_type=TemporaryUpload.FILE_DATA,
uploaded_by=_get_user(request))
tu.save()
response = Response(upload_id, status=status.HTTP_200_OK,
content_type='text/plain')
return response
# Handles chunked file uploads as per the approach described in filepond's
# docs at https://pqina.nl/filepond/docs/patterns/api/server/#process-chunks
class FilepondChunkedFileUploader(FilepondFileUploader):
def handle_upload(self, request, upload_id, file_id=None):
        # Since the upload_id is being provided here as a parameter, we check
# it is valid. This should be done by the DB but for some DBs, e.g.
# SQLite field length validation isn't handled. The same check is
# done for file_id in the case of POST requests.
if not self._upload_id_valid(upload_id):
return Response('Invalid ID for handling upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if request.method == 'PATCH':
return self._handle_chunk_upload(request, upload_id)
elif request.method == 'HEAD':
return self._handle_chunk_restart(request, upload_id)
elif request.method == 'POST':
if not self._file_id_valid(file_id):
return Response('Invalid ID for handling upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return self._handle_new_chunk_upload(request, upload_id, file_id)
def _handle_new_chunk_upload(self, request, upload_id, file_id):
LOG.debug('Processing a new chunked upload request...')
file_obj = self._get_file_obj(request)
if file_obj != '{}':
return Response('An invalid file object has been received '
'for a new chunked upload request.',
status=status.HTTP_400_BAD_REQUEST)
ulen = request.META.get('HTTP_UPLOAD_LENGTH', None)
if not ulen:
return Response('No length for new chunked upload request.',
status=status.HTTP_400_BAD_REQUEST)
LOG.debug('Handling a new chunked upload request for an upload '
'with total length %s bytes' % (ulen))
# Do some general checks to make sure that the storage location
# exists and that we're not being made to try and store something
# outside the base storage location. Then create the new
# temporary directory into which chunks will be stored
base_loc = storage.base_location
chunk_dir = os.path.abspath(os.path.join(base_loc, upload_id))
if not chunk_dir.startswith(base_loc):
return Response('Unable to create storage for upload data.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if os.path.exists(base_loc):
try:
os.makedirs(chunk_dir, exist_ok=False)
except OSError as e:
LOG.debug('Unable to create chunk storage dir: %s' %
(str(e)))
return Response(
'Unable to prepare storage for upload data.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.debug('The base data store location <%s> doesn\'t exist.'
' Unable to create chunk dir.' % (base_loc))
return Response('Data storage error occurred.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# We now create the temporary chunked upload object
# this will be updated as we receive the chunks.
tuc = TemporaryUploadChunked(upload_id=upload_id, file_id=file_id,
upload_dir=upload_id, total_size=ulen,
uploaded_by=_get_user(request))
tuc.save()
return Response(upload_id, status=status.HTTP_200_OK,
content_type='text/plain')
def _handle_chunk_upload(self, request, chunk_id):
# Check that the incoming data can be accessed. If the request
# content type was invalid then we want to raise an error here
# Trying to access request data should result in a 415 response if
# the data couldn't be handled by the configured parser.
file_data = request.data
if (not chunk_id) or (chunk_id == ''):
return Response('A required chunk parameter is missing.',
status=status.HTTP_400_BAD_REQUEST)
if isinstance(file_data, binary_type):
fd = BytesIO(file_data)
elif isinstance(file_data, text_type):
fd = StringIO(file_data)
else:
return Response('Upload data type not recognised.',
status=status.HTTP_400_BAD_REQUEST)
# Try to load a temporary chunked upload object for the provided id
try:
tuc = TemporaryUploadChunked.objects.get(upload_id=chunk_id)
except TemporaryUploadChunked.DoesNotExist:
return Response('Invalid chunk upload request data',
status=status.HTTP_400_BAD_REQUEST)
# Get the required header information to handle the new data
uoffset = request.META.get('HTTP_UPLOAD_OFFSET', None)
ulength = request.META.get('HTTP_UPLOAD_LENGTH', None)
uname = request.META.get('HTTP_UPLOAD_NAME', None)
if (uoffset is None) or (ulength is None) or (uname is None):
return Response('Chunk upload is missing required metadata',
status=status.HTTP_400_BAD_REQUEST)
if int(ulength) != tuc.total_size:
return Response('ERROR: Upload metadata is invalid - size changed',
status=status.HTTP_400_BAD_REQUEST)
# if this is the first chunk, store the filename
if tuc.last_chunk == 0:
tuc.upload_name = uname
else:
if tuc.upload_name != uname:
return Response('Chunk upload file metadata is invalid',
status=status.HTTP_400_BAD_REQUEST)
LOG.debug('Handling chunk <%s> for upload id <%s> with name <%s> '
'size <%s> and offset <%s>...'
% (tuc.last_chunk+1, chunk_id, uname, ulength, uoffset))
LOG.debug('Current length and offset in the record is: length <%s> '
' offset <%s>' % (tuc.total_size, tuc.offset))
# Check that our recorded offset matches the offset provided by the
# client...if not, there's an error.
if not (int(uoffset) == tuc.offset):
LOG.error('Offset provided by client <%s> doesn\'t match the '
'stored offset <%s> for chunked upload id <%s>'
% (uoffset, tuc.offset, chunk_id))
return Response('ERROR: Chunked upload metadata is invalid.',
status=status.HTTP_400_BAD_REQUEST)
file_data_len = len(file_data)
LOG.debug('Got data from request with length %s bytes'
% (file_data_len))
# Store the chunk and check if we've now completed the upload
upload_dir = os.path.join(storage.base_location, tuc.upload_dir)
upload_file = os.path.join(tuc.upload_dir,
'%s_%s' % (tuc.file_id, tuc.last_chunk+1))
if not os.path.exists(upload_dir):
return Response('Chunk storage location error',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
storage.save(upload_file, fd)
# Set the updated chunk number and the new offset
tuc.last_chunk = tuc.last_chunk + 1
tuc.offset = tuc.offset + file_data_len
if tuc.offset == tuc.total_size:
tuc.upload_complete = True
tuc.save()
# At this point, if the upload is complete, we can rebuild the chunks
# into the complete file and store it with a TemporaryUpload object.
if tuc.upload_complete:
try:
self._store_upload(tuc)
except (ValueError, FileNotFoundError) as e:
LOG.error('Error storing upload: %s' % (str(e)))
return Response('Error storing uploaded file.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(chunk_id, status=status.HTTP_200_OK,
content_type='text/plain')
def _store_upload(self, tuc):
if not tuc.upload_complete:
LOG.error('Attempt to store an incomplete upload with ID <%s>'
% (tuc.upload_id))
raise ValueError('Attempt to store an incomplete upload with ID '
'<%s>' % (tuc.upload_id))
# Load each of the file parts into a BytesIO object and store them
# via a TemporaryUpload object.
chunk_dir = os.path.join(storage.base_location, tuc.upload_dir)
file_data = BytesIO()
for i in range(1, tuc.last_chunk+1):
chunk_file = os.path.join(chunk_dir, '%s_%s' % (tuc.file_id, i))
if not os.path.exists(chunk_file):
raise FileNotFoundError('Chunk file not found for chunk <%s>'
% (i))
with open(chunk_file, 'rb') as cf:
file_data.write(cf.read())
# Prepare an InMemoryUploadedFile object so that the data can be
# successfully saved via the FileField in the TemporaryUpload object
memfile = InMemoryUploadedFile(file_data, None, tuc.file_id,
'application/octet-stream',
tuc.total_size, None)
tu = TemporaryUpload(upload_id=tuc.upload_id, file_id=tuc.file_id,
file=memfile, upload_name=tuc.upload_name,
upload_type=TemporaryUpload.FILE_DATA,
uploaded_by=tuc.uploaded_by)
tu.save()
# Check that the final file is stored and of the correct size
stored_file_path = os.path.join(chunk_dir, tuc.file_id)
if ((not os.path.exists(stored_file_path)) or
(not os.path.getsize(stored_file_path) == tuc.total_size)):
raise ValueError('Stored file size wrong or file not found.')
LOG.debug('Full file built from chunks and saved. Deleting chunks '
'and TemporaryUploadChunked object.')
for i in range(1, tuc.last_chunk+1):
chunk_file = os.path.join(chunk_dir, '%s_%s' % (tuc.file_id, i))
os.remove(chunk_file)
tuc.delete()
def _handle_chunk_restart(self, request, upload_id):
try:
tuc = TemporaryUploadChunked.objects.get(upload_id=upload_id)
except TemporaryUploadChunked.DoesNotExist:
return Response('Invalid upload ID specified.',
status=status.HTTP_404_NOT_FOUND,
content_type='text/plain')
if tuc.upload_complete is True:
return Response('Invalid upload ID specified.',
status=status.HTTP_400_BAD_REQUEST,
content_type='text/plain')
# Check that the directory for the chunks exists
if not os.path.exists(os.path.join(storage.base_location,
tuc.upload_dir)):
return Response('Invalid upload location, can\'t continue upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
content_type='text/plain')
# TODO: Is it necessary to check for the existence of all previous
# chunk files here?
LOG.debug('Returning offset to continue chunked upload. We have <%s> '
                  'chunks so far and are at offset <%s>.'
% (tuc.last_chunk, tuc.offset))
return Response(upload_id, status=status.HTTP_200_OK,
headers={'Upload-Offset': str(tuc.offset)},
content_type='text/plain')
| import logging
import os
from django.core.files.uploadedfile import UploadedFile, InMemoryUploadedFile
from rest_framework import status
from rest_framework.exceptions import ParseError, MethodNotAllowed
from rest_framework.response import Response
from django_drf_filepond.models import TemporaryUpload, storage,\
TemporaryUploadChunked
from io import BytesIO, StringIO
from django_drf_filepond.utils import _get_user
from six import text_type, binary_type
# There's no built in FileNotFoundError in Python 2
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
LOG = logging.getLogger(__name__)
class FilepondFileUploader(object):
@classmethod
def get_uploader(cls, request):
# Process the request to identify if it's a standard upload request
# or a request that is related to a chunked upload. Return the right
# kind of uploader to handle this.
if request.method == 'PATCH':
return FilepondChunkedFileUploader()
if request.method == 'HEAD':
return FilepondChunkedFileUploader()
elif request.method == 'POST':
file_obj = cls._get_file_obj(request)
if (file_obj == '{}' and
request.META.get('HTTP_UPLOAD_LENGTH', None)):
LOG.debug('Returning CHUNKED uploader to handle '
'upload request... ')
return FilepondChunkedFileUploader()
else:
raise MethodNotAllowed('%s is an invalid method type'
% (request.method))
# If we didn't identify the need for a chunked uploader in any of the
# above tests, treat this as a standard upload
LOG.debug('Returning STANDARD uploader to handle upload request... ')
return FilepondStandardFileUploader()
@classmethod
def _get_file_obj(cls, request):
# By default the upload element name is expected to be "filepond"
# As raised in issue #4, there are cases where there may be more
# than one filepond instance on a page, or the developer has opted
# not to use the name "filepond" for the filepond instance.
# Using the example from #4, this provides support these cases.
upload_field_name = 'filepond'
if 'fp_upload_field' in request.data:
upload_field_name = request.data['fp_upload_field']
if upload_field_name not in request.data:
raise ParseError('Invalid request data has been provided.')
# The content of the upload field is a django.http.QueryDict.
# The dict may have multiple values for a given field name.
# When accessing the QueryDict by field name, e.g.
# request.data['filepond'], if there is more than one value provided
# for the requested field name, the last one is returned, to get all
# values we use getlist(). For the file upload being handled here, we
# expect the file field to contain two values, the first is the file
# metadata, the second is the file object.
upload_fields = request.data.getlist(upload_field_name)
if len(upload_fields) == 1:
file_obj = upload_fields[0]
elif len(upload_fields) == 2:
# file_metadata = upload_fields[0]
file_obj = upload_fields[1]
else:
raise ParseError('Invalid number of fields in request data.')
return file_obj
# The file ID and upload ID are generated by _get_file_id in
# django_drf_filepond.views. The TemporaryUpload model should validate
# that the values provided are within spec but in some cases, e.g. when
# using SQLite, that doesn't happen. We therefore provide these two
# methods for doing local validation of these values since they are
# passed in as parameters to handle_upload.
@classmethod
def _file_id_valid(cls, file_id):
if isinstance(file_id, text_type) and (len(file_id) == 22):
return True
return False
@classmethod
def _upload_id_valid(cls, upload_id):
if isinstance(upload_id, text_type) and (len(upload_id) == 22):
return True
return False
class FilepondStandardFileUploader(FilepondFileUploader):
def handle_upload(self, request, upload_id, file_id):
# Since the upload_id and file_id are being provided here as
# parameters, we check that they are valid. This should be done by
# the DB and an error would be generated in the tu.save() call below
# however SQLite doesn't handle field length validation so this won't
# be picked up when using SQLite.
if ((not self._file_id_valid(file_id)) or
(not self._upload_id_valid(upload_id))):
return Response('Invalid ID for handling upload.',
content_type='text/plain',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
file_obj = self._get_file_obj(request)
# The type of parsed data should be a descendant of an UploadedFile
# type.
if not isinstance(file_obj, UploadedFile):
raise ParseError('Invalid data type has been parsed.')
# Save original file name and set name of saved file to the unique ID
upload_filename = file_obj.name
file_obj.name = file_id
# Before we attempt to save the file, make sure that the upload
# directory we're going to save to exists.
# *** It's not necessary to explicitly create the directory since
# *** the FileSystemStorage object creates the directory on save
# if not os.path.exists(storage.location):
# LOG.debug('Filepond app: Creating file upload directory '
# '<%s>...' % storage.location)
# os.makedirs(storage.location, mode=0o700)
LOG.debug('About to store uploaded temp file with filename: %s'
% (upload_filename))
# We now need to create the temporary upload object and store the
# file and metadata.
tu = TemporaryUpload(upload_id=upload_id, file_id=file_id,
file=file_obj, upload_name=upload_filename,
upload_type=TemporaryUpload.FILE_DATA,
uploaded_by=_get_user(request))
tu.save()
response = Response(upload_id, status=status.HTTP_200_OK,
content_type='text/plain')
return response
# Handles chunked file uploads as per the approach described in filepond's
# docs at https://pqina.nl/filepond/docs/patterns/api/server/#process-chunks
class FilepondChunkedFileUploader(FilepondFileUploader):
def handle_upload(self, request, upload_id, file_id=None):
        # Since the upload_id is being provided here as a parameter, we check
# it is valid. This should be done by the DB but for some DBs, e.g.
# SQLite field length validation isn't handled. The same check is
# done for file_id in the case of POST requests.
if not self._upload_id_valid(upload_id):
return Response('Invalid ID for handling upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if request.method == 'PATCH':
return self._handle_chunk_upload(request, upload_id)
elif request.method == 'HEAD':
return self._handle_chunk_restart(request, upload_id)
elif request.method == 'POST':
if not self._file_id_valid(file_id):
return Response('Invalid ID for handling upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return self._handle_new_chunk_upload(request, upload_id, file_id)
def _handle_new_chunk_upload(self, request, upload_id, file_id):
LOG.debug('Processing a new chunked upload request...')
file_obj = self._get_file_obj(request)
if file_obj != '{}':
return Response('An invalid file object has been received '
'for a new chunked upload request.',
status=status.HTTP_400_BAD_REQUEST)
ulen = request.META.get('HTTP_UPLOAD_LENGTH', None)
if not ulen:
return Response('No length for new chunked upload request.',
status=status.HTTP_400_BAD_REQUEST)
LOG.debug('Handling a new chunked upload request for an upload '
'with total length %s bytes' % (ulen))
# Do some general checks to make sure that the storage location
# exists and that we're not being made to try and store something
# outside the base storage location. Then create the new
# temporary directory into which chunks will be stored
base_loc = storage.base_location
chunk_dir = os.path.abspath(os.path.join(base_loc, upload_id))
if not chunk_dir.startswith(base_loc):
return Response('Unable to create storage for upload data.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if os.path.exists(base_loc):
try:
os.makedirs(chunk_dir, exist_ok=False)
except OSError as e:
LOG.debug('Unable to create chunk storage dir: %s' %
(str(e)))
return Response(
'Unable to prepare storage for upload data.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.debug('The base data store location <%s> doesn\'t exist.'
' Unable to create chunk dir.' % (base_loc))
return Response('Data storage error occurred.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# We now create the temporary chunked upload object
# this will be updated as we receive the chunks.
tuc = TemporaryUploadChunked(upload_id=upload_id, file_id=file_id,
upload_dir=upload_id, total_size=ulen,
uploaded_by=_get_user(request))
tuc.save()
return Response(upload_id, status=status.HTTP_200_OK,
content_type='text/plain')
def _handle_chunk_upload(self, request, chunk_id):
# Check that the incoming data can be accessed. If the request
# content type was invalid then we want to raise an error here
# Trying to access request data should result in a 415 response if
# the data couldn't be handled by the configured parser.
file_data = request.data
if (not chunk_id) or (chunk_id == ''):
return Response('A required chunk parameter is missing.',
status=status.HTTP_400_BAD_REQUEST)
if isinstance(file_data, binary_type):
fd = BytesIO(file_data)
elif isinstance(file_data, text_type):
fd = StringIO(file_data)
else:
return Response('Upload data type not recognised.',
status=status.HTTP_400_BAD_REQUEST)
# Try to load a temporary chunked upload object for the provided id
try:
tuc = TemporaryUploadChunked.objects.get(upload_id=chunk_id)
except TemporaryUploadChunked.DoesNotExist:
return Response('Invalid chunk upload request data',
status=status.HTTP_400_BAD_REQUEST)
# Get the required header information to handle the new data
uoffset = request.META.get('HTTP_UPLOAD_OFFSET', None)
ulength = request.META.get('HTTP_UPLOAD_LENGTH', None)
uname = request.META.get('HTTP_UPLOAD_NAME', None)
if (uoffset is None) or (ulength is None) or (uname is None):
return Response('Chunk upload is missing required metadata',
status=status.HTTP_400_BAD_REQUEST)
if int(ulength) != tuc.total_size:
return Response('ERROR: Upload metadata is invalid - size changed',
status=status.HTTP_400_BAD_REQUEST)
# if this is the first chunk, store the filename
if tuc.last_chunk == 0:
tuc.upload_name = uname
else:
if tuc.upload_name != uname:
return Response('Chunk upload file metadata is invalid',
status=status.HTTP_400_BAD_REQUEST)
LOG.debug('Handling chunk <%s> for upload id <%s> with name <%s> '
'size <%s> and offset <%s>...'
% (tuc.last_chunk+1, chunk_id, uname, ulength, uoffset))
LOG.debug('Current length and offset in the record is: length <%s> '
' offset <%s>' % (tuc.total_size, tuc.offset))
# Check that our recorded offset matches the offset provided by the
# client...if not, there's an error.
if not (int(uoffset) == tuc.offset):
LOG.error('Offset provided by client <%s> doesn\'t match the '
'stored offset <%s> for chunked upload id <%s>'
% (uoffset, tuc.offset, chunk_id))
return Response('ERROR: Chunked upload metadata is invalid.',
status=status.HTTP_400_BAD_REQUEST)
file_data_len = len(file_data)
LOG.debug('Got data from request with length %s bytes'
% (file_data_len))
# Store the chunk and check if we've now completed the upload
upload_dir = os.path.join(storage.base_location, tuc.upload_dir)
upload_file = os.path.join(tuc.upload_dir,
'%s_%s' % (tuc.file_id, tuc.last_chunk+1))
if not os.path.exists(upload_dir):
return Response('Chunk storage location error',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
storage.save(upload_file, fd)
# Set the updated chunk number and the new offset
tuc.last_chunk = tuc.last_chunk + 1
tuc.offset = tuc.offset + file_data_len
if tuc.offset == tuc.total_size:
tuc.upload_complete = True
tuc.save()
# At this point, if the upload is complete, we can rebuild the chunks
# into the complete file and store it with a TemporaryUpload object.
if tuc.upload_complete:
try:
self._store_upload(tuc)
except (ValueError, FileNotFoundError) as e:
LOG.error('Error storing upload: %s' % (str(e)))
return Response('Error storing uploaded file.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(chunk_id, status=status.HTTP_200_OK,
content_type='text/plain')
def _store_upload(self, tuc):
if not tuc.upload_complete:
LOG.error('Attempt to store an incomplete upload with ID <%s>'
% (tuc.upload_id))
raise ValueError('Attempt to store an incomplete upload with ID '
'<%s>' % (tuc.upload_id))
# Load each of the file parts into a BytesIO object and store them
# via a TemporaryUpload object.
chunk_dir = os.path.join(storage.base_location, tuc.upload_dir)
file_data = BytesIO()
for i in range(1, tuc.last_chunk+1):
chunk_file = os.path.join(chunk_dir, '%s_%s' % (tuc.file_id, i))
if not os.path.exists(chunk_file):
raise FileNotFoundError('Chunk file not found for chunk <%s>'
% (i))
with open(chunk_file, 'rb') as cf:
file_data.write(cf.read())
# Prepare an InMemoryUploadedFile object so that the data can be
# successfully saved via the FileField in the TemporaryUpload object
memfile = InMemoryUploadedFile(file_data, None, tuc.file_id,
'application/octet-stream',
tuc.total_size, None)
tu = TemporaryUpload(upload_id=tuc.upload_id, file_id=tuc.file_id,
file=memfile, upload_name=tuc.upload_name,
upload_type=TemporaryUpload.FILE_DATA,
uploaded_by=tuc.uploaded_by)
tu.save()
# Check that the final file is stored and of the correct size
stored_file_path = os.path.join(chunk_dir, tuc.file_id)
if ((not os.path.exists(stored_file_path)) or
(not os.path.getsize(stored_file_path) == tuc.total_size)):
raise ValueError('Stored file size wrong or file not found.')
LOG.debug('Full file built from chunks and saved. Deleting chunks '
'and TemporaryUploadChunked object.')
for i in range(1, tuc.last_chunk+1):
chunk_file = os.path.join(chunk_dir, '%s_%s' % (tuc.file_id, i))
os.remove(chunk_file)
tuc.delete()
def _handle_chunk_restart(self, request, upload_id):
try:
tuc = TemporaryUploadChunked.objects.get(upload_id=upload_id)
except TemporaryUploadChunked.DoesNotExist:
return Response('Invalid upload ID specified.',
status=status.HTTP_404_NOT_FOUND,
content_type='text/plain')
if tuc.upload_complete is True:
return Response('Invalid upload ID specified.',
status=status.HTTP_400_BAD_REQUEST,
content_type='text/plain')
# Check that the directory for the chunks exists
if not os.path.exists(os.path.join(storage.base_location,
tuc.upload_dir)):
return Response('Invalid upload location, can\'t continue upload.',
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
content_type='text/plain')
# TODO: Is it necessary to check for the existence of all previous
# chunk files here?
LOG.debug('Returning offset to continue chunked upload. We have <%s> '
'chunks so far and are at offset <%s>.'
% (tuc.last_chunk, tuc.offset))
return Response(upload_id, status=status.HTTP_200_OK,
headers={'Upload-Offset': str(tuc.offset)},
content_type='text/plain')
| en | 0.888115 | # There's no built in FileNotFoundError in Python 2 # Process the request to identify if it's a standard upload request # or a request that is related to a chunked upload. Return the right # kind of uploader to handle this. # If we didn't identify the need for a chunked uploader in any of the # above tests, treat this as a standard upload # By default the upload element name is expected to be "filepond" # As raised in issue #4, there are cases where there may be more # than one filepond instance on a page, or the developer has opted # not to use the name "filepond" for the filepond instance. # Using the example from #4, this provides support these cases. # The content of the upload field is a django.http.QueryDict. # The dict may have multiple values for a given field name. # When accessing the QueryDict by field name, e.g. # request.data['filepond'], if there is more than one value provided # for the requested field name, the last one is returned, to get all # values we use getlist(). For the file upload being handled here, we # expect the file field to contain two values, the first is the file # metadata, the second is the file object. # file_metadata = upload_fields[0] # The file ID and upload ID are generated by _get_file_id in # django_drf_filepond.views. The TemporaryUpload model should validate # that the values provided are within spec but in some cases, e.g. when # using SQLite, that doesn't happen. We therefore provide these two # methods for doing local validation of these values since they are # passed in as parameters to handle_upload. # Since the upload_id and file_id are being provided here as # parameters, we check that they are valid. This should be done by # the DB and an error would be generated in the tu.save() call below # however SQLite doesn't handle field length validation so this won't # be picked up when using SQLite. # The type of parsed data should be a descendant of an UploadedFile # type. # Save original file name and set name of saved file to the unique ID # Before we attempt to save the file, make sure that the upload # directory we're going to save to exists. # *** It's not necessary to explicitly create the directory since # *** the FileSystemStorage object creates the directory on save # if not os.path.exists(storage.location): # LOG.debug('Filepond app: Creating file upload directory ' # '<%s>...' % storage.location) # os.makedirs(storage.location, mode=0o700) # We now need to create the temporary upload object and store the # file and metadata. # Handles chunked file uploads as per the approach described in filepond's # docs at https://pqina.nl/filepond/docs/patterns/api/server/#process-chunks # Since the upload_id is being provided here as a paramter, we check # it is valid. This should be done by the DB but for some DBs, e.g. # SQLite field length validation isn't handled. The same check is # done for file_id in the case of POST requests. # Do some general checks to make sure that the storage location # exists and that we're not being made to try and store something # outside the base storage location. Then create the new # temporary directory into which chunks will be stored # We now create the temporary chunked upload object # this will be updated as we receive the chunks. # Check that the incoming data can be accessed. If the request # content type was invalid then we want to raise an error here # Trying to access request data should result in a 415 response if # the data couldn't be handled by the configured parser. 
# Try to load a temporary chunked upload object for the provided id # Get the required header information to handle the new data # if this is the first chunk, store the filename # Check that our recorded offset matches the offset provided by the # client...if not, there's an error. # Store the chunk and check if we've now completed the upload # Set the updated chunk number and the new offset # At this point, if the upload is complete, we can rebuild the chunks # into the complete file and store it with a TemporaryUpload object. # Load each of the file parts into a BytesIO object and store them # via a TemporaryUpload object. # Prepare an InMemoryUploadedFile object so that the data can be # successfully saved via the FileField in the TemporaryUpload object # Check that the final file is stored and of the correct size # Check that the directory for the chunks exists # TODO: Is it necessary to check for the existence of all previous # chunk files here? | 2.11818 | 2 |
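For reference, a minimal client-side sketch of the chunk protocol the handlers above expect: the offset, total length and file name travel as Upload-* headers and each chunk goes in its own PATCH request. The endpoint URL, chunk size and use of the requests library are assumptions for illustration, not part of the original module.

import os
import requests

def upload_in_chunks(path, upload_id, base_url='http://localhost:8000/fp/patch/',
                     chunk_size=512 * 1024):
    """Send a local file to a filepond-style chunked endpoint, one PATCH per chunk."""
    total = os.path.getsize(path)
    name = os.path.basename(path)
    offset = 0
    with open(path, 'rb') as fh:
        while offset < total:
            chunk = fh.read(chunk_size)
            resp = requests.patch(
                base_url + upload_id,
                data=chunk,
                headers={
                    'Upload-Offset': str(offset),   # must match the server-side record
                    'Upload-Length': str(total),    # total size, checked on every chunk
                    'Upload-Name': name,            # stored when the first chunk arrives
                    'Content-Type': 'application/offset+octet-stream',
                },
            )
            resp.raise_for_status()
            offset += len(chunk)
    return upload_id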
guests/migrations/0020_alter_guest_meal.py | MarinaMcGrath/django-wedding-website | 0 | 6630003 | <filename>guests/migrations/0020_alter_guest_meal.py<gh_stars>0
# Generated by Django 4.0.1 on 2022-01-29 04:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('guests', '0019_party_display_name'),
]
operations = [
migrations.AlterField(
model_name='guest',
name='meal',
field=models.CharField(blank=True, choices=[('vegetarian', 'vegetarian'), ('vegan', 'vegan'), ('none', 'none')], max_length=20, null=True),
),
]
| <filename>guests/migrations/0020_alter_guest_meal.py<gh_stars>0
# Generated by Django 4.0.1 on 2022-01-29 04:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('guests', '0019_party_display_name'),
]
operations = [
migrations.AlterField(
model_name='guest',
name='meal',
field=models.CharField(blank=True, choices=[('vegetarian', 'vegetarian'), ('vegan', 'vegan'), ('none', 'none')], max_length=20, null=True),
),
]
| en | 0.857644 | # Generated by Django 4.0.1 on 2022-01-29 04:56 | 1.619657 | 2 |
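A sketch of the model field that a migration like this would typically be generated from; the model name and constant are assumed for illustration.

from django.db import models

class Guest(models.Model):
    MEAL_CHOICES = [
        ('vegetarian', 'vegetarian'),
        ('vegan', 'vegan'),
        ('none', 'none'),
    ]
    # matches the AlterField above: optional CharField with fixed choices
    meal = models.CharField(max_length=20, choices=MEAL_CHOICES,
                            blank=True, null=True)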
src/api/v1/__init__.py | BuildForSDG/Team-115-Product | 0 | 6630004 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from dotenv import load_dotenv
application = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__)) # set db base directory
project_dir = os.path.expanduser('~/team-115-product')
load_dotenv(os.path.join(project_dir, '.env'))
class Config(object):
SECRET_KEY = os.getenv('SECRET_KEY') # secret keys for forms & sessions
# set the database uri
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# enable automatic commit of database changes at the end of each request
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
# disable signaling the app anytime a database change is made
SQLALCHEMY_TRACK_MODIFICATIONS = False
application.config.from_object(Config)
# Bootstrap(app)
db = SQLAlchemy(application)
db.init_app(application)
migrate = Migrate(application, db)
# Register blueprints
from .users import users as users_blueprint
from .solutions import solutions as solutions_blueprint
application.register_blueprint(users_blueprint)
application.register_blueprint(solutions_blueprint)
| import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from dotenv import load_dotenv
application = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__)) # set db base directory
project_dir = os.path.expanduser('~/team-115-product')
load_dotenv(os.path.join(project_dir, '.env'))
class Config(object):
SECRET_KEY = os.getenv('SECRET_KEY') # secret keys for forms & sessions
# set the database uri
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# enable automatic commit of database changes at the end of each request
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
# disable signaling the app anytime a database change is made
SQLALCHEMY_TRACK_MODIFICATIONS = False
application.config.from_object(Config)
# Bootstrap(app)
db = SQLAlchemy(application)
db.init_app(application)
migrate = Migrate(application, db)
# Register blueprints
from .users import users as users_blueprint
from .solutions import solutions as solutions_blueprint
application.register_blueprint(users_blueprint)
application.register_blueprint(solutions_blueprint)
| en | 0.678602 | # set db base directory # secret keys for forms & sessions # set the database uri #os.environ['DATABASE_URL'] # enable automatic commit of database changes at the end of each request # disable signaling the app anytime a database change is made # Bootstrap(app) # Resgister blueprints | 2.299396 | 2 |
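A possible entry-point sketch for driving a module like this once DATABASE_URL is set in the environment; the package path, run module and use of db.create_all() are assumptions.

# run.py (assumed entry point, with src/ on the import path)
from api.v1 import application, db

if __name__ == '__main__':
    with application.app_context():
        db.create_all()          # create tables for any imported models
    application.run(debug=True)  # development server only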
pyteal/compiler/sort.py | CiottiGiorgio/pyteal | 0 | 6630005 | <reponame>CiottiGiorgio/pyteal<filename>pyteal/compiler/sort.py
from typing import List
from pyteal.ir import TealBlock
from pyteal.errors import TealInternalError
def sortBlocks(start: TealBlock, end: TealBlock) -> List[TealBlock]:
"""Topologically sort the graph which starts with the input TealBlock.
Args:
start: The starting point of the graph to sort.
Returns:
An ordered list of TealBlocks that is sorted such that every block is guaranteed to appear
in the list before all of its outgoing blocks.
"""
S = [start]
order = []
visited = set() # I changed visited to a set to be more efficient
while len(S) != 0:
n = S.pop()
if id(n) in visited:
continue
S += n.getOutgoing()
order.append(n)
visited.add(id(n))
endIndex = -1
for i, block in enumerate(order):
if block is end:
endIndex = i
break
if endIndex == -1:
raise TealInternalError("End block not present")
order.pop(endIndex)
order.append(end)
return order
| from typing import List
from pyteal.ir import TealBlock
from pyteal.errors import TealInternalError
def sortBlocks(start: TealBlock, end: TealBlock) -> List[TealBlock]:
"""Topologically sort the graph which starts with the input TealBlock.
Args:
start: The starting point of the graph to sort.
Returns:
An ordered list of TealBlocks that is sorted such that every block is guaranteed to appear
in the list before all of its outgoing blocks.
"""
S = [start]
order = []
visited = set() # I changed visited to a set to be more efficient
while len(S) != 0:
n = S.pop()
if id(n) in visited:
continue
S += n.getOutgoing()
order.append(n)
visited.add(id(n))
endIndex = -1
for i, block in enumerate(order):
if block is end:
endIndex = i
break
if endIndex == -1:
raise TealInternalError("End block not present")
order.pop(endIndex)
order.append(end)
return order | en | 0.943845 | Topologically sort the graph which starts with the input TealBlock. Args: start: The starting point of the graph to sort. Returns: An ordered list of TealBlocks that is sorted such that every block is guaranteed to appear in the list before all of its outgoing blocks. # I changed visited to a set to be more efficient | 2.87246 | 3 |
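The same traversal pattern on a plain adjacency list, sketched without the pyteal block types: walk depth-first from the start node, then force the designated end node to the back of the ordering.

def sort_nodes(start, end, outgoing):
    """outgoing: dict mapping each node to a list of successor nodes."""
    stack, order, visited = [start], [], set()
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        stack.extend(outgoing.get(node, []))
        order.append(node)
        visited.add(node)
    if end not in order:
        raise ValueError("End node not reachable from start")
    order.remove(end)   # mirror the pop/append of the end block above
    order.append(end)
    return order

# Diamond graph a -> {b, c} -> d gives an order ending in d, e.g. ['a', 'c', 'b', 'd']
print(sort_nodes('a', 'd', {'a': ['b', 'c'], 'b': ['d'], 'c': ['d']}))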
app/pages/choices/unm.py | zferic/harmonization-website | 1 | 6630006 | CAT_UNM_ANALYTES = [('Analytes', (
('BCD', 'Cadmium - Blood'),
('BHGE', 'Ethyl Mercury - Blood'),
('BHGM', 'Methyl Mercury - Blood'),
('BMN', 'Manganese - Blood'),
('BPB', 'Lead - Blood'),
('BSE', 'Selenium - Blood'),
('IHG', 'Inorganic Mercury - Blood'),
('THG', 'Mercury Total - Blood'),
('SCU', 'Copper - Serum'),
('SSE', 'Selenium - Serum'),
('SZN', 'Zinc - Serum'),
('UAS3', 'Arsenous (III) acid - Urine'),
('UAS5', 'Arsenic (V) acid - Urine'),
('UASB', 'Arsenobetaine - Urine'),
('UASC', 'Arsenocholine - Urine'),
('UBA', 'Barium - Urine'),
('UBE', 'Beryllium - Urine'),
('UCD', 'Cadmium - Urine'),
('UCO', 'Cobalt - Urine'),
('UCS', 'Cesium - Urine'),
('UDMA', 'Dimethylarsinic Acid - Urine'),
('UHG', 'Mercury - Urine'),
('UIO', 'Iodine - Urine'),
('UMMA', 'Monomethylarsinic Acid - Urine'),
('UMN', 'Manganese - Urine'),
('UMO', 'Molybdenum - Urine'),
('UPB', 'Lead - Urine'),
('UPT', 'Platinum - Urine'),
('USB', 'Antimony - Urine'),
('USN', 'Tin - Urine'),
('USR', 'Strontium - Urine'),
('UTAS', 'Arsenic Total - Urine'),
('UTL', 'Thallium - Urine'),
('UTMO', 'Trimethylarsine - Urine'),
('UTU', 'Tungsten - Urine'),
('UUR', 'Uranium - Urine'),
))]
CAT_UNM_MEMBER_C = (
('1', 'mother'),
('2', 'father'),
('3', 'child'),
)
CAT_UNM_TIME_PERIOD = (
('9', 'any'), # all time periods plotted together
('1', 'enrollment'),
('3', 'week 36/delivery'),
)
ADDITIONAL_FEATURES = [('Categorical', (
('Outcome', 'Outcome'),
('Member_c', 'Family Member'),
('TimePeriod', 'Collection Time'),
('CohortType', 'Cohort Type'),
)),
('Outcomes', (
('SGA', 'SGA'),
('LGA', 'LGA'),
('birthWt', 'Birth Weight'),
('headCirc', 'headCirc'),
('birthLen','birthLen'),
('Outcome_weeks', 'Outcome Weeks'),
('Outcome', 'Outcome'),
)
)]
UNM_FEATURE_CHOICES = CAT_UNM_ANALYTES + ADDITIONAL_FEATURES
#UNM_FEATURE_CHOICES = CAT_UNM_ANALYTES
UNM_CATEGORICAL_CHOICES = ADDITIONAL_FEATURES
| CAT_UNM_ANALYTES = [('Analytes', (
('BCD', 'Cadmium - Blood'),
('BHGE', 'Ethyl Mercury - Blood'),
('BHGM', 'Methyl Mercury - Blood'),
('BMN', 'Manganese - Blood'),
('BPB', 'Lead - Blood'),
('BSE', 'Selenium - Blood'),
('IHG', 'Inorganic Mercury - Blood'),
('THG', 'Mercury Total - Blood'),
('SCU', 'Copper - Serum'),
('SSE', 'Selenium - Serum'),
('SZN', 'Zinc - Serum'),
('UAS3', 'Arsenous (III) acid - Urine'),
('UAS5', 'Arsenic (V) acid - Urine'),
('UASB', 'Arsenobetaine - Urine'),
('UASC', 'Arsenocholine - Urine'),
('UBA', 'Barium - Urine'),
('UBE', 'Beryllium - Urine'),
('UCD', 'Cadmium - Urine'),
('UCO', 'Cobalt - Urine'),
('UCS', 'Cesium - Urine'),
('UDMA', 'Dimethylarsinic Acid - Urine'),
('UHG', 'Mercury - Urine'),
('UIO', 'Iodine - Urine'),
('UMMA', 'Monomethylarsinic Acid - Urine'),
('UMN', 'Manganese - Urine'),
('UMO', 'Molybdenum - Urine'),
('UPB', 'Lead - Urine'),
('UPT', 'Platinum - Urine'),
('USB', 'Antimony - Urine'),
('USN', 'Tin - Urine'),
('USR', 'Strontium - Urine'),
('UTAS', 'Arsenic Total - Urine'),
('UTL', 'Thallium - Urine'),
('UTMO', 'Trimethylarsine - Urine'),
('UTU', 'Tungsten - Urine'),
('UUR', 'Uranium - Urine'),
))]
CAT_UNM_MEMBER_C = (
('1', 'mother'),
('2', 'father'),
('3', 'child'),
)
CAT_UNM_TIME_PERIOD = (
('9', 'any'), # all time periods plotted together
('1', 'enrollment'),
('3', 'week 36/delivery'),
)
ADDITIONAL_FEATURES = [('Categorical', (
('Outcome', 'Outcome'),
('Member_c', 'Family Member'),
('TimePeriod', 'Collection Time'),
('CohortType', 'Cohort Type'),
)),
('Outcomes', (
('SGA', 'SGA'),
('LGA', 'LGA'),
('birthWt', 'Birth Weight'),
('headCirc', 'headCirc'),
('birthLen','birthLen'),
('Outcome_weeks', 'Outcome Weeks'),
('Outcome', 'Outcome'),
)
)]
UNM_FEATURE_CHOICES = CAT_UNM_ANALYTES + ADDITIONAL_FEATURES
#UNM_FEATURE_CHOICES = CAT_UNM_ANALYTES
UNM_CATEGORICAL_CHOICES = ADDITIONAL_FEATURES
| en | 0.619707 | # all time periods ploted together #UNM_FEATURE_CHOICES = CAT_UNM_ANALYTES | 1.267251 | 1 |
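A sketch of how grouped choice tuples like these are typically consumed in a Django form; the module path and form/field names are assumptions.

from django import forms
# module path assumed from the file location above
from pages.choices.unm import UNM_FEATURE_CHOICES, UNM_CATEGORICAL_CHOICES

class UNMPlotForm(forms.Form):
    x_feature = forms.ChoiceField(choices=UNM_FEATURE_CHOICES,
                                  label='X axis feature')
    color_by = forms.ChoiceField(choices=UNM_CATEGORICAL_CHOICES,
                                 required=False, label='Color by')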
scripts/_dparser.py | WinstonLHS/tofu | 56 | 6630007 | import sys
import os
import getpass
import argparse
# tofu
# test if in a tofu git repo
_HERE = os.path.abspath(os.path.dirname(__file__))
_TOFUPATH = os.path.dirname(_HERE)
def get_mods():
istofugit = False
if '.git' in os.listdir(_TOFUPATH) and 'tofu' in _TOFUPATH:
istofugit = True
if istofugit:
# Make sure we load the corresponding tofu
sys.path.insert(1, _TOFUPATH)
import tofu as tf
from tofu.imas2tofu import MultiIDSLoader
_ = sys.path.pop(1)
else:
import tofu as tf
from tofu.imas2tofu import MultiIDSLoader
# default parameters
pfe = os.path.join(os.path.expanduser('~'), '.tofu', '_scripts_def.py')
if os.path.isfile(pfe):
# Make sure we load the user-specific file
# sys.path method
# sys.path.insert(1, os.path.join(os.path.expanduser('~'), '.tofu'))
# import _scripts_def as _defscripts
# _ = sys.path.pop(1)
# importlib method
import importlib.util
spec = importlib.util.spec_from_file_location("_defscripts", pfe)
_defscripts = importlib.util.module_from_spec(spec)
spec.loader.exec_module(_defscripts)
else:
try:
import tofu.entrypoints._def as _defscripts
except Exception as err:
from . import _def as _defscripts
return tf, MultiIDSLoader, _defscripts
# #############################################################################
# utility functions
# #############################################################################
def _str2bool(v):
if isinstance(v, bool):
return v
elif v.lower() in ['yes', 'true', 'y', 't', '1']:
return True
elif v.lower() in ['no', 'false', 'n', 'f', '0']:
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected!')
def _str2boolstr(v):
if isinstance(v, bool):
return v
elif isinstance(v, str):
if v.lower() in ['yes', 'true', 'y', 't', '1']:
return True
elif v.lower() in ['no', 'false', 'n', 'f', '0']:
return False
elif v.lower() == 'none':
return None
else:
return v
else:
raise argparse.ArgumentTypeError('Boolean, None or str expected!')
def _str2tlim(v):
c0 = (v.isdigit()
or ('.' in v
and len(v.split('.')) == 2
and all([vv.isdigit() for vv in v.split('.')])))
if c0 is True:
v = float(v)
elif v.lower() == 'none':
v = None
return v
# #############################################################################
# Parser for tofu version
# #############################################################################
def parser_version():
msg = """ Get tofu version from bash optionally set an enviroment variable
If run from a git repo containing tofu, simply returns git describe
Otherwise reads the tofu version stored in tofu/version.py
"""
ddef = {
'path': os.path.join(_TOFUPATH, 'tofu'),
'envvar': False,
'verb': True,
'warn': True,
'force': False,
'name': 'TOFU_VERSION',
}
# Instantiate parser
parser = argparse.ArgumentParser(description=msg)
# Define input arguments
parser.add_argument('-p', '--path',
type=str,
help='tofu source directory where version.py is found',
required=False, default=ddef['path'])
parser.add_argument('-v', '--verb',
type=_str2bool,
help='flag indicating whether to print the version',
required=False, default=ddef['verb'])
parser.add_argument('-ev', '--envvar',
type=_str2boolstr,
help='name of the environment variable to set, if any',
required=False, default=ddef['envvar'])
parser.add_argument('-w', '--warn',
type=_str2bool,
help=('flag indicating whether to print a warning when '
+ 'the desired environment variable (envvar) '
+ 'already exists'),
required=False, default=ddef['warn'])
parser.add_argument('-f', '--force',
type=_str2bool,
help=('flag indicating whether to force the update of '
+ 'the desired environment variable (envvar)'
+ ' even if it already exists'),
required=False, default=ddef['force'])
return ddef, parser
# #############################################################################
# Parser for tofu custom
# #############################################################################
def parser_custom():
msg = """ Create a local copy of tofu default parameters
This creates a local copy, in your home, of tofu default parameters
A directory .tofu is created in your home directory
In this directory, modules containing default parameters are copied
You can then customize them without impacting other users
"""
_USER = getpass.getuser()
_USER_HOME = os.path.expanduser('~')
ddef = {
'target': os.path.join(_USER_HOME, '.tofu'),
'source': os.path.join(_TOFUPATH, 'tofu'),
'files': [
'_imas2tofu_def.py',
'_entrypoints_def.py',
],
'directories': [
'openadas2tofu',
'nist2tofu',
os.path.join('nist2tofu', 'ASD'),
],
}
# Instantiate parser
parser = argparse.ArgumentParser(description=msg)
# Define input arguments
parser.add_argument('-s', '--source',
type=str,
help='tofu source directory',
required=False,
default=ddef['source'])
parser.add_argument('-t', '--target',
type=str,
help=('directory where .tofu/ should be created'
+ ' (default: {})'.format(ddef['target'])),
required=False,
default=ddef['target'])
parser.add_argument('-f', '--files',
type=str,
help='list of files to be copied',
required=False,
nargs='+',
default=ddef['files'],
choices=ddef['files'])
return ddef, parser
# #############################################################################
# Parser for tofu plot
# #############################################################################
def parser_plot():
tf, MultiIDSLoader, _defscripts = get_mods()
_LIDS_CONFIG = MultiIDSLoader._lidsconfig
_LIDS_DIAG = MultiIDSLoader._lidsdiag
_LIDS_PLASMA = tf.imas2tofu.MultiIDSLoader._lidsplasma
_LIDS = _LIDS_CONFIG + _LIDS_DIAG + _LIDS_PLASMA + tf.utils._LIDS_CUSTOM
msg = """Fast interactive visualization tool for diagnostics data in
imas
This is merely a wrapper around the function tofu.load_from_imas()
It loads (from imas) and displays diagnostics data from the following
ids:
{}
""".format(_LIDS)
ddef = {
# User-customizable
'run': _defscripts._TFPLOT_RUN,
'user': _defscripts._TFPLOT_USER,
'database': _defscripts._TFPLOT_DATABASE,
'version': _defscripts._TFPLOT_VERSION,
't0': _defscripts._TFPLOT_T0,
'tlim': None,
'sharex': _defscripts._TFPLOT_SHAREX,
'bck': _defscripts._TFPLOT_BCK,
'extra': _defscripts._TFPLOT_EXTRA,
'indch_auto': _defscripts._TFPLOT_INDCH_AUTO,
'config': _defscripts._TFPLOT_CONFIG,
'tosoledge3x': _defscripts._TFPLOT_TOSOLEDGE3X,
'mag_sep_nbpts': _defscripts._TFPLOT_MAG_SEP_NBPTS,
'mag_sep_dR': _defscripts._TFPLOT_MAG_SEP_DR,
'mag_init_pts': _defscripts._TFPLOT_MAG_INIT_PTS,
# Non user-customizable
'lids_plasma': _LIDS_PLASMA,
'lids_diag': _LIDS_DIAG,
'lids': _LIDS,
}
parser = argparse.ArgumentParser(description=msg)
msg = 'config name to be loaded'
parser.add_argument('-c', '--config', help=msg,
required=False, type=str,
default=ddef['config'])
msg = 'path in which to save the tofu config in SOLEDGE3X format'
parser.add_argument('-tse3x', '--tosoledge3x', help=msg,
required=False, type=str,
default=ddef['tosoledge3x'])
parser.add_argument('-s', '--shot', type=int,
help='shot number', required=False, nargs='+')
msg = 'username of the DB where the datafile is located'
parser.add_argument('-u', '--user', help=msg, required=False,
default=ddef['user'])
msg = 'database name where the datafile is located'
parser.add_argument('-db', '--database', help=msg, required=False,
default=ddef['database'])
parser.add_argument('-r', '--run', help='run number',
required=False, type=int,
default=ddef['run'])
parser.add_argument('-v', '--version', help='version number',
required=False, type=str,
default=ddef['version'])
msg = ("ids from which to load diagnostics data,"
+ " can be:\n{}".format(ddef['lids']))
parser.add_argument('-i', '--ids', type=str, required=True,
help=msg, nargs='+', choices=ddef['lids'])
parser.add_argument('-q', '--quantity', type=str, required=False,
help='Desired quantity from the plasma ids',
nargs='+', default=None)
parser.add_argument('-X', '--X', type=str, required=False,
help='Quantity from the plasma ids for abscissa',
nargs='+', default=None)
parser.add_argument('-t0', '--t0', type=_str2boolstr, required=False,
help='Reference time event setting t = 0',
default=ddef['t0'])
parser.add_argument('-t', '--t', type=float, required=False,
help='Input time when needed')
parser.add_argument('-tl', '--tlim', type=_str2tlim,
required=False,
help='limits of the time interval',
nargs='+', default=ddef['tlim'])
parser.add_argument('-ich', '--indch', type=int, required=False,
help='indices of channels to be loaded',
nargs='+', default=None)
parser.add_argument('-ichauto', '--indch_auto', type=_str2bool,
required=False,
help='automatically determine indices of'
+ ' channels to be loaded', default=ddef['indch_auto'])
parser.add_argument('-e', '--extra', type=_str2bool, required=False,
help='If True loads separatrix and heating power',
default=ddef['extra'])
parser.add_argument('-sx', '--sharex', type=_str2bool, required=False,
help='Should X axis be shared between diag ids ?',
default=ddef['sharex'], const=True, nargs='?')
parser.add_argument('-bck', '--background', type=_str2bool, required=False,
help='Plot data envelope as grey background?',
default=ddef['bck'], const=True, nargs='?')
parser.add_argument('-mag_sep_dR', '--mag_sep_dR', type=float,
required=False,
default=ddef['mag_sep_dR'],
help='Distance to separatrix from r_ext to plot'
+ ' magnetic field lines')
parser.add_argument('-mag_sep_nbpts', '--mag_sep_nbpts', type=int,
required=False,
default=ddef['mag_sep_nbpts'],
help=('Number of mag. field lines to plot '
+ 'from separatrix'))
parser.add_argument('-mag_init_pts', '--mag_init_pts',
type=float, required=False, nargs=3,
default=ddef['mag_init_pts'],
help='Manual coordinates of point that a RED magnetic'
+ ' field line will cross on graphics,'
+ ' give coordinates as: R [m], Phi [rad], Z [m]')
return ddef, parser
# #############################################################################
# Parser for tofu calc
# #############################################################################
def parser_calc():
tf, MultiIDSLoader, _defscripts = get_mods()
_LIDS_DIAG = MultiIDSLoader._lidsdiag
_LIDS_PLASMA = tf.imas2tofu.MultiIDSLoader._lidsplasma
_LIDS = _LIDS_DIAG + _LIDS_PLASMA + tf.utils._LIDS_CUSTOM
# Parse input arguments
msg = """Fast interactive visualization tool for diagnostics data in
imas
This is merely a wrapper around the function tofu.calc_from_imas()
It calculates and displays synthetic signal (from imas) from the following
ids:
{}
""".format(_LIDS)
ddef = {
# User-customizable
'run': _defscripts._TFCALC_RUN,
'user': _defscripts._TFCALC_USER,
'database': _defscripts._TFCALC_DATABASE,
'version': _defscripts._TFCALC_VERSION,
't0': _defscripts._TFCALC_T0,
'tlim': None,
'sharex': _defscripts._TFCALC_SHAREX,
'bck': _defscripts._TFCALC_BCK,
'extra': _defscripts._TFCALC_EXTRA,
'indch_auto': _defscripts._TFCALC_INDCH_AUTO,
'coefs': None,
# Non user-customizable
'lids_plasma': _LIDS_PLASMA,
'lids_diag': _LIDS_DIAG,
'lids': _LIDS,
}
parser = argparse.ArgumentParser(description=msg)
# Main idd parameters
parser.add_argument('-s', '--shot', type=int,
help='shot number', required=True)
msg = 'username of the DB where the datafile is located'
parser.add_argument('-u', '--user',
help=msg, required=False, default=ddef['user'])
msg = 'database name where the datafile is located'
parser.add_argument('-db', '--database', help=msg, required=False,
default=ddef['database'])
parser.add_argument('-r', '--run', help='run number',
required=False, type=int, default=ddef['run'])
parser.add_argument('-v', '--version', help='version number',
required=False, type=str, default=ddef['version'])
# Equilibrium idd parameters
parser.add_argument('-s_eq', '--shot_eq', type=int,
help='shot number for equilibrium, defaults to -s',
required=False, default=None)
msg = 'username for the equilibrium, defaults to -u'
parser.add_argument('-u_eq', '--user_eq',
help=msg, required=False, default=None)
msg = 'database name for the equilibrium, defaults to -tok'
parser.add_argument('-db_eq', '--database_eq',
help=msg, required=False, default=None)
parser.add_argument('-r_eq', '--run_eq',
help='run number for the equilibrium, defaults to -r',
required=False, type=int, default=None)
# Profile idd parameters
parser.add_argument('-s_prof', '--shot_prof', type=int,
help='shot number for profiles, defaults to -s',
required=False, default=None)
msg = 'username for the profiles, defaults to -u'
parser.add_argument('-u_prof', '--user_prof',
help=msg, required=False, default=None)
msg = 'database name for the profiles, defaults to -tok'
parser.add_argument('-db_prof', '--database_prof',
help=msg, required=False, default=None)
parser.add_argument('-r_prof', '--run_prof',
help='run number for the profiles, defaults to -r',
required=False, type=int, default=None)
msg = ("ids from which to load diagnostics data,"
+ " can be:\n{}".format(ddef['lids']))
parser.add_argument('-i', '--ids', type=str, required=True,
help=msg, nargs='+', choices=ddef['lids'])
parser.add_argument('-B', '--Brightness', type=bool, required=False,
help='Whether to express result as brightness',
default=None)
parser.add_argument('-res', '--res', type=float, required=False,
help='Space resolution for the LOS-discretization',
default=None)
parser.add_argument('-t0', '--t0', type=_str2boolstr, required=False,
help='Reference time event setting t = 0',
default=ddef['t0'])
parser.add_argument('-tl', '--tlim', type=_str2tlim,
required=False,
help='limits of the time interval',
nargs='+', default=ddef['tlim'])
parser.add_argument('-c', '--coefs', type=float, required=False,
help='Corrective coefficient, if any',
default=ddef['coefs'])
parser.add_argument('-ich', '--indch', type=int, required=False,
help='indices of channels to be loaded',
nargs='+', default=None)
parser.add_argument('-ichauto', '--indch_auto', type=bool, required=False,
help=('automatically determine indices '
+ 'of channels to be loaded'),
default=ddef['indch_auto'])
parser.add_argument('-e', '--extra', type=_str2bool, required=False,
help='If True loads separatrix and heating power',
default=ddef['extra'])
parser.add_argument('-sx', '--sharex', type=_str2bool, required=False,
help='Should X axis be shared between diag ids?',
default=ddef['sharex'], const=True, nargs='?')
parser.add_argument('-if', '--input_file', type=str, required=False,
help='mat file from which to load core_profiles',
default=None)
parser.add_argument('-of', '--output_file', type=str, required=False,
help='mat file into which to save synthetic signal',
default=None)
parser.add_argument('-bck', '--background', type=_str2bool, required=False,
help='Plot data envelope as grey background?',
default=ddef['bck'], const=True, nargs='?')
return ddef, parser
# #############################################################################
# Parser dict
# #############################################################################
_DPARSER = {
'version': parser_version,
'custom': parser_custom,
'plot': parser_plot,
'calc': parser_calc,
}
| import sys
import os
import getpass
import argparse
# tofu
# test if in a tofu git repo
_HERE = os.path.abspath(os.path.dirname(__file__))
_TOFUPATH = os.path.dirname(_HERE)
def get_mods():
istofugit = False
if '.git' in os.listdir(_TOFUPATH) and 'tofu' in _TOFUPATH:
istofugit = True
if istofugit:
# Make sure we load the corresponding tofu
sys.path.insert(1, _TOFUPATH)
import tofu as tf
from tofu.imas2tofu import MultiIDSLoader
_ = sys.path.pop(1)
else:
import tofu as tf
from tofu.imas2tofu import MultiIDSLoader
# default parameters
pfe = os.path.join(os.path.expanduser('~'), '.tofu', '_scripts_def.py')
if os.path.isfile(pfe):
# Make sure we load the user-specific file
# sys.path method
# sys.path.insert(1, os.path.join(os.path.expanduser('~'), '.tofu'))
# import _scripts_def as _defscripts
# _ = sys.path.pop(1)
# importlib method
import importlib.util
spec = importlib.util.spec_from_file_location("_defscripts", pfe)
_defscripts = importlib.util.module_from_spec(spec)
spec.loader.exec_module(_defscripts)
else:
try:
import tofu.entrypoints._def as _defscripts
except Exception as err:
from . import _def as _defscripts
return tf, MultiIDSLoader, _defscripts
# #############################################################################
# utility functions
# #############################################################################
def _str2bool(v):
if isinstance(v, bool):
return v
elif v.lower() in ['yes', 'true', 'y', 't', '1']:
return True
elif v.lower() in ['no', 'false', 'n', 'f', '0']:
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected!')
def _str2boolstr(v):
if isinstance(v, bool):
return v
elif isinstance(v, str):
if v.lower() in ['yes', 'true', 'y', 't', '1']:
return True
elif v.lower() in ['no', 'false', 'n', 'f', '0']:
return False
elif v.lower() == 'none':
return None
else:
return v
else:
raise argparse.ArgumentTypeError('Boolean, None or str expected!')
def _str2tlim(v):
c0 = (v.isdigit()
or ('.' in v
and len(v.split('.')) == 2
and all([vv.isdigit() for vv in v.split('.')])))
if c0 is True:
v = float(v)
elif v.lower() == 'none':
v = None
return v
# #############################################################################
# Parser for tofu version
# #############################################################################
def parser_version():
msg = """ Get tofu version from bash optionally set an enviroment variable
If run from a git repo containing tofu, simply returns git describe
Otherwise reads the tofu version stored in tofu/version.py
"""
ddef = {
'path': os.path.join(_TOFUPATH, 'tofu'),
'envvar': False,
'verb': True,
'warn': True,
'force': False,
'name': 'TOFU_VERSION',
}
# Instantiate parser
parser = argparse.ArgumentParser(description=msg)
# Define input arguments
parser.add_argument('-p', '--path',
type=str,
help='tofu source directory where version.py is found',
required=False, default=ddef['path'])
parser.add_argument('-v', '--verb',
type=_str2bool,
help='flag indicating whether to print the version',
required=False, default=ddef['verb'])
parser.add_argument('-ev', '--envvar',
type=_str2boolstr,
help='name of the environment variable to set, if any',
required=False, default=ddef['envvar'])
parser.add_argument('-w', '--warn',
type=_str2bool,
help=('flag indicating whether to print a warning when '
+ 'the desired environment variable (envvar) '
+ 'already exists'),
required=False, default=ddef['warn'])
parser.add_argument('-f', '--force',
type=_str2bool,
help=('flag indicating whether to force the update of '
+ 'the desired environment variable (envvar)'
+ ' even if it already exists'),
required=False, default=ddef['force'])
return ddef, parser
# #############################################################################
# Parser for tofu custom
# #############################################################################
def parser_custom():
msg = """ Create a local copy of tofu default parameters
This creates a local copy, in your home, of tofu default parameters
A directory .tofu is created in your home directory
In this directory, modules containing default parameters are copied
You can then customize them without impacting other users
"""
_USER = getpass.getuser()
_USER_HOME = os.path.expanduser('~')
ddef = {
'target': os.path.join(_USER_HOME, '.tofu'),
'source': os.path.join(_TOFUPATH, 'tofu'),
'files': [
'_imas2tofu_def.py',
'_entrypoints_def.py',
],
'directories': [
'openadas2tofu',
'nist2tofu',
os.path.join('nist2tofu', 'ASD'),
],
}
# Instantiate parser
parser = argparse.ArgumentParser(description=msg)
# Define input arguments
parser.add_argument('-s', '--source',
type=str,
help='tofu source directory',
required=False,
default=ddef['source'])
parser.add_argument('-t', '--target',
type=str,
help=('directory where .tofu/ should be created'
+ ' (default: {})'.format(ddef['target'])),
required=False,
default=ddef['target'])
parser.add_argument('-f', '--files',
type=str,
help='list of files to be copied',
required=False,
nargs='+',
default=ddef['files'],
choices=ddef['files'])
return ddef, parser
# #############################################################################
# Parser for tofu plot
# #############################################################################
def parser_plot():
tf, MultiIDSLoader, _defscripts = get_mods()
_LIDS_CONFIG = MultiIDSLoader._lidsconfig
_LIDS_DIAG = MultiIDSLoader._lidsdiag
_LIDS_PLASMA = tf.imas2tofu.MultiIDSLoader._lidsplasma
_LIDS = _LIDS_CONFIG + _LIDS_DIAG + _LIDS_PLASMA + tf.utils._LIDS_CUSTOM
msg = """Fast interactive visualization tool for diagnostics data in
imas
This is merely a wrapper around the function tofu.load_from_imas()
It loads (from imas) and displays diagnostics data from the following
ids:
{}
""".format(_LIDS)
ddef = {
# User-customizable
'run': _defscripts._TFPLOT_RUN,
'user': _defscripts._TFPLOT_USER,
'database': _defscripts._TFPLOT_DATABASE,
'version': _defscripts._TFPLOT_VERSION,
't0': _defscripts._TFPLOT_T0,
'tlim': None,
'sharex': _defscripts._TFPLOT_SHAREX,
'bck': _defscripts._TFPLOT_BCK,
'extra': _defscripts._TFPLOT_EXTRA,
'indch_auto': _defscripts._TFPLOT_INDCH_AUTO,
'config': _defscripts._TFPLOT_CONFIG,
'tosoledge3x': _defscripts._TFPLOT_TOSOLEDGE3X,
'mag_sep_nbpts': _defscripts._TFPLOT_MAG_SEP_NBPTS,
'mag_sep_dR': _defscripts._TFPLOT_MAG_SEP_DR,
'mag_init_pts': _defscripts._TFPLOT_MAG_INIT_PTS,
# Non user-customizable
'lids_plasma': _LIDS_PLASMA,
'lids_diag': _LIDS_DIAG,
'lids': _LIDS,
}
parser = argparse.ArgumentParser(description=msg)
msg = 'config name to be loaded'
parser.add_argument('-c', '--config', help=msg,
required=False, type=str,
default=ddef['config'])
msg = 'path in which to save the tofu config in SOLEDGE3X format'
parser.add_argument('-tse3x', '--tosoledge3x', help=msg,
required=False, type=str,
default=ddef['tosoledge3x'])
parser.add_argument('-s', '--shot', type=int,
help='shot number', required=False, nargs='+')
msg = 'username of the DB where the datafile is located'
parser.add_argument('-u', '--user', help=msg, required=False,
default=ddef['user'])
msg = 'database name where the datafile is located'
parser.add_argument('-db', '--database', help=msg, required=False,
default=ddef['database'])
parser.add_argument('-r', '--run', help='run number',
required=False, type=int,
default=ddef['run'])
parser.add_argument('-v', '--version', help='version number',
required=False, type=str,
default=ddef['version'])
msg = ("ids from which to load diagnostics data,"
+ " can be:\n{}".format(ddef['lids']))
parser.add_argument('-i', '--ids', type=str, required=True,
help=msg, nargs='+', choices=ddef['lids'])
parser.add_argument('-q', '--quantity', type=str, required=False,
help='Desired quantity from the plasma ids',
nargs='+', default=None)
parser.add_argument('-X', '--X', type=str, required=False,
help='Quantity from the plasma ids for abscissa',
nargs='+', default=None)
parser.add_argument('-t0', '--t0', type=_str2boolstr, required=False,
help='Reference time event setting t = 0',
default=ddef['t0'])
parser.add_argument('-t', '--t', type=float, required=False,
help='Input time when needed')
parser.add_argument('-tl', '--tlim', type=_str2tlim,
required=False,
help='limits of the time interval',
nargs='+', default=ddef['tlim'])
parser.add_argument('-ich', '--indch', type=int, required=False,
help='indices of channels to be loaded',
nargs='+', default=None)
parser.add_argument('-ichauto', '--indch_auto', type=_str2bool,
required=False,
help='automatically determine indices of'
+ ' channels to be loaded', default=ddef['indch_auto'])
parser.add_argument('-e', '--extra', type=_str2bool, required=False,
help='If True loads separatrix and heating power',
default=ddef['extra'])
parser.add_argument('-sx', '--sharex', type=_str2bool, required=False,
help='Should X axis be shared between diag ids ?',
default=ddef['sharex'], const=True, nargs='?')
parser.add_argument('-bck', '--background', type=_str2bool, required=False,
help='Plot data envelope as grey background?',
default=ddef['bck'], const=True, nargs='?')
parser.add_argument('-mag_sep_dR', '--mag_sep_dR', type=float,
required=False,
default=ddef['mag_sep_dR'],
help='Distance to separatrix from r_ext to plot'
+ ' magnetic field lines')
parser.add_argument('-mag_sep_nbpts', '--mag_sep_nbpts', type=int,
required=False,
default=ddef['mag_sep_nbpts'],
help=('Number of mag. field lines to plot '
+ 'from separatrix'))
parser.add_argument('-mag_init_pts', '--mag_init_pts',
type=float, required=False, nargs=3,
default=ddef['mag_init_pts'],
help='Manual coordinates of point that a RED magnetic'
+ ' field line will cross on graphics,'
+ ' give coordinates as: R [m], Phi [rad], Z [m]')
return ddef, parser
# #############################################################################
# Parser for tofu calc
# #############################################################################
def parser_calc():
tf, MultiIDSLoader, _defscripts = get_mods()
_LIDS_DIAG = MultiIDSLoader._lidsdiag
_LIDS_PLASMA = tf.imas2tofu.MultiIDSLoader._lidsplasma
_LIDS = _LIDS_DIAG + _LIDS_PLASMA + tf.utils._LIDS_CUSTOM
# Parse input arguments
msg = """Fast interactive visualization tool for diagnostics data in
imas
This is merely a wrapper around the function tofu.calc_from_imas()
It calculates and displays synthetic signal (from imas) from the following
ids:
{}
""".format(_LIDS)
ddef = {
# User-customizable
'run': _defscripts._TFCALC_RUN,
'user': _defscripts._TFCALC_USER,
'database': _defscripts._TFCALC_DATABASE,
'version': _defscripts._TFCALC_VERSION,
't0': _defscripts._TFCALC_T0,
'tlim': None,
'sharex': _defscripts._TFCALC_SHAREX,
'bck': _defscripts._TFCALC_BCK,
'extra': _defscripts._TFCALC_EXTRA,
'indch_auto': _defscripts._TFCALC_INDCH_AUTO,
'coefs': None,
# Non user-customizable
'lids_plasma': _LIDS_PLASMA,
'lids_diag': _LIDS_DIAG,
'lids': _LIDS,
}
parser = argparse.ArgumentParser(description=msg)
# Main idd parameters
parser.add_argument('-s', '--shot', type=int,
help='shot number', required=True)
msg = 'username of the DB where the datafile is located'
parser.add_argument('-u', '--user',
help=msg, required=False, default=ddef['user'])
msg = 'database name where the datafile is located'
parser.add_argument('-db', '--database', help=msg, required=False,
default=ddef['database'])
parser.add_argument('-r', '--run', help='run number',
required=False, type=int, default=ddef['run'])
parser.add_argument('-v', '--version', help='version number',
required=False, type=str, default=ddef['version'])
# Equilibrium idd parameters
parser.add_argument('-s_eq', '--shot_eq', type=int,
help='shot number for equilibrium, defaults to -s',
required=False, default=None)
msg = 'username for the equilibrium, defaults to -u'
parser.add_argument('-u_eq', '--user_eq',
help=msg, required=False, default=None)
msg = 'database name for the equilibrium, defaults to -tok'
parser.add_argument('-db_eq', '--database_eq',
help=msg, required=False, default=None)
parser.add_argument('-r_eq', '--run_eq',
help='run number for the equilibrium, defaults to -r',
required=False, type=int, default=None)
# Profile idd parameters
parser.add_argument('-s_prof', '--shot_prof', type=int,
help='shot number for profiles, defaults to -s',
required=False, default=None)
msg = 'username for the profiles, defaults to -u'
parser.add_argument('-u_prof', '--user_prof',
help=msg, required=False, default=None)
msg = 'database name for the profiles, defaults to -tok'
parser.add_argument('-db_prof', '--database_prof',
help=msg, required=False, default=None)
parser.add_argument('-r_prof', '--run_prof',
help='run number for the profiles, defaults to -r',
required=False, type=int, default=None)
msg = ("ids from which to load diagnostics data,"
+ " can be:\n{}".format(ddef['lids']))
parser.add_argument('-i', '--ids', type=str, required=True,
help=msg, nargs='+', choices=ddef['lids'])
parser.add_argument('-B', '--Brightness', type=bool, required=False,
help='Whether to express result as brightness',
default=None)
parser.add_argument('-res', '--res', type=float, required=False,
help='Space resolution for the LOS-discretization',
default=None)
parser.add_argument('-t0', '--t0', type=_str2boolstr, required=False,
help='Reference time event setting t = 0',
default=ddef['t0'])
parser.add_argument('-tl', '--tlim', type=_str2tlim,
required=False,
help='limits of the time interval',
nargs='+', default=ddef['tlim'])
parser.add_argument('-c', '--coefs', type=float, required=False,
help='Corrective coefficient, if any',
default=ddef['coefs'])
parser.add_argument('-ich', '--indch', type=int, required=False,
help='indices of channels to be loaded',
nargs='+', default=None)
parser.add_argument('-ichauto', '--indch_auto', type=bool, required=False,
help=('automatically determine indices '
+ 'of channels to be loaded'),
default=ddef['indch_auto'])
parser.add_argument('-e', '--extra', type=_str2bool, required=False,
help='If True loads separatrix and heating power',
default=ddef['extra'])
parser.add_argument('-sx', '--sharex', type=_str2bool, required=False,
help='Should X axis be shared between diag ids?',
default=ddef['sharex'], const=True, nargs='?')
parser.add_argument('-if', '--input_file', type=str, required=False,
help='mat file from which to load core_profiles',
default=None)
parser.add_argument('-of', '--output_file', type=str, required=False,
help='mat file into which to save synthetic signal',
default=None)
parser.add_argument('-bck', '--background', type=_str2bool, required=False,
help='Plot data envelope as grey background?',
default=ddef['bck'], const=True, nargs='?')
return ddef, parser
# #############################################################################
# Parser dict
# #############################################################################
_DPARSER = {
'version': parser_version,
'custom': parser_custom,
'plot': parser_plot,
'calc': parser_calc,
}
| de | 0.327291 | # tofu # test if in a tofu git repo # Make sure we load the corresponding tofu # default parameters # Make sure we load the user-specific file # sys.path method # sys.path.insert(1, os.path.join(os.path.expanduser('~'), '.tofu')) # import _scripts_def as _defscripts # _ = sys.path.pop(1) # importlib method # ############################################################################# # utility functions # ############################################################################# # ############################################################################# # Parser for tofu version # ############################################################################# Get tofu version from bash optionally set an enviroment variable If run from a git repo containing tofu, simply returns git describe Otherwise reads the tofu version stored in tofu/version.py # Instanciate parser # Define input arguments # ############################################################################# # Parser for tofu custom # ############################################################################# Create a local copy of tofu default parameters This creates a local copy, in your home, of tofu default parameters A directory .tofu is created in your home directory In this directory, modules containing default parameters are copied You can then customize them without impacting other users # Instanciate parser # Define input arguments # ############################################################################# # Parser for tofu plot # ############################################################################# Fast interactive visualization tool for diagnostics data in imas This is merely a wrapper around the function tofu.load_from_imas() It loads (from imas) and displays diagnostics data from the following ids: {} # User-customizable # Non user-customizable # ############################################################################# # Parser for tofu calc # ############################################################################# # Parse input arguments Fast interactive visualization tool for diagnostics data in imas This is merely a wrapper around the function tofu.calc_from_imas() It calculates and dsplays synthetic signal (from imas) from the following ids: {} # User-customizable # Non user-customizable # Main idd parameters # Equilibrium idd parameters # Profile idd parameters # ############################################################################# # Parser dict # ############################################################################# | 2.268006 | 2 |
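A sketch of how the _DPARSER registry above can be consumed from an entry point; the helper name and example argument list are assumptions.

def parse_tool_args(tool, argv=None):
    """Build the parser registered for `tool` ('version', 'custom', 'plot', 'calc')
    and parse argv with it (argv=None falls back to sys.argv)."""
    ddef, parser = _DPARSER[tool]()
    args = parser.parse_args(argv)
    return ddef, args

# e.g. defaults and parsed namespace for the version helper
ddef, args = parse_tool_args('version', ['--verb', 'True'])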
src/verify_ce_config.py | bbockelm/htcondor-ce | 0 | 6630008 | <filename>src/verify_ce_config.py
#!/bin/env python
"""
Verify HTCondor-CE configuration before service startup
"""
import re
import sys
# Verify that the HTCondor Python bindings are in the PYTHONPATH
try:
import classad
import htcondor
except ImportError:
sys.exit("ERROR: Could not load HTCondor Python bindings. " + \
"Please ensure that the 'htcondor' and 'classad' are in your PYTHONPATH")
# Create dict whose values are lists of ads specified in the relevant JOB_ROUTER_* variables
JOB_ROUTER_CONFIG = {}
for attr in ['JOB_ROUTER_DEFAULTS', 'JOB_ROUTER_ENTRIES']:
ads = classad.parseAds(htcondor.param[attr])
JOB_ROUTER_CONFIG[attr] = list(ads) # store the ads (iterating through ClassAdStringIterator consumes them)
# Verify job routes. classad.parseAds() ignores malformed ads so we have to compare the unparsed string to the
# parsed string, counting the number of ads by proxy: the number of opening square brackets, "["
for attr, ads in JOB_ROUTER_CONFIG.items():
if htcondor.param[attr].count('[') != len(ads):
sys.exit("ERROR: Could not read %s in the HTCondor CE configuration. Please verify syntax correctness" % attr)
# Find all eval_set_ attributes in the JOB_ROUTER_DEFAULTS
EVAL_SET_DEFAULTS = set([x.lstrip('eval_') for x in JOB_ROUTER_CONFIG['JOB_ROUTER_DEFAULTS'][0].keys()
if x.startswith('eval_set_')])
# Find all default_ attributes used in expressions in the JOB_ROUTER_DEFAULTS
DEFAULT_ATTR = set([re.sub(r'.*(default_\w*).*', 'eval_set_\\1', str(x))
for x in JOB_ROUTER_CONFIG['JOB_ROUTER_DEFAULTS'][0].values()
if isinstance(x, classad.ExprTree) and str(x).find('default_') != -1])
for entry in JOB_ROUTER_CONFIG['JOB_ROUTER_ENTRIES']:
# Warn users if they've set_ attributes that would be overridden by eval_set in the JOB_ROUTER_DEFAULTS
overriden_attr = EVAL_SET_DEFAULTS.intersection(set(entry.keys()))
if overriden_attr:
print "WARNING: %s in JOB_ROUTER_ENTRIES will be overriden by the JOB_ROUTER_DEFAULTS." \
% ', '.join(overriden_attr) \
+ " Use the 'eval_set_' prefix instead."
# Ensure that users don't set the job environment in the Job Router
if any(x.endswith('environment') for x in entry.keys()):
sys.exit("ERROR: Do not use the Job Router to set the environment. Place variables under " +\
"[Local Settings] in /etc/osg/config.d/40-localsettings.ini")
# Warn users about eval_set_ default attributes in the ENTRIES since their
# evaluation may occur after the eval_set_ expressions containing them in the
# JOB_ROUTER_DEFAULTS
no_effect_attr = DEFAULT_ATTR.intersection(set([x for x in entry.keys() if x.startswith('eval_set_')]))
if no_effect_attr:
print "WARNING: %s in JOB_ROUTER_ENTRIES " % ', '.join(no_effect_attr) + \
"may not have any effect. Use the 'set_' prefix instead."
# Warn users if osg-configure has not been run
try:
htcondor.param['OSG_CONFIGURED']
except KeyError:
print "WARNING: osg-configure has not been run, degrading the functionality " + \
"of the CE. Please run 'osg-configure -c' and restart condor-ce."
| <filename>src/verify_ce_config.py
#!/bin/env python
"""
Verify HTCondor-CE configuration before service startup
"""
import re
import sys
# Verify that the HTCondor Python bindings are in the PYTHONPATH
try:
import classad
import htcondor
except ImportError:
sys.exit("ERROR: Could not load HTCondor Python bindings. " + \
"Please ensure that the 'htcondor' and 'classad' are in your PYTHONPATH")
# Create dict whose values are lists of ads specified in the relevant JOB_ROUTER_* variables
JOB_ROUTER_CONFIG = {}
for attr in ['JOB_ROUTER_DEFAULTS', 'JOB_ROUTER_ENTRIES']:
ads = classad.parseAds(htcondor.param[attr])
JOB_ROUTER_CONFIG[attr] = list(ads) # store the ads (iterating through ClassAdStringIterator consumes them)
# Verify job routes. classad.parseAds() ignores malformed ads so we have to compare the unparsed string to the
# parsed string, counting the number of ads by proxy: the number of opening square brackets, "["
for attr, ads in JOB_ROUTER_CONFIG.items():
if htcondor.param[attr].count('[') != len(ads):
sys.exit("ERROR: Could not read %s in the HTCondor CE configuration. Please verify syntax correctness" % attr)
# Find all eval_set_ attributes in the JOB_ROUTER_DEFAULTS
EVAL_SET_DEFAULTS = set([x.lstrip('eval_') for x in JOB_ROUTER_CONFIG['JOB_ROUTER_DEFAULTS'][0].keys()
if x.startswith('eval_set_')])
# Find all default_ attributes used in expressions in the JOB_ROUTER_DEFAULTS
DEFAULT_ATTR = set([re.sub(r'.*(default_\w*).*', 'eval_set_\\1', str(x))
for x in JOB_ROUTER_CONFIG['JOB_ROUTER_DEFAULTS'][0].values()
if isinstance(x, classad.ExprTree) and str(x).find('default_') != -1])
for entry in JOB_ROUTER_CONFIG['JOB_ROUTER_ENTRIES']:
# Warn users if they've set_ attributes that would be overridden by eval_set in the JOB_ROUTER_DEFAULTS
overriden_attr = EVAL_SET_DEFAULTS.intersection(set(entry.keys()))
if overriden_attr:
print "WARNING: %s in JOB_ROUTER_ENTRIES will be overriden by the JOB_ROUTER_DEFAULTS." \
% ', '.join(overriden_attr) \
+ " Use the 'eval_set_' prefix instead."
# Ensure that users don't set the job environment in the Job Router
if any(x.endswith('environment') for x in entry.keys()):
sys.exit("ERROR: Do not use the Job Router to set the environment. Place variables under " +\
"[Local Settings] in /etc/osg/config.d/40-localsettings.ini")
# Warn users about eval_set_ default attributes in the ENTRIES since their
# evaluation may occur after the eval_set_ expressions containing them in the
# JOB_ROUTER_DEFAULTS
no_effect_attr = DEFAULT_ATTR.intersection(set([x for x in entry.keys() if x.startswith('eval_set_')]))
if no_effect_attr:
print "WARNING: %s in JOB_ROUTER_ENTRIES " % ', '.join(no_effect_attr) + \
"may not have any effect. Use the 'set_' prefix instead."
# Warn users if osg-configure has not been run
try:
htcondor.param['OSG_CONFIGURED']
except KeyError:
print "WARNING: osg-configure has not been run, degrading the functionality " + \
"of the CE. Please run 'osg-configure -c' and restart condor-ce."
| en | 0.761069 | #!/bin/env python Verify HTCondor-CE configuration before service startup # Verify that the HTCondor Python bindings are in the PYTHONPATH # Create dict whose values are lists of ads specified in the relevant JOB_ROUTER_* variables # store the ads (iterating through ClassAdStringIterator consumes them) # Verify job routes. classad.parseAds() ignores malformed ads so we have to compare the unparsed string to the # parsed string, counting the number of ads by proxy: the number of opening square brackets, "[" # Find all eval_set_ attributes in the JOB_ROUTER_DEFAULTS # Find all default_ attributes used in expressions in the JOB_ROUTER_DEFAULTS # Warn users if they've set_ attributes that would be overriden by eval_set in the JOB_ROUTER_DEFAULTS # Ensure that users don't set the job environment in the Job Router # Warn users about eval_set_ default attributes in the ENTRIES since their # evaluation may occur after the eval_set_ expressions containg them in the # JOB_ROUTER_DEFAULTS # Warn users if osg-configure has not been run | 2.420116 | 2 |
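The malformed-ad check used above relies on classad.parseAds() silently dropping ads it cannot parse; here is a standalone sketch of the same idea on a literal route string (the example routes are made up).

import classad

raw = '[ name = "route_a"; TargetUniverse = 5; ] [ name = "route_b" ]'
ads = list(classad.parseAds(raw))
# one opening bracket per route: a mismatch means at least one route was dropped
assert raw.count('[') == len(ads), 'at least one route failed to parse'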
src/ocr/ocr_image.py | fgulan/PyOCR | 0 | 6630009 | import sys
import cv2
import numpy as np
from utils import hist
import imutils
from scipy.ndimage import interpolation as inter
class OCRImage:
MAX_ROTATE_ANGLE = 3
ANGLE_DELTA = 0.05
MAX_SCALED_DIMENSION = 800
def __init__(self, image, width, height, x_offset=0, y_offset=0):
self._image = image
self._x_offset = x_offset
self._y_offset = y_offset
self._width = width
self._height = height
def save(self, name):
cv2.imwrite(name, self._image)
def get_image(self):
return self._image
def set_image(self, image):
self._image = image
def get_x(self):
return self._x_offset
def get_y(self):
return self._y_offset
def get_height(self):
return self._height
def get_width(self):
return self._width
def get_bounding_box(self):
return {'x': self.get_x(),
'y': self.get_y(),
'width': self.get_width(),
'height': self.get_height()}
def get_segments(self):
image = self.get_image()
h_proj = hist.horizontal_projection(image)
v_proj = hist.vertical_projection(image)
min_x, max_x = hist.blob_range(v_proj)
min_y, max_y = hist.blob_range(h_proj)
width = max_x - min_x + 1
height = max_y - min_y + 1
roi_image = image[min_y:max_y + 1, min_x:max_x + 1]
return roi_image, width, height, min_x, min_y
def fix_skew(self):
angle = self._calculate_skewed_angle_projection(self._image)
if abs(angle) < self.MAX_ROTATE_ANGLE:
self._image = self._rotate_image(self._image, angle)
self._height, self._width = self._image.shape
return angle
def _calculate_skewed_angle_projection(self, input_image):
height, width = input_image.shape
new_image = input_image.copy()
biggest_dimension = max(height, width)
scale = self.MAX_SCALED_DIMENSION / biggest_dimension
new_height, new_width = round(height * scale), round(width * scale)
scaled_image = cv2.resize(new_image, (new_width, new_height))
angles = np.arange(-self.MAX_ROTATE_ANGLE, self.MAX_ROTATE_ANGLE + self.ANGLE_DELTA, self.ANGLE_DELTA)
scores = []
for angle in angles:
score = self._find_rotation_score(scaled_image, angle)
scores.append(score)
best_angle = angles[np.argmax(scores)]
return best_angle
def _find_rotation_score(self, image, angle):
# Rotate image for given angle
rotated_image = inter.rotate(image, angle, reshape=False, order=0)
# Calculate horizontal projection
h_proj = hist.horizontal_projection(rotated_image)
# Calculate projection gradient
score = np.sum((h_proj[1:] - h_proj[:-1]) ** 2)
return score
def _calculate_skewed_angle_bbox(self, image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
return -(90 + angle)
else:
return -angle
def _rotate_image(self, image, angle):
# Add border so when image is rotated - black pixels will be filled
image = cv2.copyMakeBorder(
image, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=[0, 0, 0])
height, width = image.shape[:2]
center = (width // 2, height // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated_image = cv2.warpAffine(image, rotation_matrix,
(width, height),
flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_REPLICATE)
output_image = cv2.threshold(
rotated_image, 127, 255, cv2.THRESH_BINARY)[1]
return output_image
| import sys
import cv2
import numpy as np
from utils import hist
import imutils
from scipy.ndimage import interpolation as inter
class OCRImage:
MAX_ROTATE_ANGLE = 3
ANGLE_DELTA = 0.05
MAX_SCALED_DIMENSION = 800
def __init__(self, image, width, height, x_offset=0, y_offset=0):
self._image = image
self._x_offset = x_offset
self._y_offset = y_offset
self._width = width
self._height = height
def save(self, name):
cv2.imwrite(name, self._image)
def get_image(self):
return self._image
def set_image(self, image):
self._image = image
def get_x(self):
return self._x_offset
def get_y(self):
return self._y_offset
def get_height(self):
return self._height
def get_width(self):
return self._width
def get_bounding_box(self):
return {'x': self.get_x(),
'y': self.get_y(),
'width': self.get_width(),
'height': self.get_height()}
def get_segments(self):
image = self.get_image()
h_proj = hist.horizontal_projection(image)
v_proj = hist.vertical_projection(image)
min_x, max_x = hist.blob_range(v_proj)
min_y, max_y = hist.blob_range(h_proj)
width = max_x - min_x + 1
height = max_y - min_y + 1
roi_image = image[min_y:max_y + 1, min_x:max_x + 1]
return roi_image, width, height, min_x, min_y
def fix_skew(self):
angle = self._calculate_skewed_angle_projection(self._image)
if abs(angle) < self.MAX_ROTATE_ANGLE:
self._image = self._rotate_image(self._image, angle)
self._height, self._width = self._image.shape
return angle
def _calculate_skewed_angle_projection(self, input_image):
height, width = input_image.shape
new_image = input_image.copy()
biggest_dimension = max(height, width)
scale = self.MAX_SCALED_DIMENSION / biggest_dimension
new_height, new_width = round(height * scale), round(width * scale)
scaled_image = cv2.resize(new_image, (new_width, new_height))
angles = np.arange(-self.MAX_ROTATE_ANGLE, self.MAX_ROTATE_ANGLE + self.ANGLE_DELTA, self.ANGLE_DELTA)
scores = []
for angle in angles:
score = self._find_rotation_score(scaled_image, angle)
scores.append(score)
best_angle = angles[np.argmax(scores)]
return best_angle
def _find_rotation_score(self, image, angle):
# Rotate image for given angle
rotated_image = inter.rotate(image, angle, reshape=False, order=0)
# Calculate horizontal projection
h_proj = hist.horizontal_projection(rotated_image)
# Calculate projection gradient
score = np.sum((h_proj[1:] - h_proj[:-1]) ** 2)
return score
def _calculate_skewed_angle_bbox(self, image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
return -(90 + angle)
else:
return -angle
def _rotate_image(self, image, angle):
# Add border so when image is rotated - black pixels will be filled
image = cv2.copyMakeBorder(
image, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=[0, 0, 0])
height, width = image.shape[:2]
center = (width // 2, height // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated_image = cv2.warpAffine(image, rotation_matrix,
(width, height),
flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_REPLICATE)
output_image = cv2.threshold(
rotated_image, 127, 255, cv2.THRESH_BINARY)[1]
return output_image
| en | 0.681967 | # Rotate image for given angle # Calculate horizontal projection # Calculate projection gradient # Add border so when image is rotated - black pixels will be filled | 2.46182 | 2 |
setup.py | aryamanarora/hindi | 0 | 6630010 | <reponame>aryamanarora/hindi
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='hindi',
version='0.0.0',
description='Useful tools for dealing with Hindi',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/aryamanarora/hindi',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
| from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='hindi',
version='0.0.0',
description='Useful tools for dealing with Hindi',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/aryamanarora/hindi',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
) | none | 1 | 1.228493 | 1 |
|
setup.py | thiviyanT/torch-rgcn | 49 | 6630011 | from setuptools import setup
setup(name='torch-rgcn',
version='1.0',
description='A PyTorch library for Relational Graph Convolutional Networks',
url='https://github.com/thiviyanT/torch-RGCN',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['torch_rgcn'],
python_requires='>=3.7',
zip_safe=False)
| from setuptools import setup
setup(name='torch-rgcn',
version='1.0',
description='A PyTorch library for Relational Graph Convolutional Networks',
url='https://github.com/thiviyanT/torch-RGCN',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['torch_rgcn'],
python_requires='>=3.7',
zip_safe=False)
| none | 1 | 1.055237 | 1 |
|
atlas/foundations_contrib/src/foundations_contrib/helpers/redis_connector.py | DeepLearnI/atlas | 296 | 6630012 |
class RedisConnector(object):
"""Acts as a callback for the LazyRedis class,
allowing it to be called directly without knowledge of
how configuring a Redis url works
Arguments:
config_manager {ConfigManager} -- A provider for the Redis connection string
connection_callback {callable} -- Callback to provide the connection string to
"""
def __init__(self, config_manager, connection_callback, environment):
self._config = config_manager.config()
self._connection_callback = connection_callback
self._environment = environment
def __call__(self):
"""Returns the result of the callback, given a connection string
Returns:
object -- The return value of the evaluated callback
"""
connection_with_password = self._build_connection_string()
connection = self._connection_callback(connection_with_password)
self._validate_redis_connection(connection)
return connection
def _validate_redis_connection(self, connection):
import redis
try:
connection.ping()
except redis.connection.ConnectionError as error:
if self._redis_is_possibly_encrypted_error(error):
raise ConnectionError('Unable to connect to Redis, due to potential encryption error.')
else:
raise error
def _redis_is_possibly_encrypted_error(self, error):
import redis
return error.args[0] == redis.connection.SERVER_CLOSED_CONNECTION_ERROR
def _get_connection_string(self):
return self._config.get('redis_url', 'redis://localhost:6379')
def _build_connection_string(self):
split_connection_string = self._get_connection_string().split('//')
scheme = split_connection_string[0]
host_with_port = split_connection_string[1]
return scheme + '//:@' + host_with_port
|
class RedisConnector(object):
"""Acts as a callback for the LazyRedis class,
allowing it to be called directly without knowledge of
how configuring a Redis url works
Arguments:
config_manager {ConfigManager} -- A provider for the Redis connection string
connection_callback {callable} -- Callback to provide the connection string to
"""
def __init__(self, config_manager, connection_callback, environment):
self._config = config_manager.config()
self._connection_callback = connection_callback
self._environment = environment
def __call__(self):
"""Returns the result of the callback, given a connection string
Returns:
object -- The return value of the evaluated callback
"""
connection_with_password = self._build_connection_string()
connection = self._connection_callback(connection_with_password)
self._validate_redis_connection(connection)
return connection
def _validate_redis_connection(self, connection):
import redis
try:
connection.ping()
except redis.connection.ConnectionError as error:
if self._redis_is_possibly_encrypted_error(error):
raise ConnectionError('Unable to connect to Redis, due to potential encryption error.')
else:
raise error
def _redis_is_possibly_encrypted_error(self, error):
import redis
return error.args[0] == redis.connection.SERVER_CLOSED_CONNECTION_ERROR
def _get_connection_string(self):
return self._config.get('redis_url', 'redis://localhost:6379')
def _build_connection_string(self):
split_connection_string = self._get_connection_string().split('//')
scheme = split_connection_string[0]
host_with_port = split_connection_string[1]
return scheme + '//:@' + host_with_port
| en | 0.660402 | Acts as a callback for the LazyRedis class, allowing it to be called directly without knowledge of how configuring a Redis url works Arguments: config_manager {ConfigManager} -- A provider for the Redis connection string connection_callback {callable} -- Callback to provide the connection string to Returns the result of the callback, given a connection string Returns: object -- The return value of the evaluated callback | 3.346551 | 3 |
qtpyvcp/plugins/gcode_properties.py | robertspark/qtpyvcp | 0 | 6630013 | <reponame>robertspark/qtpyvcp
"""
GCodeProperties
--------
This plugin provides the information about sizes and times
This plugin is not loaded by default, so to use it you will first
need to add it to your VCPs YAML config file.
YAML configuration:
.. code-block:: yaml
data_plugins:
gcode_properties:
provider: qtpyvcp.plugins.gcode_properties:GCodeProperties
"""
import os
import pprint
import shutil
import gcode
import linuxcnc
from qtpyvcp.utilities.logger import getLogger
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities.info import Info
from qtpyvcp.plugins import DataPlugin, DataChannel
from qtpyvcp.widgets.display_widgets.vtk_backplot.base_canon import BaseCanon
LOG = getLogger(__name__)
STATUS = getPlugin('status')
INFO = Info()
INIFILE = linuxcnc.ini(os.getenv("INI_FILE_NAME"))
MAX_LINEAR_VELOCITY = bool(INIFILE.find("TRAJ", "MAX_LINEAR_VELOCITY"))
MAX_ANGULAR_VELOCITY = bool(INIFILE.find("TRAJ", "MAX_ANGULAR_VELOCITY"))
MACHINE_UNITS = 2 if INFO.getIsMachineMetric() else 1
class GCodeProperties(DataPlugin):
"""GCodeProperties Plugin"""
def __init__(self):
super(GCodeProperties, self).__init__()
inifile = os.getenv("INI_FILE_NAME")
self.stat = STATUS
self.ini = linuxcnc.ini(os.getenv("INI_FILE_NAME"))
self.config_dir = os.path.dirname(inifile)
self.canon = None
self.stat.file.notify(self._file_event)
self.loaded_file = None
temp = self.ini.find("RS274NGC", "PARAMETER_FILE") or "linuxcnc.var"
self.parameter_file = os.path.join(self.config_dir, temp)
self.temp_parameter_file = os.path.join(self.parameter_file + '.temp')
@DataChannel
def file_name(self, chan):
"""The current file name.
Args:
None
Returns:
The current open file name as a string.
Channel syntax::
gcode_properties:file_name
"""
if not self.loaded_file:
chan.value = "No file loaded"
return chan.value
@file_name.tostring
def file_name(self, chan):
return chan.value
@DataChannel
def tool_calls_num(self, chan):
"""The total tool calls number.
Args:
None
Returns:
The total tool calls number
Channel syntax::
gcode_properties:tool_calls_num
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@tool_calls_num.tostring
def tool_calls_num(self, chan):
return chan.value
@DataChannel
def file_size(self, chan):
"""The current file size.
Args:
None
Returns:
The current file size in bytes
Channel syntax::
gcode_properties:size
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_size.tostring
def file_size(self, chan):
return chan.value
@DataChannel
def file_rapids(self, chan):
"""The current file rapis distance.
Args:
None
Returns:
The current file rapis distance in machine units
Channel syntax::
gcode_properties:rapids
"""
if not self.loaded_file:
chan.value.float(0.0)
if MACHINE_UNITS == 2:
conv = 1
units = "mm"
fmt = "%.3f"
else:
conv = 1/25.4
units = "in"
fmt = "%.4f"
return chan.value
@file_rapids.tostring
def file_rapids(self, chan):
return chan.value
@DataChannel
def file_lines(self, chan):
"""The current number of lines.
Args:
None
Returns:
The current number of lines the file has
Channel syntax::
gcode_properties:file_lines
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_lines.tostring
def file_lines(self, chan):
return chan.value
@DataChannel
def file_time(self, chan):
"""The current file run time.
Args:
format (str) : Format spec. Defaults to ``%I:%M:%S %p``.
See http://strftime.org for supported formats.
Returns:
The run time of the loaded file as a formatted string. Default HH:MM:SS AM
Channel syntax::
gcode_properties:time
gcode_properties:time?string
gcode_properties:time?string&format=%S
"""
if not self.loaded_file:
chan.value(0)
return chan.value
@file_time.tostring
def file_time(self, chan, format="%I:%M:%S %p"):
return chan.value.strftime(format)
@DataChannel
def file_rapid_distance(self, chan):
"""The full distance done in rapid of the path.
Args:
Returns:
The full distance done in rapid of the path
Channel syntax::
gcode_properties:file_rapid_distance
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_rapid_distance.tostring
def file_rapid_distance(self, chan):
return chan.value
@DataChannel
def file_feed_distance(self, chan):
"""The full distance done in feed velocity of the path.
Args:
Returns:
The full distance done in feed velocity of the path
Channel syntax::
gcode_properties:file_feed_distance
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_feed_distance.tostring
def file_feed_distance(self, chan):
return chan.value
@DataChannel
def file_work_planes(self, chan):
"""The current file plane.
Args:
None
Returns:
The file work planes
Channel syntax::
gcode_properties:file_work_planes
"""
if not self.loaded_file:
chan.value = []
return chan.value
@file_work_planes.tostring
def file_work_planes(self, chan):
return chan.value
@DataChannel
def file_rigid_taps(self, chan):
"""The rigid taps found in file.
Args:
None
Returns:
The rigid taps found in file.
Channel syntax::
gcode_properties:file_rigid_taps
"""
if not self.loaded_file:
chan.value = []
return chan.value
@file_rigid_taps.tostring
def file_rigid_taps(self, chan):
return chan.value
@DataChannel
def file_offsets(self, chan):
"""The offsets found in file.
Args:
None
Returns:
The offsets found in file.
Channel syntax::
gcode_properties:file_offsets
"""
if not self.loaded_file:
chan.value = dict()
return chan.value
@file_offsets.tostring
def file_offsets(self, chan):
return chan.value
@DataChannel
def file_feed(self, chan):
"""The current file run distance.
Args:
None
Returns:
The distance the machine will run with the loaded file
Channel syntax::
gcode_properties:feed
"""
return chan.value
@file_feed.tostring
def file_feed(self, chan):
return chan.value
def initialise(self):
pass
def terminate(self):
pass
def _file_event(self, file_path):
"""" This function gets notified about files begin loaded """
self.loaded_file = file_path
self.canon = PropertiesCanon()
if os.path.exists(self.parameter_file):
shutil.copy(self.parameter_file, self.temp_parameter_file)
self.canon.parameter_file = self.temp_parameter_file
# Some initialization g-code to set the units and optional user code
unitcode = "G%d" % (20 + (self.stat.linear_units == 1))
initcode = self.ini.find("RS274NGC", "RS274NGC_STARTUP_CODE") or ""
# THIS IS WHERE IT ALL HAPPENS: load_preview will execute the code,
# call back to the canon with motion commands, and record a history
# of all the movements.
try:
result, seq = gcode.parse(self.loaded_file, self.canon, unitcode, initcode)
if result > gcode.MIN_ERROR:
msg = gcode.strerror(result)
LOG.debug(f"Error in {self.loaded_file} line {seq - 1}\n{msg}")
except Exception as e:
LOG.debug(f"Error {e}")
# clean up temp var file and the backup
os.unlink(self.temp_parameter_file)
os.unlink(self.temp_parameter_file + '.bak')
file_name = self.loaded_file
file_size = os.stat(self.loaded_file).st_size
file_lines = self.canon.num_lines
tool_calls = self.canon.tool_calls
g0 = sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.traverse)
g1 = (sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.feed) +
sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.arcfeed))
self.file_name.setValue(file_name)
self.file_size.setValue(file_size)
self.file_lines.setValue(file_lines)
self.tool_calls_num.setValue(tool_calls)
self.file_rapid_distance.setValue(g0)
self.file_feed_distance.setValue(g1)
self.file_work_planes.setValue(self.canon.work_planes)
self.file_rigid_taps.setValue(self.canon.rigid_taps)
self.file_offsets.setValue(self.canon.g5x_offset_dict)
def calc_distance(self):
mf = 100.0
g0 = sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.traverse)
g1 = (sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.feed) +
sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.arcfeed))
# gt = (sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.feed) +
# sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.arcfeed) +
# sum(self.dist(l[0][:3], l[1][:3])/mf for l in self.canon.traverse) +
# self.canon.dwell_time
# )
#
# LOG.debug(f"path time {gt} secconds")
#
# min_extents = self.from_internal_units(self.canon.min_extents, conv)
# max_extents = self.from_internal_units(self.canon.max_extents, conv)
#
# for (i, c) in enumerate("xyz"):
# a = min_extents[i]
# b = max_extents[i]
# if a != b:
# props[c] = ("%(a)f to %(b)f = %(diff)f %(units)s").replace("%f", fmt) % {'a': a, 'b': b, 'diff': b-a, 'units': units}
# # properties(root_window, _("G-Code Properties"), property_names, props)
# pprint.pprint(props)
def dist(self, xxx, xxx_1):
(x,y,z) = xxx # todo changeme
(p,q,r) = xxx_1 # todo changeme
return ((x-p)**2 + (y-q)**2 + (z-r)**2) ** .5
def from_internal_units(self, pos, unit=None):
if unit is None:
unit = s.linear_units
lu = (unit or 1) * 25.4
lus = [lu, lu, lu, 1, 1, 1, lu, lu, lu]
return [a*b for a, b in zip(pos, lus)]
def from_internal_linear_unit(self, v, unit=None):
if unit is None:
unit = s.linear_units
lu = (unit or 1) * 25.4
return v*lu
class PropertiesCanon(BaseCanon):
def __init__(self):
self.num_lines = 0
self.tool_calls = 0
# traverse list - [line number, [start position], [end position], [tlo x, tlo y, tlo z]]
self.traverse = []
# feed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]]
self.feed = []
# arcfeed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]]
self.arcfeed = []
# dwell list - [line number, color, pos x, pos y, pos z, plane]
self.dwells = []
self.work_planes = []
self.rigid_taps = []
self.feedrate = 1
self.dwell_time = 0
self.seq_num = -1
self.last_pos = (0,) * 9
self.first_move = True
self.in_arc = False
self.suppress = 0
self.plane = 1
self.arcdivision = 64
# extents
self.min_extents = [9e99, 9e99, 9e99]
self.max_extents = [-9e99, -9e99, -9e99]
self.min_extents_notool = [9e99, 9e99, 9e99]
self.max_extents_notool = [-9e99, -9e99, -9e99]
# tool length offsets
self.tlo_x = 0.0
self.tlo_y = 0.0
self.tlo_z = 0.0
self.tlo_a = 0.0
self.tlo_b = 0.0
self.tlo_c = 0.0
self.tlo_u = 0.0
self.tlo_v = 0.0
self.tlo_w = 0.0
self.tool_offsets = (0.0,) * 9
# G92/G52 offsets
self.g92_offset_x = 0.0
self.g92_offset_y = 0.0
self.g92_offset_z = 0.0
self.g92_offset_a = 0.0
self.g92_offset_b = 0.0
self.g92_offset_c = 0.0
self.g92_offset_u = 0.0
self.g92_offset_v = 0.0
self.g92_offset_w = 0.0
# g5x offsets
self.g5x_offset_x = 0.0
self.g5x_offset_y = 0.0
self.g5x_offset_z = 0.0
self.g5x_offset_a = 0.0
self.g5x_offset_b = 0.0
self.g5x_offset_c = 0.0
self.g5x_offset_u = 0.0
self.g5x_offset_v = 0.0
self.g5x_offset_w = 0.0
self.g5x_offset_dict = dict()
# XY rotation (degrees)
self.rotation_xy = 0
self.rotation_cos = 1
self.rotation_sin = 0
def set_g5x_offset(self, offset, x, y, z, a, b, c, u, v, w):
try:
self.g5x_offset_dict[str(offset)] = (x, y, z, a, b, c, u, v, w)
except Exception as e:
LOG.debug(f"straight_traverse: {e}")
def set_g92_offset(self, x, y, z, a, b, c, u, v, w):
self.g92_offset_x = x
self.g92_offset_y = y
self.g92_offset_z = z
self.g92_offset_a = z
self.g92_offset_b = b
self.g92_offset_c = c
self.g92_offset_u = u
self.g92_offset_v = v
self.g92_offset_w = w
def set_plane(self, plane):
self.work_planes.append(plane)
def set_feed_rate(self, arg):
pass
# print(("set feed rate", arg))
def comment(self, comment):
pass
# print(("#", comment))
def straight_traverse(self, x, y, z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
pos = self.rotate_and_translate(x, y, z, a, b, c, u, v, w)
if not self.first_move:
self.traverse.append([self.last_pos, pos])
self.last_pos = pos
except Exception as e:
LOG.debug(f"straight_traverse: {e}")
def straight_feed(self, x, y, z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
self.first_move = False
pos = self.rotate_and_translate(x, y, z, a, b, c, u, v, w)
self.feed.append([self.last_pos, pos])
self.last_pos = pos
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def dwell(self, arg):
if arg < .1:
print(("dwell %f ms" % (1000 * arg)))
else:
print(("dwell %f seconds" % arg))
def arc_feed(self, end_x, end_y, center_x, center_y, rot, end_z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
self.first_move = False
self.in_arc = True
try:
# this self.lo goes straight into the c code, cannot be changed
self.lo = tuple(self.last_pos)
segs = gcode.arc_to_segments(self, end_x, end_y, center_x, center_y,
rot, end_z, a, b, c, u, v, w, self.arcdivision)
self.straight_arcsegments(segs)
finally:
self.in_arc = False
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def get_axis_mask(self):
return 7 # XYZ
def rigid_tap(self, x, y, z):
try:
if self.suppress > 0:
return
self.rigid_taps.append((x, y, z))
self.first_move = False
pos = self.rotate_and_translate(x, y, z, 0, 0, 0, 0, 0, 0)[:3]
pos += self.last_pos[3:]
self.feed.append([self.last_pos, pos])
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def change_tool(self, pocket):
if pocket != -1:
self.tool_calls += 1
# print(("pocket", pocket))
def next_line(self, st):
self.num_lines += 1
# state attributes
# 'block', 'cutter_side', 'distance_mode', 'feed_mode', 'feed_rate',
# 'flood', 'gcodes', 'mcodes', 'mist', 'motion_mode', 'origin', 'units',
# 'overrides', 'path_mode', 'plane', 'retract_mode', 'sequence_number',
# 'speed', 'spindle', 'stopping', 'tool_length_offset', 'toolchange',
# print(("state", st))
# print(("seq", st.sequence_number))
# print(("MCODES", st.mcodes))
# print(("TOOLCHANGE", st.toolchange))
| """
GCodeProperties
--------
This plugin provides the information about sizes and times
This plugin is not loaded by default, so to use it you will first
need to add it to your VCPs YAML config file.
YAML configuration:
.. code-block:: yaml
data_plugins:
gcode_properties:
provider: qtpyvcp.plugins.gcode_properties:GCodeProperties
"""
import os
import pprint
import shutil
import gcode
import linuxcnc
from qtpyvcp.utilities.logger import getLogger
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities.info import Info
from qtpyvcp.plugins import DataPlugin, DataChannel
from qtpyvcp.widgets.display_widgets.vtk_backplot.base_canon import BaseCanon
LOG = getLogger(__name__)
STATUS = getPlugin('status')
INFO = Info()
INIFILE = linuxcnc.ini(os.getenv("INI_FILE_NAME"))
MAX_LINEAR_VELOCITY = bool(INIFILE.find("TRAJ", "MAX_LINEAR_VELOCITY"))
MAX_ANGULAR_VELOCITY = bool(INIFILE.find("TRAJ", "MAX_ANGULAR_VELOCITY"))
MACHINE_UNITS = 2 if INFO.getIsMachineMetric() else 1
class GCodeProperties(DataPlugin):
"""GCodeProperties Plugin"""
def __init__(self):
super(GCodeProperties, self).__init__()
inifile = os.getenv("INI_FILE_NAME")
self.stat = STATUS
self.ini = linuxcnc.ini(os.getenv("INI_FILE_NAME"))
self.config_dir = os.path.dirname(inifile)
self.canon = None
self.stat.file.notify(self._file_event)
self.loaded_file = None
temp = self.ini.find("RS274NGC", "PARAMETER_FILE") or "linuxcnc.var"
self.parameter_file = os.path.join(self.config_dir, temp)
self.temp_parameter_file = os.path.join(self.parameter_file + '.temp')
@DataChannel
def file_name(self, chan):
"""The current file name.
Args:
None
Returns:
The current open file name as a string.
Channel syntax::
gcode_properties:file_name
"""
if not self.loaded_file:
chan.value = "No file loaded"
return chan.value
@file_name.tostring
def file_name(self, chan):
return chan.value
@DataChannel
def tool_calls_num(self, chan):
"""The total tool calls number.
Args:
None
Returns:
The total tool calls number
Channel syntax::
gcode_properties:tool_calls_num
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@tool_calls_num.tostring
def tool_calls_num(self, chan):
return chan.value
@DataChannel
def file_size(self, chan):
"""The current file size.
Args:
None
Returns:
The current file size in bytes
Channel syntax::
gcode_properties:size
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_size.tostring
def file_size(self, chan):
return chan.value
@DataChannel
def file_rapids(self, chan):
"""The current file rapis distance.
Args:
None
Returns:
The current file rapis distance in machine units
Channel syntax::
gcode_properties:rapids
"""
if not self.loaded_file:
chan.value.float(0.0)
if MACHINE_UNITS == 2:
conv = 1
units = "mm"
fmt = "%.3f"
else:
conv = 1/25.4
units = "in"
fmt = "%.4f"
return chan.value
@file_rapids.tostring
def file_rapids(self, chan):
return chan.value
@DataChannel
def file_lines(self, chan):
"""The current number of lines.
Args:
None
Returns:
The current number of lines the file has
Channel syntax::
gcode_properties:file_lines
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_lines.tostring
def file_lines(self, chan):
return chan.value
@DataChannel
def file_time(self, chan):
"""The current file run time.
Args:
format (str) : Format spec. Defaults to ``%I:%M:%S %p``.
See http://strftime.org for supported formats.
Returns:
The run time of the loaded file as a formatted string. Default HH:MM:SS AM
Channel syntax::
gcode_properties:time
gcode_properties:time?string
gcode_properties:time?string&format=%S
"""
if not self.loaded_file:
chan.value(0)
return chan.value
@file_time.tostring
def file_time(self, chan, format="%I:%M:%S %p"):
return chan.value.strftime(format)
@DataChannel
def file_rapid_distance(self, chan):
"""The full distance done in rapid of the path.
Args:
Returns:
The full distance done in rapid of the path
Channel syntax::
gcode_properties:file_rapid_distance
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_rapid_distance.tostring
def file_rapid_distance(self, chan):
return chan.value
@DataChannel
def file_feed_distance(self, chan):
"""The full distance done in feed velocity of the path.
Args:
Returns:
The full distance done in feed velocity of the path
Channel syntax::
gcode_properties:file_feed_distance
"""
if not self.loaded_file:
chan.value = 0
return chan.value
@file_feed_distance.tostring
def file_feed_distance(self, chan):
return chan.value
@DataChannel
def file_work_planes(self, chan):
"""The current file plane.
Args:
None
Returns:
The file work planes
Channel syntax::
gcode_properties:file_work_planes
"""
if not self.loaded_file:
chan.value = []
return chan.value
@file_work_planes.tostring
def file_work_planes(self, chan):
return chan.value
@DataChannel
def file_rigid_taps(self, chan):
"""The rigid taps found in file.
Args:
None
Returns:
The rigid taps found in file.
Channel syntax::
gcode_properties:file_rigid_taps
"""
if not self.loaded_file:
chan.value = []
return chan.value
@file_rigid_taps.tostring
def file_rigid_taps(self, chan):
return chan.value
@DataChannel
def file_offsets(self, chan):
"""The offsets found in file.
Args:
None
Returns:
The offsets found in file.
Channel syntax::
gcode_properties:file_offsets
"""
if not self.loaded_file:
chan.value = dict()
return chan.value
@file_offsets.tostring
def file_offsets(self, chan):
return chan.value
@DataChannel
def file_feed(self, chan):
"""The current file run distance.
Args:
None
Returns:
The distance the machine will run with the loaded file
Channel syntax::
gcode_properties:feed
"""
return chan.value
@file_feed.tostring
def file_feed(self, chan):
return chan.value
def initialise(self):
pass
def terminate(self):
pass
def _file_event(self, file_path):
"""" This function gets notified about files begin loaded """
self.loaded_file = file_path
self.canon = PropertiesCanon()
if os.path.exists(self.parameter_file):
shutil.copy(self.parameter_file, self.temp_parameter_file)
self.canon.parameter_file = self.temp_parameter_file
# Some initialization g-code to set the units and optional user code
unitcode = "G%d" % (20 + (self.stat.linear_units == 1))
initcode = self.ini.find("RS274NGC", "RS274NGC_STARTUP_CODE") or ""
# THIS IS WHERE IT ALL HAPPENS: load_preview will execute the code,
# call back to the canon with motion commands, and record a history
# of all the movements.
try:
result, seq = gcode.parse(self.loaded_file, self.canon, unitcode, initcode)
if result > gcode.MIN_ERROR:
msg = gcode.strerror(result)
LOG.debug(f"Error in {self.loaded_file} line {seq - 1}\n{msg}")
except Exception as e:
LOG.debug(f"Error {e}")
# clean up temp var file and the backup
os.unlink(self.temp_parameter_file)
os.unlink(self.temp_parameter_file + '.bak')
file_name = self.loaded_file
file_size = os.stat(self.loaded_file).st_size
file_lines = self.canon.num_lines
tool_calls = self.canon.tool_calls
g0 = sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.traverse)
g1 = (sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.feed) +
sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.arcfeed))
self.file_name.setValue(file_name)
self.file_size.setValue(file_size)
self.file_lines.setValue(file_lines)
self.tool_calls_num.setValue(tool_calls)
self.file_rapid_distance.setValue(g0)
self.file_feed_distance.setValue(g1)
self.file_work_planes.setValue(self.canon.work_planes)
self.file_rigid_taps.setValue(self.canon.rigid_taps)
self.file_offsets.setValue(self.canon.g5x_offset_dict)
def calc_distance(self):
mf = 100.0
g0 = sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.traverse)
g1 = (sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.feed) +
sum(self.dist(l[0][:3], l[1][:3]) for l in self.canon.arcfeed))
# gt = (sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.feed) +
# sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.arcfeed) +
# sum(self.dist(l[0][:3], l[1][:3])/mf for l in self.canon.traverse) +
# self.canon.dwell_time
# )
#
# LOG.debug(f"path time {gt} secconds")
#
# min_extents = self.from_internal_units(self.canon.min_extents, conv)
# max_extents = self.from_internal_units(self.canon.max_extents, conv)
#
# for (i, c) in enumerate("xyz"):
# a = min_extents[i]
# b = max_extents[i]
# if a != b:
# props[c] = ("%(a)f to %(b)f = %(diff)f %(units)s").replace("%f", fmt) % {'a': a, 'b': b, 'diff': b-a, 'units': units}
# # properties(root_window, _("G-Code Properties"), property_names, props)
# pprint.pprint(props)
def dist(self, xxx, xxx_1):
(x,y,z) = xxx # todo changeme
(p,q,r) = xxx_1 # todo changeme
return ((x-p)**2 + (y-q)**2 + (z-r)**2) ** .5
def from_internal_units(self, pos, unit=None):
if unit is None:
unit = s.linear_units
lu = (unit or 1) * 25.4
lus = [lu, lu, lu, 1, 1, 1, lu, lu, lu]
return [a*b for a, b in zip(pos, lus)]
def from_internal_linear_unit(self, v, unit=None):
if unit is None:
unit = s.linear_units
lu = (unit or 1) * 25.4
return v*lu
class PropertiesCanon(BaseCanon):
def __init__(self):
self.num_lines = 0
self.tool_calls = 0
# traverse list - [line number, [start position], [end position], [tlo x, tlo y, tlo z]]
self.traverse = []
# feed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]]
self.feed = []
# arcfeed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]]
self.arcfeed = []
# dwell list - [line number, color, pos x, pos y, pos z, plane]
self.dwells = []
self.work_planes = []
self.rigid_taps = []
self.feedrate = 1
self.dwell_time = 0
self.seq_num = -1
self.last_pos = (0,) * 9
self.first_move = True
self.in_arc = False
self.suppress = 0
self.plane = 1
self.arcdivision = 64
# extents
self.min_extents = [9e99, 9e99, 9e99]
self.max_extents = [-9e99, -9e99, -9e99]
self.min_extents_notool = [9e99, 9e99, 9e99]
self.max_extents_notool = [-9e99, -9e99, -9e99]
# tool length offsets
self.tlo_x = 0.0
self.tlo_y = 0.0
self.tlo_z = 0.0
self.tlo_a = 0.0
self.tlo_b = 0.0
self.tlo_c = 0.0
self.tlo_u = 0.0
self.tlo_v = 0.0
self.tlo_w = 0.0
self.tool_offsets = (0.0,) * 9
# G92/G52 offsets
self.g92_offset_x = 0.0
self.g92_offset_y = 0.0
self.g92_offset_z = 0.0
self.g92_offset_a = 0.0
self.g92_offset_b = 0.0
self.g92_offset_c = 0.0
self.g92_offset_u = 0.0
self.g92_offset_v = 0.0
self.g92_offset_w = 0.0
# g5x offsets
self.g5x_offset_x = 0.0
self.g5x_offset_y = 0.0
self.g5x_offset_z = 0.0
self.g5x_offset_a = 0.0
self.g5x_offset_b = 0.0
self.g5x_offset_c = 0.0
self.g5x_offset_u = 0.0
self.g5x_offset_v = 0.0
self.g5x_offset_w = 0.0
self.g5x_offset_dict = dict()
# XY rotation (degrees)
self.rotation_xy = 0
self.rotation_cos = 1
self.rotation_sin = 0
def set_g5x_offset(self, offset, x, y, z, a, b, c, u, v, w):
try:
self.g5x_offset_dict[str(offset)] = (x, y, z, a, b, c, u, v, w)
except Exception as e:
LOG.debug(f"straight_traverse: {e}")
def set_g92_offset(self, x, y, z, a, b, c, u, v, w):
self.g92_offset_x = x
self.g92_offset_y = y
self.g92_offset_z = z
self.g92_offset_a = z
self.g92_offset_b = b
self.g92_offset_c = c
self.g92_offset_u = u
self.g92_offset_v = v
self.g92_offset_w = w
def set_plane(self, plane):
self.work_planes.append(plane)
def set_feed_rate(self, arg):
pass
# print(("set feed rate", arg))
def comment(self, comment):
pass
# print(("#", comment))
def straight_traverse(self, x, y, z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
pos = self.rotate_and_translate(x, y, z, a, b, c, u, v, w)
if not self.first_move:
self.traverse.append([self.last_pos, pos])
self.last_pos = pos
except Exception as e:
LOG.debug(f"straight_traverse: {e}")
def straight_feed(self, x, y, z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
self.first_move = False
pos = self.rotate_and_translate(x, y, z, a, b, c, u, v, w)
self.feed.append([self.last_pos, pos])
self.last_pos = pos
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def dwell(self, arg):
if arg < .1:
print(("dwell %f ms" % (1000 * arg)))
else:
print(("dwell %f seconds" % arg))
def arc_feed(self, end_x, end_y, center_x, center_y, rot, end_z, a, b, c, u, v, w):
try:
if self.suppress > 0:
return
self.first_move = False
self.in_arc = True
try:
# this self.lo goes straight into the c code, cannot be changed
self.lo = tuple(self.last_pos)
segs = gcode.arc_to_segments(self, end_x, end_y, center_x, center_y,
rot, end_z, a, b, c, u, v, w, self.arcdivision)
self.straight_arcsegments(segs)
finally:
self.in_arc = False
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def get_axis_mask(self):
return 7 # XYZ
def rigid_tap(self, x, y, z):
try:
if self.suppress > 0:
return
self.rigid_taps.append((x, y, z))
self.first_move = False
pos = self.rotate_and_translate(x, y, z, 0, 0, 0, 0, 0, 0)[:3]
pos += self.last_pos[3:]
self.feed.append([self.last_pos, pos])
except Exception as e:
LOG.debug(f"straight_feed: {e}")
def change_tool(self, pocket):
if pocket != -1:
self.tool_calls += 1
# print(("pocket", pocket))
def next_line(self, st):
self.num_lines += 1
# state attributes
# 'block', 'cutter_side', 'distance_mode', 'feed_mode', 'feed_rate',
# 'flood', 'gcodes', 'mcodes', 'mist', 'motion_mode', 'origin', 'units',
# 'overrides', 'path_mode', 'plane', 'retract_mode', 'sequence_number',
# 'speed', 'spindle', 'stopping', 'tool_length_offset', 'toolchange',
# print(("state", st))
# print(("seq", st.sequence_number))
# print(("MCODES", st.mcodes))
# print(("TOOLCHANGE", st.toolchange)) | en | 0.519702 | GCodeProperties -------- This plugin provides the information about sizes and times This plugin is not loaded by default, so to use it you will first need to add it to your VCPs YAML config file. YAML configuration: .. code-block:: yaml data_plugins: gcode_properties: provider: qtpyvcp.plugins.gcode_properties:GCodeProperties GCodeProperties Plugin The current file name. Args: None Returns: The current open file name as a string. Channel syntax:: gcode_properties:file_name The total tool calls number. Args: None Returns: The total tool calls number Channel syntax:: gcode_properties:tool_calls_num The current file size. Args: None Returns: The current file size in bytes Channel syntax:: gcode_properties:size The current file rapis distance. Args: None Returns: The current file rapis distance in machine units Channel syntax:: gcode_properties:rapids The current number of lines. Args: None Returns: The current number of lines the file has Channel syntax:: gcode_properties:file_lines The current file run time. Args: format (str) : Format spec. Defaults to ``%I:%M:%S %p``. See http://strftime.org for supported formats. Returns: The run time of the loaded file as a formatted string. Default HH:MM:SS AM Channel syntax:: gcode_properties:time gcode_properties:time?string gcode_properties:time?string&format=%S The full distance done in rapid of the path. Args: Returns: The full distance done in rapid of the path Channel syntax:: gcode_properties:file_rapid_distance The full distance done in feed velocity of the path. Args: Returns: The full distance done in feed velocity of the path Channel syntax:: gcode_properties:file_feed_distance The current file plane. Args: None Returns: The file work planes Channel syntax:: gcode_properties:file_work_planes The rigid taps found in file. Args: None Returns: The rigid taps found in file. Channel syntax:: gcode_properties:file_rigid_taps The offsets found in file. Args: None Returns: The offsets found in file. Channel syntax:: gcode_properties:file_offsets The current file run distance. Args: None Returns: The distance the machine will run with the loaded file Channel syntax:: gcode_properties:feed " This function gets notified about files begin loaded # Some initialization g-code to set the units and optional user code # THIS IS WHERE IT ALL HAPPENS: load_preview will execute the code, # call back to the canon with motion commands, and record a history # of all the movements. 
# clean up temp var file and the backup # gt = (sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.feed) + # sum(self.dist(l[0][:3], l[1][:3])/min(mf, l[1][0]) for l in self.canon.arcfeed) + # sum(self.dist(l[0][:3], l[1][:3])/mf for l in self.canon.traverse) + # self.canon.dwell_time # ) # # LOG.debug(f"path time {gt} secconds") # # min_extents = self.from_internal_units(self.canon.min_extents, conv) # max_extents = self.from_internal_units(self.canon.max_extents, conv) # # for (i, c) in enumerate("xyz"): # a = min_extents[i] # b = max_extents[i] # if a != b: # props[c] = ("%(a)f to %(b)f = %(diff)f %(units)s").replace("%f", fmt) % {'a': a, 'b': b, 'diff': b-a, 'units': units} # # properties(root_window, _("G-Code Properties"), property_names, props) # pprint.pprint(props) # todo changeme # todo changeme # traverse list - [line number, [start position], [end position], [tlo x, tlo y, tlo z]] # feed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]] # arcfeed list - [line number, [start position], [end position], feedrate, [tlo x, tlo y, tlo z]] # dwell list - [line number, color, pos x, pos y, pos z, plane] # extents # tool length offsets # G92/G52 offsets # g5x offsets # XY rotation (degrees) # print(("set feed rate", arg)) # print(("#", comment)) # this self.lo goes straight into the c code, cannot be changed # XYZ # print(("pocket", pocket)) # state attributes # 'block', 'cutter_side', 'distance_mode', 'feed_mode', 'feed_rate', # 'flood', 'gcodes', 'mcodes', 'mist', 'motion_mode', 'origin', 'units', # 'overrides', 'path_mode', 'plane', 'retract_mode', 'sequence_number', # 'speed', 'spindle', 'stopping', 'tool_length_offset', 'toolchange', # print(("state", st)) # print(("seq", st.sequence_number)) # print(("MCODES", st.mcodes)) # print(("TOOLCHANGE", st.toolchange)) | 2.308093 | 2 |
setup.py | deanle17/urlSigner | 0 | 6630014 | <gh_stars>0
from setuptools import setup
setup(
name='urlSigner',
version='0.0.3',
description='Library to sign a URL',
license='MIT',
packages=['urlSigner'],
author='deanle17',
author_email='<EMAIL>',
keywords=['url', 'signer'],
url='https://github.com/deanle17/fsecure-assignment',
download_url='https://github.com/deanle17/urlSigner/archive/0.0.3.tar.gz'
)
| from setuptools import setup
setup(
name='urlSigner',
version='0.0.3',
description='Library to sign a URL',
license='MIT',
packages=['urlSigner'],
author='deanle17',
author_email='<EMAIL>',
keywords=['url', 'signer'],
url='https://github.com/deanle17/fsecure-assignment',
download_url='https://github.com/deanle17/urlSigner/archive/0.0.3.tar.gz'
) | none | 1 | 1.27672 | 1 |
|
large_app/python/env.py | sahilGupta89/large_flask_app | 0 | 6630015 | import os
database="devdb"
user="devuser"
host="localhost"
password="password"
VERSION = os.environ.get("VERSION", "n/a")
DB_CONNECTION_STRING = os.environ.get(
"DB_CONNECTION_STRING", "mysql+pymysql://localhost:3306/dev"
)
PGDB_CONNECTION_STRING = os.environ.get(
"PGDB_CONNECTION_STRING", "postgresql+psycopg2://user_db:password@localhost:5432/user_db"
)
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false") in ("1", "true", "True")
# Auth0 settings
AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN", "trueenergy.eu.auth0.com")
AUTH0_API_AUDIENCE = os.environ.get("AUTH0_API_AUDIENCE", "apiv2.auth0.do.trnrg.co")
AUTH0_ZEAPI_AUDIENCE = os.environ.get("AUTH0_ZeAPI_AUDIENCE", "https://zeapi.trnrg.co")
AUTH0_ALGORITHMS = os.environ.get("AUTH0_ALGORITHMS", "RS256").split(",")
AUTH0_CLIENT_ID = os.environ.get("AUTH0_CLIENT_ID", "Z1myKXcwci61mGKFZhsWXoQ5Lz3WMErv")
AUTH0_CLIENT_SECRET = os.environ.get(
"AUTH0_CLIENT_SECRET",
"<KEY>",
)
AUTH0_UP_CONNECTION_NAME = os.environ.get(
"AUTH0_UP_CONNECTION_NAME", "Username-Password-Authentication"
)
# Sentry
SENTRY_ADMIN_DSN = os.environ.get("SENTRY_ADMIN_DSN")
SENTRY_API_DSN = os.environ.get("SENTRY_API_DSN")
SENTRY_ZEAPI_DSN = os.environ.get("SENTRY_ZEAPI_DSN")
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT")
SENTRY_CHAMPAPI_DSN = os.environ.get("SENTRY_CHAMPAPI_DSN")
SENTRY_BMWAPI_DSN = os.environ.get("SENTRY_BMWAPI_DSN")
SENTRY_JAGAPI_DSN = os.environ.get("SENTRY_JAGAPI_DSN")
SENTRY_SMARTMEAPI_DSN = os.environ.get("SENTRY_SMARTMEAPI_DSN")
FLASK_SECRET_KEY = os.environ.get("FLASK_SECRET_KEY")
# FREQ
FREQ_DASHBOARD_URL = os.environ.get("FREQ_DASHBOARD_URL", "")
FREQ_DK1_DASHBOARD_URL = os.environ.get("FREQ_DK1_DASHBOARD_URL", "")
FCR_POOL_AREA = os.environ.get("FCR_POOL_AREA", "DK2")
FCR_PLAN_BACKOFF_MINUTES = int(os.environ.get("FCR_PLAN_BACKOFF_MINUTES", 10))
# FOR CHAMP Service
CHAMP_BASE = os.environ.get("CHAMP_BASE", "https://iapi.charge.space/v1/chargers/")
# BMW Auth
BMW_AUTH_BASE = os.environ.get(
"BMW_AUTH_BASE", "https://customer.bmwgroup.com/gcdm/oauth/authenticate"
)
# JAG Auth
JAG_AUTH_BASE = os.environ.get(
"JAG_AUTH_BASE", "https://jlp-ifas.wirelesscar.net/ifas/jlr/tokens"
)
LOG_CARAPI_STATUS_RESPONSES = os.environ.get(
"LOG_CARAPI_STATUS_RESPONSES", "false"
) not in ("false", "0", "False")
| import os
database="devdb"
user="devuser"
host="localhost"
password="password"
VERSION = os.environ.get("VERSION", "n/a")
DB_CONNECTION_STRING = os.environ.get(
"DB_CONNECTION_STRING", "mysql+pymysql://localhost:3306/dev"
)
PGDB_CONNECTION_STRING = os.environ.get(
"PGDB_CONNECTION_STRING", "postgresql+psycopg2://user_db:password@localhost:5432/user_db"
)
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false") in ("1", "true", "True")
# Auth0 settings
AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN", "trueenergy.eu.auth0.com")
AUTH0_API_AUDIENCE = os.environ.get("AUTH0_API_AUDIENCE", "apiv2.auth0.do.trnrg.co")
AUTH0_ZEAPI_AUDIENCE = os.environ.get("AUTH0_ZeAPI_AUDIENCE", "https://zeapi.trnrg.co")
AUTH0_ALGORITHMS = os.environ.get("AUTH0_ALGORITHMS", "RS256").split(",")
AUTH0_CLIENT_ID = os.environ.get("AUTH0_CLIENT_ID", "Z1myKXcwci61mGKFZhsWXoQ5Lz3WMErv")
AUTH0_CLIENT_SECRET = os.environ.get(
"AUTH0_CLIENT_SECRET",
"<KEY>",
)
AUTH0_UP_CONNECTION_NAME = os.environ.get(
"AUTH0_UP_CONNECTION_NAME", "Username-Password-Authentication"
)
# Sentry
SENTRY_ADMIN_DSN = os.environ.get("SENTRY_ADMIN_DSN")
SENTRY_API_DSN = os.environ.get("SENTRY_API_DSN")
SENTRY_ZEAPI_DSN = os.environ.get("SENTRY_ZEAPI_DSN")
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT")
SENTRY_CHAMPAPI_DSN = os.environ.get("SENTRY_CHAMPAPI_DSN")
SENTRY_BMWAPI_DSN = os.environ.get("SENTRY_BMWAPI_DSN")
SENTRY_JAGAPI_DSN = os.environ.get("SENTRY_JAGAPI_DSN")
SENTRY_SMARTMEAPI_DSN = os.environ.get("SENTRY_SMARTMEAPI_DSN")
FLASK_SECRET_KEY = os.environ.get("FLASK_SECRET_KEY")
# FREQ
FREQ_DASHBOARD_URL = os.environ.get("FREQ_DASHBOARD_URL", "")
FREQ_DK1_DASHBOARD_URL = os.environ.get("FREQ_DK1_DASHBOARD_URL", "")
FCR_POOL_AREA = os.environ.get("FCR_POOL_AREA", "DK2")
FCR_PLAN_BACKOFF_MINUTES = int(os.environ.get("FCR_PLAN_BACKOFF_MINUTES", 10))
# FOR CHAMP Service
CHAMP_BASE = os.environ.get("CHAMP_BASE", "https://iapi.charge.space/v1/chargers/")
# BMW Auth
BMW_AUTH_BASE = os.environ.get(
"BMW_AUTH_BASE", "https://customer.bmwgroup.com/gcdm/oauth/authenticate"
)
# JAG Auth
JAG_AUTH_BASE = os.environ.get(
"JAG_AUTH_BASE", "https://jlp-ifas.wirelesscar.net/ifas/jlr/tokens"
)
LOG_CARAPI_STATUS_RESPONSES = os.environ.get(
"LOG_CARAPI_STATUS_RESPONSES", "false"
) not in ("false", "0", "False")
| en | 0.596434 | # Auth0 settings # Sentry # FREQ # FOR CHAMP Service # BMW Auth # JAG Auth | 1.91448 | 2 |
indico/modules/rb/operations/bookings.py | CrownedSilverFox/conference-platform | 1 | 6630016 | <filename>indico/modules/rb/operations/bookings.py
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from datetime import date, datetime, time
from itertools import chain, groupby
from operator import attrgetter, itemgetter
from flask import flash, session
from pytz import timezone
from sqlalchemy.orm import contains_eager, joinedload
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.queries import db_dates_overlap, with_total_rows
from indico.core.errors import NoReportError
from indico.modules.events.models.events import Event
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.rb import rb_settings
from indico.modules.rb.models.reservation_edit_logs import ReservationEditLog
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import RepeatFrequency, Reservation, ReservationLink
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.operations.blockings import filter_blocked_rooms, get_rooms_blockings, group_blocked_rooms
from indico.modules.rb.operations.conflicts import get_concurrent_pre_bookings, get_rooms_conflicts
from indico.modules.rb.operations.misc import get_rooms_nonbookable_periods, get_rooms_unbookable_hours
from indico.modules.rb.util import (group_by_occurrence_date, serialize_availability, serialize_blockings,
serialize_booking_details, serialize_nonbookable_periods, serialize_occurrences,
serialize_unbookable_hours)
from indico.util.date_time import iterdays, overlaps, server_to_utc
from indico.util.i18n import _
from indico.util.iterables import group_list
from indico.util.string import natural_sort_key
def group_blockings(blocked_rooms, dates):
if not blocked_rooms:
return {}
occurrences = {}
for blocked_room in blocked_rooms:
blocking = blocked_room.blocking
for date_ in dates:
if blocking.start_date <= date_ <= blocking.end_date:
occurrences[date_] = [blocking]
return occurrences
def group_nonbookable_periods(periods, dates):
if not periods:
return {}
occurrences = defaultdict(list)
for period in periods:
for d in dates:
if period.start_dt.date() <= d <= period.end_dt.date():
period_occurrence = NonBookablePeriod()
period_occurrence.start_dt = ((datetime.combine(d, time(0)))
if period.start_dt.date() != d else period.start_dt)
period_occurrence.end_dt = ((datetime.combine(d, time(23, 59)))
if period.end_dt.date() != d else period.end_dt)
occurrences[d].append(period_occurrence)
return occurrences
def get_existing_room_occurrences(room, start_dt, end_dt, repeat_frequency=RepeatFrequency.NEVER, repeat_interval=None,
allow_overlapping=False, only_accepted=False, skip_booking_id=None):
return get_existing_rooms_occurrences([room], start_dt, end_dt, repeat_frequency, repeat_interval,
allow_overlapping, only_accepted, skip_booking_id).get(room.id, [])
def get_existing_rooms_occurrences(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, allow_overlapping=False,
only_accepted=False, skip_booking_id=None):
room_ids = [room.id for room in rooms]
query = (ReservationOccurrence.query
.filter(ReservationOccurrence.is_valid, Reservation.room_id.in_(room_ids))
.join(ReservationOccurrence.reservation)
.options(ReservationOccurrence.NO_RESERVATION_USER_STRATEGY,
contains_eager(ReservationOccurrence.reservation)))
if allow_overlapping:
query = query.filter(db_dates_overlap(ReservationOccurrence, 'start_dt', start_dt, 'end_dt', end_dt))
else:
query = query.filter(ReservationOccurrence.start_dt >= start_dt, ReservationOccurrence.end_dt <= end_dt)
if only_accepted:
query = query.filter(Reservation.is_accepted)
if repeat_frequency != RepeatFrequency.NEVER:
candidates = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
dates = [candidate.start_dt for candidate in candidates]
query = query.filter(db.cast(ReservationOccurrence.start_dt, db.Date).in_(dates))
if skip_booking_id is not None:
query = query.filter(ReservationOccurrence.reservation_id != skip_booking_id)
return group_list(query, key=lambda obj: obj.reservation.room_id,
sort_by=lambda obj: (obj.reservation.room_id, obj.start_dt))
def get_rooms_availability(rooms, start_dt, end_dt, repeat_frequency, repeat_interval, skip_conflicts_with=None,
admin_override_enabled=False, skip_past_conflicts=False):
availability = {}
candidates = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
date_range = sorted({cand.start_dt.date() for cand in candidates})
occurrences = get_existing_rooms_occurrences(rooms, start_dt.replace(hour=0, minute=0),
end_dt.replace(hour=23, minute=59), repeat_frequency, repeat_interval)
blocked_rooms = get_rooms_blockings(rooms, start_dt.date(), end_dt.date())
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True))
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True))
unbookable_hours = get_rooms_unbookable_hours(rooms)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
conflicts, pre_conflicts, conflicting_candidates = get_rooms_conflicts(
rooms, start_dt.replace(tzinfo=None), end_dt.replace(tzinfo=None),
repeat_frequency, repeat_interval, nonoverridable_blocked_rooms,
nonbookable_periods, unbookable_hours, skip_conflicts_with,
allow_admin=admin_override_enabled, skip_past_conflicts=skip_past_conflicts
)
dates = list(candidate.start_dt.date() for candidate in candidates)
for room in rooms:
room_occurrences = occurrences.get(room.id, [])
room_conflicting_candidates = conflicting_candidates.get(room.id, [])
room_conflicts = conflicts.get(room.id, [])
pre_room_conflicts = pre_conflicts.get(room.id, [])
pre_bookings = [occ for occ in room_occurrences if not occ.reservation.is_accepted]
concurrent_pre_bookings = get_concurrent_pre_bookings(pre_bookings) if pre_bookings else []
existing_bookings = [occ for occ in room_occurrences if occ.reservation.is_accepted]
room_nonoverridable_blocked_rooms = nonoverridable_blocked_rooms.get(room.id, [])
room_overridable_blocked_rooms = overridable_blocked_rooms.get(room.id, [])
room_nonbookable_periods = nonbookable_periods.get(room.id, [])
room_unbookable_hours = unbookable_hours.get(room.id, [])
room_candidates = get_room_candidates(candidates, room_conflicts)
availability[room.id] = {'room_id': room.id,
'candidates': group_by_occurrence_date(room_candidates),
'conflicting_candidates': group_by_occurrence_date(room_conflicting_candidates),
'pre_bookings': group_by_occurrence_date(pre_bookings),
'concurrent_pre_bookings': group_by_occurrence_date(concurrent_pre_bookings),
'bookings': group_by_occurrence_date(existing_bookings),
'conflicts': group_by_occurrence_date(room_conflicts),
'pre_conflicts': group_by_occurrence_date(pre_room_conflicts),
'blockings': group_blockings(room_nonoverridable_blocked_rooms, dates),
'overridable_blockings': group_blockings(room_overridable_blocked_rooms, dates),
'nonbookable_periods': group_nonbookable_periods(room_nonbookable_periods, dates),
'unbookable_hours': room_unbookable_hours}
return date_range, availability
def get_room_candidates(candidates, conflicts):
return [candidate for candidate in candidates
if not (any(candidate.overlaps(conflict) for conflict in conflicts))]
def _bookings_query(filters, noload_room=False):
reservation_strategy = contains_eager('reservation')
if noload_room:
reservation_strategy.raiseload('room')
else:
reservation_strategy.joinedload('room')
reservation_strategy.noload('booked_for_user')
reservation_strategy.noload('created_by_user')
query = (ReservationOccurrence.query
.join(Reservation)
.join(Room)
.filter(~Room.is_deleted)
.options(reservation_strategy))
text = filters.get('text')
room_ids = filters.get('room_ids')
booking_criteria = [Reservation.booking_reason.ilike(f'%{text}%'),
Reservation.booked_for_name.ilike(f'%{text}%')]
if room_ids and text:
query = query.filter(db.or_(Room.id.in_(room_ids), *booking_criteria))
elif room_ids:
query = query.filter(Room.id.in_(room_ids))
elif text:
query = query.filter(db.or_(*booking_criteria))
if filters.get('start_dt'):
query = query.filter(ReservationOccurrence.start_dt >= filters['start_dt'])
if filters.get('end_dt'):
query = query.filter(ReservationOccurrence.end_dt <= filters['end_dt'])
booked_for_user = filters.get('booked_for_user')
if booked_for_user:
query = query.filter(db.or_(Reservation.booked_for_user == booked_for_user,
Reservation.created_by_user == booked_for_user))
if not filters.get('include_inactive'):
query = query.filter(ReservationOccurrence.is_valid)
return query
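# Illustration (hypothetical filter values): `_bookings_query` is the shared base
# query for the calendar and active-booking helpers below. `text` is matched against
# the booking reason and the booked-for name, and is OR-ed with `room_ids` when both
# are given; `start_dt`/`end_dt` bound the occurrence range, and inactive occurrences
# are dropped unless `include_inactive` is set. A call could look like:
#   _bookings_query({'room_ids': [42], 'text': 'standup',
#                    'start_dt': datetime(2021, 5, 3, 8, 0),
#                    'end_dt': datetime(2021, 5, 3, 18, 0)}).all()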
def get_room_calendar(start_date, end_date, room_ids, include_inactive=False, **filters):
start_dt = datetime.combine(start_date, time(hour=0, minute=0))
end_dt = datetime.combine(end_date, time(hour=23, minute=59))
query = _bookings_query(dict(filters, start_dt=start_dt, end_dt=end_dt, room_ids=room_ids,
include_inactive=include_inactive))
bookings = query.order_by(db.func.indico.natsort(Room.full_name)).all()
rooms = set()
if room_ids:
rooms = set(Room.query
.filter(~Room.is_deleted, Room.id.in_(room_ids))
.options(joinedload('location')))
rooms.update(b.reservation.room for b in bookings)
rooms = sorted(rooms, key=lambda r: natural_sort_key(r.full_name))
occurrences_by_room = groupby(bookings, attrgetter('reservation.room_id'))
unbookable_hours = get_rooms_unbookable_hours(rooms)
nonbookable_periods = get_rooms_nonbookable_periods(rooms, start_dt, end_dt)
blocked_rooms = get_rooms_blockings(rooms, start_dt, end_dt)
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True))
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True))
dates = [d.date() for d in iterdays(start_dt, end_dt)]
calendar = {room.id: {
'room_id': room.id,
'nonbookable_periods': group_nonbookable_periods(nonbookable_periods.get(room.id, []), dates),
'unbookable_hours': unbookable_hours.get(room.id, []),
'blockings': group_blockings(nonoverridable_blocked_rooms.get(room.id, []), dates),
'overridable_blockings': group_blockings(overridable_blocked_rooms.get(room.id, []), dates),
} for room in rooms}
for room_id, occurrences in occurrences_by_room:
occurrences = list(occurrences)
pre_bookings = [occ for occ in occurrences if occ.reservation.is_pending]
existing_bookings = [occ for occ in occurrences if not occ.reservation.is_pending and occ.is_valid]
concurrent_pre_bookings = get_concurrent_pre_bookings(pre_bookings)
additional_data = {
'bookings': group_by_occurrence_date(existing_bookings),
'pre_bookings': group_by_occurrence_date(pre_bookings),
'concurrent_pre_bookings': group_by_occurrence_date(concurrent_pre_bookings)
}
if include_inactive:
additional_data.update({
'cancellations': group_by_occurrence_date(occ for occ in occurrences if occ.is_cancelled),
'rejections': group_by_occurrence_date(occ for occ in occurrences if occ.is_rejected)
})
calendar[room_id].update(additional_data)
return calendar
def get_room_details_availability(room, start_dt, end_dt):
dates = [d.date() for d in iterdays(start_dt, end_dt)]
occurrences = get_existing_room_occurrences(room, start_dt, end_dt, RepeatFrequency.DAY, 1)
pre_bookings = [occ for occ in occurrences if not occ.reservation.is_accepted]
bookings = [occ for occ in occurrences if occ.reservation.is_accepted]
blocked_rooms = get_rooms_blockings([room], start_dt.date(), end_dt.date())
nonoverridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
nonoverridable_only=True,
explicit=True)).get(room.id, [])
overridable_blocked_rooms = group_blocked_rooms(filter_blocked_rooms(blocked_rooms,
overridable_only=True,
explicit=True)).get(room.id, [])
unbookable_hours = get_rooms_unbookable_hours([room]).get(room.id, [])
nonbookable_periods = get_rooms_nonbookable_periods([room], start_dt, end_dt).get(room.id, [])
availability = []
for day in dates:
iso_day = day.isoformat()
nb_periods = serialize_nonbookable_periods(group_nonbookable_periods(nonbookable_periods, dates)).get(iso_day)
availability.append({
'bookings': serialize_occurrences(group_by_occurrence_date(bookings)).get(iso_day),
'pre_bookings': serialize_occurrences(group_by_occurrence_date(pre_bookings)).get(iso_day),
'blockings': serialize_blockings(group_blockings(nonoverridable_blocked_rooms, dates)).get(iso_day),
'overridable_blockings': (serialize_blockings(group_blockings(overridable_blocked_rooms, dates))
.get(iso_day)),
'nonbookable_periods': nb_periods,
'unbookable_hours': serialize_unbookable_hours(unbookable_hours),
'day': iso_day,
})
return sorted(availability, key=itemgetter('day'))
def get_booking_occurrences(booking):
date_range = sorted({cand.start_dt.date() for cand in booking.occurrences})
occurrences = group_by_occurrence_date(booking.occurrences)
return date_range, occurrences
def check_room_available(room, start_dt, end_dt):
occurrences = get_existing_room_occurrences(room, start_dt, end_dt, allow_overlapping=True)
prebookings = [occ for occ in occurrences if not occ.reservation.is_accepted]
bookings = [occ for occ in occurrences if occ.reservation.is_accepted]
unbookable_hours = get_rooms_unbookable_hours([room]).get(room.id, [])
hours_overlap = any(hours for hours in unbookable_hours
if overlaps((start_dt.time(), end_dt.time()), (hours.start_time, hours.end_time)))
nonbookable_periods = any(get_rooms_nonbookable_periods([room], start_dt, end_dt))
blocked_rooms = get_rooms_blockings([room], start_dt, end_dt)
nonoverridable_blocked_rooms = filter_blocked_rooms(blocked_rooms, nonoverridable_only=True, explicit=True)
blocked_for_user = any(nonoverridable_blocked_rooms)
user_booking = any(booking for booking in bookings if booking.reservation.booked_for_id == session.user.id)
user_prebooking = any(prebooking for prebooking in prebookings
if prebooking.reservation.booked_for_id == session.user.id)
return {
'can_book': room.can_book(session.user, allow_admin=False),
'can_prebook': room.can_prebook(session.user, allow_admin=False),
'conflict_booking': any(bookings),
'conflict_prebooking': any(prebookings),
'unbookable': (hours_overlap or nonbookable_periods or blocked_for_user),
'user_booking': user_booking,
'user_prebooking': user_prebooking,
}
def create_booking_for_event(room_id, event):
try:
room = Room.get_or_404(room_id)
default_timezone = timezone(config.DEFAULT_TIMEZONE)
start_dt = event.start_dt.astimezone(default_timezone).replace(tzinfo=None)
end_dt = event.end_dt.astimezone(default_timezone).replace(tzinfo=None)
booking_reason = f"Event '{event.title}'"
data = dict(start_dt=start_dt, end_dt=end_dt, booked_for_user=event.creator, booking_reason=booking_reason,
repeat_frequency=RepeatFrequency.NEVER, event_id=event.id)
booking = Reservation.create_from_data(room, data, session.user, ignore_admin=True)
booking.linked_object = event
return booking
except NoReportError:
flash(_("Booking could not be created. Probably somebody else booked the room in the meantime."), 'error')
return None
def get_active_bookings(limit, start_dt, last_reservation_id=None, **filters):
criteria = [ReservationOccurrence.start_dt > start_dt]
if last_reservation_id is not None:
criteria.append(db.and_(db.cast(ReservationOccurrence.start_dt, db.Date) >= start_dt,
ReservationOccurrence.reservation_id > last_reservation_id))
query = (_bookings_query(filters, noload_room=True)
.filter(db.or_(*criteria))
.order_by(ReservationOccurrence.start_dt,
ReservationOccurrence.reservation_id,
db.func.indico.natsort(Room.full_name))
.limit(limit))
bookings, total = with_total_rows(query)
rows_left = total - limit if total > limit else total
return group_by_occurrence_date(query, sort_by=lambda obj: (obj.start_dt, obj.reservation_id)), rows_left
def has_same_dates(old_booking, new_booking):
return (old_booking.start_dt == new_booking['start_dt'] and
old_booking.end_dt == new_booking['end_dt'] and
old_booking.repeat_interval == new_booking['repeat_interval'] and
old_booking.repeat_frequency == new_booking['repeat_frequency'])
def has_same_slots(old_booking, new_booking):
if (
old_booking.repeat_interval != new_booking['repeat_interval']
or old_booking.repeat_frequency != new_booking['repeat_frequency']
):
return False
return old_booking.start_dt <= new_booking['start_dt'] and old_booking.end_dt >= new_booking['end_dt']
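# Note the difference between the two helpers above: `has_same_dates` requires the
# start/end datetimes and the repetition to match exactly, while `has_same_slots`
# only requires the repetition to match and the new range to lie inside the old one.
# With hypothetical values, old 2021-05-03 09:00-12:00 vs. new 2021-05-03 10:00-11:00
# (same repetition) fails `has_same_dates` but passes `has_same_slots`.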
def should_split_booking(booking, new_data):
today = date.today()
is_ongoing_booking = booking.start_dt.date() < today < booking.end_dt.date()
old_start_time = booking.start_dt.time()
old_end_time = booking.end_dt.time()
old_repeat_frequency = booking.repeat_frequency
old_repeat_interval = booking.repeat_interval
times_changed = new_data['start_dt'].time() != old_start_time or new_data['end_dt'].time() != old_end_time
new_repeat_frequency = new_data['repeat_frequency']
new_repeat_interval = new_data['repeat_interval']
repetition_changed = (new_repeat_frequency, new_repeat_interval) != (old_repeat_frequency, old_repeat_interval)
return is_ongoing_booking and (times_changed or repetition_changed)
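# A booking is split only when it is ongoing (started before today, ends after today)
# and the edit changes the daily times or the repetition. Hypothetical example: a
# Mon-Fri 09:00-10:00 booking edited on Wednesday to 14:00-15:00 triggers a split
# (handled by `split_booking` below); the same edit on a booking that has not started
# yet does not.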
def split_booking(booking, new_booking_data):
is_ongoing_booking = booking.start_dt.date() < date.today() < booking.end_dt.date()
if not is_ongoing_booking:
return
cancelled_dates = []
rejected_occs = {}
room = booking.room
occurrences = sorted(booking.occurrences, key=attrgetter('start_dt'))
old_frequency = booking.repeat_frequency
occurrences_to_cancel = [occ for occ in occurrences if occ.start_dt >= datetime.now() and occ.is_valid]
if old_frequency != RepeatFrequency.NEVER and new_booking_data['repeat_frequency'] == RepeatFrequency.NEVER:
new_start_dt = new_booking_data['start_dt']
else:
new_start_dt = datetime.combine(occurrences_to_cancel[0].start_dt.date(), new_booking_data['start_dt'].time())
cancelled_dates = [occ.start_dt.date() for occ in occurrences if occ.is_cancelled]
rejected_occs = {occ.start_dt.date(): occ.rejection_reason for occ in occurrences if occ.is_rejected}
new_end_dt = [occ for occ in occurrences if occ.start_dt < datetime.now()][-1].end_dt
old_booking_data = {
'booking_reason': booking.booking_reason,
'booked_for_user': booking.booked_for_user,
'start_dt': booking.start_dt,
'end_dt': new_end_dt,
'repeat_frequency': booking.repeat_frequency,
'repeat_interval': booking.repeat_interval,
}
booking.modify(old_booking_data, session.user)
for occurrence_to_cancel in occurrences_to_cancel:
occurrence_to_cancel.cancel(session.user, silent=True)
prebook = not room.can_book(session.user, allow_admin=False) and room.can_prebook(session.user, allow_admin=False)
resv = Reservation.create_from_data(room, dict(new_booking_data, start_dt=new_start_dt), session.user,
prebook=prebook, ignore_admin=True)
for new_occ in resv.occurrences:
new_occ_start = new_occ.start_dt.date()
if new_occ_start in cancelled_dates:
new_occ.cancel(None, silent=True)
if new_occ_start in rejected_occs:
new_occ.reject(None, rejected_occs[new_occ_start], silent=True)
booking.edit_logs.append(ReservationEditLog(user_name=session.user.full_name, info=[
'Split into a new booking',
f'booking_link:{resv.id}'
]))
resv.edit_logs.append(ReservationEditLog(user_name=session.user.full_name, info=[
'Split from another booking',
f'booking_link:{booking.id}'
]))
return resv
def get_matching_events(start_dt, end_dt, repeat_frequency, repeat_interval):
"""Get events suitable for booking linking.
This finds events that overlap with an occurrence of a booking
with the given dates where the user is a manager.
"""
occurrences = ReservationOccurrence.create_series(start_dt, end_dt, (repeat_frequency, repeat_interval))
excluded_categories = rb_settings.get('excluded_categories')
return (Event.query
.filter(~Event.is_deleted,
~Event.room_reservation_links.any(ReservationLink.reservation.has(Reservation.is_accepted)),
db.or_(Event.happens_between(server_to_utc(occ.start_dt), server_to_utc(occ.end_dt))
for occ in occurrences),
Event.timezone == config.DEFAULT_TIMEZONE,
db.and_(Event.category_id != cat.id for cat in excluded_categories),
Event.acl_entries.any(db.and_(EventPrincipal.type == PrincipalType.user,
EventPrincipal.user_id == session.user.id,
EventPrincipal.full_access)))
.all())
def get_booking_edit_calendar_data(booking, booking_changes):
"""Return calendar-related data for the booking edit modal."""
room = booking.room
booking_details = serialize_booking_details(booking)
old_date_range = booking_details['date_range']
booking_availability = dict(booking_details['occurrences'], candidates={}, conflicts={}, conflicting_candidates={},
pre_bookings={}, pre_conflicts={}, pending_cancellations={}, num_days_available=None,
num_conflicts=None)
response = {
'will_be_split': False,
'calendars': [{'date_range': old_date_range, 'data': booking_availability}]
}
cancelled_dates = [occ.start_dt.date() for occ in booking.occurrences if occ.is_cancelled]
rejected_dates = [occ.start_dt.date() for occ in booking.occurrences if occ.is_rejected]
if should_split_booking(booking, booking_changes):
old_frequency = booking.repeat_frequency
future_occurrences = [occ for occ in sorted(booking.occurrences, key=attrgetter('start_dt'))
if occ.start_dt >= datetime.now()]
if old_frequency != RepeatFrequency.NEVER and booking_changes['repeat_frequency'] == RepeatFrequency.NEVER:
cancelled_dates = []
rejected_dates = []
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id], **booking_changes)
else:
new_booking_start_dt = datetime.combine(future_occurrences[0].start_dt.date(),
booking_changes['start_dt'].time())
availability_filters = dict(booking_changes, start_dt=new_booking_start_dt)
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id],
**availability_filters)
for occ in booking.occurrences:
serialized = serialize_occurrences({occ.start_dt.date(): [occ]})
if occ in future_occurrences and occ.is_valid:
booking_availability['pending_cancellations'].update(serialized)
elif not occ.is_rejected and not occ.is_cancelled:
booking_availability['bookings'].update(serialized)
response['will_be_split'] = True
elif not has_same_dates(booking, booking_changes):
new_date_range, data = get_rooms_availability([room], skip_conflicts_with=[booking.id],
skip_past_conflicts=True, **booking_changes)
else:
return response
room_availability = data[room.id]
room_availability['cancellations'] = {}
room_availability['rejections'] = {}
others = defaultdict(list)
for k, v in chain(room_availability['bookings'].items(), room_availability['pre_bookings'].items()):
others[k].extend(v)
other_bookings = {dt: [x for x in other if x.reservation.id != booking.id] for dt, other in others.items()}
candidates = room_availability['candidates']
for dt, dt_candidates in candidates.items():
if dt in cancelled_dates:
candidates[dt] = []
room_availability['cancellations'].update({dt: dt_candidates})
elif dt in rejected_dates:
candidates[dt] = []
room_availability['rejections'].update({dt: dt_candidates})
room_availability['num_days_available'] = (
len(new_date_range) -
len(room_availability['conflicts']) -
len(room_availability['cancellations']) -
len(room_availability['rejections'])
)
room_availability['num_conflicts'] = len(room_availability['conflicts'])
room_availability['bookings'] = {}
room_availability['other'] = serialize_occurrences(other_bookings)
room_availability['pending_cancellations'] = {}
response['calendars'].append({'date_range': new_date_range, 'data': serialize_availability(data)[room.id]})
return response
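# Shape of the response built above (values are illustrative): 'calendars'[0] always
# holds the current date range with the serialized booking occurrences; a second
# calendar entry for the new date range is appended only when the booking will be
# split or its dates changed, and 'will_be_split' is set when `should_split_booking`
# decides the ongoing booking must be split.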
Stack/9-stack-sorting-with-recursion.py | mahmutcankurt/DataStructures_Python | 1 | 6630017 |
# Insert `element` into the list-based stack `s` while keeping it sorted in
# ascending order from bottom to top (largest element ends up on top).
def insert(s, element):
if(len(s) == 0 or element > s[-1]):
s.append(element)
return
else:
temp = s.pop()
insert(s, element)
s.append(temp)
# Recursively pop the whole stack, then put each element back in sorted position.
def sorting(s):
if(len(s) != 0):
temp = s.pop()
sorting(s)
insert(s, temp)
# Print the stack from top (most recently pushed) to bottom.
def printStack(s):
for i in s[::-1]:
print(i, end=" ")
print()
if __name__ == "__main__":
s = []
s.append(31)
s.append(-6)
s.append(19)
s.append(14)
s.append(-4)
print("Stack elements before sorting: ")
printStack(s)
sorting(s)
print("\nStack elements after sorting: ")
printStack(s)
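# With the pushes above, the stack ends up as [-6, -4, 14, 19, 31] (ascending from
# bottom to top, largest on top), so the second printStack() call prints
# "31 19 14 -4 -6". Each insert may pop and re-push every element already in the
# stack, so sorting runs in O(n^2) time with O(n) recursion depth.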
functions/test/faceapi_test.py | yokawasa/facetag-functions | 2 | 6630018 | # -*- coding: utf-8 -*-
import glob,os,io
import sys
sys.path.append('../')
from commons.config import Config
from commons.faceapi import AzureCognitiveFaceAPI
from commons.blockblob import AzureStorageBlockBlob
config = Config()
# Face API
# pip install azure-cognitiveservices-vision-face
# FaceAPI Python SDK
# https://docs.microsoft.com/en-us/azure/cognitive-services/face/quickstarts/python-sdk
# https://azure.microsoft.com/en-us/services/cognitive-services/face/
# https://github.com/Azure-Samples/cognitive-services-quickstart-code/blob/master/python/Face/FaceQuickstart.py
if __name__ == "__main__":
storage_info = AzureStorageBlockBlob.parse_storage_conn_string(config.get_value('AzureWebJobsStorage'))
api = AzureCognitiveFaceAPI(
config.get_value('FACEAPI_ENDPOINT'),
config.get_value('FACEAPI_SUBKEY'),
storage_info['AccountName'],
storage_info['AccountKey'])
# person group id should be lowercase and alphanumeric (dash is ok)
person_group_id = "my-unique-person-group00"
## Create PersonGroup
#print('Create Person group:', person_group_id)
#try:
# faceapi.create_person_group(person_group_id, person_group_id)
#except Exception as e:
# # print("[Errno {0}] {1}".format(e.errno, e.strerror))
# print(e)
## Create Person
#person_name = "Yoichi01"
#print('Create Person:', person_name)
#try:
# person = faceapi.create_person(person_group_id, person_name)
# print("person: id={}".format(person.person_id))
#except Exception as e:
# print(e)
## Add images to a person
person1_id = "47ca6a82-f5d1-45a8-9ac2-86a61ad6de90"
person2_id = "4caa393c-510f-4d56-9438-f7fc8eadb52c"
# Find all jpeg images of friends in working directory
## For some reason, specifying an absolute path does not work
#person1_images = [file for file in glob.glob('/Users/yoichika/dev/github/facetag-services/samples/*.jpg') if file.startswith("man")]
## Specifying a relative path works fine
os.chdir("/Users/yoichika/dev/github/facetag-services/samples")
person1_images = [file for file in glob.glob('*.jpg') if file.startswith("man")]
# Iterate over person1's images (the add_face_from_stream call is kept commented out below)
for image in person1_images:
print(image)
#w = open(image, 'r+b')
#face_client.person_group_person.add_face_from_stream(PERSON_GROUP_ID, woman.person_id, w)
## How to load these from blob storage instead
"""
storage_account_name = "facetagstore"
storage_account_key= "<KEY>
compvision_endpoint = "https://yoichikacompvision01.cognitiveservices.azure.com"
container_name = "test"
blobclient = blockblob.AzureStorageBlockBlob(storage_account_name,storage_account_key)
blob_names = ["man1-person-group.jpg", "man2-person-group.jpg", "man3-person-group.jpg"]
for blob_name in blob_names:
## get blob to bytes
# image_blob1 = blobclient.get_blob(container_name, blob1_name)
## get blob to stream
in_stream = io.BytesIO()
blobclient.get_blob_stream(container_name, blob_name, in_stream)
in_stream.seek(0)
persisted_face = api.add_face(person_group_id, person1_id, in_stream)
print("persion_id={} persisted_face id={}".format(person1_id, persisted_face.persisted_face_id))
blob_names = ["woman1-person-group.jpg", "woman2-person-group.jpg", "woman3-person-group.jpg"]
for blob_name in blob_names:
## get blob to bytes
# image_blob1 = blobclient.get_blob(container_name, blob1_name)
## get blob to stream
in_stream = io.BytesIO()
blobclient.get_blob_stream(container_name, blob_name, in_stream)
in_stream.seek(0)
persisted_face = api.add_face(person_group_id, person2_id, in_stream)
print("persion_id={} persisted_face id={}".format(person2_id, persisted_face.persisted_face_id))
# expected output
#persion_id=47ca6a82-f5d1-45a8-9ac2-86a61ad6de90 persisted_face id=837f5342-fa1c-4c3d-bf85-7ace20632f7d
"""
"""
## Train PersonGroup
print("Train Person group:", person_group_id)
try:
api.train_person_group(person_group_id)
except Exception as e:
print(e)
"""
container_name = "imageslocal"
blob_name = "test-image-person-group.jpg"
"""
blobclient = AzureStorageBlockBlob(storage_account_name,storage_account_key)
in_stream = io.BytesIO()
blobclient.get_blob_stream(container_name, blob_name, in_stream)
in_stream.seek(0)
## Identify Face
api.identify_face(person_group_id, in_stream, max_num_of_candidates_returned=1, confidence_threshold=0.5)
"""
ret = api.identify_face_blob(person_group_id, container_name, blob_name, max_num_of_candidates_returned=1, confidence_threshold=0.5)
print(ret)
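# Sketch of the plain azure-cognitiveservices-vision-face flow that the
# AzureCognitiveFaceAPI wrapper above presumably builds on (endpoint, key and file
# name below are placeholders, not values from this project):
#
#   from azure.cognitiveservices.vision.face import FaceClient
#   from msrest.authentication import CognitiveServicesCredentials
#
#   client = FaceClient("https://<resource>.cognitiveservices.azure.com",
#                       CognitiveServicesCredentials("<subscription-key>"))
#   client.person_group.create(person_group_id="my-unique-person-group00",
#                              name="my-unique-person-group00")
#   person = client.person_group_person.create("my-unique-person-group00", "Yoichi01")
#   with open("man1-person-group.jpg", "rb") as image:
#       client.person_group_person.add_face_from_stream(
#           "my-unique-person-group00", person.person_id, image)
#   client.person_group.train("my-unique-person-group00")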
pymtl3/passes/backends/verilog/translation/behavioral/VBehavioralTranslatorL2.py | kevinyuan/pymtl3 | 152 | 6630019 | #=========================================================================
# VBehavioralTranslatorL2.py
#=========================================================================
# Author : <NAME>
# Date : March 18, 2019
"""Provide the level 2 SystemVerilog translator implementation."""
from pymtl3.passes.backends.generic.behavioral.BehavioralTranslatorL2 import (
BehavioralTranslatorL2,
)
from pymtl3.passes.rtlir import BehavioralRTLIR as bir
from pymtl3.passes.rtlir import RTLIRType as rt
from ...util.utility import make_indent
from .VBehavioralTranslatorL1 import (
BehavioralRTLIRToVVisitorL1,
VBehavioralTranslatorL1,
)
class VBehavioralTranslatorL2( VBehavioralTranslatorL1, BehavioralTranslatorL2 ):
def _get_rtlir2v_visitor( s ):
return BehavioralRTLIRToVVisitorL2
def rtlir_tr_behavioral_tmpvars( s, tmpvars ):
make_indent( tmpvars, 1 )
return '\n'.join( tmpvars )
def rtlir_tr_behavioral_tmpvar( s, id_, upblk_id, dtype ):
return s.rtlir_tr_wire_decl(
"__tmpvar__"+upblk_id+'_'+id_, rt.Wire(dtype['raw_dtype']),
s.rtlir_tr_unpacked_array_type(None), dtype )
#-------------------------------------------------------------------------
# BehavioralRTLIRToVVisitorL2
#-------------------------------------------------------------------------
class BehavioralRTLIRToVVisitorL2( BehavioralRTLIRToVVisitorL1 ):
"""Visitor that translates RTLIR to SystemVerilog for a single upblk."""
def __init__( s, is_reserved ):
super().__init__( is_reserved )
# The dictionary of operator-character pairs
s.ops = {
# Unary operators
# bir.Invert : '~', bir.Not : '!', bir.UAdd : '+', bir.USub : '-',
bir.Invert : '~', bir.UAdd : '+', bir.USub : '-',
# Boolean operators
# bir.And : '&&', bir.Or : '||',
# Binary operators
bir.Add : '+', bir.Sub : '-', bir.Mult : '*', bir.Div : '/',
bir.Mod : '%', bir.Pow : '**',
bir.ShiftLeft : '<<', bir.ShiftRightLogic : '>>',
bir.BitAnd : '&', bir.BitOr : '|', bir.BitXor : '^',
# Comparison operators
bir.Eq : '==', bir.NotEq : '!=', bir.Lt : '<', bir.LtE : '<=',
bir.Gt : '>', bir.GtE : '>='
}
def visit_expr_wrap( s, node ):
"""Return expressions selectively wrapped with brackets."""
if isinstance( node,
# ( bir.IfExp, bir.UnaryOp, bir.BoolOp, bir.BinOp, bir.Compare ) ):
( bir.IfExp, bir.UnaryOp, bir.BinOp, bir.Compare ) ):
return f"( {s.visit(node)} )"
else:
return s.visit( node )
#-----------------------------------------------------------------------
# Statements
#-----------------------------------------------------------------------
# All statement nodes return a list of strings.
#-----------------------------------------------------------------------
# visit_If
#-----------------------------------------------------------------------
def visit_If( s, node ):
node.cond._top_expr = True
src = []
body = []
orelse = []
# Grab condition, if-body, and orelse-body
cond = s.visit( node.cond )
for stmt in node.body:
body.extend( s.visit( stmt ) )
make_indent( body, 1 )
for stmt in node.orelse:
orelse.extend( s.visit( stmt ) )
# Assemble the statement, starting with if-body
if_begin = f'if ( {cond} ) begin'
src.extend( [ if_begin ] )
src.extend( body )
# if len( node.body ) > 1:
src.extend( [ 'end' ] )
# If orelse-body is not empty, add it to the list of strings
if node.orelse != []:
# If an if statement is the only statement in the orelse-body
if len( node.orelse ) == 1 and isinstance( node.orelse[ 0 ], bir.If ):
# No indent will be added, also append if-begin to else-begin
else_begin = f'else {orelse[0]}'
orelse = orelse[ 1 : ]
# Else indent orelse-body
else:
else_begin = 'else' + ( ' begin' if len( node.orelse ) > 1 else '' )
make_indent( orelse, 1 )
src.extend( [ else_begin ] )
src.extend( orelse )
if len( node.orelse ) > 1:
src.extend( [ 'end' ] )
return src
#-----------------------------------------------------------------------
# visit_For
#-----------------------------------------------------------------------
def visit_For( s, node ):
node.start._top_expr = True
node.end._top_expr = True
node.step._top_expr = True
src = []
body = []
loop_var = s.visit( node.var )
start = s.visit( node.start )
end = s.visit( node.end )
begin = ' begin' if len( node.body ) > 1 else ''
cmp_op = '>' if node.step._value < 0 else '<'
inc_op = '-' if node.step._value < 0 else '+'
step_abs = s.visit( node.step )
if node.step._value < 0 and step_abs[0] == '-':
step_abs = step_abs[1:]
for stmt in node.body:
body.extend( s.visit( stmt ) )
make_indent( body, 1 )
for_begin = \
'for ( int unsigned {v} = {s}; {v} {comp} {t}; {v} {inc}= {stp} ){begin}'.format(
v = loop_var, s = start, t = end, stp = step_abs,
comp = cmp_op, inc = inc_op, begin = begin
)
# Assemble for statement
src.extend( [ for_begin ] )
src.extend( body )
if len( node.body ) > 1:
src.extend( [ 'end' ] )
return src
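# Sketch for illustration (assumption, not in the original file): a loop like
#   for i in range(4): ...
# is rendered roughly as
#   for ( int unsigned i = 0; i < 4; i += 1 ) begin ... end
# and a negative step flips the comparison to `>` and the increment to `-=`,
# using the absolute value of the step (the leading minus is stripped above).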
#-----------------------------------------------------------------------
# Expressions
#-----------------------------------------------------------------------
# All expression nodes return a single string.
#-----------------------------------------------------------------------
# visit_IfExp
#-----------------------------------------------------------------------
def visit_IfExp( s, node ):
node.cond._top_expr = True
node.body._top_expr = True
node.orelse._top_expr = True
cond = s.visit_expr_wrap( node.cond )
true = s.visit( node.body )
false = s.visit( node.orelse )
return f'{cond} ? {true} : {false}'
#-----------------------------------------------------------------------
# visit_UnaryOp
#-----------------------------------------------------------------------
def visit_UnaryOp( s, node ):
node.operand._top_expr = True
op = s.ops[ type( node.op ) ]
operand = s.visit_expr_wrap( node.operand )
return f'{op}{operand}'
#-----------------------------------------------------------------------
# visit_BoolOp
#-----------------------------------------------------------------------
# def visit_BoolOp( s, node ):
# for value in node.values:
# value._top_expr = True
# op = s.ops[ type( node.op ) ]
# values = []
# for value in node.values:
# values.append( s.visit_expr_wrap( value ) )
# src = f' {op} '.join( values )
# return src
#-----------------------------------------------------------------------
# visit_BinOp
#-----------------------------------------------------------------------
def visit_BinOp( s, node ):
node.left._top_expr = True
node.right._top_expr = True
op = s.ops[ type( node.op ) ]
lhs = s.visit_expr_wrap( node.left )
rhs = s.visit_expr_wrap( node.right )
return f'{lhs} {op} {rhs}'
#-----------------------------------------------------------------------
# visit_Compare
#-----------------------------------------------------------------------
def visit_Compare( s, node ):
node.left._top_expr = True
node.right._top_expr = True
op = s.ops[ type( node.op ) ]
lhs = s.visit_expr_wrap( node.left )
rhs = s.visit_expr_wrap( node.right )
return f'{lhs} {op} {rhs}'
#-----------------------------------------------------------------------
# visit_LoopVar
#-----------------------------------------------------------------------
def visit_LoopVar( s, node ):
s.check_res( node, node.name )
nbits = node.Type.get_dtype().get_length()
return f"{nbits}'({node.name})"
#-----------------------------------------------------------------------
# visit_TmpVar
#-----------------------------------------------------------------------
def visit_TmpVar( s, node ):
tmpvar = f"__tmpvar__{node.upblk_name}_{node.name}"
if not node._is_explicit and not s.is_assign_LHS:
nbits = node.Type.get_dtype().get_length()
return f"{nbits}'({tmpvar})"
else:
return tmpvar
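# Illustrative note (assumption, not in the original file): a temporary
# variable `x` inside an update block named `upblk` becomes
# `__tmpvar__upblk_x`; when it has no explicit bitwidth and appears on the
# right-hand side it is additionally wrapped in a sized cast such as
# `8'(__tmpvar__upblk_x)` (the width 8 here is only an example).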
#-----------------------------------------------------------------------
# visit_LoopVarDecl
#-----------------------------------------------------------------------
def visit_LoopVarDecl( s, node ):
s.check_res( node, node.name )
return node.name
| #=========================================================================
# VBehavioralTranslatorL2.py
#=========================================================================
# Author : <NAME>
# Date : March 18, 2019
"""Provide the level 2 SystemVerilog translator implementation."""
from pymtl3.passes.backends.generic.behavioral.BehavioralTranslatorL2 import (
BehavioralTranslatorL2,
)
from pymtl3.passes.rtlir import BehavioralRTLIR as bir
from pymtl3.passes.rtlir import RTLIRType as rt
from ...util.utility import make_indent
from .VBehavioralTranslatorL1 import (
BehavioralRTLIRToVVisitorL1,
VBehavioralTranslatorL1,
)
class VBehavioralTranslatorL2( VBehavioralTranslatorL1, BehavioralTranslatorL2 ):
def _get_rtlir2v_visitor( s ):
return BehavioralRTLIRToVVisitorL2
def rtlir_tr_behavioral_tmpvars( s, tmpvars ):
make_indent( tmpvars, 1 )
return '\n'.join( tmpvars )
def rtlir_tr_behavioral_tmpvar( s, id_, upblk_id, dtype ):
return s.rtlir_tr_wire_decl(
"__tmpvar__"+upblk_id+'_'+id_, rt.Wire(dtype['raw_dtype']),
s.rtlir_tr_unpacked_array_type(None), dtype )
#-------------------------------------------------------------------------
# BehavioralRTLIRToVVisitorL2
#-------------------------------------------------------------------------
class BehavioralRTLIRToVVisitorL2( BehavioralRTLIRToVVisitorL1 ):
"""Visitor that translates RTLIR to SystemVerilog for a single upblk."""
def __init__( s, is_reserved ):
super().__init__( is_reserved )
# The dictionary of operator-character pairs
s.ops = {
# Unary operators
# bir.Invert : '~', bir.Not : '!', bir.UAdd : '+', bir.USub : '-',
bir.Invert : '~', bir.UAdd : '+', bir.USub : '-',
# Boolean operators
# bir.And : '&&', bir.Or : '||',
# Binary operators
bir.Add : '+', bir.Sub : '-', bir.Mult : '*', bir.Div : '/',
bir.Mod : '%', bir.Pow : '**',
bir.ShiftLeft : '<<', bir.ShiftRightLogic : '>>',
bir.BitAnd : '&', bir.BitOr : '|', bir.BitXor : '^',
# Comparison operators
bir.Eq : '==', bir.NotEq : '!=', bir.Lt : '<', bir.LtE : '<=',
bir.Gt : '>', bir.GtE : '>='
}
def visit_expr_wrap( s, node ):
"""Return expressions selectively wrapped with brackets."""
if isinstance( node,
# ( bir.IfExp, bir.UnaryOp, bir.BoolOp, bir.BinOp, bir.Compare ) ):
( bir.IfExp, bir.UnaryOp, bir.BinOp, bir.Compare ) ):
return f"( {s.visit(node)} )"
else:
return s.visit( node )
#-----------------------------------------------------------------------
# Statements
#-----------------------------------------------------------------------
# All statement nodes return a list of strings.
#-----------------------------------------------------------------------
# visit_If
#-----------------------------------------------------------------------
def visit_If( s, node ):
node.cond._top_expr = True
src = []
body = []
orelse = []
# Grab condition, if-body, and orelse-body
cond = s.visit( node.cond )
for stmt in node.body:
body.extend( s.visit( stmt ) )
make_indent( body, 1 )
for stmt in node.orelse:
orelse.extend( s.visit( stmt ) )
# Assemble the statement, starting with if-body
if_begin = f'if ( {cond} ) begin'
src.extend( [ if_begin ] )
src.extend( body )
# if len( node.body ) > 1:
src.extend( [ 'end' ] )
# If orelse-body is not empty, add it to the list of strings
if node.orelse != []:
# If an if statement is the only statement in the orelse-body
if len( node.orelse ) == 1 and isinstance( node.orelse[ 0 ], bir.If ):
# No indent will be added, also append if-begin to else-begin
else_begin = f'else {orelse[0]}'
orelse = orelse[ 1 : ]
# Else indent orelse-body
else:
else_begin = 'else' + ( ' begin' if len( node.orelse ) > 1 else '' )
make_indent( orelse, 1 )
src.extend( [ else_begin ] )
src.extend( orelse )
if len( node.orelse ) > 1:
src.extend( [ 'end' ] )
return src
#-----------------------------------------------------------------------
# visit_For
#-----------------------------------------------------------------------
def visit_For( s, node ):
node.start._top_expr = True
node.end._top_expr = True
node.step._top_expr = True
src = []
body = []
loop_var = s.visit( node.var )
start = s.visit( node.start )
end = s.visit( node.end )
begin = ' begin' if len( node.body ) > 1 else ''
cmp_op = '>' if node.step._value < 0 else '<'
inc_op = '-' if node.step._value < 0 else '+'
step_abs = s.visit( node.step )
if node.step._value < 0 and step_abs[0] == '-':
step_abs = step_abs[1:]
for stmt in node.body:
body.extend( s.visit( stmt ) )
make_indent( body, 1 )
for_begin = \
'for ( int unsigned {v} = {s}; {v} {comp} {t}; {v} {inc}= {stp} ){begin}'.format(
v = loop_var, s = start, t = end, stp = step_abs,
comp = cmp_op, inc = inc_op, begin = begin
)
# Assemble for statement
src.extend( [ for_begin ] )
src.extend( body )
if len( node.body ) > 1:
src.extend( [ 'end' ] )
return src
#-----------------------------------------------------------------------
# Expressions
#-----------------------------------------------------------------------
# All expression nodes return a single string.
#-----------------------------------------------------------------------
# visit_IfExp
#-----------------------------------------------------------------------
def visit_IfExp( s, node ):
node.cond._top_expr = True
node.body._top_expr = True
node.orelse._top_expr = True
cond = s.visit_expr_wrap( node.cond )
true = s.visit( node.body )
false = s.visit( node.orelse )
return f'{cond} ? {true} : {false}'
#-----------------------------------------------------------------------
# visit_UnaryOp
#-----------------------------------------------------------------------
def visit_UnaryOp( s, node ):
node.operand._top_expr = True
op = s.ops[ type( node.op ) ]
operand = s.visit_expr_wrap( node.operand )
return f'{op}{operand}'
#-----------------------------------------------------------------------
# visit_BoolOp
#-----------------------------------------------------------------------
# def visit_BoolOp( s, node ):
# for value in node.values:
# value._top_expr = True
# op = s.ops[ type( node.op ) ]
# values = []
# for value in node.values:
# values.append( s.visit_expr_wrap( value ) )
# src = f' {op} '.join( values )
# return src
#-----------------------------------------------------------------------
# visit_BinOp
#-----------------------------------------------------------------------
def visit_BinOp( s, node ):
node.left._top_expr = True
node.right._top_expr = True
op = s.ops[ type( node.op ) ]
lhs = s.visit_expr_wrap( node.left )
rhs = s.visit_expr_wrap( node.right )
return f'{lhs} {op} {rhs}'
#-----------------------------------------------------------------------
# visit_Compare
#-----------------------------------------------------------------------
def visit_Compare( s, node ):
node.left._top_expr = True
node.right._top_expr = True
op = s.ops[ type( node.op ) ]
lhs = s.visit_expr_wrap( node.left )
rhs = s.visit_expr_wrap( node.right )
return f'{lhs} {op} {rhs}'
#-----------------------------------------------------------------------
# visit_LoopVar
#-----------------------------------------------------------------------
def visit_LoopVar( s, node ):
s.check_res( node, node.name )
nbits = node.Type.get_dtype().get_length()
return f"{nbits}'({node.name})"
#-----------------------------------------------------------------------
# visit_TmpVar
#-----------------------------------------------------------------------
def visit_TmpVar( s, node ):
tmpvar = f"__tmpvar__{node.upblk_name}_{node.name}"
if not node._is_explicit and not s.is_assign_LHS:
nbits = node.Type.get_dtype().get_length()
return f"{nbits}'({tmpvar})"
else:
return tmpvar
#-----------------------------------------------------------------------
# visit_LoopVarDecl
#-----------------------------------------------------------------------
def visit_LoopVarDecl( s, node ):
s.check_res( node, node.name )
return node.name
| en | 0.144218 | #========================================================================= # VBehavioralTranslatorL2.py #========================================================================= # Author : <NAME> # Date : March 18, 2019 Provide the level 2 SystemVerilog translator implementation. #------------------------------------------------------------------------- # BehavioralRTLIRToVVisitorL2 #------------------------------------------------------------------------- Visitor that translates RTLIR to SystemVerilog for a single upblk. # The dictionary of operator-character pairs # Unary operators # bir.Invert : '~', bir.Not : '!', bir.UAdd : '+', bir.USub : '-', # Boolean operators # bir.And : '&&', bir.Or : '||', # Binary operators # Comparison operators Return expressions selectively wrapped with brackets. # ( bir.IfExp, bir.UnaryOp, bir.BoolOp, bir.BinOp, bir.Compare ) ): #----------------------------------------------------------------------- # Statements #----------------------------------------------------------------------- # All statement nodes return a list of strings. #----------------------------------------------------------------------- # visit_If #----------------------------------------------------------------------- # Grab condition, if-body, and orelse-body # Assemble the statement, starting with if-body # if len( node.body ) > 1: # If orelse-body is not empty, add it to the list of strings # If an if statement is the only statement in the orelse-body # No indent will be added, also append if-begin to else-begin # Else indent orelse-body #----------------------------------------------------------------------- # visit_For #----------------------------------------------------------------------- # Assemble for statement #----------------------------------------------------------------------- # Expressions #----------------------------------------------------------------------- # All expression nodes return a single string. #----------------------------------------------------------------------- # visit_IfExp #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_UnaryOp #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_BoolOp #----------------------------------------------------------------------- # def visit_BoolOp( s, node ): # for value in node.values: # value._top_expr = True # op = s.ops[ type( node.op ) ] # values = [] # for value in node.values: # values.append( s.visit_expr_wrap( value ) ) # src = f' {op} '.join( values ) # return src #----------------------------------------------------------------------- # visit_BinOp #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_Compare #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_LoopVar #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_TmpVar #----------------------------------------------------------------------- #----------------------------------------------------------------------- # visit_LoopVarDecl #----------------------------------------------------------------------- | 2.214565 | 2 |
mk_release.py | vika-sonne/blender-xray | 0 | 6630020 | #!/usr/bin/env python3
from io_scene_xray import bl_info
from zipfile import ZipFile, ZIP_DEFLATED
from os import path, walk
with ZipFile('blender-xray-' + ('.'.join(map(str, bl_info['version']))) + '.zip', 'w') as z:
z.write('LICENSE', 'io_scene_xray/LICENSE', compress_type=ZIP_DEFLATED)
for root, _, files in walk('io_scene_xray'):
for file in files:
if not file.endswith('.py'):
continue
z.write(path.join(root, file), compress_type=ZIP_DEFLATED)
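# Worked example (illustration only, version number assumed): if
# bl_info['version'] is (2, 4, 0) this produces blender-xray-2.4.0.zip
# containing LICENSE plus every .py file found under io_scene_xray/.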
| #!/usr/bin/env python3
from io_scene_xray import bl_info
from zipfile import ZipFile, ZIP_DEFLATED
from os import path, walk
with ZipFile('blender-xray-' + ('.'.join(map(str, bl_info['version']))) + '.zip', 'w') as z:
z.write('LICENSE', 'io_scene_xray/LICENSE', compress_type=ZIP_DEFLATED)
for root, _, files in walk('io_scene_xray'):
for file in files:
if not file.endswith('.py'):
continue
z.write(path.join(root, file), compress_type=ZIP_DEFLATED)
| fr | 0.221828 | #!/usr/bin/env python3 | 2.256592 | 2 |
ch4/hebberr_combo/hebberr_combo.py | CompCogNeuro/sims | 76 | 6630021 | <gh_stars>10-100
#!/usr/local/bin/pyleabra -i
# Copyright (c) 2019, The Emergent Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# use:
# just type file name to run, or:
# pyleabra -i <file>.py
# hebberr_combo shows how XCal hebbian learning in shallower layers
# of a network can aid an error driven learning network to
# generalize to unseen combinations of patterns.
from leabra import go, leabra, emer, relpos, eplot, env, agg, patgen, prjn, etable, efile, split, etensor, params, netview, rand, erand, gi, giv, pygiv, pyparams, mat32
import importlib as il
import io, sys, getopt
from datetime import datetime, timezone
from enum import Enum
# this will become Sim later..
TheSim = 1
# LogPrec is precision for saving float values in logs
LogPrec = 4
# note: we cannot use methods for callbacks from Go -- must be separate functions
# so below are all the callbacks from the GUI toolbar actions
def InitCB(recv, send, sig, data):
TheSim.Init()
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def TrainCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.Train()
def StopCB(recv, send, sig, data):
TheSim.Stop()
def StepTrialCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.TrainTrial()
TheSim.IsRunning = False
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def StepEpochCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.TrainEpoch()
def StepRunCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.TrainRun()
def TestTrialCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.TestTrial(False)
TheSim.IsRunning = False
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def TestItemCB2(recv, send, sig, data):
win = gi.Window(handle=recv)
vp = win.WinViewport2D()
dlg = gi.Dialog(handle=send)
if sig != gi.DialogAccepted:
return
val = gi.StringPromptDialogValue(dlg)
idxs = TheSim.TestEnv.Table.RowsByString("Name", val, True, True) # contains, ignoreCase
if len(idxs) == 0:
gi.PromptDialog(vp, gi.DlgOpts(Title="Name Not Found", Prompt="No patterns found containing: " + val), True, False, go.nil, go.nil)
else:
if not TheSim.IsRunning:
TheSim.IsRunning = True
print("testing index: %s" % idxs[0])
TheSim.TestItem(idxs[0])
TheSim.IsRunning = False
vp.SetNeedsFullRender()
def TestItemCB(recv, send, sig, data):
win = gi.Window(handle=recv)
gi.StringPromptDialog(win.WinViewport2D(), "", "Test Item",
gi.DlgOpts(Title="Test Item", Prompt="Enter the Name of a given input pattern to test (case insensitive, contains given string."), win, TestItemCB2)
def TestAllCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.RunTestAll()
def ResetRunLogCB(recv, send, sig, data):
TheSim.RunLog.SetNumRows(0)
TheSim.RunPlot.Update()
def NewRndSeedCB(recv, send, sig, data):
TheSim.NewRndSeed()
def ReadmeCB(recv, send, sig, data):
gi.OpenURL("https://github.com/CompCogNeuro/sims/blob/master/ch4/hebberr_combo/README.md")
def UpdtFuncNotRunning(act):
act.SetActiveStateUpdt(not TheSim.IsRunning)
def UpdtFuncRunning(act):
act.SetActiveStateUpdt(TheSim.IsRunning)
############################################
# Enums -- note: must start at 0 for GUI
class PatsType(Enum):
Easy = 0
Hard = 1
Impossible = 2
Lines2 = 3
class LearnType(Enum):
Hebbian = 0
ErrorDriven = 1
ErrorHebbIn = 2
#####################################################
# Sim
class Sim(pygiv.ClassViewObj):
"""
Sim encapsulates the entire simulation model, and we define all the
functionality as methods on this struct. This structure keeps all relevant
state information organized and available without having to pass everything around
as arguments to methods, and provides the core GUI interface (note the view tags
for the fields which provide hints to how things should be displayed).
"""
def __init__(self):
super(Sim, self).__init__()
self.Net = leabra.Network()
self.SetTags("Net", 'view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"')
self.Learn = LearnType.ErrorDriven
self.SetTags("Learn", 'desc:"select which type of learning to use"')
self.Pats = PatsType.Lines2
self.SetTags("Pats", 'desc:"select which type of patterns to use"')
self.Easy = etable.Table()
self.SetTags("Easy", 'view:"no-inline" desc:"easy training patterns -- can be learned with Hebbian"')
self.Hard = etable.Table()
self.SetTags("Hard", 'view:"no-inline" desc:"hard training patterns -- require error-driven"')
self.Impossible = etable.Table()
self.SetTags("Impossible", 'view:"no-inline" desc:"impossible training patterns -- require error-driven + hidden layer"')
self.Lines2 = etable.Table()
self.SetTags("Lines2", 'view:"no-inline" desc:"lines training patterns"')
self.TrnEpcLog = etable.Table()
self.SetTags("TrnEpcLog", 'view:"no-inline" desc:"training epoch-level log data"')
self.TstEpcLog = etable.Table()
self.SetTags("TstEpcLog", 'view:"no-inline" desc:"testing epoch-level log data"')
self.TstTrlLog = etable.Table()
self.SetTags("TstTrlLog", 'view:"no-inline" desc:"testing trial-level log data"')
self.RunLog = etable.Table()
self.SetTags("RunLog", 'view:"no-inline" desc:"summary log of each run"')
self.RunStats = etable.Table()
self.SetTags("RunStats", 'view:"no-inline" desc:"aggregate stats on all runs"')
self.Params = params.Sets()
self.SetTags("Params", 'view:"no-inline" desc:"full collection of param sets"')
self.ParamSet = str()
self.SetTags("ParamSet", 'view:"-" desc:"which set of *additional* parameters to use -- always applies Base and optionaly this next if set"')
self.MaxRuns = int(10)
self.SetTags("MaxRuns", 'desc:"maximum number of model runs to perform"')
self.MaxEpcs = int(100)
self.SetTags("MaxEpcs", 'desc:"maximum number of epochs to run per model run"')
self.NZeroStop = int(20)
self.SetTags("NZeroStop", 'desc:"if a positive number, training will stop after this many epochs with zero SSE"')
self.TrainEnv = env.FixedTable()
self.SetTags("TrainEnv", 'desc:"Training environment -- contains everything about iterating over input / output patterns over training"')
self.TestEnv = env.FixedTable()
self.SetTags("TestEnv", 'desc:"Testing environment -- manages iterating over testing"')
self.Time = leabra.Time()
self.SetTags("Time", 'desc:"leabra timing parameters and state"')
self.ViewOn = True
self.SetTags("ViewOn", 'desc:"whether to update the network view while running"')
self.TrainUpdt = leabra.TimeScales.AlphaCycle
self.SetTags("TrainUpdt", 'desc:"at what time scale to update the display during training? Anything longer than Epoch updates at Epoch in this model"')
self.TestUpdt = leabra.TimeScales.Cycle
self.SetTags("TestUpdt", 'desc:"at what time scale to update the display during testing? Anything longer than Epoch updates at Epoch in this model"')
self.TestInterval = int(5)
self.SetTags("TestInterval", 'desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"')
self.LayStatNms = go.Slice_string(["Input", "Output"])
self.SetTags("LayStatNms", 'desc:"names of layers to collect more detailed stats on (avg act, etc)"')
# statistics: note use float64 as that is best for etable.Table
self.TrlErr = float(0)
self.SetTags("TrlErr", 'inactive:"+" desc:"1 if trial was error, 0 if correct -- based on SSE = 0 (subject to .5 unit-wise tolerance)"')
self.TrlSSE = float(0)
self.SetTags("TrlSSE", 'inactive:"+" desc:"current trial\'s sum squared error"')
self.TrlAvgSSE = float(0)
self.SetTags("TrlAvgSSE", 'inactive:"+" desc:"current trial\'s average sum squared error"')
self.TrlCosDiff = float(0)
self.SetTags("TrlCosDiff", 'inactive:"+" desc:"current trial\'s cosine difference"')
self.EpcSSE = float(0)
self.SetTags("EpcSSE", 'inactive:"+" desc:"last epoch\'s total sum squared error"')
self.EpcAvgSSE = float(0)
self.SetTags("EpcAvgSSE", 'inactive:"+" desc:"last epoch\'s average sum squared error (average over trials, and over units within layer)"')
self.EpcPctErr = float(0)
self.SetTags("EpcPctErr", 'inactive:"+" desc:"last epoch\'s average TrlErr"')
self.EpcPctCor = float(0)
self.SetTags("EpcPctCor", 'inactive:"+" desc:"1 - last epoch\'s average TrlErr"')
self.EpcCosDiff = float(0)
self.SetTags("EpcCosDiff", 'inactive:"+" desc:"last epoch\'s average cosine difference for output layer (a normalized error measure, maximum of 1 when the minus phase exactly matches the plus)"')
self.FirstZero = int(-1)
self.SetTags("FirstZero", 'inactive:"+" desc:"epoch at when SSE first went to zero"')
self.NZero = int(0)
self.SetTags("NZero", 'inactive:"+" desc:"number of epochs in a row with zero SSE"')
# internal state - view:"-"
self.SumErr = float(0)
self.SetTags("SumErr", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumSSE = float(0)
self.SetTags("SumSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumAvgSSE = float(0)
self.SetTags("SumAvgSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumCosDiff = float(0)
self.SetTags("SumCosDiff", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.Win = 0
self.SetTags("Win", 'view:"-" desc:"main GUI window"')
self.NetView = 0
self.SetTags("NetView", 'view:"-" desc:"the network viewer"')
self.ToolBar = 0
self.SetTags("ToolBar", 'view:"-" desc:"the master toolbar"')
self.TrnEpcPlot = 0
self.SetTags("TrnEpcPlot", 'view:"-" desc:"the training epoch plot"')
self.TstEpcPlot = 0
self.SetTags("TstEpcPlot", 'view:"-" desc:"the testing epoch plot"')
self.TstTrlPlot = 0
self.SetTags("TstTrlPlot", 'view:"-" desc:"the test-trial plot"')
self.RunPlot = 0
self.SetTags("RunPlot", 'view:"-" desc:"the run plot"')
self.TrnEpcFile = 0
self.SetTags("TrnEpcFile", 'view:"-" desc:"log file"')
self.RunFile = 0
self.SetTags("RunFile", 'view:"-" desc:"log file"')
self.ValsTsrs = {}
self.SetTags("ValsTsrs", 'view:"-" desc:"for holding layer values"')
self.IsRunning = False
self.SetTags("IsRunning", 'view:"-" desc:"true if sim is running"')
self.StopNow = False
self.SetTags("StopNow", 'view:"-" desc:"flag to stop running"')
self.NeedsNewRun = False
self.SetTags("NeedsNewRun", 'view:"-" desc:"flag to initialize NewRun if last one finished"')
self.RndSeed = int(1)
self.SetTags("RndSeed", 'view:"-" desc:"the current random seed"')
self.vp = 0
self.SetTags("vp", 'view:"-" desc:"viewport"')
def InitParams(ss):
"""
Sets the default set of parameters -- Base is always applied, and others can be optionally
selected to apply on top of that
"""
ss.Params.OpenJSON("hebberr_combo.params")
def Config(ss):
"""
Config configures all the elements using the standard functions
"""
ss.InitParams()
ss.OpenPats()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigTrnEpcLog(ss.TrnEpcLog)
ss.ConfigTstEpcLog(ss.TstEpcLog)
ss.ConfigTstTrlLog(ss.TstTrlLog)
ss.ConfigRunLog(ss.RunLog)
def ConfigEnv(ss):
if ss.MaxRuns == 0:
ss.MaxRuns = 10
if ss.MaxEpcs == 0: # allow user override
ss.MaxEpcs = 100
ss.NZeroStop = 20
ss.TrainEnv.Nm = "TrainEnv"
ss.TrainEnv.Dsc = "training params and state"
ss.TrainEnv.Table = etable.NewIdxView(ss.Easy)
ss.TrainEnv.Validate()
ss.TrainEnv.Run.Max = ss.MaxRuns # note: we are not setting epoch max -- do that manually
ss.TestEnv.Nm = "TestEnv"
ss.TestEnv.Dsc = "testing params and state"
ss.TestEnv.Table = etable.NewIdxView(ss.Easy)
ss.TestEnv.Sequential = True
ss.TestEnv.Validate()
ss.TrainEnv.Init(0)
ss.TestEnv.Init(0)
def UpdateEnv(ss):
if ss.Pats == PatsType.Easy:
ss.TrainEnv.Table = etable.NewIdxView(ss.Easy)
ss.TestEnv.Table = etable.NewIdxView(ss.Easy)
elif ss.Pats == PatsType.Hard:
ss.TrainEnv.Table = etable.NewIdxView(ss.Hard)
ss.TestEnv.Table = etable.NewIdxView(ss.Hard)
elif ss.Pats == PatsType.Impossible:
ss.TrainEnv.Table = etable.NewIdxView(ss.Impossible)
ss.TestEnv.Table = etable.NewIdxView(ss.Impossible)
elif ss.Pats == PatsType.Lines2:
all = etable.NewIdxView(ss.Lines2)
splits = split.Permuted(all, go.Slice_float64([.9, .1]), go.Slice_string(["Train", "Test"]))
ss.TrainEnv.Table = splits.Splits[0]
ss.TestEnv.Table = splits.Splits[1]
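# Note for illustration (not in the original source): split.Permuted shuffles
# the Lines2 rows and deals them into two index views, roughly 90% for
# training and 10% held out for testing, so the test items are line
# combinations the network never saw during training -- that held-out set is
# what the generalization measure is based on. Exact row counts depend on the
# random seed and the size of lines2out1.tsv.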
def ConfigNet(ss, net):
net.InitName(net, "PatAssoc")
inp = net.AddLayer2D("Input", 5, 5, emer.Input)
hid = net.AddLayer2D("Hidden", 6, 5, emer.Hidden)
out = net.AddLayer2D("Output", 5, 2, emer.Target)
out.SetClass("Output")
full = prjn.NewFull()
inhid = net.ConnectLayers(inp, hid, full, emer.Forward)
inhid.SetClass("InputToHidden")
net.BidirConnectLayersPy(hid, out, full)
hid.SetRelPos(relpos.Rel(Rel= relpos.Above, Other= "Input", YAlign= relpos.Front, XAlign= relpos.Left, YOffset= 1))
net.Defaults()
ss.SetParams("Network", False) # only set Network params
net.Build()
net.InitWts()
def Init(ss):
"""
Init restarts the run, and initializes everything, including network weights
and resets the epoch log table
"""
rand.Seed(ss.RndSeed)
ss.UpdateEnv()
ss.StopNow = False
ss.SetParams("", False)
ss.NewRun()
ss.UpdateView(True)
def NewRndSeed(ss):
"""
NewRndSeed gets a new random seed based on current time -- otherwise uses
the same random seed for every run
"""
ss.RndSeed = int(datetime.now(timezone.utc).timestamp())
def Counters(ss, train):
"""
Counters returns a string of the current counter state
use tabs to achieve a reasonable formatting overall
and add a few tabs at the end to allow for expansion..
"""
if train:
return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (ss.TrainEnv.Run.Cur, ss.TrainEnv.Epoch.Cur, ss.TrainEnv.Trial.Cur, ss.Time.Cycle, ss.TrainEnv.TrialName.Cur)
else:
return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (ss.TrainEnv.Run.Cur, ss.TrainEnv.Epoch.Cur, ss.TestEnv.Trial.Cur, ss.Time.Cycle, ss.TestEnv.TrialName.Cur)
def UpdateView(ss, train):
if ss.NetView != 0 and ss.NetView.IsVisible():
ss.NetView.Record(ss.Counters(train))
ss.NetView.GoUpdate()
def AlphaCyc(ss, train):
"""
AlphaCyc runs one alpha-cycle (100 msec, 4 quarters) of processing.
External inputs must have already been applied prior to calling,
using ApplyExt method on relevant layers (see TrainTrial, TestTrial).
If train is true, then learning DWt or WtFmDWt calls are made.
Handles netview updating within scope of AlphaCycle
"""
if ss.Win != 0:
ss.Win.PollEvents() # this is essential for GUI responsiveness while running
viewUpdt = ss.TrainUpdt.value
if not train:
viewUpdt = ss.TestUpdt.value
# update prior weight changes at start, so any DWt values remain visible at end
# you might want to do this less frequently to achieve a mini-batch update
# in which case, move it out to the TrainTrial method where the relevant
# counters are being dealt with.
if train:
ss.Net.WtFmDWt()
ss.Net.AlphaCycInit(train)
ss.Time.AlphaCycStart()
for qtr in range(4):
for cyc in range(ss.Time.CycPerQtr):
ss.Net.Cycle(ss.Time)
ss.Time.CycleInc()
if ss.ViewOn:
if viewUpdt == leabra.Cycle:
if cyc != ss.Time.CycPerQtr-1: # will be updated by quarter
ss.UpdateView(train)
elif viewUpdt == leabra.FastSpike:
if (cyc+1)%10 == 0:
ss.UpdateView(train)
ss.Net.QuarterFinal(ss.Time)
ss.Time.QuarterInc()
if ss.ViewOn:
if viewUpdt == leabra.Quarter:
ss.UpdateView(train)
if viewUpdt == leabra.Phase:
if qtr >= 2:
ss.UpdateView(train)
if train:
ss.Net.DWt()
if ss.ViewOn and viewUpdt == leabra.AlphaCycle:
ss.UpdateView(train)
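# Rough timing sketch (illustration only, not part of the original file): one
# AlphaCyc call runs 4 quarters of Time.CycPerQtr cycles each (25 by default
# in standard leabra settings -- an assumption here), i.e. about 100 cycles /
# 100 msec of simulated time; the first 3 quarters form the minus phase, the
# 4th quarter is the plus phase, and DWt is computed afterwards when train
# is True.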
def ApplyInputs(ss, en):
"""
ApplyInputs applies input patterns from the given environment.
It is good practice to have this be a separate method with appropriate
args so that it can be used for various different contexts
(training, testing, etc).
"""
ss.Net.InitExt() # clear any existing inputs -- not strictly necessary if always
# going to the same layers, but good practice and cheap anyway
lays = ["Input", "Output"]
for lnm in lays :
ly = leabra.Layer(ss.Net.LayerByName(lnm))
pats = en.State(ly.Nm)
if pats != 0:
ly.ApplyExt(pats)
def TrainTrial(ss):
"""
TrainTrial runs one trial of training using TrainEnv
"""
if ss.NeedsNewRun:
ss.NewRun()
ss.TrainEnv.Step()
# Key to query counters FIRST because current state is in NEXT epoch
# if epoch counter has changed
epc = env.CounterCur(ss.TrainEnv, env.Epoch)
chg = env.CounterChg(ss.TrainEnv, env.Epoch)
if chg:
ss.LogTrnEpc(ss.TrnEpcLog)
if ss.ViewOn and ss.TrainUpdt.value > leabra.AlphaCycle:
ss.UpdateView(True)
if ss.TestInterval > 0 and epc%ss.TestInterval == 0: # note: epc is *next* so won't trigger first time
ss.TestAll()
if epc >= ss.MaxEpcs or (ss.NZeroStop > 0 and ss.NZero >= ss.NZeroStop):
# done with training..
ss.RunEnd()
if ss.TrainEnv.Run.Incr(): # we are done!
ss.StopNow = True
return
else:
ss.NeedsNewRun = True
return
# note: type must be in place before apply inputs
ss.Net.LayerByName("Output").SetType(emer.Target)
ss.ApplyInputs(ss.TrainEnv)
ss.AlphaCyc(True) # train
ss.TrialStats(True) # accumulate
def RunEnd(ss):
"""
RunEnd is called at the end of a run -- save weights, record final log, etc here
"""
ss.LogRun(ss.RunLog)
def NewRun(ss):
"""
NewRun initializes a new run of the model, using the TrainEnv.Run counter
for the new run value
"""
run = ss.TrainEnv.Run.Cur
ss.UpdateEnv()
ss.TrainEnv.Init(run)
ss.TestEnv.Init(run)
ss.Time.Reset()
ss.Net.InitWts()
ss.InitStats()
ss.TrnEpcLog.SetNumRows(0)
ss.TstEpcLog.SetNumRows(0)
ss.NeedsNewRun = False
ss.TrainEnv.Run.Max = ss.MaxRuns
def InitStats(ss):
"""
InitStats initializes all the statistics, especially important for the
cumulative epoch stats -- called at start of new run
"""
ss.SumSSE = 0
ss.SumAvgSSE = 0
ss.SumCosDiff = 0
ss.SumErr = 0
ss.FirstZero = -1
ss.NZero = 0
ss.TrlErr = 0
ss.TrlSSE = 0
ss.TrlAvgSSE = 0
ss.EpcSSE = 0
ss.EpcAvgSSE = 0
ss.EpcPctErr = 0
ss.EpcCosDiff = 0
def TrialStats(ss, accum):
"""
TrialStats computes the trial-level statistics and adds them to the epoch accumulators if
accum is true. Note that we're accumulating stats here on the Sim side so the
core algorithm side remains as simple as possible, and doesn't need to worry about
different time-scales over which stats could be accumulated etc.
You can also aggregate directly from log data, as is done for testing stats
"""
out = leabra.Layer(ss.Net.LayerByName("Output"))
ss.TrlCosDiff = float(out.CosDiff.Cos)
ss.TrlSSE = out.SSE(0.5) # 0.5 = per-unit tolerance -- right side of .5
ss.TrlAvgSSE = ss.TrlSSE / len(out.Neurons)
if ss.TrlSSE > 0:
ss.TrlErr = 1
else:
ss.TrlErr = 0
if accum:
ss.SumErr += ss.TrlErr
ss.SumSSE += ss.TrlSSE
ss.SumAvgSSE += ss.TrlAvgSSE
ss.SumCosDiff += ss.TrlCosDiff
return
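# Worked example (illustration only): out.SSE(0.5) applies a per-unit
# tolerance of 0.5, so an output unit at 0.8 with target 1.0 contributes 0
# (|1.0 - 0.8| < 0.5) while a unit at 0.2 contributes (1.0 - 0.2)^2 = 0.64.
# A trial therefore counts as an error (TrlErr = 1) only when at least one
# unit is off from its target by 0.5 or more.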
def TrainEpoch(ss):
"""
TrainEpoch runs training trials for remainder of this epoch
"""
ss.StopNow = False
curEpc = ss.TrainEnv.Epoch.Cur
while True:
ss.TrainTrial()
if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:
break
ss.Stopped()
def TrainRun(ss):
"""
TrainRun runs training trials for remainder of run
"""
ss.StopNow = False
curRun = ss.TrainEnv.Run.Cur
while True:
ss.TrainTrial()
if ss.StopNow or ss.TrainEnv.Run.Cur != curRun:
break
ss.Stopped()
def Train(ss):
"""
Train runs the full training from this point onward
"""
ss.StopNow = False
while True:
ss.TrainTrial()
if ss.StopNow:
break
ss.Stopped()
def Stop(ss):
"""
Stop tells the sim to stop running
"""
ss.StopNow = True
def Stopped(ss):
"""
Stopped is called when a run method stops running -- updates the IsRunning flag and toolbar
"""
ss.IsRunning = False
if ss.Win != 0:
vp = ss.Win.WinViewport2D()
if ss.ToolBar != 0:
ss.ToolBar.UpdateActions()
vp.SetNeedsFullRender()
ss.UpdateClassView()
def SaveWeights(ss, filename):
"""
SaveWeights saves the network weights -- when called with giv.CallMethod
it will auto-prompt for filename
"""
ss.Net.SaveWtsJSON(filename)
def TestTrial(ss, returnOnChg):
"""
TestTrial runs one trial of testing -- always sequentially presented inputs
"""
ss.TestEnv.Step()
chg = env.CounterChg(ss.TestEnv, env.Epoch)
if chg:
if ss.ViewOn and ss.TestUpdt.value > leabra.AlphaCycle:
ss.UpdateView(False)
ss.LogTstEpc(ss.TstEpcLog)
if returnOnChg:
return
ss.Net.LayerByName("Output").SetType(emer.Compare)
ss.ApplyInputs(ss.TestEnv)
ss.AlphaCyc(False)
ss.TrialStats(False)
ss.LogTstTrl(ss.TstTrlLog)
def TestItem(ss, idx):
"""
TestItem tests given item which is at given index in test item list
"""
cur = ss.TestEnv.Trial.Cur
ss.TestEnv.Trial.Cur = idx
ss.TestEnv.SetTrialName()
ss.Net.LayerByName("Output").SetType(emer.Compare)
ss.ApplyInputs(ss.TestEnv)
ss.AlphaCyc(False)
ss.TrialStats(False)
ss.TestEnv.Trial.Cur = cur
def TestAll(ss):
"""
TestAll runs through the full set of testing items
"""
ss.TestEnv.Init(ss.TrainEnv.Run.Cur)
while True:
ss.TestTrial(True)
chg = env.CounterChg(ss.TestEnv, env.Epoch)
if chg or ss.StopNow:
break
def RunTestAll(ss):
"""
RunTestAll runs through the full set of testing items, has stop running = false at end -- for gui
"""
ss.StopNow = False
ss.TestAll()
ss.Stopped()
def SetParams(ss, sheet, setMsg):
"""
SetParams sets the params for "Base" and then current ParamSet.
If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
otherwise just the named sheet
if setMsg = true then we output a message for each param that was set.
"""
if sheet == "":
ss.Params.ValidateSheets(go.Slice_string(["Network", "Sim"]))
ss.SetParamsSet("Base", sheet, setMsg)
if ss.ParamSet != "" and ss.ParamSet != "Base":
sps = ss.ParamSet.split()
for ps in sps:
ss.SetParamsSet(ps, sheet, setMsg)
if ss.Learn == LearnType.Hebbian:
ss.SetParamsSet("Hebbian", sheet, setMsg)
elif ss.Learn == LearnType.ErrorDriven:
ss.SetParamsSet("ErrorDriven", sheet, setMsg)
elif ss.Learn == LearnType.ErrorHebbIn:
ss.SetParamsSet("ErrorHebbIn", sheet, setMsg)
def SetParamsSet(ss, setNm, sheet, setMsg):
"""
SetParamsSet sets the params for given params.Set name.
If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
otherwise just the named sheet
if setMsg = true then we output a message for each param that was set.
"""
pset = ss.Params.SetByNameTry(setNm)
if sheet == "" or sheet == "Network":
if "Network" in pset.Sheets:
netp = pset.SheetByNameTry("Network")
ss.Net.ApplyParams(netp, setMsg)
if sheet == "" or sheet == "Sim":
if "Sim" in pset.Sheets:
simp= pset.SheetByNameTry("Sim")
pyparams.ApplyParams(ss, simp, setMsg)
def OpenPats(ss):
ss.Easy.SetMetaData("name", "Easy")
ss.Easy.SetMetaData("desc", "Easy Training patterns")
ss.Easy.OpenCSV("easy.tsv", etable.Tab)
ss.Hard.SetMetaData("name", "Hard")
ss.Hard.SetMetaData("desc", "Hard Training patterns")
ss.Hard.OpenCSV("hard.tsv", etable.Tab)
ss.Impossible.SetMetaData("name", "Impossible")
ss.Impossible.SetMetaData("desc", "Impossible Training patterns")
ss.Impossible.OpenCSV("impossible.tsv", etable.Tab)
ss.Lines2.SetMetaData("name", "Lines2")
ss.Lines2.SetMetaData("desc", "Lines2 Training patterns")
ss.Lines2.OpenCSV("lines2out1.tsv", etable.Tab)
def ValsTsr(ss, name):
"""
ValsTsr gets value tensor of given name, creating if not yet made
"""
if name in ss.ValsTsrs:
return ss.ValsTsrs[name]
tsr = etensor.Float32()
ss.ValsTsrs[name] = tsr
return tsr
def LogTrnEpc(ss, dt):
"""
LogTrnEpc adds data from current epoch to the TrnEpcLog table.
Computes epoch averages prior to logging.
"""
row = dt.Rows
dt.SetNumRows(row + 1)
epc = ss.TrainEnv.Epoch.Prv # this is triggered by increment so use previous value
nt = float(ss.TrainEnv.Table.Len()) # number of trials in view
ss.EpcSSE = ss.SumSSE / nt
ss.SumSSE = 0
ss.EpcAvgSSE = ss.SumAvgSSE / nt
ss.SumAvgSSE = 0
ss.EpcPctErr = float(ss.SumErr) / nt
ss.SumErr = 0
ss.EpcPctCor = 1 - ss.EpcPctErr
ss.EpcCosDiff = ss.SumCosDiff / nt
ss.SumCosDiff = 0
if ss.FirstZero < 0 and ss.EpcPctErr == 0:
ss.FirstZero = epc
if ss.EpcPctErr == 0:
ss.NZero += 1
else:
ss.NZero = 0
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("SSE", row, ss.EpcSSE)
dt.SetCellFloat("AvgSSE", row, ss.EpcAvgSSE)
dt.SetCellFloat("PctErr", row, ss.EpcPctErr)
dt.SetCellFloat("PctCor", row, ss.EpcPctCor)
dt.SetCellFloat("CosDiff", row, ss.EpcCosDiff)
for lnm in ss.LayStatNms:
ly = leabra.Layer(ss.Net.LayerByName(lnm))
dt.SetCellFloat(ly.Nm+" ActAvg", row, float(ly.Pool(0).ActAvg.ActPAvgEff))
# note: essential to use Go version of update when called from another goroutine
ss.TrnEpcPlot.GoUpdate()
if ss.TrnEpcFile != 0:
if ss.TrainEnv.Run.Cur == 0 and epc == 0:
dt.WriteCSVHeaders(ss.TrnEpcFile, etable.Tab)
dt.WriteCSVRow(ss.TrnEpcFile, row, etable.Tab)
def ConfigTrnEpcLog(ss, dt):
dt.SetMetaData("name", "TrnEpcLog")
dt.SetMetaData("desc", "Record of performance over epochs of training")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
for lnm in ss.LayStatNms:
sch.append(etable.Column(lnm + " ActAvg", etensor.FLOAT64, go.nil, go.nil))
dt.SetFromSchema(sch, 0)
def ConfigTrnEpcPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Epoch Plot"
plt.Params.XAxisCol = "Epoch"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
for lnm in ss.LayStatNms:
plt.SetColParams(lnm+" ActAvg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 0.5)
return plt
def LogTstTrl(ss, dt):
"""
LogTstTrl adds data from current trial to the TstTrlLog table.
The log always contains the number of testing items.
"""
epc = ss.TrainEnv.Epoch.Prv # this is triggered by increment so use previous value
inp = leabra.Layer(ss.Net.LayerByName("Input"))
out = leabra.Layer(ss.Net.LayerByName("Output"))
trl = ss.TestEnv.Trial.Cur
row = trl
if dt.Rows <= row:
dt.SetNumRows(row + 1)
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("Trial", row, float(trl))
dt.SetCellString("TrialName", row, ss.TestEnv.TrialName.Cur)
dt.SetCellFloat("Err", row, ss.TrlErr)
dt.SetCellFloat("SSE", row, ss.TrlSSE)
dt.SetCellFloat("AvgSSE", row, ss.TrlAvgSSE)
dt.SetCellFloat("CosDiff", row, ss.TrlCosDiff)
for lnm in ss.LayStatNms:
ly = leabra.Layer(ss.Net.LayerByName(lnm))
dt.SetCellFloat(ly.Nm+" ActM.Avg", row, float(ly.Pool(0).ActM.Avg))
ivt = ss.ValsTsr("Input")
ovt = ss.ValsTsr("Output")
inp.UnitValsTensor(ivt, "Act")
dt.SetCellTensor("InAct", row, ivt)
out.UnitValsTensor(ovt, "ActM")
dt.SetCellTensor("OutActM", row, ovt)
out.UnitValsTensor(ovt, "Targ")
dt.SetCellTensor("OutTarg", row, ovt)
# note: essential to use Go version of update when called from another goroutine
ss.TstTrlPlot.GoUpdate()
def ConfigTstTrlLog(ss, dt):
inp = leabra.Layer(ss.Net.LayerByName("Input"))
out = leabra.Layer(ss.Net.LayerByName("Output"))
dt.SetMetaData("name", "TstTrlLog")
dt.SetMetaData("desc", "Record of testing per input pattern")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
nt = ss.TestEnv.Table.Len() # number in view
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("Trial", etensor.INT64, go.nil, go.nil),
etable.Column("TrialName", etensor.STRING, go.nil, go.nil),
etable.Column("Err", etensor.FLOAT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
for lnm in ss.LayStatNms:
sch.append(etable.Column(lnm + " ActM.Avg", etensor.FLOAT64, go.nil, go.nil))
sch.append(etable.Column("InAct", etensor.FLOAT64, inp.Shp.Shp, go.nil))
sch.append(etable.Column("OutActM", etensor.FLOAT64, out.Shp.Shp, go.nil))
sch.append(etable.Column("OutTarg", etensor.FLOAT64, out.Shp.Shp, go.nil))
dt.SetFromSchema(sch, nt)
def ConfigTstTrlPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Test Trial Plot"
plt.Params.XAxisCol = "Trial"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Trial", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("TrialName", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Err", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
for lnm in ss.LayStatNms:
plt.SetColParams(lnm+" ActM.Avg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 0.5)
plt.SetColParams("InAct", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("OutActM", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("OutTarg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def LogTstEpc(ss, dt):
row = dt.Rows
dt.SetNumRows(row + 1)
trl = ss.TstTrlLog
tix = etable.NewIdxView(trl)
epc = ss.TrainEnv.Epoch.Prv # ?
# note: this shows how to use agg methods to compute summary data from another
# data table, instead of incrementing on the Sim
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("SSE", row, agg.Sum(tix, "SSE")[0])
dt.SetCellFloat("AvgSSE", row, agg.Mean(tix, "AvgSSE")[0])
dt.SetCellFloat("PctErr", row, agg.Mean(tix, "Err")[0])
dt.SetCellFloat("PctCor", row, 1-agg.Mean(tix, "Err")[0])
dt.SetCellFloat("CosDiff", row, agg.Mean(tix, "CosDiff")[0])
# note: essential to use Go version of update when called from another goroutine
if ss.TstEpcPlot != 0:
ss.TstEpcPlot.GoUpdate()
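# Usage note (illustration, not from the original source): the agg functions
# take an etable.IdxView plus a column name and return a slice of aggregates,
# e.g.
#   tix = etable.NewIdxView(ss.TstTrlLog)
#   mean_sse = agg.Mean(tix, "SSE")[0]
# which is the pattern used above to summarize the trial log into a single
# epoch-level row instead of accumulating counters on the Sim.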
def ConfigTstEpcLog(ss, dt):
dt.SetMetaData("name", "TstEpcLog")
dt.SetMetaData("desc", "Summary stats for testing trials")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
dt.SetFromSchema(sch, 0)
def ConfigTstEpcPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Testing Epoch Plot"
plt.Params.XAxisCol = "Epoch"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.On, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def LogRun(ss, dt):
"""
LogRun adds data from current run to the RunLog table.
"""
run = ss.TrainEnv.Run.Cur # this is NOT triggered by increment yet -- use Cur
row = dt.Rows
dt.SetNumRows(row + 1)
epclog = ss.TstEpcLog
epcix = etable.NewIdxView(epclog)
# compute mean over last N epochs for run level
nlast = 5
if nlast > epcix.Len()-1:
nlast = epcix.Len() - 1
epcix.Idxs = go.Slice_int(epcix.Idxs[epcix.Len()-nlast:])
params = ss.Learn.name + "_" + ss.Pats.name
dt.SetCellFloat("Run", row, float(run))
dt.SetCellString("Params", row, params)
dt.SetCellFloat("FirstZero", row, float(ss.FirstZero))
dt.SetCellFloat("SSE", row, agg.Mean(epcix, "SSE")[0])
dt.SetCellFloat("AvgSSE", row, agg.Mean(epcix, "AvgSSE")[0])
dt.SetCellFloat("PctErr", row, agg.Mean(epcix, "PctErr")[0])
dt.SetCellFloat("PctCor", row, agg.Mean(epcix, "PctCor")[0])
dt.SetCellFloat("CosDiff", row, agg.Mean(epcix, "CosDiff")[0])
runix = etable.NewIdxView(dt)
spl = split.GroupBy(runix, go.Slice_string(["Params"]))
split.Desc(spl, "FirstZero")
split.Desc(spl, "PctCor")
split.Desc(spl, "SSE")
ss.RunStats = spl.AggsToTable(etable.AddAggName)
# note: essential to use Go version of update when called from another goroutine
if ss.RunPlot != 0:
ss.RunPlot.GoUpdate()
if ss.RunFile != 0:
if row == 0:
dt.WriteCSVHeaders(ss.RunFile, etable.Tab)
dt.WriteCSVRow(ss.RunFile, row, etable.Tab)
def ConfigRunLog(ss, dt):
dt.SetMetaData("name", "RunLog")
dt.SetMetaData("desc", "Record of performance at end of training")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Params", etensor.STRING, go.nil, go.nil),
etable.Column("FirstZero", etensor.FLOAT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
dt.SetFromSchema(sch, 0)
def ConfigRunPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Run Plot"
plt.Params.XAxisCol = "Run"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("FirstZero", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.On, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def ConfigNetView(ss, nv):
nv.ViewDefaults()
nv.Scene().Camera.Pose.Pos.Set(0.2, 1.27, 2.62)
nv.Scene().Camera.LookAt(mat32.Vec3(0.2, 0, 0), mat32.Vec3(0, 1, 0))
def ConfigGui(ss):
"""
ConfigGui configures the GoGi GUI interface for this simulation.
"""
width = 1600
height = 1200
gi.SetAppName("hebberr_combo")
gi.SetAppAbout('<p>Shows how XCal Hebbian learning in shallower layers of a network can aid an error-driven learning network to generalize to unseen combinations of patterns. See <a href="https://github.com/CompCogNeuro/sims/blob/master/ch4/hebberr_combo/README.md">README.md on GitHub</a>.</p>')
win = gi.NewMainWindow("hebberr_combo", "Hebbian+Error Driven Learning", width, height)
ss.Win = win
vp = win.WinViewport2D()
ss.vp = vp
updt = vp.UpdateStart()
mfr = win.SetMainFrame()
tbar = gi.AddNewToolBar(mfr, "tbar")
tbar.SetStretchMaxWidth()
ss.ToolBar = tbar
split = gi.AddNewSplitView(mfr, "split")
split.Dim = mat32.X
split.SetStretchMaxWidth()
split.SetStretchMaxHeight()
cv = ss.NewClassView("sv")
cv.AddFrame(split)
cv.Config()
tv = gi.AddNewTabView(split, "tv")
nv = netview.NetView()
tv.AddTab(nv, "NetView")
nv.Var = "Act"
nv.SetNet(ss.Net)
ss.NetView = nv
ss.ConfigNetView(nv)
plt = eplot.Plot2D()
tv.AddTab(plt, "TrnEpcPlot")
ss.TrnEpcPlot = ss.ConfigTrnEpcPlot(plt, ss.TrnEpcLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "TstTrlPlot")
ss.TstTrlPlot = ss.ConfigTstTrlPlot(plt, ss.TstTrlLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "TstEpcPlot")
ss.TstEpcPlot = ss.ConfigTstEpcPlot(plt, ss.TstEpcLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "RunPlot")
ss.RunPlot = ss.ConfigRunPlot(plt, ss.RunLog)
split.SetSplitsList(go.Slice_float32([.2, .8]))
recv = win.This()
tbar.AddAction(gi.ActOpts(Label="Init", Icon="update", Tooltip="Initialize everything including network weights, and start over. Also applies current params.", UpdateFunc=UpdtFuncNotRunning), recv, InitCB)
tbar.AddAction(gi.ActOpts(Label="Train", Icon="run", Tooltip="Starts the network training, picking up from wherever it may have left off. If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.", UpdateFunc=UpdtFuncNotRunning), recv, TrainCB)
tbar.AddAction(gi.ActOpts(Label="Stop", Icon="stop", Tooltip="Interrupts running. Hitting Train again will pick back up where it left off.", UpdateFunc=UpdtFuncRunning), recv, StopCB)
tbar.AddAction(gi.ActOpts(Label="Step Trial", Icon="step-fwd", Tooltip="Advances one training trial at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepTrialCB)
tbar.AddAction(gi.ActOpts(Label="Step Epoch", Icon="fast-fwd", Tooltip="Advances one epoch (complete set of training patterns) at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepEpochCB)
tbar.AddAction(gi.ActOpts(Label="Step Run", Icon="fast-fwd", Tooltip="Advances one full training Run at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepRunCB)
tbar.AddSeparator("test")
tbar.AddAction(gi.ActOpts(Label="Test Trial", Icon="step-fwd", Tooltip="Runs the next testing trial.", UpdateFunc=UpdtFuncNotRunning), recv, TestTrialCB)
tbar.AddAction(gi.ActOpts(Label="Test Item", Icon="step-fwd", Tooltip="Prompts for a specific input pattern name to run, and runs it in testing mode.", UpdateFunc=UpdtFuncNotRunning), recv, TestItemCB)
tbar.AddAction(gi.ActOpts(Label="Test All", Icon="fast-fwd", Tooltip="Tests all of the testing trials.", UpdateFunc=UpdtFuncNotRunning), recv, TestAllCB)
tbar.AddSeparator("log")
tbar.AddAction(gi.ActOpts(Label="Reset RunLog", Icon="reset", Tooltip="Resets the accumulated log of all Runs, which are tagged with the ParamSet used"), recv, ResetRunLogCB)
tbar.AddSeparator("misc")
tbar.AddAction(gi.ActOpts(Label="New Seed", Icon="new", Tooltip="Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time."), recv, NewRndSeedCB)
tbar.AddAction(gi.ActOpts(Label="README", Icon="file-markdown", Tooltip="Opens your browser on the README file that contains instructions for how to run this model."), recv, ReadmeCB)
# main menu
appnm = gi.AppName()
mmen = win.MainMenu
mmen.ConfigMenus(go.Slice_string([appnm, "File", "Edit", "Window"]))
amen = gi.Action(win.MainMenu.ChildByName(appnm, 0))
amen.Menu.AddAppMenu(win)
emen = gi.Action(win.MainMenu.ChildByName("Edit", 1))
emen.Menu.AddCopyCutPaste(win)
win.MainMenuUpdated()
vp.UpdateEndNoSig(updt)
win.GoStartEventLoop()
# TheSim is the overall state for this simulation
TheSim = Sim()
def main(argv):
TheSim.Config()
TheSim.ConfigGui()
TheSim.Init()
main(sys.argv[1:])
| #!/usr/local/bin/pyleabra -i
# Copyright (c) 2019, The Emergent Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# use:
# just type file name to run, or:
# pyleabra -i <file>.py
# hebberr_combo shows how XCal hebbian learning in shallower layers
# of a network can aid an error driven learning network to
# generalize to unseen combinations of patterns.
from leabra import go, leabra, emer, relpos, eplot, env, agg, patgen, prjn, etable, efile, split, etensor, params, netview, rand, erand, gi, giv, pygiv, pyparams, mat32
import importlib as il
import io, sys, getopt
from datetime import datetime, timezone
from enum import Enum
# this will become Sim later..
TheSim = 1
# LogPrec is precision for saving float values in logs
LogPrec = 4
# note: we cannot use methods for callbacks from Go -- must be separate functions
# so below are all the callbacks from the GUI toolbar actions
def InitCB(recv, send, sig, data):
TheSim.Init()
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def TrainCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.Train()
def StopCB(recv, send, sig, data):
TheSim.Stop()
def StepTrialCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.TrainTrial()
TheSim.IsRunning = False
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def StepEpochCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.TrainEpoch()
def StepRunCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.TrainRun()
def TestTrialCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.TestTrial(False)
TheSim.IsRunning = False
TheSim.UpdateClassView()
TheSim.vp.SetNeedsFullRender()
def TestItemCB2(recv, send, sig, data):
win = gi.Window(handle=recv)
vp = win.WinViewport2D()
dlg = gi.Dialog(handle=send)
if sig != gi.DialogAccepted:
return
val = gi.StringPromptDialogValue(dlg)
idxs = TheSim.TestEnv.Table.RowsByString("Name", val, True, True) # contains, ignoreCase
if len(idxs) == 0:
gi.PromptDialog(vp, gi.DlgOpts(Title="Name Not Found", Prompt="No patterns found containing: " + val), True, False, go.nil, go.nil)
else:
if not TheSim.IsRunning:
TheSim.IsRunning = True
print("testing index: %s" % idxs[0])
TheSim.TestItem(idxs[0])
TheSim.IsRunning = False
vp.SetNeedsFullRender()
def TestItemCB(recv, send, sig, data):
win = gi.Window(handle=recv)
gi.StringPromptDialog(win.WinViewport2D(), "", "Test Item",
gi.DlgOpts(Title="Test Item", Prompt="Enter the Name of a given input pattern to test (case insensitive, contains given string)."), win, TestItemCB2)
def TestAllCB(recv, send, sig, data):
if not TheSim.IsRunning:
TheSim.IsRunning = True
TheSim.ToolBar.UpdateActions()
TheSim.RunTestAll()
def ResetRunLogCB(recv, send, sig, data):
TheSim.RunLog.SetNumRows(0)
TheSim.RunPlot.Update()
def NewRndSeedCB(recv, send, sig, data):
TheSim.NewRndSeed()
def ReadmeCB(recv, send, sig, data):
gi.OpenURL("https://github.com/CompCogNeuro/sims/blob/master/ch4/hebberr_combo/README.md")
def UpdtFuncNotRunning(act):
act.SetActiveStateUpdt(not TheSim.IsRunning)
def UpdtFuncRunning(act):
act.SetActiveStateUpdt(TheSim.IsRunning)
############################################
# Enums -- note: must start at 0 for GUI
class PatsType(Enum):
Easy = 0
Hard = 1
Impossible = 2
Lines2 = 3
class LearnType(Enum):
Hebbian = 0
ErrorDriven = 1
ErrorHebbIn = 2
#####################################################
# Sim
class Sim(pygiv.ClassViewObj):
"""
Sim encapsulates the entire simulation model, and we define all the
functionality as methods on this struct. This structure keeps all relevant
state information organized and available without having to pass everything around
as arguments to methods, and provides the core GUI interface (note the view tags
for the fields which provide hints to how things should be displayed).
"""
def __init__(self):
super(Sim, self).__init__()
self.Net = leabra.Network()
self.SetTags("Net", 'view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"')
self.Learn = LearnType.ErrorDriven
self.SetTags("Learn", 'desc:"select which type of learning to use"')
self.Pats = PatsType.Lines2
self.SetTags("Pats", 'desc:"select which type of patterns to use"')
self.Easy = etable.Table()
self.SetTags("Easy", 'view:"no-inline" desc:"easy training patterns -- can be learned with Hebbian"')
self.Hard = etable.Table()
self.SetTags("Hard", 'view:"no-inline" desc:"hard training patterns -- require error-driven"')
self.Impossible = etable.Table()
self.SetTags("Impossible", 'view:"no-inline" desc:"impossible training patterns -- require error-driven + hidden layer"')
self.Lines2 = etable.Table()
self.SetTags("Lines2", 'view:"no-inline" desc:"lines training patterns"')
self.TrnEpcLog = etable.Table()
self.SetTags("TrnEpcLog", 'view:"no-inline" desc:"training epoch-level log data"')
self.TstEpcLog = etable.Table()
self.SetTags("TstEpcLog", 'view:"no-inline" desc:"testing epoch-level log data"')
self.TstTrlLog = etable.Table()
self.SetTags("TstTrlLog", 'view:"no-inline" desc:"testing trial-level log data"')
self.RunLog = etable.Table()
self.SetTags("RunLog", 'view:"no-inline" desc:"summary log of each run"')
self.RunStats = etable.Table()
self.SetTags("RunStats", 'view:"no-inline" desc:"aggregate stats on all runs"')
self.Params = params.Sets()
self.SetTags("Params", 'view:"no-inline" desc:"full collection of param sets"')
self.ParamSet = str()
self.SetTags("ParamSet", 'view:"-" desc:"which set of *additional* parameters to use -- always applies Base and optionaly this next if set"')
self.MaxRuns = int(10)
self.SetTags("MaxRuns", 'desc:"maximum number of model runs to perform"')
self.MaxEpcs = int(100)
self.SetTags("MaxEpcs", 'desc:"maximum number of epochs to run per model run"')
self.NZeroStop = int(20)
self.SetTags("NZeroStop", 'desc:"if a positive number, training will stop after this many epochs with zero SSE"')
self.TrainEnv = env.FixedTable()
self.SetTags("TrainEnv", 'desc:"Training environment -- contains everything about iterating over input / output patterns over training"')
self.TestEnv = env.FixedTable()
self.SetTags("TestEnv", 'desc:"Testing environment -- manages iterating over testing"')
self.Time = leabra.Time()
self.SetTags("Time", 'desc:"leabra timing parameters and state"')
self.ViewOn = True
self.SetTags("ViewOn", 'desc:"whether to update the network view while running"')
self.TrainUpdt = leabra.TimeScales.AlphaCycle
self.SetTags("TrainUpdt", 'desc:"at what time scale to update the display during training? Anything longer than Epoch updates at Epoch in this model"')
self.TestUpdt = leabra.TimeScales.Cycle
self.SetTags("TestUpdt", 'desc:"at what time scale to update the display during testing? Anything longer than Epoch updates at Epoch in this model"')
self.TestInterval = int(5)
self.SetTags("TestInterval", 'desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"')
self.LayStatNms = go.Slice_string(["Input", "Output"])
self.SetTags("LayStatNms", 'desc:"names of layers to collect more detailed stats on (avg act, etc)"')
# statistics: note use float64 as that is best for etable.Table
self.TrlErr = float(0)
self.SetTags("TrlErr", 'inactive:"+" desc:"1 if trial was error, 0 if correct -- based on SSE = 0 (subject to .5 unit-wise tolerance)"')
self.TrlSSE = float(0)
self.SetTags("TrlSSE", 'inactive:"+" desc:"current trial\'s sum squared error"')
self.TrlAvgSSE = float(0)
self.SetTags("TrlAvgSSE", 'inactive:"+" desc:"current trial\'s average sum squared error"')
self.TrlCosDiff = float(0)
self.SetTags("TrlCosDiff", 'inactive:"+" desc:"current trial\'s cosine difference"')
self.EpcSSE = float(0)
self.SetTags("EpcSSE", 'inactive:"+" desc:"last epoch\'s total sum squared error"')
self.EpcAvgSSE = float(0)
self.SetTags("EpcAvgSSE", 'inactive:"+" desc:"last epoch\'s average sum squared error (average over trials, and over units within layer)"')
self.EpcPctErr = float(0)
self.SetTags("EpcPctErr", 'inactive:"+" desc:"last epoch\'s average TrlErr"')
self.EpcPctCor = float(0)
self.SetTags("EpcPctCor", 'inactive:"+" desc:"1 - last epoch\'s average TrlErr"')
self.EpcCosDiff = float(0)
self.SetTags("EpcCosDiff", 'inactive:"+" desc:"last epoch\'s average cosine difference for output layer (a normalized error measure, maximum of 1 when the minus phase exactly matches the plus)"')
self.FirstZero = int(-1)
self.SetTags("FirstZero", 'inactive:"+" desc:"epoch at when SSE first went to zero"')
self.NZero = int(0)
self.SetTags("NZero", 'inactive:"+" desc:"number of epochs in a row with zero SSE"')
# internal state - view:"-"
self.SumErr = float(0)
self.SetTags("SumErr", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumSSE = float(0)
self.SetTags("SumSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumAvgSSE = float(0)
self.SetTags("SumAvgSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.SumCosDiff = float(0)
self.SetTags("SumCosDiff", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
self.Win = 0
self.SetTags("Win", 'view:"-" desc:"main GUI window"')
self.NetView = 0
self.SetTags("NetView", 'view:"-" desc:"the network viewer"')
self.ToolBar = 0
self.SetTags("ToolBar", 'view:"-" desc:"the master toolbar"')
self.TrnEpcPlot = 0
self.SetTags("TrnEpcPlot", 'view:"-" desc:"the training epoch plot"')
self.TstEpcPlot = 0
self.SetTags("TstEpcPlot", 'view:"-" desc:"the testing epoch plot"')
self.TstTrlPlot = 0
self.SetTags("TstTrlPlot", 'view:"-" desc:"the test-trial plot"')
self.RunPlot = 0
self.SetTags("RunPlot", 'view:"-" desc:"the run plot"')
self.TrnEpcFile = 0
self.SetTags("TrnEpcFile", 'view:"-" desc:"log file"')
self.RunFile = 0
self.SetTags("RunFile", 'view:"-" desc:"log file"')
self.ValsTsrs = {}
self.SetTags("ValsTsrs", 'view:"-" desc:"for holding layer values"')
self.IsRunning = False
self.SetTags("IsRunning", 'view:"-" desc:"true if sim is running"')
self.StopNow = False
self.SetTags("StopNow", 'view:"-" desc:"flag to stop running"')
self.NeedsNewRun = False
self.SetTags("NeedsNewRun", 'view:"-" desc:"flag to initialize NewRun if last one finished"')
self.RndSeed = int(1)
self.SetTags("RndSeed", 'view:"-" desc:"the current random seed"')
self.vp = 0
self.SetTags("vp", 'view:"-" desc:"viewport"')
def InitParams(ss):
"""
Sets the default set of parameters -- Base is always applied, and others can be optionally
selected to apply on top of that
"""
ss.Params.OpenJSON("hebberr_combo.params")
def Config(ss):
"""
Config configures all the elements using the standard functions
"""
ss.InitParams()
ss.OpenPats()
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
ss.ConfigTrnEpcLog(ss.TrnEpcLog)
ss.ConfigTstEpcLog(ss.TstEpcLog)
ss.ConfigTstTrlLog(ss.TstTrlLog)
ss.ConfigRunLog(ss.RunLog)
def ConfigEnv(ss):
if ss.MaxRuns == 0:
ss.MaxRuns = 10
if ss.MaxEpcs == 0: # allow user override
ss.MaxEpcs = 100
ss.NZeroStop = 20
ss.TrainEnv.Nm = "TrainEnv"
ss.TrainEnv.Dsc = "training params and state"
ss.TrainEnv.Table = etable.NewIdxView(ss.Easy)
ss.TrainEnv.Validate()
ss.TrainEnv.Run.Max = ss.MaxRuns # note: we are not setting epoch max -- do that manually
ss.TestEnv.Nm = "TestEnv"
ss.TestEnv.Dsc = "testing params and state"
ss.TestEnv.Table = etable.NewIdxView(ss.Easy)
ss.TestEnv.Sequential = True
ss.TestEnv.Validate()
ss.TrainEnv.Init(0)
ss.TestEnv.Init(0)
def UpdateEnv(ss):
if ss.Pats == PatsType.Easy:
ss.TrainEnv.Table = etable.NewIdxView(ss.Easy)
ss.TestEnv.Table = etable.NewIdxView(ss.Easy)
elif ss.Pats == PatsType.Hard:
ss.TrainEnv.Table = etable.NewIdxView(ss.Hard)
ss.TestEnv.Table = etable.NewIdxView(ss.Hard)
elif ss.Pats == PatsType.Impossible:
ss.TrainEnv.Table = etable.NewIdxView(ss.Impossible)
ss.TestEnv.Table = etable.NewIdxView(ss.Impossible)
elif ss.Pats == PatsType.Lines2:
all = etable.NewIdxView(ss.Lines2)
splits = split.Permuted(all, go.Slice_float64([.9, .1]), go.Slice_string(["Train", "Test"]))
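# the Lines2 patterns are randomly split 90% / 10% into Train / Test sets, so the Test set
# measures generalization to pattern combinations that were never presented during training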
ss.TrainEnv.Table = splits.Splits[0]
ss.TestEnv.Table = splits.Splits[1]
def ConfigNet(ss, net):
net.InitName(net, "PatAssoc")
inp = net.AddLayer2D("Input", 5, 5, emer.Input)
hid = net.AddLayer2D("Hidden", 6, 5, emer.Hidden)
out = net.AddLayer2D("Output", 5, 2, emer.Target)
out.SetClass("Output")
full = prjn.NewFull()
inhid = net.ConnectLayers(inp, hid, full, emer.Forward)
inhid.SetClass("InputToHidden")
net.BidirConnectLayersPy(hid, out, full)
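# note: the bidirectional Hidden <-> Output connectivity lets plus-phase target activity reach the
# hidden layer, which is presumably what supports the error-driven component of learning there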
hid.SetRelPos(relpos.Rel(Rel= relpos.Above, Other= "Input", YAlign= relpos.Front, XAlign= relpos.Left, YOffset= 1))
net.Defaults()
ss.SetParams("Network", False) # only set Network params
net.Build()
net.InitWts()
def Init(ss):
"""
Init restarts the run, and initializes everything, including network weights
and resets the epoch log table
"""
rand.Seed(ss.RndSeed)
ss.UpdateEnv()
ss.StopNow = False
ss.SetParams("", False)
ss.NewRun()
ss.UpdateView(True)
def NewRndSeed(ss):
"""
NewRndSeed gets a new random seed based on current time -- otherwise uses
the same random seed for every run
"""
ss.RndSeed = int(datetime.now(timezone.utc).timestamp())
def Counters(ss, train):
"""
Counters returns a string of the current counter state
use tabs to achieve a reasonable formatting overall
and add a few tabs at the end to allow for expansion..
"""
if train:
return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (ss.TrainEnv.Run.Cur, ss.TrainEnv.Epoch.Cur, ss.TrainEnv.Trial.Cur, ss.Time.Cycle, ss.TrainEnv.TrialName.Cur)
else:
return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (ss.TrainEnv.Run.Cur, ss.TrainEnv.Epoch.Cur, ss.TestEnv.Trial.Cur, ss.Time.Cycle, ss.TestEnv.TrialName.Cur)
def UpdateView(ss, train):
if ss.NetView != 0 and ss.NetView.IsVisible():
ss.NetView.Record(ss.Counters(train))
ss.NetView.GoUpdate()
def AlphaCyc(ss, train):
"""
AlphaCyc runs one alpha-cycle (100 msec, 4 quarters) of processing.
External inputs must have already been applied prior to calling,
using ApplyExt method on relevant layers (see TrainTrial, TestTrial).
If train is true, then learning DWt or WtFmDWt calls are made.
Handles netview updating within scope of AlphaCycle
"""
if ss.Win != 0:
ss.Win.PollEvents() # this is essential for GUI responsiveness while running
viewUpdt = ss.TrainUpdt.value
if not train:
viewUpdt = ss.TestUpdt.value
# update prior weight changes at start, so any DWt values remain visible at end
# you might want to do this less frequently to achieve a mini-batch update
# in which case, move it out to the TrainTrial method where the relevant
# counters are being dealt with.
if train:
ss.Net.WtFmDWt()
ss.Net.AlphaCycInit(train)
ss.Time.AlphaCycStart()
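# an alpha cycle is 4 quarters of ss.Time.CycPerQtr cycles each (typically 25, i.e. 100 cycles total);
# the final quarter is the plus phase, in which target values drive the Target layers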
for qtr in range(4):
for cyc in range(ss.Time.CycPerQtr):
ss.Net.Cycle(ss.Time)
ss.Time.CycleInc()
if ss.ViewOn:
if viewUpdt == leabra.Cycle:
if cyc != ss.Time.CycPerQtr-1: # will be updated by quarter
ss.UpdateView(train)
elif viewUpdt == leabra.FastSpike:
if (cyc+1)%10 == 0:
ss.UpdateView(train)
ss.Net.QuarterFinal(ss.Time)
ss.Time.QuarterInc()
if ss.ViewOn:
if viewUpdt == leabra.Quarter:
ss.UpdateView(train)
if viewUpdt == leabra.Phase:
if qtr >= 2:
ss.UpdateView(train)
if train:
ss.Net.DWt()
if ss.ViewOn and viewUpdt == leabra.AlphaCycle:
ss.UpdateView(train)
def ApplyInputs(ss, en):
"""
ApplyInputs applies input patterns from given environment.
It is good practice to have this be a separate method with appropriate
args so that it can be used for various different contexts
(training, testing, etc).
"""
ss.Net.InitExt() # clear any existing inputs first -- not strictly necessary if always going to the same layers, but good practice and cheap anyway
lays = ["Input", "Output"]
for lnm in lays :
ly = leabra.Layer(ss.Net.LayerByName(lnm))
pats = en.State(ly.Nm)
if pats != 0:
ly.ApplyExt(pats)
def TrainTrial(ss):
"""
TrainTrial runs one trial of training using TrainEnv
"""
if ss.NeedsNewRun:
ss.NewRun()
ss.TrainEnv.Step()
# Key to query counters FIRST because current state is in NEXT epoch
# if epoch counter has changed
epc = env.CounterCur(ss.TrainEnv, env.Epoch)
chg = env.CounterChg(ss.TrainEnv, env.Epoch)
if chg:
ss.LogTrnEpc(ss.TrnEpcLog)
if ss.ViewOn and ss.TrainUpdt.value > leabra.AlphaCycle:
ss.UpdateView(True)
if ss.TestInterval > 0 and epc%ss.TestInterval == 0: # note: epc is *next* so won't trigger first time
ss.TestAll()
if epc >= ss.MaxEpcs or (ss.NZeroStop > 0 and ss.NZero >= ss.NZeroStop):
# done with training..
ss.RunEnd()
if ss.TrainEnv.Run.Incr(): # we are done!
ss.StopNow = True
return
else:
ss.NeedsNewRun = True
return
# note: type must be in place before apply inputs
ss.Net.LayerByName("Output").SetType(emer.Target)
ss.ApplyInputs(ss.TrainEnv)
ss.AlphaCyc(True) # train
ss.TrialStats(True) # accumulate
def RunEnd(ss):
"""
RunEnd is called at the end of a run -- save weights, record final log, etc here
"""
ss.LogRun(ss.RunLog)
def NewRun(ss):
"""
NewRun initializes a new run of the model, using the TrainEnv.Run counter
for the new run value
"""
run = ss.TrainEnv.Run.Cur
ss.UpdateEnv()
ss.TrainEnv.Init(run)
ss.TestEnv.Init(run)
ss.Time.Reset()
ss.Net.InitWts()
ss.InitStats()
ss.TrnEpcLog.SetNumRows(0)
ss.TstEpcLog.SetNumRows(0)
ss.NeedsNewRun = False
ss.TrainEnv.Run.Max = ss.MaxRuns
def InitStats(ss):
"""
InitStats initializes all the statistics, especially important for the
cumulative epoch stats -- called at start of new run
"""
ss.SumSSE = 0
ss.SumAvgSSE = 0
ss.SumCosDiff = 0
ss.SumErr = 0
ss.FirstZero = -1
ss.NZero = 0
ss.TrlErr = 0
ss.TrlSSE = 0
ss.TrlAvgSSE = 0
ss.EpcSSE = 0
ss.EpcAvgSSE = 0
ss.EpcPctErr = 0
ss.EpcCosDiff = 0
def TrialStats(ss, accum):
"""
TrialStats computes the trial-level statistics and adds them to the epoch accumulators if
accum is true. Note that we're accumulating stats here on the Sim side so the
core algorithm side remains as simple as possible, and doesn't need to worry about
different time-scales over which stats could be accumulated etc.
You can also aggregate directly from log data, as is done for testing stats
"""
out = leabra.Layer(ss.Net.LayerByName("Output"))
ss.TrlCosDiff = float(out.CosDiff.Cos)
ss.TrlSSE = out.SSE(0.5) # 0.5 = per-unit tolerance -- right side of .5
ss.TrlAvgSSE = ss.TrlSSE / len(out.Neurons)
if ss.TrlSSE > 0:
ss.TrlErr = 1
else:
ss.TrlErr = 0
if accum:
ss.SumErr += ss.TrlErr
ss.SumSSE += ss.TrlSSE
ss.SumAvgSSE += ss.TrlAvgSSE
ss.SumCosDiff += ss.TrlCosDiff
return
def TrainEpoch(ss):
"""
TrainEpoch runs training trials for remainder of this epoch
"""
ss.StopNow = False
curEpc = ss.TrainEnv.Epoch.Cur
while True:
ss.TrainTrial()
if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:
break
ss.Stopped()
def TrainRun(ss):
"""
TrainRun runs training trials for remainder of run
"""
ss.StopNow = False
curRun = ss.TrainEnv.Run.Cur
while True:
ss.TrainTrial()
if ss.StopNow or ss.TrainEnv.Run.Cur != curRun:
break
ss.Stopped()
def Train(ss):
"""
Train runs the full training from this point onward
"""
ss.StopNow = False
while True:
ss.TrainTrial()
if ss.StopNow:
break
ss.Stopped()
def Stop(ss):
"""
Stop tells the sim to stop running
"""
ss.StopNow = True
def Stopped(ss):
"""
Stopped is called when a run method stops running -- updates the IsRunning flag and toolbar
"""
ss.IsRunning = False
if ss.Win != 0:
vp = ss.Win.WinViewport2D()
if ss.ToolBar != 0:
ss.ToolBar.UpdateActions()
vp.SetNeedsFullRender()
ss.UpdateClassView()
def SaveWeights(ss, filename):
"""
SaveWeights saves the network weights -- when called with giv.CallMethod
it will auto-prompt for filename
"""
ss.Net.SaveWtsJSON(filename)
def TestTrial(ss, returnOnChg):
"""
TestTrial runs one trial of testing -- always sequentially presented inputs
"""
ss.TestEnv.Step()
chg = env.CounterChg(ss.TestEnv, env.Epoch)
if chg:
if ss.ViewOn and ss.TestUpdt.value > leabra.AlphaCycle:
ss.UpdateView(False)
ss.LogTstEpc(ss.TstEpcLog)
if returnOnChg:
return
ss.Net.LayerByName("Output").SetType(emer.Compare)
ss.ApplyInputs(ss.TestEnv)
ss.AlphaCyc(False)
ss.TrialStats(False)
ss.LogTstTrl(ss.TstTrlLog)
def TestItem(ss, idx):
"""
TestItem tests given item which is at given index in test item list
"""
cur = ss.TestEnv.Trial.Cur
ss.TestEnv.Trial.Cur = idx
ss.TestEnv.SetTrialName()
ss.Net.LayerByName("Output").SetType(emer.Compare)
ss.ApplyInputs(ss.TestEnv)
ss.AlphaCyc(False)
ss.TrialStats(False)
ss.TestEnv.Trial.Cur = cur
def TestAll(ss):
"""
TestAll runs through the full set of testing items
"""
ss.TestEnv.Init(ss.TrainEnv.Run.Cur)
while True:
ss.TestTrial(True)
chg = env.CounterChg(ss.TestEnv, env.Epoch)
if chg or ss.StopNow:
break
def RunTestAll(ss):
"""
RunTestAll runs through the full set of testing items, has stop running = false at end -- for gui
"""
ss.StopNow = False
ss.TestAll()
ss.Stopped()
def SetParams(ss, sheet, setMsg):
"""
SetParams sets the params for "Base" and then current ParamSet.
If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
otherwise just the named sheet
if setMsg = true then we output a message for each param that was set.
"""
if sheet == "":
ss.Params.ValidateSheets(go.Slice_string(["Network", "Sim"]))
ss.SetParamsSet("Base", sheet, setMsg)
if ss.ParamSet != "" and ss.ParamSet != "Base":
sps = ss.ParamSet.split()
for ps in sps:
ss.SetParamsSet(ps, sheet, setMsg)
if ss.Learn == LearnType.Hebbian:
ss.SetParamsSet("Hebbian", sheet, setMsg)
elif ss.Learn == LearnType.ErrorDriven:
ss.SetParamsSet("ErrorDriven", sheet, setMsg)
elif ss.Learn == LearnType.ErrorHebbIn:
ss.SetParamsSet("ErrorHebbIn", sheet, setMsg)
def SetParamsSet(ss, setNm, sheet, setMsg):
"""
SetParamsSet sets the params for given params.Set name.
If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
otherwise just the named sheet
if setMsg = true then we output a message for each param that was set.
"""
pset = ss.Params.SetByNameTry(setNm)
if sheet == "" or sheet == "Network":
if "Network" in pset.Sheets:
netp = pset.SheetByNameTry("Network")
ss.Net.ApplyParams(netp, setMsg)
if sheet == "" or sheet == "Sim":
if "Sim" in pset.Sheets:
simp= pset.SheetByNameTry("Sim")
pyparams.ApplyParams(ss, simp, setMsg)
def OpenPats(ss):
ss.Easy.SetMetaData("name", "Easy")
ss.Easy.SetMetaData("desc", "Easy Training patterns")
ss.Easy.OpenCSV("easy.tsv", etable.Tab)
ss.Hard.SetMetaData("name", "Hard")
ss.Hard.SetMetaData("desc", "Hard Training patterns")
ss.Hard.OpenCSV("hard.tsv", etable.Tab)
ss.Impossible.SetMetaData("name", "Impossible")
ss.Impossible.SetMetaData("desc", "Impossible Training patterns")
ss.Impossible.OpenCSV("impossible.tsv", etable.Tab)
ss.Lines2.SetMetaData("name", "Lines2")
ss.Lines2.SetMetaData("desc", "Lines2 Training patterns")
ss.Lines2.OpenCSV("lines2out1.tsv", etable.Tab)
def ValsTsr(ss, name):
"""
ValsTsr gets value tensor of given name, creating if not yet made
"""
if name in ss.ValsTsrs:
return ss.ValsTsrs[name]
tsr = etensor.Float32()
ss.ValsTsrs[name] = tsr
return tsr
def LogTrnEpc(ss, dt):
"""
LogTrnEpc adds data from current epoch to the TrnEpcLog table.
computes epoch averages prior to logging.
"""
row = dt.Rows
dt.SetNumRows(row + 1)
epc = ss.TrainEnv.Epoch.Prv # this is triggered by increment so use previous value
nt = float(ss.TrainEnv.Table.Len()) # number of trials in view
ss.EpcSSE = ss.SumSSE / nt
ss.SumSSE = 0
ss.EpcAvgSSE = ss.SumAvgSSE / nt
ss.SumAvgSSE = 0
ss.EpcPctErr = float(ss.SumErr) / nt
ss.SumErr = 0
ss.EpcPctCor = 1 - ss.EpcPctErr
ss.EpcCosDiff = ss.SumCosDiff / nt
ss.SumCosDiff = 0
if ss.FirstZero < 0 and ss.EpcPctErr == 0:
ss.FirstZero = epc
if ss.EpcPctErr == 0:
ss.NZero += 1
else:
ss.NZero = 0
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("SSE", row, ss.EpcSSE)
dt.SetCellFloat("AvgSSE", row, ss.EpcAvgSSE)
dt.SetCellFloat("PctErr", row, ss.EpcPctErr)
dt.SetCellFloat("PctCor", row, ss.EpcPctCor)
dt.SetCellFloat("CosDiff", row, ss.EpcCosDiff)
for lnm in ss.LayStatNms:
ly = leabra.Layer(ss.Net.LayerByName(lnm))
dt.SetCellFloat(ly.Nm+" ActAvg", row, float(ly.Pool(0).ActAvg.ActPAvgEff))
# note: essential to use Go version of update when called from another goroutine
ss.TrnEpcPlot.GoUpdate()
if ss.TrnEpcFile != 0:
if ss.TrainEnv.Run.Cur == 0 and epc == 0:
dt.WriteCSVHeaders(ss.TrnEpcFile, etable.Tab)
dt.WriteCSVRow(ss.TrnEpcFile, row, etable.Tab)
def ConfigTrnEpcLog(ss, dt):
dt.SetMetaData("name", "TrnEpcLog")
dt.SetMetaData("desc", "Record of performance over epochs of training")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
for lnm in ss.LayStatNms:
sch.append(etable.Column(lnm + " ActAvg", etensor.FLOAT64, go.nil, go.nil))
dt.SetFromSchema(sch, 0)
def ConfigTrnEpcPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Epoch Plot"
plt.Params.XAxisCol = "Epoch"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
for lnm in ss.LayStatNms:
plt.SetColParams(lnm+" ActAvg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 0.5)
return plt
def LogTstTrl(ss, dt):
"""
LogTstTrl adds data from current trial to the TstTrlLog table.
log always contains number of testing items
"""
epc = ss.TrainEnv.Epoch.Prv # this is triggered by increment so use previous value
inp = leabra.Layer(ss.Net.LayerByName("Input"))
out = leabra.Layer(ss.Net.LayerByName("Output"))
trl = ss.TestEnv.Trial.Cur
row = trl
if dt.Rows <= row:
dt.SetNumRows(row + 1)
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("Trial", row, float(trl))
dt.SetCellString("TrialName", row, ss.TestEnv.TrialName.Cur)
dt.SetCellFloat("Err", row, ss.TrlErr)
dt.SetCellFloat("SSE", row, ss.TrlSSE)
dt.SetCellFloat("AvgSSE", row, ss.TrlAvgSSE)
dt.SetCellFloat("CosDiff", row, ss.TrlCosDiff)
for lnm in ss.LayStatNms:
ly = leabra.Layer(ss.Net.LayerByName(lnm))
dt.SetCellFloat(ly.Nm+" ActM.Avg", row, float(ly.Pool(0).ActM.Avg))
ivt = ss.ValsTsr("Input")
ovt = ss.ValsTsr("Output")
inp.UnitValsTensor(ivt, "Act")
dt.SetCellTensor("InAct", row, ivt)
out.UnitValsTensor(ovt, "ActM")
dt.SetCellTensor("OutActM", row, ovt)
out.UnitValsTensor(ovt, "Targ")
dt.SetCellTensor("OutTarg", row, ovt)
# note: essential to use Go version of update when called from another goroutine
ss.TstTrlPlot.GoUpdate()
def ConfigTstTrlLog(ss, dt):
inp = leabra.Layer(ss.Net.LayerByName("Input"))
out = leabra.Layer(ss.Net.LayerByName("Output"))
dt.SetMetaData("name", "TstTrlLog")
dt.SetMetaData("desc", "Record of testing per input pattern")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
nt = ss.TestEnv.Table.Len() # number in view
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("Trial", etensor.INT64, go.nil, go.nil),
etable.Column("TrialName", etensor.STRING, go.nil, go.nil),
etable.Column("Err", etensor.FLOAT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
for lnm in ss.LayStatNms:
sch.append(etable.Column(lnm + " ActM.Avg", etensor.FLOAT64, go.nil, go.nil))
sch.append(etable.Column("InAct", etensor.FLOAT64, inp.Shp.Shp, go.nil))
sch.append(etable.Column("OutActM", etensor.FLOAT64, out.Shp.Shp, go.nil))
sch.append(etable.Column("OutTarg", etensor.FLOAT64, out.Shp.Shp, go.nil))
dt.SetFromSchema(sch, nt)
def ConfigTstTrlPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Test Trial Plot"
plt.Params.XAxisCol = "Trial"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Trial", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("TrialName", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Err", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
for lnm in ss.LayStatNms:
plt.SetColParams(lnm+" ActM.Avg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 0.5)
plt.SetColParams("InAct", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("OutActM", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("OutTarg", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def LogTstEpc(ss, dt):
row = dt.Rows
dt.SetNumRows(row + 1)
trl = ss.TstTrlLog
tix = etable.NewIdxView(trl)
epc = ss.TrainEnv.Epoch.Prv # ?
# note: this shows how to use agg methods to compute summary data from another
# data table, instead of incrementing on the Sim
dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
dt.SetCellFloat("Epoch", row, float(epc))
dt.SetCellFloat("SSE", row, agg.Sum(tix, "SSE")[0])
dt.SetCellFloat("AvgSSE", row, agg.Mean(tix, "AvgSSE")[0])
dt.SetCellFloat("PctErr", row, agg.Mean(tix, "Err")[0])
dt.SetCellFloat("PctCor", row, 1-agg.Mean(tix, "Err")[0])
dt.SetCellFloat("CosDiff", row, agg.Mean(tix, "CosDiff")[0])
# note: essential to use Go version of update when called from another goroutine
if ss.TstEpcPlot != 0:
ss.TstEpcPlot.GoUpdate()
def ConfigTstEpcLog(ss, dt):
dt.SetMetaData("name", "TstEpcLog")
dt.SetMetaData("desc", "Summary stats for testing trials")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
dt.SetFromSchema(sch, 0)
def ConfigTstEpcPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Testing Epoch Plot"
plt.Params.XAxisCol = "Epoch"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.On, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def LogRun(ss, dt):
"""
LogRun adds data from current run to the RunLog table.
"""
run = ss.TrainEnv.Run.Cur # this is NOT triggered by increment yet -- use Cur
row = dt.Rows
dt.SetNumRows(row + 1)
epclog = ss.TstEpcLog
epcix = etable.NewIdxView(epclog)
# compute mean over last N epochs for run level
nlast = 5
if nlast > epcix.Len()-1:
nlast = epcix.Len() - 1
epcix.Idxs = go.Slice_int(epcix.Idxs[epcix.Len()-nlast:])
params = ss.Learn.name + "_" + ss.Pats.name
dt.SetCellFloat("Run", row, float(run))
dt.SetCellString("Params", row, params)
dt.SetCellFloat("FirstZero", row, float(ss.FirstZero))
dt.SetCellFloat("SSE", row, agg.Mean(epcix, "SSE")[0])
dt.SetCellFloat("AvgSSE", row, agg.Mean(epcix, "AvgSSE")[0])
dt.SetCellFloat("PctErr", row, agg.Mean(epcix, "PctErr")[0])
dt.SetCellFloat("PctCor", row, agg.Mean(epcix, "PctCor")[0])
dt.SetCellFloat("CosDiff", row, agg.Mean(epcix, "CosDiff")[0])
runix = etable.NewIdxView(dt)
spl = split.GroupBy(runix, go.Slice_string(["Params"]))
split.Desc(spl, "FirstZero")
split.Desc(spl, "PctCor")
split.Desc(spl, "SSE")
ss.RunStats = spl.AggsToTable(etable.AddAggName)
# note: essential to use Go version of update when called from another goroutine
if ss.RunPlot != 0:
ss.RunPlot.GoUpdate()
if ss.RunFile != 0:
if row == 0:
dt.WriteCSVHeaders(ss.RunFile, etable.Tab)
dt.WriteCSVRow(ss.RunFile, row, etable.Tab)
def ConfigRunLog(ss, dt):
dt.SetMetaData("name", "RunLog")
dt.SetMetaData("desc", "Record of performance at end of training")
dt.SetMetaData("read-only", "true")
dt.SetMetaData("precision", str(LogPrec))
sch = etable.Schema(
[etable.Column("Run", etensor.INT64, go.nil, go.nil),
etable.Column("Params", etensor.STRING, go.nil, go.nil),
etable.Column("FirstZero", etensor.FLOAT64, go.nil, go.nil),
etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
)
dt.SetFromSchema(sch, 0)
def ConfigRunPlot(ss, plt, dt):
plt.Params.Title = "Pattern Associator Run Plot"
plt.Params.XAxisCol = "Run"
plt.SetTable(dt)
# order of params: on, fixMin, min, fixMax, max
plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("FirstZero", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("PctCor", eplot.On, eplot.FixMin, 0, eplot.FixMax, 1)
plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
return plt
def ConfigNetView(ss, nv):
nv.ViewDefaults()
nv.Scene().Camera.Pose.Pos.Set(0.2, 1.27, 2.62)
nv.Scene().Camera.LookAt(mat32.Vec3(0.2, 0, 0), mat32.Vec3(0, 1, 0))
def ConfigGui(ss):
"""
ConfigGui configures the GoGi gui interface for this simulation,
"""
width = 1600
height = 1200
gi.SetAppName("hebberr_combo")
gi.SetAppAbout('shows how XCal hebbian learning in shallower layers of a network can aid an error driven learning network to generalize to unseen combinations of patterns. See <a href="https://github.com/CompCogNeuro/sims/blob/master/ch4/hebberr_combo/README.md">README.md on GitHub</a>.')
win = gi.NewMainWindow("hebberr_combo", "Hebbian+Error Driven Learning", width, height)
ss.Win = win
vp = win.WinViewport2D()
ss.vp = vp
updt = vp.UpdateStart()
mfr = win.SetMainFrame()
tbar = gi.AddNewToolBar(mfr, "tbar")
tbar.SetStretchMaxWidth()
ss.ToolBar = tbar
split = gi.AddNewSplitView(mfr, "split")
split.Dim = mat32.X
split.SetStretchMaxWidth()
split.SetStretchMaxHeight()
cv = ss.NewClassView("sv")
cv.AddFrame(split)
cv.Config()
tv = gi.AddNewTabView(split, "tv")
nv = netview.NetView()
tv.AddTab(nv, "NetView")
nv.Var = "Act"
nv.SetNet(ss.Net)
ss.NetView = nv
ss.ConfigNetView(nv)
plt = eplot.Plot2D()
tv.AddTab(plt, "TrnEpcPlot")
ss.TrnEpcPlot = ss.ConfigTrnEpcPlot(plt, ss.TrnEpcLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "TstTrlPlot")
ss.TstTrlPlot = ss.ConfigTstTrlPlot(plt, ss.TstTrlLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "TstEpcPlot")
ss.TstEpcPlot = ss.ConfigTstEpcPlot(plt, ss.TstEpcLog)
plt = eplot.Plot2D()
tv.AddTab(plt, "RunPlot")
ss.RunPlot = ss.ConfigRunPlot(plt, ss.RunLog)
split.SetSplitsList(go.Slice_float32([.2, .8]))
recv = win.This()
tbar.AddAction(gi.ActOpts(Label="Init", Icon="update", Tooltip="Initialize everything including network weights, and start over. Also applies current params.", UpdateFunc=UpdtFuncNotRunning), recv, InitCB)
tbar.AddAction(gi.ActOpts(Label="Train", Icon="run", Tooltip="Starts the network training, picking up from wherever it may have left off. If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.", UpdateFunc=UpdtFuncNotRunning), recv, TrainCB)
tbar.AddAction(gi.ActOpts(Label="Stop", Icon="stop", Tooltip="Interrupts running. Hitting Train again will pick back up where it left off.", UpdateFunc=UpdtFuncRunning), recv, StopCB)
tbar.AddAction(gi.ActOpts(Label="Step Trial", Icon="step-fwd", Tooltip="Advances one training trial at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepTrialCB)
tbar.AddAction(gi.ActOpts(Label="Step Epoch", Icon="fast-fwd", Tooltip="Advances one epoch (complete set of training patterns) at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepEpochCB)
tbar.AddAction(gi.ActOpts(Label="Step Run", Icon="fast-fwd", Tooltip="Advances one full training Run at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepRunCB)
tbar.AddSeparator("test")
tbar.AddAction(gi.ActOpts(Label="Test Trial", Icon="step-fwd", Tooltip="Runs the next testing trial.", UpdateFunc=UpdtFuncNotRunning), recv, TestTrialCB)
tbar.AddAction(gi.ActOpts(Label="Test Item", Icon="step-fwd", Tooltip="Prompts for a specific input pattern name to run, and runs it in testing mode.", UpdateFunc=UpdtFuncNotRunning), recv, TestItemCB)
tbar.AddAction(gi.ActOpts(Label="Test All", Icon="fast-fwd", Tooltip="Tests all of the testing trials.", UpdateFunc=UpdtFuncNotRunning), recv, TestAllCB)
tbar.AddSeparator("log")
tbar.AddAction(gi.ActOpts(Label="Reset RunLog", Icon="reset", Tooltip="Resets the accumulated log of all Runs, which are tagged with the ParamSet used"), recv, ResetRunLogCB)
tbar.AddSeparator("misc")
tbar.AddAction(gi.ActOpts(Label="New Seed", Icon="new", Tooltip="Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time."), recv, NewRndSeedCB)
tbar.AddAction(gi.ActOpts(Label="README", Icon="file-markdown", Tooltip="Opens your browser on the README file that contains instructions for how to run this model."), recv, ReadmeCB)
# main menu
appnm = gi.AppName()
mmen = win.MainMenu
mmen.ConfigMenus(go.Slice_string([appnm, "File", "Edit", "Window"]))
amen = gi.Action(win.MainMenu.ChildByName(appnm, 0))
amen.Menu.AddAppMenu(win)
emen = gi.Action(win.MainMenu.ChildByName("Edit", 1))
emen.Menu.AddCopyCutPaste(win)
win.MainMenuUpdated()
vp.UpdateEndNoSig(updt)
win.GoStartEventLoop()
# TheSim is the overall state for this simulation
TheSim = Sim()
def main(argv):
TheSim.Config()
TheSim.ConfigGui()
TheSim.Init()
main(sys.argv[1:]) | en | 0.845848 | #!/usr/local/bin/pyleabra -i # Copyright (c) 2019, The Emergent Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. # use: # just type file name to run, or: # pyleabra -i <file>.py # hebberr_combo shows how XCal hebbian learning in shallower layers # of a network can aid an error driven learning network to # generalize to unseen combinations of patterns. # this will become Sim later.. # LogPrec is precision for saving float values in logs # note: we cannot use methods for callbacks from Go -- must be separate functions # so below are all the callbacks from the GUI toolbar actions # contains, ignoreCase ############################################ # Enums -- note: must start at 0 for GUI ##################################################### # Sim Sim encapsulates the entire simulation model, and we define all the functionality as methods on this struct. This structure keeps all relevant state information organized and available without having to pass everything around as arguments to methods, and provides the core GUI interface (note the view tags for the fields which provide hints to how things should be displayed). # statistics: note use float64 as that is best for etable.Table # internal state - view:"-" Sets the default set of parameters -- Base is always applied, and others can be optionally selected to apply on top of that Config configures all the elements using the standard functions # allow user override # note: we are not setting epoch max -- do that manually # only set Network params Init restarts the run, and initializes everything, including network weights and resets the epoch log table NewRndSeed gets a new random seed based on current time -- otherwise uses the same random seed for every run Counters returns a string of the current counter state use tabs to achieve a reasonable formatting overall and add a few tabs at the end to allow for expansion.. AlphaCyc runs one alpha-cycle (100 msec, 4 quarters) of processing. External inputs must have already been applied prior to calling, using ApplyExt method on relevant layers (see TrainTrial, TestTrial). If train is true, then learning DWt or WtFmDWt calls are made. Handles netview updating within scope of AlphaCycle # this is essential for GUI responsiveness while running # update prior weight changes at start, so any DWt values remain visible at end # you might want to do this less frequently to achieve a mini-batch update # in which case, move it out to the TrainTrial method where the relevant # counters are being dealt with. # will be updated by quarter ApplyInputs applies input patterns from given envirbonment. It is good practice to have this be a separate method with appropriate # going to the same layers, but good practice and cheap anyway args so that it can be used for various different contexts (training, testing, etc). TrainTrial runs one trial of training using TrainEnv # Key to query counters FIRST because current state is in NEXT epoch # if epoch counter has changed # note: epc is *next* so won't trigger first time # done with training.. # we are done! 
# note: type must be in place before apply inputs # train # accumulate RunEnd is called at the end of a run -- save weights, record final log, etc here NewRun intializes a new run of the model, using the TrainEnv.Run counter for the new run value InitStats initializes all the statistics, especially important for the cumulative epoch stats -- called at start of new run TrialStats computes the trial-level statistics and adds them to the epoch accumulators if accum is true. Note that we're accumulating stats here on the Sim side so the core algorithm side remains as simple as possible, and doesn't need to worry about different time-scales over which stats could be accumulated etc. You can also aggregate directly from log data, as is done for testing stats # 0.5 = per-unit tolerance -- right side of .5 TrainEpoch runs training trials for remainder of this epoch TrainRun runs training trials for remainder of run Train runs the full training from this point onward Stop tells the sim to stop running Stopped is called when a run method stops running -- updates the IsRunning flag and toolbar SaveWeights saves the network weights -- when called with giv.CallMethod it will auto-prompt for filename TestTrial runs one trial of testing -- always sequentially presented inputs TestItem tests given item which is at given index in test item list TestAll runs through the full set of testing items RunTestAll runs through the full set of testing items, has stop running = false at end -- for gui SetParams sets the params for "Base" and then current ParamSet. If sheet is empty, then it applies all avail sheets (e.g., Network, Sim) otherwise just the named sheet if setMsg = true then we output a message for each param that was set. SetParamsSet sets the params for given params.Set name. If sheet is empty, then it applies all avail sheets (e.g., Network, Sim) otherwise just the named sheet if setMsg = true then we output a message for each param that was set. ValsTsr gets value tensor of given name, creating if not yet made LogTrnEpc adds data from current epoch to the TrnEpcLog table. computes epoch averages prior to logging. # this is triggered by increment so use previous value # number of trials in view # note: essential to use Go version of update when called from another goroutine # order of params: on, fixMin, min, fixMax, max # default plot LogTstTrl adds data from current trial to the TstTrlLog table. # this is triggered by increment so use previous value log always contains number of testing items # note: essential to use Go version of update when called from another goroutine # number in view # order of params: on, fixMin, min, fixMax, max # default plot # ? # note: this shows how to use agg methods to compute summary data from another # data table, instead of incrementing on the Sim # note: essential to use Go version of update when called from another goroutine # order of params: on, fixMin, min, fixMax, max # default plot LogRun adds data from current run to the RunLog table. # this is NOT triggered by increment yet -- use Cur # compute mean over last N epochs for run level # note: essential to use Go version of update when called from another goroutine # order of params: on, fixMin, min, fixMax, max # default plot ConfigGui configures the GoGi gui interface for this simulation, # main menu # TheSim is the overall state for this simulation | 1.929104 | 2 |
examples/example_mnist_prune.py | kshithijiyer/qkeras | 388 | 6630022 | # Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of mnist model with pruning.
Adapted from TF model optimization example."""
import tempfile
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import save_model
from tensorflow.keras.utils import to_categorical
from qkeras import QActivation
from qkeras import QDense
from qkeras import QConv2D
from qkeras import quantized_bits
from qkeras.utils import load_qmodel
from qkeras.utils import print_model_sparsity
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule
batch_size = 128
num_classes = 10
epochs = 12
prune_whole_model = True # Prune whole model or just specified layers
def build_model(input_shape):
x = x_in = Input(shape=input_shape, name="input")
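# quantized_bits(4, 0, ...) is a qkeras fixed-point quantizer: 4 bits total with 0 integer bits,
# used below for both the kernel and bias weights of every quantized layer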
x = QConv2D(
32, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_0_m")(x)
x = QActivation("quantized_relu(4,0)", name="act0_m")(x)
x = QConv2D(
64, (3, 3), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_1_m")(x)
x = QActivation("quantized_relu(4,0)", name="act1_m")(x)
x = QConv2D(
64, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_2_m")(x)
x = QActivation("quantized_relu(4,0)", name="act2_m")(x)
x = Flatten()(x)
x = QDense(num_classes, kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="dense")(x)
x = Activation("softmax", name="softmax")(x)
model = Model(inputs=[x_in], outputs=[x])
return model
def build_layerwise_model(input_shape, **pruning_params):
return Sequential([
prune.prune_low_magnitude(
QConv2D(
32, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_0_m"),
input_shape=input_shape,
**pruning_params),
QActivation("quantized_relu(4,0)", name="act0_m"),
prune.prune_low_magnitude(
QConv2D(
64, (3, 3), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_1_m"),
**pruning_params),
QActivation("quantized_relu(4,0)", name="act1_m"),
prune.prune_low_magnitude(
QConv2D(
64, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_2_m"),
**pruning_params),
QActivation("quantized_relu(4,0)", name="act2_m"),
Flatten(),
prune.prune_low_magnitude(
QDense(
num_classes, kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="dense"),
**pruning_params),
Activation("softmax", name="softmax")
])
def train_and_save(model, x_train, y_train, x_test, y_test):
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
# Print the model summary.
model.summary()
# Add a pruning step callback to peg the pruning step to the optimizer's
# step. Also add a callback to add pruning summaries to tensorboard
callbacks = [
pruning_callbacks.UpdatePruningStep(),
#pruning_callbacks.PruningSummaries(log_dir=tempfile.mkdtemp())
pruning_callbacks.PruningSummaries(log_dir="/tmp/mnist_prune")
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
print_model_sparsity(model)
# Export and import the model. Check that accuracy persists.
_, keras_file = tempfile.mkstemp(".h5")
print("Saving model to: ", keras_file)
save_model(model, keras_file)
print("Reloading model")
with prune.prune_scope():
loaded_model = load_qmodel(keras_file)
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
def main():
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == "channels_first":
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
pruning_params = {
"pruning_schedule":
pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)
}
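# ConstantSparsity(0.75, begin_step=2000, frequency=100) holds a fixed 75% sparsity target:
# pruning begins at optimizer step 2000 and the pruning masks are recomputed every 100 steps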
if prune_whole_model:
model = build_model(input_shape)
model = prune.prune_low_magnitude(model, **pruning_params)
else:
model = build_layerwise_model(input_shape, **pruning_params)
train_and_save(model, x_train, y_train, x_test, y_test)
if __name__ == "__main__":
main() | # Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of mnist model with pruning.
Adapted from TF model optimization example."""
import tempfile
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import save_model
from tensorflow.keras.utils import to_categorical
from qkeras import QActivation
from qkeras import QDense
from qkeras import QConv2D
from qkeras import quantized_bits
from qkeras.utils import load_qmodel
from qkeras.utils import print_model_sparsity
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule
batch_size = 128
num_classes = 10
epochs = 12
prune_whole_model = True # Prune whole model or just specified layers
def build_model(input_shape):
x = x_in = Input(shape=input_shape, name="input")
x = QConv2D(
32, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_0_m")(x)
x = QActivation("quantized_relu(4,0)", name="act0_m")(x)
x = QConv2D(
64, (3, 3), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_1_m")(x)
x = QActivation("quantized_relu(4,0)", name="act1_m")(x)
x = QConv2D(
64, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_2_m")(x)
x = QActivation("quantized_relu(4,0)", name="act2_m")(x)
x = Flatten()(x)
x = QDense(num_classes, kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="dense")(x)
x = Activation("softmax", name="softmax")(x)
model = Model(inputs=[x_in], outputs=[x])
return model
def build_layerwise_model(input_shape, **pruning_params):
return Sequential([
prune.prune_low_magnitude(
QConv2D(
32, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_0_m"),
input_shape=input_shape,
**pruning_params),
QActivation("quantized_relu(4,0)", name="act0_m"),
prune.prune_low_magnitude(
QConv2D(
64, (3, 3), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_1_m"),
**pruning_params),
QActivation("quantized_relu(4,0)", name="act1_m"),
prune.prune_low_magnitude(
QConv2D(
64, (2, 2), strides=(2,2),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="conv2d_2_m"),
**pruning_params),
QActivation("quantized_relu(4,0)", name="act2_m"),
Flatten(),
prune.prune_low_magnitude(
QDense(
num_classes, kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1),
name="dense"),
**pruning_params),
Activation("softmax", name="softmax")
])
def train_and_save(model, x_train, y_train, x_test, y_test):
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
# Print the model summary.
model.summary()
# Add a pruning step callback to peg the pruning step to the optimizer's
# step. Also add a callback to add pruning summaries to tensorboard
callbacks = [
pruning_callbacks.UpdatePruningStep(),
#pruning_callbacks.PruningSummaries(log_dir=tempfile.mkdtemp())
pruning_callbacks.PruningSummaries(log_dir="/tmp/mnist_prune")
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
print_model_sparsity(model)
# Export and import the model. Check that accuracy persists.
_, keras_file = tempfile.mkstemp(".h5")
print("Saving model to: ", keras_file)
save_model(model, keras_file)
print("Reloading model")
with prune.prune_scope():
loaded_model = load_qmodel(keras_file)
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
def main():
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == "channels_first":
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
pruning_params = {
"pruning_schedule":
pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)
}
if prune_whole_model:
model = build_model(input_shape)
model = prune.prune_low_magnitude(model, **pruning_params)
else:
model = build_layerwise_model(input_shape, **pruning_params)
train_and_save(model, x_train, y_train, x_test, y_test)
if __name__ == "__main__":
main() | en | 0.777163 | # Copyright 2019 Google LLC # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Example of mnist model with pruning. Adapted from TF model optimization example. # Prune whole model or just specified layers # Print the model summary. # Add a pruning step callback to peg the pruning step to the optimizer's # step. Also add a callback to add pruning summaries to tensorboard #pruning_callbacks.PruningSummaries(log_dir=tempfile.mkdtemp()) # Export and import the model. Check that accuracy persists. # input image dimensions # the data, shuffled and split between train and test sets # convert class vectors to binary class matrices | 2.073782 | 2 |
model/evaluator.py | Gofinge/HF | 7 | 6630023 | <reponame>Gofinge/HF
from .config import *
class Evaluator:
def __init__(self):
self._acc = 3
pass
def evaluate_trend_simple(self, y_true, y_pred):
size = len(y_true)
correct = 0
all = 0
for i in range(size):
if y_true[i] * y_pred[i] > 0 and y_true[i] != 0:
correct += 1
all += 1
elif y_true[i] != 0:
all += 1
return round(correct / all, self._acc)
def evaluate_trend(self, y_true, y_pred):
size = len(y_true)
correct_rise_or_decline = 0
correct_stay = 0
stay = 0
for i in range(size):
if abs(y_true[i]) < eps:
stay += 1
for i in range(size):
if abs(y_pred[i]) < eps:
if abs(y_true[i]) < eps:
correct_stay += 1
else:
                    pass  # predicted no price change, but the actual price changed; wrong prediction
else:
if abs(y_true[i]) < eps:
                    pass  # predicted a price change, but the actual price did not change; wrong prediction
else:
if y_pred[i] * y_true[i] > 0:
                        correct_rise_or_decline += 1  # predicted direction of the price change matches the actual direction
else:
                        pass  # predicted direction of the price change is opposite to the actual direction
correct = correct_stay + correct_rise_or_decline
if stay == 0:
correct_stay = 0
stay = 1
return round(correct / size, self._acc), \
round(correct_stay / stay, self._acc), \
round(correct_rise_or_decline / (size - stay), self._acc)
def evaluate_trend_2(self, y_true, y_pred):
size = len(y_true)
correct = 0
for i in range(size):
if abs(y_pred[i]) < eps:
if abs(y_true[i]) < eps:
correct += 1
else:
j = i
try:
while abs(y_true[j]) < eps:
j += 1
if y_pred[i] * y_true[j] > 0:
correct += 1
except:
pass
return round(correct / size, self._acc)
def evaluate_trend_with_delay(self, y_true, y_pred):
size = len(y_true)
correct = 0
for i in range(size):
j = i
try:
while abs(y_true[j]) < eps:
j += 1
if y_pred[i] * y_true[j] > 0:
correct += 1
except IndexError:
pass
return round(correct / size, self._acc)
def evaluate_divided_trend(self, y_true, y_pred, part_num=5):
size = len(y_true)
part_size = size // part_num
acc_list = []
for i in range(part_num):
part_y_true = y_true[i * part_size:(i + 1) * part_size]
part_y_pred = y_pred[i * part_size:(i + 1) * part_size]
acc_list.append(self.evaluate_trend_simple(part_y_true, part_y_pred))
return acc_list
def evaluate_one_hot_trend(self, y_true, y_pred):
size = len(y_true)
correct = 0
all = 0
for i in range(size):
v1, v2 = list(y_true[i]), list(y_pred[i])
try:
if v1.index(1) == v2.index(1):
correct += 1
all += 1
except ValueError:
print(v1, v2)
print(correct, all)
return round(correct / all, self._acc)
def evaluate_divided_one_hot_trend(self, y_true, y_pred, part_num=10):
size = len(y_true)
part_size = size // part_num
acc_list = []
for i in range(part_num):
part_y_true = y_true[i * part_size:(i + 1) * part_size]
part_y_pred = y_pred[i * part_size:(i + 1) * part_size]
acc_list.append(self.evaluate_one_hot_trend(part_y_true, part_y_pred))
return acc_list
def evaluate_mean_and_variance(self, true_mean_price, y_pred):
pred_mean, pred_std = y_pred[0], y_pred[1]
all = len(pred_mean)
correct = 0
for i in range(all):
if abs(true_mean_price[i] - pred_mean[i]) < pred_std[i] * z_95:
correct += 1
return round(correct / all, self._acc)
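# Example usage (illustrative; run as part of the package, e.g. `python -m
# model.evaluator`, since the module uses a relative import). Two of the three
# non-zero true moves below share the predicted sign, so the directional
# accuracy comes out as 0.667.
if __name__ == "__main__":
    _evaluator = Evaluator()
    print(_evaluator.evaluate_trend_simple([0.5, -0.2, 0.0, 0.1], [0.3, 0.4, 0.2, 0.05]))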
| from .config import *
class Evaluator:
def __init__(self):
self._acc = 3
pass
def evaluate_trend_simple(self, y_true, y_pred):
size = len(y_true)
correct = 0
all = 0
for i in range(size):
if y_true[i] * y_pred[i] > 0 and y_true[i] != 0:
correct += 1
all += 1
elif y_true[i] != 0:
all += 1
return round(correct / all, self._acc)
def evaluate_trend(self, y_true, y_pred):
size = len(y_true)
correct_rise_or_decline = 0
correct_stay = 0
stay = 0
for i in range(size):
if abs(y_true[i]) < eps:
stay += 1
for i in range(size):
if abs(y_pred[i]) < eps:
if abs(y_true[i]) < eps:
correct_stay += 1
else:
                    pass  # predicted no price change, but the actual price changed; wrong prediction
else:
if abs(y_true[i]) < eps:
                    pass  # predicted a price change, but the actual price did not change; wrong prediction
else:
if y_pred[i] * y_true[i] > 0:
                        correct_rise_or_decline += 1  # predicted direction of the price change matches the actual direction
else:
                        pass  # predicted direction of the price change is opposite to the actual direction
correct = correct_stay + correct_rise_or_decline
if stay == 0:
correct_stay = 0
stay = 1
return round(correct / size, self._acc), \
round(correct_stay / stay, self._acc), \
round(correct_rise_or_decline / (size - stay), self._acc)
def evaluate_trend_2(self, y_true, y_pred):
size = len(y_true)
correct = 0
for i in range(size):
if abs(y_pred[i]) < eps:
if abs(y_true[i]) < eps:
correct += 1
else:
j = i
try:
while abs(y_true[j]) < eps:
j += 1
if y_pred[i] * y_true[j] > 0:
correct += 1
except:
pass
return round(correct / size, self._acc)
def evaluate_trend_with_delay(self, y_true, y_pred):
size = len(y_true)
correct = 0
for i in range(size):
j = i
try:
while abs(y_true[j]) < eps:
j += 1
if y_pred[i] * y_true[j] > 0:
correct += 1
except IndexError:
pass
return round(correct / size, self._acc)
def evaluate_divided_trend(self, y_true, y_pred, part_num=5):
size = len(y_true)
part_size = size // part_num
acc_list = []
for i in range(part_num):
part_y_true = y_true[i * part_size:(i + 1) * part_size]
part_y_pred = y_pred[i * part_size:(i + 1) * part_size]
acc_list.append(self.evaluate_trend_simple(part_y_true, part_y_pred))
return acc_list
def evaluate_one_hot_trend(self, y_true, y_pred):
size = len(y_true)
correct = 0
all = 0
for i in range(size):
v1, v2 = list(y_true[i]), list(y_pred[i])
try:
if v1.index(1) == v2.index(1):
correct += 1
all += 1
except ValueError:
print(v1, v2)
print(correct, all)
return round(correct / all, self._acc)
def evaluate_divided_one_hot_trend(self, y_true, y_pred, part_num=10):
size = len(y_true)
part_size = size // part_num
acc_list = []
for i in range(part_num):
part_y_true = y_true[i * part_size:(i + 1) * part_size]
part_y_pred = y_pred[i * part_size:(i + 1) * part_size]
acc_list.append(self.evaluate_one_hot_trend(part_y_true, part_y_pred))
return acc_list
def evaluate_mean_and_variance(self, true_mean_price, y_pred):
pred_mean, pred_std = y_pred[0], y_pred[1]
all = len(pred_mean)
correct = 0
for i in range(all):
if abs(true_mean_price[i] - pred_mean[i]) < pred_std[i] * z_95:
correct += 1
        return round(correct / all, self._acc) | zh | 0.954926 | # predicted no price change, but the actual price changed; wrong prediction # predicted a price change, but the actual price did not change; wrong prediction # predicted direction of the price change matches the actual direction # predicted direction of the price change is opposite to the actual direction | 3.187423 | 3 |
clu/scripts/legacy/xdg-runtime.py | fish2000/CLU | 1 | 6630024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os, sys
SSID = os.getenv('SECURITYSESSIONID')
from clu.constants.consts import (DEBUG, HOSTNAME, XDG_RUNTIME_BASE,
XDG_RUNTIME_DIR,
XDG_RUNTIME_MODE,
TEXTMATE)
from clu.constants.exceptions import FilesystemError
from clu.fs.filesystem import rm_rf, Directory
from clu.predicates import attr
from clu.repl.ansi import Text, print_ansi
from clu.sanitizer import utf8_decode
BASEDIR = XDG_RUNTIME_BASE
SYMLINK = XDG_RUNTIME_DIR
CURRENT = os.path.split(SYMLINK)[1]
def name_xdg_runtime_dir(namebase=SSID):
return f'{HOSTNAME.casefold()}-{namebase.casefold()}'
def make_xdg_runtime_dir(directory, mode=XDG_RUNTIME_MODE):
runtime_dir = directory.subdirectory(name_xdg_runtime_dir())
if runtime_dir.exists:
rm_rf(runtime_dir)
runtime_dir.makedirs(mode=mode)
return runtime_dir
def enumerate_dirs(directory):
return (pth for pth in iter(directory) \
if pth.startswith(f"{HOSTNAME.casefold()}-"))
def create_symlink(directory, runtime_dir):
if CURRENT in directory:
raise FilesystemError(f"Symlink โ{SYMLINK}โ already exists")
runtime_dir.symlink(directory.subpath(CURRENT))
return CURRENT in directory
def remove_symlink(directory):
if CURRENT in directory:
os.unlink(directory.subpath(CURRENT))
return CURRENT not in directory
def remove_existing_dirs(directory):
if len(directory) > 0:
if DEBUG:
return remove_symlink(directory)
else:
return all(rm_rf(pth) for pth in enumerate_dirs(directory)) \
and remove_symlink(directory)
return True
def print_launchd_plist():
import plistlib
plist_dumps = attr(plistlib, 'dumps', 'writePlistToString')
plist_dict = dict(Label="ost.xdg-runtime.script",
Program=sys.executable,
ProgramArguments=[__file__],
RunAtLoad=True,
KeepAlive=False)
print_ansi(utf8_decode(plist_dumps(plist_dict, sort_keys=False)),
color=(TEXTMATE and Text.NOTHING \
or Text.LIGHTCYAN_EX))
def create_xdg_runtime_dir():
""" Create the XDG_RUNTIME_DIR directory """
basedir = Directory(BASEDIR)
# First, clear existing directories:
if not remove_existing_dirs(basedir):
raise FilesystemError(f"Couldnโt clear subdirs from {basedir!s}")
# Next, make a new runtime directory:
runtime_dir = make_xdg_runtime_dir(basedir)
if not runtime_dir.exists:
raise FilesystemError(f"Couldnโt create XDG_RUNTIME_DIR {runtime_dir!s}")
# Next, symlink the new directory:
if not create_symlink(basedir, runtime_dir):
        raise FilesystemError(f"Couldnโt symlink XDG_RUNTIME_DIR {SYMLINK}")
return SYMLINK
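# Illustrative helper (a sketch, not part of the original workflow): one way to
# surface the freshly created directory to child processes, assuming the clu
# constants (XDG_RUNTIME_BASE, XDG_RUNTIME_DIR) resolve on this machine.
def export_xdg_runtime_dir():
    runtime_path = create_xdg_runtime_dir()
    os.environ['XDG_RUNTIME_DIR'] = runtime_path
    return runtime_path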
def main():
""" Main entry point for xdg-runtime.py script """
pass
if __name__ == '__main__':
print_launchd_plist()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os, sys
SSID = os.getenv('SECURITYSESSIONID')
from clu.constants.consts import (DEBUG, HOSTNAME, XDG_RUNTIME_BASE,
XDG_RUNTIME_DIR,
XDG_RUNTIME_MODE,
TEXTMATE)
from clu.constants.exceptions import FilesystemError
from clu.fs.filesystem import rm_rf, Directory
from clu.predicates import attr
from clu.repl.ansi import Text, print_ansi
from clu.sanitizer import utf8_decode
BASEDIR = XDG_RUNTIME_BASE
SYMLINK = XDG_RUNTIME_DIR
CURRENT = os.path.split(SYMLINK)[1]
def name_xdg_runtime_dir(namebase=SSID):
return f'{HOSTNAME.casefold()}-{namebase.casefold()}'
def make_xdg_runtime_dir(directory, mode=XDG_RUNTIME_MODE):
runtime_dir = directory.subdirectory(name_xdg_runtime_dir())
if runtime_dir.exists:
rm_rf(runtime_dir)
runtime_dir.makedirs(mode=mode)
return runtime_dir
def enumerate_dirs(directory):
return (pth for pth in iter(directory) \
if pth.startswith(f"{HOSTNAME.casefold()}-"))
def create_symlink(directory, runtime_dir):
if CURRENT in directory:
raise FilesystemError(f"Symlink โ{SYMLINK}โ already exists")
runtime_dir.symlink(directory.subpath(CURRENT))
return CURRENT in directory
def remove_symlink(directory):
if CURRENT in directory:
os.unlink(directory.subpath(CURRENT))
return CURRENT not in directory
def remove_existing_dirs(directory):
if len(directory) > 0:
if DEBUG:
return remove_symlink(directory)
else:
return all(rm_rf(pth) for pth in enumerate_dirs(directory)) \
and remove_symlink(directory)
return True
def print_launchd_plist():
import plistlib
plist_dumps = attr(plistlib, 'dumps', 'writePlistToString')
plist_dict = dict(Label="ost.xdg-runtime.script",
Program=sys.executable,
ProgramArguments=[__file__],
RunAtLoad=True,
KeepAlive=False)
print_ansi(utf8_decode(plist_dumps(plist_dict, sort_keys=False)),
color=(TEXTMATE and Text.NOTHING \
or Text.LIGHTCYAN_EX))
def create_xdg_runtime_dir():
""" Create the XDG_RUNTIME_DIR directory """
basedir = Directory(BASEDIR)
# First, clear existing directories:
if not remove_existing_dirs(basedir):
raise FilesystemError(f"Couldnโt clear subdirs from {basedir!s}")
# Next, make a new runtime directory:
runtime_dir = make_xdg_runtime_dir(basedir)
if not runtime_dir.exists:
raise FilesystemError(f"Couldnโt create XDG_RUNTIME_DIR {runtime_dir!s}")
# Next, symlink the new directory:
if not create_symlink(basedir, runtime_dir):
        raise FilesystemError(f"Couldnโt symlink XDG_RUNTIME_DIR {SYMLINK}")
return SYMLINK
def main():
""" Main entry point for xdg-runtime.py script """
pass
if __name__ == '__main__':
print_launchd_plist()
| en | 0.616878 | #!/usr/bin/env python # -*- coding: utf-8 -*- Create the XDG_RUNTIME_DIR directory # First, clear existing directories: # Next, make a new runtime directory: # Next, symlink the new directory: Main entry point for xdg-runtime.py script | 2.158765 | 2 |
dashboard/bq_export/bq_export/utils.py | Murka96/catapult | 0 | 6630025 | <reponame>Murka96/catapult
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import apache_beam as beam
## Copy of dashboard.common.utils.TestPath for google.cloud.datastore.key.Key
## rather than ndb.Key.
def TestPath(key):
if key.kind == 'Test':
# The Test key looks like ('Master', 'name', 'Bot', 'name', 'Test' 'name'..)
# Pull out every other entry and join with '/' to form the path.
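    # For example (hypothetical key): a flat path of
    # ('Master', 'ChromiumPerf', 'Bot', 'linux', 'Test', 'sunspider')
    # yields the test path 'ChromiumPerf/linux/sunspider'.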
return '/'.join(key.flat_path[1::2])
assert key.kind == 'TestMetadata' or key.kind == 'TestContainer'
return key.name
def FloatHack(f):
"""Workaround BQ streaming inserts not supporting inf and NaN values.
Somewhere between Beam and the BigQuery streaming inserts API infinities and
NaNs break if passed as is, apparently because JSON cannot represent these
values natively. Fortunately BigQuery appears happy to cast string values
into floats, so we just have to intercept these values and substitute strings.
Nones, and floats other than inf and NaN, are returned unchanged.
"""
if f is None:
return None
if math.isinf(f):
return 'inf' if f > 0 else '-inf'
if math.isnan(f):
return 'NaN'
return f
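# Quick illustrative check of the substitution behaviour (arbitrary values; not
# used by the pipeline itself):
if __name__ == '__main__':
  assert FloatHack(float('inf')) == 'inf'
  assert FloatHack(float('-inf')) == '-inf'
  assert FloatHack(float('nan')) == 'NaN'
  assert FloatHack(1.25) == 1.25 and FloatHack(None) is None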
def PrintCounters(pipeline_result):
"""Print pipeline counters to stdout.
Useful for seeing metrics when running pipelines directly rather than in
Dataflow.
"""
try:
metrics = pipeline_result.metrics().query()
except ValueError:
# Don't crash if there are no metrics, e.g. if we were run with
# --template_location, which stages a job template but does not run the job.
return
for counter in metrics['counters']:
print('Counter: ' + repr(counter))
print(' = ' + str(counter.result))
def IsoDateToYYYYMMDD(iso_date_str):
"""Convert ISO-formatted dates to a YYYYMMDD string."""
return iso_date_str[:4] + iso_date_str[5:7] + iso_date_str[8:10]
def _ElementToYYYYMMDD(element):
return IsoDateToYYYYMMDD(element['timestamp'])
def _GetPartitionNameFn(table_name, element_to_yyyymmdd_fn):
def TableWithPartitionSuffix(element):
# Partition names are the table name with a $yyyymmdd suffix, e.g.
# 'my_dataset.my_table$20200123'. So extract the suffix from the ISO-format
# timestamp value in this element.
return table_name + '$' + element_to_yyyymmdd_fn(element)
return TableWithPartitionSuffix
def WriteToPartitionedBigQuery(table_name,
schema,
element_to_yyyymmdd_fn=_ElementToYYYYMMDD,
**kwargs):
"""Return a WriteToBigQuery configured to load into a day-partitioned table.
This is useful for idempotent writing of whole days of data.
Instead of writing to the table, this writes to the individual partitions
instead (effectively treating each partition as an independent table).
Because the table is partitioned by day, this allows us to use the
WRITE_TRUNCATE option to regenerate the specified days without deleting the
rest of the table. So instead of passing a table name string as the
destination for WriteToBigQuery, we pass a function that dynamically
calculates the partition name.
Because we are loading data into the partition directly we must *not* set
'timePartitioning' in additional_bq_parameters, otherwise the load job will
fail with a kind of schema mismatch.
"""
return beam.io.WriteToBigQuery(
_GetPartitionNameFn(table_name, element_to_yyyymmdd_fn),
schema=schema,
method=beam.io.WriteToBigQuery.Method.FILE_LOADS,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
**kwargs)
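# Illustration of how the dynamic destination above resolves for one element,
# reusing the 'my_dataset.my_table$20200123' example from the docstring (the
# timestamp value is made up):
#
#   name_fn = _GetPartitionNameFn('my_dataset.my_table', _ElementToYYYYMMDD)
#   name_fn({'timestamp': '2020-01-23T00:00:00'})  # -> 'my_dataset.my_table$20200123'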
| # Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import apache_beam as beam
## Copy of dashboard.common.utils.TestPath for google.cloud.datastore.key.Key
## rather than ndb.Key.
def TestPath(key):
if key.kind == 'Test':
# The Test key looks like ('Master', 'name', 'Bot', 'name', 'Test' 'name'..)
# Pull out every other entry and join with '/' to form the path.
return '/'.join(key.flat_path[1::2])
assert key.kind == 'TestMetadata' or key.kind == 'TestContainer'
return key.name
def FloatHack(f):
"""Workaround BQ streaming inserts not supporting inf and NaN values.
Somewhere between Beam and the BigQuery streaming inserts API infinities and
NaNs break if passed as is, apparently because JSON cannot represent these
values natively. Fortunately BigQuery appears happy to cast string values
into floats, so we just have to intercept these values and substitute strings.
Nones, and floats other than inf and NaN, are returned unchanged.
"""
if f is None:
return None
if math.isinf(f):
return 'inf' if f > 0 else '-inf'
if math.isnan(f):
return 'NaN'
return f
def PrintCounters(pipeline_result):
"""Print pipeline counters to stdout.
Useful for seeing metrics when running pipelines directly rather than in
Dataflow.
"""
try:
metrics = pipeline_result.metrics().query()
except ValueError:
# Don't crash if there are no metrics, e.g. if we were run with
# --template_location, which stages a job template but does not run the job.
return
for counter in metrics['counters']:
print('Counter: ' + repr(counter))
print(' = ' + str(counter.result))
def IsoDateToYYYYMMDD(iso_date_str):
"""Convert ISO-formatted dates to a YYYYMMDD string."""
return iso_date_str[:4] + iso_date_str[5:7] + iso_date_str[8:10]
def _ElementToYYYYMMDD(element):
return IsoDateToYYYYMMDD(element['timestamp'])
def _GetPartitionNameFn(table_name, element_to_yyyymmdd_fn):
def TableWithPartitionSuffix(element):
# Partition names are the table name with a $yyyymmdd suffix, e.g.
# 'my_dataset.my_table$20200123'. So extract the suffix from the ISO-format
# timestamp value in this element.
return table_name + '$' + element_to_yyyymmdd_fn(element)
return TableWithPartitionSuffix
def WriteToPartitionedBigQuery(table_name,
schema,
element_to_yyyymmdd_fn=_ElementToYYYYMMDD,
**kwargs):
"""Return a WriteToBigQuery configured to load into a day-partitioned table.
This is useful for idempotent writing of whole days of data.
Instead of writing to the table, this writes to the individual partitions
instead (effectively treating each partition as an independent table).
Because the table is partitioned by day, this allows us to use the
WRITE_TRUNCATE option to regenerate the specified days without deleting the
rest of the table. So instead of passing a table name string as the
destination for WriteToBigQuery, we pass a function that dynamically
calculates the partition name.
Because we are loading data into the partition directly we must *not* set
'timePartitioning' in additional_bq_parameters, otherwise the load job will
fail with a kind of schema mismatch.
"""
return beam.io.WriteToBigQuery(
_GetPartitionNameFn(table_name, element_to_yyyymmdd_fn),
schema=schema,
method=beam.io.WriteToBigQuery.Method.FILE_LOADS,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
**kwargs) | en | 0.805071 | # Copyright (c) 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ## Copy of dashboard.common.utils.TestPath for google.cloud.datastore.key.Key ## rather than ndb.Key. # The Test key looks like ('Master', 'name', 'Bot', 'name', 'Test' 'name'..) # Pull out every other entry and join with '/' to form the path. Workaround BQ streaming inserts not supporting inf and NaN values. Somewhere between Beam and the BigQuery streaming inserts API infinities and NaNs break if passed as is, apparently because JSON cannot represent these values natively. Fortunately BigQuery appears happy to cast string values into floats, so we just have to intercept these values and substitute strings. Nones, and floats other than inf and NaN, are returned unchanged. Print pipeline counters to stdout. Useful for seeing metrics when running pipelines directly rather than in Dataflow. # Don't crash if there are no metrics, e.g. if we were run with # --template_location, which stages a job template but does not run the job. Convert ISO-formatted dates to a YYYYMMDD string. # Partition names are the table name with a $yyyymmdd suffix, e.g. # 'my_dataset.my_table$20200123'. So extract the suffix from the ISO-format # timestamp value in this element. Return a WriteToBigQuery configured to load into a day-partitioned table. This is useful for idempotent writing of whole days of data. Instead of writing to the table, this writes to the individual partitions instead (effectively treating each partition as an independent table). Because the table is partitioned by day, this allows us to use the WRITE_TRUNCATE option to regenerate the specified days without deleting the rest of the table. So instead of passing a table name string as the destination for WriteToBigQuery, we pass a function that dynamically calculates the partition name. Because we are loading data into the partition directly we must *not* set 'timePartitioning' in additional_bq_parameters, otherwise the load job will fail with a kind of schema mismatch. | 2.001976 | 2 |
gigfinder/shpwriter.py | TobiasRosskopf/gigfinder | 0 | 6630026 | #! python3.8
# -*- coding: utf-8 -*-
# File name: shpwriter.py
# Author: <NAME>
# Email: <EMAIL>
# Created: 27.11.2019
# Modified: 27.11.2019
"""
TODO:
Module's docstring
"""
# Standard imports
# ---
# Third party imports
import shapefile
def lat_lng_to_shp(list_lat_lng):
"""TODO: Docstring"""
with shapefile.Writer('out') as w:
w.field('NR', 'N')
w.field('LAT', 'F', decimal=10)
w.field('LNG', 'F', decimal=10)
nr = 0
for (lat, lng) in list_lat_lng:
nr += 1
# print("{0:.6f}, {1:.6f}".format(lat, lng))
w.point(lng, lat)
w.record(nr, lat, lng)
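# Example usage (illustrative coordinates): writes 'out.shp', 'out.shx' and
# 'out.dbf' in the current working directory.
if __name__ == "__main__":
    lat_lng_to_shp([(52.5200, 13.4050), (48.8566, 2.3522)])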
| #! python3.8
# -*- coding: utf-8 -*-
# File name: shpwriter.py
# Author: <NAME>
# Email: <EMAIL>
# Created: 27.11.2019
# Modified: 27.11.2019
"""
TODO:
Module's docstring
"""
# Standard imports
# ---
# Third party imports
import shapefile
def lat_lng_to_shp(list_lat_lng):
"""TODO: Docstring"""
with shapefile.Writer('out') as w:
w.field('NR', 'N')
w.field('LAT', 'F', decimal=10)
w.field('LNG', 'F', decimal=10)
nr = 0
for (lat, lng) in list_lat_lng:
nr += 1
# print("{0:.6f}, {1:.6f}".format(lat, lng))
w.point(lng, lat)
w.record(nr, lat, lng)
| en | 0.422899 | #! python3.8 # -*- coding: utf-8 -*- # File name: shpwriter.py # Author: <NAME> # Email: <EMAIL> # Created: 27.11.2019 # Modified: 27.11.2019 TODO: Module's docstring # Standard imports # --- # Third party imports TODO: Docstring # print("{0:.6f}, {1:.6f}".format(lat, lng)) | 3.328034 | 3 |
infrastructure/logger.py | Axerovor/CS285_Homework_ImitationLearning_hw1 | 0 | 6630027 | <reponame>Axerovor/CS285_Homework_ImitationLearning_hw1<filename>infrastructure/logger.py
import os
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class Logger:
def __init__(self, log_dir, n_logged_samples=10, summary_writer=None):
self._log_dir = log_dir
print('########################')
print('logging outputs to ', log_dir)
print('########################')
self._n_logged_samples = n_logged_samples
self._summ_writer = SummaryWriter(log_dir, flush_secs=1, max_queue=1)
def log_scalar(self, scalar, name, step_):
self._summ_writer.add_scalar('{}'.format(name), scalar, step_)
def log_scalars(self, scalar_dict, group_name, step, phase):
"""Will log all scalars in the same plot."""
self._summ_writer.add_scalars('{}_{}'.format(group_name, phase), scalar_dict, step)
def log_image(self, image, name, step):
assert (len(image.shape) == 3) # [C, H, W]
self._summ_writer.add_image('{}'.format(name), image, step)
def log_video(self, video_frames, name, step, fps=10):
assert len(video_frames.shape) == 5, "Need [N, T, C, H, W] input tensor for video logging!"
self._summ_writer.add_video('{}'.format(name), video_frames, step, fps=fps)
def log_paths_as_videos(self, paths, step, max_videos_to_save=2, fps=10, video_title='video'):
# reshape the rollouts
videos = [np.transpose(p['image_obs'], [0, 3, 1, 2]) for p in paths]
# max rollout length
max_videos_to_save = np.min([max_videos_to_save, len(videos)])
max_length = videos[0].shape[0]
for i in range(max_videos_to_save):
if videos[i].shape[0] > max_length:
max_length = videos[i].shape[0]
# pad rollouts to all be same length
for i in range(max_videos_to_save):
if videos[i].shape[0] < max_length:
padding = np.tile([videos[i][-1]], (max_length - videos[i].shape[0], 1, 1, 1))
videos[i] = np.concatenate([videos[i], padding], 0)
# log videos to tensorboard event file
print("Logging videos")
videos = np.stack(videos[:max_videos_to_save], 0)
self.log_video(videos, video_title, step, fps=fps)
def log_figures(self, figure, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
assert figure.shape[0] > 0, "Figure logging requires input shape [batch x figures]!"
self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step)
def log_figure(self, figure, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step)
def log_graph(self, array, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
im = plot_graph(array)
self._summ_writer.add_image('{}_{}'.format(name, phase), im, step)
def dump_scalars(self, log_path=None):
log_path = os.path.join(self._log_dir, "scalar_data.json") if log_path is None else log_path
self._summ_writer.export_scalars_to_json(log_path)
def flush(self):
self._summ_writer.flush()
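# Example usage (illustrative; log directory and values are arbitrary):
if __name__ == "__main__":
    demo_logger = Logger("/tmp/demo_logs", n_logged_samples=5)
    demo_logger.log_scalar(0.123, "train_loss", 0)
    demo_logger.log_scalars({"loss": 0.1, "acc": 0.9}, "metrics", step=0, phase="train")
    demo_logger.flush()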
| import os
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class Logger:
def __init__(self, log_dir, n_logged_samples=10, summary_writer=None):
self._log_dir = log_dir
print('########################')
print('logging outputs to ', log_dir)
print('########################')
self._n_logged_samples = n_logged_samples
self._summ_writer = SummaryWriter(log_dir, flush_secs=1, max_queue=1)
def log_scalar(self, scalar, name, step_):
self._summ_writer.add_scalar('{}'.format(name), scalar, step_)
def log_scalars(self, scalar_dict, group_name, step, phase):
"""Will log all scalars in the same plot."""
self._summ_writer.add_scalars('{}_{}'.format(group_name, phase), scalar_dict, step)
def log_image(self, image, name, step):
assert (len(image.shape) == 3) # [C, H, W]
self._summ_writer.add_image('{}'.format(name), image, step)
def log_video(self, video_frames, name, step, fps=10):
assert len(video_frames.shape) == 5, "Need [N, T, C, H, W] input tensor for video logging!"
self._summ_writer.add_video('{}'.format(name), video_frames, step, fps=fps)
def log_paths_as_videos(self, paths, step, max_videos_to_save=2, fps=10, video_title='video'):
# reshape the rollouts
videos = [np.transpose(p['image_obs'], [0, 3, 1, 2]) for p in paths]
# max rollout length
max_videos_to_save = np.min([max_videos_to_save, len(videos)])
max_length = videos[0].shape[0]
for i in range(max_videos_to_save):
if videos[i].shape[0] > max_length:
max_length = videos[i].shape[0]
# pad rollouts to all be same length
for i in range(max_videos_to_save):
if videos[i].shape[0] < max_length:
padding = np.tile([videos[i][-1]], (max_length - videos[i].shape[0], 1, 1, 1))
videos[i] = np.concatenate([videos[i], padding], 0)
# log videos to tensorboard event file
print("Logging videos")
videos = np.stack(videos[:max_videos_to_save], 0)
self.log_video(videos, video_title, step, fps=fps)
def log_figures(self, figure, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
assert figure.shape[0] > 0, "Figure logging requires input shape [batch x figures]!"
self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step)
def log_figure(self, figure, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
self._summ_writer.add_figure('{}_{}'.format(name, phase), figure, step)
def log_graph(self, array, name, step, phase):
"""figure: matplotlib.pyplot figure handle"""
im = plot_graph(array)
self._summ_writer.add_image('{}_{}'.format(name, phase), im, step)
def dump_scalars(self, log_path=None):
log_path = os.path.join(self._log_dir, "scalar_data.json") if log_path is None else log_path
self._summ_writer.export_scalars_to_json(log_path)
def flush(self):
self._summ_writer.flush() | en | 0.504333 | #######################') #######################') Will log all scalars in the same plot. # [C, H, W] # reshape the rollouts # max rollout length # pad rollouts to all be same length # log videos to tensorboard event file figure: matplotlib.pyplot figure handle figure: matplotlib.pyplot figure handle figure: matplotlib.pyplot figure handle | 2.693085 | 3 |
answer_selection/datasets/msmarco/reformat_corpus_from_idxs.py | shashiongithub/Document-Modeling-with-External-Information | 0 | 6630028 | ####################################
# Author: <NAME>
# Date: July 2017
# Project: Document Modeling with External Attention for Sentence Extraction
####################################
'''
msmarco dataset
builds data set with new splits |train', val', test
from pre-calculated indexes
'''
import os,sys
import pdb
import nltk
import io
from utils import *
if __name__ == "__main__":
splits = ['training','validation','test']
force = False
vocab = get_modified_vocab(force=force)
isf_dict,idf_dict = get_isf_idf_dict(vocab)
stopwords = get_stopwords_ids(vocab)
train_idxs = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"training.indexes"),'r').read().strip('\n').split('\n')
val_idxs = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"validation.indexes"),'r').read().strip('\n').split('\n')
train_idxs = [int(x) for x in train_idxs]
val_idxs = [int(x) for x in val_idxs]
data_gen = read_data("training")
data = []
for sample in data_gen:
data.append(sample)
data_gen = read_data("validation")
data_test = []
for sample in data_gen:
data_test.append(sample)
for idxs, corpus_split in zip([train_idxs,val_idxs,range(len(data_test))],splits):
mx_doc_len = 0
mx_sent_len = 0
# outputs files
docs_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.doc" % (corpus_split)), 'w')
questions_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.question" % (corpus_split)), 'w')
labels_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.label" % (corpus_split)), 'w')
cnt_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.cnt.scores" % (corpus_split)), 'w')
isf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.isf.scores" % (corpus_split)), 'w')
locisf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.locisf.scores" % (corpus_split)), 'w')
idf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.idf.scores" % (corpus_split)), 'w')
# write to output files
count = 0
nempties = 0
for _id in idxs:
if corpus_split=='test':
sample_id,sents,question,labels = data_test[_id].unpack()
else:
sample_id,sents,question,labels = data[_id].unpack()
fullpath_doc_name = os.path.join(BASE_DIR,corpus_split,sample_id+'.doc')
ref_sents = words_to_id(sents,vocab)
ref_question = words_to_id([question],vocab)[0]
cnt,isf,idf,locisf = eval_cnts(ref_question,ref_sents,isf_dict,idf_dict,stopwords)
mx_doc_len = max(mx_doc_len,len(ref_sents))
# write doc
docs_out.write(fullpath_doc_name+'\n')
for i,sent in enumerate(ref_sents):
docs_out.write(' '.join([str(wid) for wid in sent]) +'\n')
mx_sent_len = max(mx_sent_len,len(sent))
docs_out.write('\n')
# write question
questions_out.write(fullpath_doc_name+'\n')
questions_out.write(' '.join([str(wid) for wid in ref_question]) +'\n\n')
# write labels
labels_out.write(fullpath_doc_name+'\n')
labels_out.write('\n'.join([str(lbl) for lbl in labels]) + '\n\n')
# writing sentence cnt scores
cnt_scores_out.write(fullpath_doc_name+'\n')
cnt_scores_out.write('\n'.join(["%d" % x for x in cnt]) + '\n\n')
# writing sentence isf scores
isf_scores_out.write(fullpath_doc_name+'\n')
isf_scores_out.write('\n'.join(["%.6f" % x for x in isf]) + '\n\n')
# writing sentence idf scores
idf_scores_out.write(fullpath_doc_name+'\n')
idf_scores_out.write('\n'.join(["%.6f" % x for x in idf]) + '\n\n')
# writing sentence local isf scores
locisf_scores_out.write(fullpath_doc_name+'\n')
locisf_scores_out.write('\n'.join(["%.6f" % x for x in locisf]) + '\n\n')
## INSERT QUERY EXPANSION HERE
if count%10000 == 0:
print "-->doc_count:",count
count +=1
print("%s: %d" %(corpus_split,count))
print("Max document length (nsents) in %s set:%d" % (corpus_split,mx_doc_len))
print("Max sentence length (nwords) in %s set:%d" % (corpus_split,mx_sent_len))
print("# empty new labels: ",nempties)
| ####################################
# Author: <NAME>
# Date: July 2017
# Project: Document Modeling with External Attention for Sentence Extraction
####################################
'''
msmarco dataset
builds data set with new splits |train', val', test
from pre-calculated indexes
'''
import os,sys
import pdb
import nltk
import io
from utils import *
if __name__ == "__main__":
splits = ['training','validation','test']
force = False
vocab = get_modified_vocab(force=force)
isf_dict,idf_dict = get_isf_idf_dict(vocab)
stopwords = get_stopwords_ids(vocab)
train_idxs = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"training.indexes"),'r').read().strip('\n').split('\n')
val_idxs = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"validation.indexes"),'r').read().strip('\n').split('\n')
train_idxs = [int(x) for x in train_idxs]
val_idxs = [int(x) for x in val_idxs]
data_gen = read_data("training")
data = []
for sample in data_gen:
data.append(sample)
data_gen = read_data("validation")
data_test = []
for sample in data_gen:
data_test.append(sample)
for idxs, corpus_split in zip([train_idxs,val_idxs,range(len(data_test))],splits):
mx_doc_len = 0
mx_sent_len = 0
# outputs files
docs_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.doc" % (corpus_split)), 'w')
questions_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.question" % (corpus_split)), 'w')
labels_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.label" % (corpus_split)), 'w')
cnt_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.cnt.scores" % (corpus_split)), 'w')
isf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.isf.scores" % (corpus_split)), 'w')
locisf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.locisf.scores" % (corpus_split)), 'w')
idf_scores_out = open(os.path.join(PREPROC_DATA_DIR,'msmarco',"%s.idf.scores" % (corpus_split)), 'w')
# write to output files
count = 0
nempties = 0
for _id in idxs:
if corpus_split=='test':
sample_id,sents,question,labels = data_test[_id].unpack()
else:
sample_id,sents,question,labels = data[_id].unpack()
fullpath_doc_name = os.path.join(BASE_DIR,corpus_split,sample_id+'.doc')
ref_sents = words_to_id(sents,vocab)
ref_question = words_to_id([question],vocab)[0]
cnt,isf,idf,locisf = eval_cnts(ref_question,ref_sents,isf_dict,idf_dict,stopwords)
mx_doc_len = max(mx_doc_len,len(ref_sents))
# write doc
docs_out.write(fullpath_doc_name+'\n')
for i,sent in enumerate(ref_sents):
docs_out.write(' '.join([str(wid) for wid in sent]) +'\n')
mx_sent_len = max(mx_sent_len,len(sent))
docs_out.write('\n')
# write question
questions_out.write(fullpath_doc_name+'\n')
questions_out.write(' '.join([str(wid) for wid in ref_question]) +'\n\n')
# write labels
labels_out.write(fullpath_doc_name+'\n')
labels_out.write('\n'.join([str(lbl) for lbl in labels]) + '\n\n')
# writing sentence cnt scores
cnt_scores_out.write(fullpath_doc_name+'\n')
cnt_scores_out.write('\n'.join(["%d" % x for x in cnt]) + '\n\n')
# writing sentence isf scores
isf_scores_out.write(fullpath_doc_name+'\n')
isf_scores_out.write('\n'.join(["%.6f" % x for x in isf]) + '\n\n')
# writing sentence idf scores
idf_scores_out.write(fullpath_doc_name+'\n')
idf_scores_out.write('\n'.join(["%.6f" % x for x in idf]) + '\n\n')
# writing sentence local isf scores
locisf_scores_out.write(fullpath_doc_name+'\n')
locisf_scores_out.write('\n'.join(["%.6f" % x for x in locisf]) + '\n\n')
## INSERT QUERY EXPANSION HERE
if count%10000 == 0:
print "-->doc_count:",count
count +=1
print("%s: %d" %(corpus_split,count))
print("Max document length (nsents) in %s set:%d" % (corpus_split,mx_doc_len))
print("Max sentence length (nwords) in %s set:%d" % (corpus_split,mx_sent_len))
print("# empty new labels: ",nempties)
| en | 0.644002 | #################################### # Author: <NAME> # Date: July 2017 # Project: Document Modeling with External Attention for Sentence Extraction #################################### msmarco dataset builds data set with new splits |train', val', test from pre-calculated indexes # outputs files # write to output files # write doc # write question # write labels # writing sentence cnt scores # writing sentence isf scores # writing sentence idf scores # writing sentence local isf scores ## INSERT QUERY EXPANSION HERE | 2.443655 | 2 |
cogs/filters.py | Termed/AlphaWolf | 1 | 6630029 | import discord
import os
from discord.ext import commands
from discord.utils import get
class onMessage(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
message_content = message.content
if message_content.isupper():
embed = discord.Embed(title="Warning", description="{}, Please refrain from using too many capital letters".format(message.author.mention))
await message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(onMessage(bot))
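# Note (illustrative): in the bot's entry-point module this cog would normally
# be loaded with something like bot.load_extension("cogs.filters"), assuming the
# file lives in a "cogs" package.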
| import discord
import os
from discord.ext import commands
from discord.utils import get
class onMessage(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
message_content = message.content
if message_content.isupper():
embed = discord.Embed(title="Warning", description="{}, Please refrain from using too many capital letters".format(message.author.mention))
await message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(onMessage(bot))
| none | 1 | 2.763339 | 3 |
|
tracing.py | anyface/Cpp-Pytorch-Model-Imagenet | 1 | 6630030 | <filename>tracing.py
import torch
import torchvision
from torchvision import transforms
from PIL import Image
from time import time
import numpy as np
# An instance of your model.
model = torchvision.models.resnet18(pretrained=True)
model.eval()
# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)
# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("model.pt")
# evalute time
batch = torch.rand(64, 3, 224, 224)
start = time()
output = traced_script_module(batch)
stop = time()
print(str(stop-start) + "s")
# read image
image = Image.open('1.jpg').convert('RGB')
default_transform = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
image = default_transform(image)
# forward
output = traced_script_module(image.unsqueeze(0))
print(output[0, :10])
# print top-5 predicted labels
labels = np.loadtxt('synset_words.txt', dtype=str, delimiter='\n')
data_out = output[0].data.numpy()
sorted_idxs = np.argsort(-data_out)
for i,idx in enumerate(sorted_idxs[:5]):
print('top-%d label: %s, score: %f' % (i, labels[idx], data_out[idx]))
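# Sanity check (illustrative): reload the serialized TorchScript module from
# "model.pt"; the C++ front end would call torch::jit::load on the same file.
reloaded = torch.jit.load("model.pt")
recheck = reloaded(image.unsqueeze(0))
print(recheck[0, :10])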
| <filename>tracing.py
import torch
import torchvision
from torchvision import transforms
from PIL import Image
from time import time
import numpy as np
# An instance of your model.
model = torchvision.models.resnet18(pretrained=True)
model.eval()
# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)
# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("model.pt")
# evalute time
batch = torch.rand(64, 3, 224, 224)
start = time()
output = traced_script_module(batch)
stop = time()
print(str(stop-start) + "s")
# read image
image = Image.open('1.jpg').convert('RGB')
default_transform = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
image = default_transform(image)
# forward
output = traced_script_module(image.unsqueeze(0))
print(output[0, :10])
# print top-5 predicted labels
labels = np.loadtxt('synset_words.txt', dtype=str, delimiter='\n')
data_out = output[0].data.numpy()
sorted_idxs = np.argsort(-data_out)
for i,idx in enumerate(sorted_idxs[:5]):
print('top-%d label: %s, score: %f' % (i, labels[idx], data_out[idx]))
| en | 0.744288 | # An instance of your model. # An example input you would normally provide to your model's forward() method. # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. # evalute time # read image # forward # print top-5 predicted labels | 2.568532 | 3 |
cogs/redditmoderation.py | niztg/DevilBot | 1 | 6630031 | <filename>cogs/redditmoderation.py
import discord
from discord.ext import commands
import praw, prawcore
from .utils import checks
class redditModerationCog(commands.Cog):
"""r/Overwatch_Memes Moderation related Commands"""
def __init__(self, bot):
self.bot = bot
self.db_conn = bot.db_conn
self.colour = 0xff9300
self.footer = 'Bot developed by DevilJamJar#0001\nWith a lot of help from โฟnizcomix#7532'
self.thumb = 'https://styles.redditmedia.com/t5_3el0q/styles/communityIcon_iag4ayvh1eq41.jpg'
@commands.command(pass_context=True, aliases=['lb'], invoke_without_command=True)
@checks.check_mod_server()
async def leaderboard(self, ctx, amount: int = 10):
"""Displays moderation leaderboard"""
if 0 < amount < 15:
pass
else:
return await ctx.send('The limit needs to be between `1` and `14`')
async with ctx.typing():
record = await self.db_conn.fetch(
'SELECT "Mod_Name", ("Flair_Removals" * 5 + "Regular_Removals") AS Removals FROM "ModStatsAug" ORDER BY Removals DESC LIMIT $1',
amount)
embed = discord.Embed(title=f'Monthly Top {amount} Moderator Actions Leaderboard', color=0xff9300)
for row in record:
embed.add_field(
name=row[0],
value=row[1],
inline=False
)
embed.set_thumbnail(url=self.thumb)
embed.set_footer(text=self.footer)
await ctx.send(embed=embed)
@commands.command(aliases=['stat', 'overview'])
@checks.check_mod_server()
async def stats(self, ctx, *, user:str=None):
"""Displays mod stats for a user, or for you."""
if not user:
user = ctx.author.display_name
user = user.lower()
async with ctx.typing():
record = await self.db_conn.fetch(
'SELECT * FROM "ModStatsAug" WHERE "Mod_Name" = $1', user)
if not len(record):
return await ctx.send('Specified user `not found.` Please note that the default user is your `nickname` if another user is not specified.')
embed=discord.Embed(title=f'Monthly Moderator Stats for u/{record[0][0]}', color=self.colour)
embed.add_field(name='Flair removals:', value=f'{record[0][1]}', inline=False)
embed.add_field(name='Regular removals:', value=f'{record[0][2]}', inline=False)
embed.add_field(name='Total action count:', value=f'{int(record[0][1]) * 5 + int(record[0][2])}', inline=False)
return await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(redditModerationCog(bot))
| <filename>cogs/redditmoderation.py
import discord
from discord.ext import commands
import praw, prawcore
from .utils import checks
class redditModerationCog(commands.Cog):
"""r/Overwatch_Memes Moderation related Commands"""
def __init__(self, bot):
self.bot = bot
self.db_conn = bot.db_conn
self.colour = 0xff9300
self.footer = 'Bot developed by DevilJamJar#0001\nWith a lot of help from โฟnizcomix#7532'
self.thumb = 'https://styles.redditmedia.com/t5_3el0q/styles/communityIcon_iag4ayvh1eq41.jpg'
@commands.command(pass_context=True, aliases=['lb'], invoke_without_command=True)
@checks.check_mod_server()
async def leaderboard(self, ctx, amount: int = 10):
"""Displays moderation leaderboard"""
if 0 < amount < 15:
pass
else:
return await ctx.send('The limit needs to be between `1` and `14`')
async with ctx.typing():
record = await self.db_conn.fetch(
'SELECT "Mod_Name", ("Flair_Removals" * 5 + "Regular_Removals") AS Removals FROM "ModStatsAug" ORDER BY Removals DESC LIMIT $1',
amount)
embed = discord.Embed(title=f'Monthly Top {amount} Moderator Actions Leaderboard', color=0xff9300)
for row in record:
embed.add_field(
name=row[0],
value=row[1],
inline=False
)
embed.set_thumbnail(url=self.thumb)
embed.set_footer(text=self.footer)
await ctx.send(embed=embed)
@commands.command(aliases=['stat', 'overview'])
@checks.check_mod_server()
async def stats(self, ctx, *, user:str=None):
"""Displays mod stats for a user, or for you."""
if not user:
user = ctx.author.display_name
user = user.lower()
async with ctx.typing():
record = await self.db_conn.fetch(
'SELECT * FROM "ModStatsAug" WHERE "Mod_Name" = $1', user)
if not len(record):
return await ctx.send('Specified user `not found.` Please note that the default user is your `nickname` if another user is not specified.')
embed=discord.Embed(title=f'Monthly Moderator Stats for u/{record[0][0]}', color=self.colour)
embed.add_field(name='Flair removals:', value=f'{record[0][1]}', inline=False)
embed.add_field(name='Regular removals:', value=f'{record[0][2]}', inline=False)
embed.add_field(name='Total action count:', value=f'{int(record[0][1]) * 5 + int(record[0][2])}', inline=False)
return await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(redditModerationCog(bot))
| en | 0.808576 | r/Overwatch_Memes Moderation related Commands #0001\nWith a lot of help from โฟnizcomix#7532' Displays moderation leaderboard Displays mod stats for a user, or for you. | 2.6387 | 3 |
heroespy/util/util.py | DanRyanIrish/HEROES-Telescope | 0 | 6630032 | import numpy as np
import matplotlib.pyplot as plt
import sys
import platform
import datetime
from scipy import constants as con
from scipy.special import kv
import urllib2 as url
from bs4 import BeautifulSoup
import os
import tempfile
import shutil
from sunpy.time import parse_time
from scipy.integrate import quad
from scipy import interpolate
#data_dir = os.path.join(os.path.dirname(heroes.__file__), "util", "data")
data_dir = '/Users/schriste/Dropbox/python/heroes/util/data/'
_msis_atmosphere_file = None
class Fit_data:
"""A class for data."""
def __init__(self, x, y, xtitle, ytitle, name, xunits, yunits, log):
self.xrange = [x.min(), x.max()]
self.yrange = [y.min(), y.max()]
self.x = x
self.y = y
self.xtitle = xtitle
self.ytitle = ytitle
self.log = log
self.name = name
self.xunits = xunits
self.yunits = yunits
def func(self, x):
if self.log[0] == 1:
fit_x = np.log10(self.x)
else: fit_x = self.x
if self.log[1] == 1:
fit_y = np.log10(self.y)
fill_value = -100
else:
fit_y = self.y
fill_value = 0
f = interpolate.interp1d(fit_x, fit_y, kind = 3, bounds_error=False, fill_value = fill_value)
x_in = x
if self.log[0] == 1:
x_in = 10 ** x_in
if self.log[1] == 1:
f1 = lambda y: 10 ** f(y)
else:
f1 = f
return f1(x_in)
def show(self):
ax = plt.subplot(111)
if self.log is not None:
if self.log[0] == 1:
ax.set_xscale('log')
if self.log[1] == 1:
ax.set_yscale('log')
ax.set_ylabel(self.ytitle + ' [' + self.yunits + ']')
ax.set_xlabel(self.xtitle + ' [' + self.xunits + ']')
ax.set_title(self.name)
num_points = self.x.shape[0]
fit_x = np.linspace(self.xrange[0], self.xrange[1], num = num_points*10)
fit_y = self.func(fit_x)
ax.plot(fit_x, fit_y, "-", color = 'blue')
ax.plot(self.x, self.y, "o", color = 'red')
plt.show()
# densities
# source; wolframalpha
density = {"air stp": 0.001204, "si": 2.33, "be": 1.848, "water": 1, "cadmium telluride": 6.2,
"cesium iodide": 4.51, "gallium arsenide": 5.31, "mercuric iodide": 6.36, "lead glass": 6.22}
'''The X-ray transmission data comes from NIST
(http://www.nist.gov/pml/data/xraycoef/index.cfm)'''
def xray_transmission(path_length_m, energy_kev, material='air stp'):
"""Provide the X-ray transmission (0 to 1) in given a path length in meters at
a particular energy given in keV through a material with a constant density."""
coefficients = mass_attenuation_coefficicent(energy_kev, material=material)
transmission = np.exp(-coefficients * density_cgs.get(material) * path_length_m * 100.0)
return transmission
def load_mass_attenuation_coefficients(material='air_dry_near_sea_level'):
'''Load the mass attenuation coefficients (cm2/g) and mass energy-absorption coefficients (cm2/g)
from the data files as a function of energy (MeV). The allowed materials are listed in density.'''
filename = data_dir + 'XrayMassCoef_' + material.replace(' ', '_').capitalize() + '.txt'
data = np.genfromtxt(filename, comments = ';', missing_values = ' ', skip_header = 8)
return data
def mass_attenuation_coefficicent(energy_kev, material):
"""Returns the mass attenuation coefficient at an energy given in keV"""
data = load_mass_attenuation_coefficients(material)
# data is better behaved in log space
data_energy_kev = np.log10(data[:,0]*1000)
data_attenuation_coeff = np.log10(data[:,1])
f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff)
return 10 ** f(np.log10(energy_kev))
def plot_mass_attenuation_coefficient(material='air_dry_near_sea_level'):
'''Plot the mass the mass attenuation coefficients and mass energy-absorption
coefficients for a named material. See load_mass_attenuation_coefficients definition
for list of allowed materials.'''
data = load_mass_attenuation_coefficients(material=material)
energy_kev = data[:,0]
mass_atten_coeff = data[:,1]
mass_energy_atten_coeff = data[:,2]
ax = plt.subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Energy [keV]')
ax.set_title(material.replace('_', ' ').capitalize())
ax.set_ylabel(r'Mass Attenuation Coefficient [cm$^2$/g]')
ax.plot(energy_kev, mass_atten_coeff)
ax.plot(energy_kev, mass_energy_atten_coeff)
ax.legend((r'$\mu/\rho$', r'$\mu_{en}/\rho$'))
plt.show()
def xray_absorption(energy_kev, thickness_um, material='si'):
'''Calculate the xray absorption in a material with a thickess (given in microns).'''
return 1-xray_transmission(energy_kev, thickness_um/1e6, material=material)
def detector_efficiency(energy_kev, thickness_um, material='si'):
'''Calculate the detector quantum efficiency (in percent) at a given energy'''
return xray_absorption(energy_kev, thickness_um, material=material)*100.0
def load_attenuation_length(material='si'):
filename = data_dir + material + '_xray_atten_length.txt'
data = np.genfromtxt(filename, comments = ';', missing_values = ' ', skip_header = 3)
return data
def xyplot(x, y, ytitle = None, xtitle = None, title = None, log = None):
fig = plt.figure()
ax = fig.add_subplot(111)
if log is not None:
if log[0] == 1:
ax.set_xscale('log')
if log[1] == 1:
ax.set_yscale('log')
if ytitle is not None:
ax.set_ylabel(ytitle)
if xtitle is not None:
ax.set_xlabel(xtitle)
if title is not None:
ax.set_title(title)
ax.plot(x, y)
# plt.show()
return fig
def oplot(x, y, plt):
ax = plt.gca()
ax.plot(x, y)
plt.show()
def thermal_bremsstrahlung_thin(energy_kev, kt):
"""This function calculates the optically thin continuum thermal bremsstrahlung
photon flux incident on the Earth from an isothermal plasma on the Sun.
Normalization is for an emission measure on the Sun of 1.e49 cm-3
function brem_49,e,kt , verbose=verbose
if keyword_set(verbose) then print, 'Differential Bremsstrahlung '+$
'spectrum at Earth for emission measure of 1.e49.'
;
kt0 =( kt(0) > 0.1) ;protect against vectors for kt
result = (1.e8/9.26) * float(acgaunt(12.3985/E, KT0/.08617)) *exp(-(E/KT0 < 50)) /E / KT0^.5
return, result(*)
"""
# kt0 =( kt(0) > 0.1) ; protect against vectors for kt
#result = (1.e8/9.26) * float(acgaunt(12.3985/E, KT0/.08617)) *exp(-(E/KT0 < 50)) /E / KT0^.5
result = (1.e8/9.26) * gaunt_factor(energy_kev, kt) * 1 / (energy_kev * np.sqrt(kt)) * np.exp(- (energy_kev / kt))
return result
def rgaunt_factor(energy_kev, kt, Z=1):
"""Analytic fitting formula for the non-relativistivic gaunt factor
Source
======
Itoh et al. 2000, ApJSS, 128, 125
"""
k = con.physical_constants.get('Boltzmann constant')[0]
electron_volt = con.physical_constants.get('electron volt')[0]
# units
temp_K_to_kev_conversion = k / electron_volt / 1000
data_gaunt = np.genfromtxt(data_dir + 'itoh.txt')
coefficients = data_gaunt[Z-1].reshape(11,11)
u = energy_kev / kt
temperature_K = kt / temp_K_to_kev_conversion
gaunt_factor = 0
U = (np.log10(u) + 1.5) / 2.5
t = (np.log10(temperature_K) - 7.25) / 1.25
for j in range(11):
for i in range(11):
gaunt_factor += coefficients[i,j] * (t ** i) * (U ** j)
return gaunt_factor
def nrgaunt_factor(energy_kev, kt, Z=1):
"""Analytic fitting formula for the non-relativistivic gaunt factor
Source
======
Itoh et al. 2000, ApJSS, 128, 125
"""
k = con.physical_constants.get('Boltzmann constant')[0]
electron_volt = con.physical_constants.get('electron volt')[0]
# units
temp_K_to_kev_conversion = k / electron_volt / 1000
coefficients = np.genfromtxt(data_dir + 'itohnr.txt', delimiter = ",")
u = energy_kev / kt
temperature_K = kt / temp_K_to_kev_conversion
print(temperature_K)
U = (np.log10(u) + 1.5) / 2.5
g2 = Z ** 2 * 1.579e5 / temperature_K
G = (np.log10(g2) + 0.5) / 2.5
gaunt_factor = 0
for j in range(11):
for i in range(11):
gaunt_factor += coefficients[i,j] * (G ** i) * (U ** j)
return gaunt_factor
def effective_area(energy_kev):
"""Returns the HEROES effective area in cm^2 at a particular energy given in keV."""
data_energy_kev = np.arange(20,80,10)
data_effective_area = np.array([80,75,60,40,15,5])
f = interpolate.interp1d(data_energy_kev, data_effective_area)
return f(energy_kev)
def effective_area2_fitdata():
number_of_modules = 8
data = np.genfromtxt('/Users/schriste/Dropbox/python/heroes/util/data/heroes_effective_area_0am5am.txt', comments=';', names=['x','y1','y2'])
result = Fit_data(data['x'], number_of_modules * data['y1'], 'Energy', 'Effective Area', 'HEROES', 'keV', 'cm$^{2}$', log = [0,0])
return result
def effective_area2(energy_kev):
fit_data = effective_area2_fitdata()
return fit_data.func(energy_kev)
def detector_background(energy_kev):
data_energy_kev = np.arange(20,80,10)
data_det_background = np.array([2,2,2.5,3,3,3]) * 0.001
f = interpolate.interp1d(data_energy_kev, data_det_background)
return f(energy_kev)
def atmo_transmission(energy_kev):
data_energy_kev = np.arange(20,80,10)
data_atmo_transmission = np.array([0.26, 2.0, 3.2, 3.7, 4.2, 4.5]) * 0.1
f = interpolate.interp1d(data_energy_kev, data_atmo_transmission)
return f(energy_kev)
def detector_efficiency(energy_kev):
data_energy_kev = np.arange(20,80,10)
data_detector_efficiency = np.array([9.8, 9.2, 9.9, 9.7, 8.9, 7.7]) * 0.1
f = interpolate.interp1d(data_energy_kev, data_detector_efficiency)
return f(energy_kev)
def sensitivity(background_counts, flux_to_counts_conversion, statistical_significance=5):
"""Calculates the sensitivity of an instrument using the following formula
K = signal / sqrt(signal + background)
where K is the significance (in sigma). This equation solves to
Sensitivity Flux limit = (K^2 + sqrt(K^4 - 4*background)) / 2 * flux_to_counts_conversion
"""
result = 1/(2 * flux_to_counts_conversion) * statistical_significance ** 2 + np.sqrt( statistical_significance ** 4 - 4 * background_counts )
return result
def sensitivity(integration_time, de = 5, statistical_sig = 5):
"""Returns the HEROES sensitivity at a particular energy given in keV.
de is the width of the energy interval in keV"""
energy_kev = np.arange(20,80,10)
det_eff = detector_background(energy_kev)
det_background = detector_background(energy_kev)
eff_area = effective_area(energy_kev)
det_efficiency = detector_efficiency(energy_kev)
transmission = atmo_transmission(energy_kev)
background_area = 8 * 0.04
fraction_flux = 0.8
a = statistical_sig ** 2 + np.sqrt(statistical_sig ** 4 + 4*statistical_sig ** 2 *
det_background * de * background_area * integration_time)
b = 2 * eff_area * de * integration_time * transmission * det_eff * fraction_flux
return a/b
def get_msis_atmosphere_density(latitude=55, longitude=45, reload=False, date = '2000/01/01 01:00:00'):
'''Downloads the MSIS atmospheric model from the web at a given longitude, latitude
and returns the density (g/cm^3) as a function of height (km). The data is saved
in a temporary file and further calls use this to save time'''
global _msis_atmosphere_file
t = parse_time(date)
vars = [5,11] # 5 is height, 11 is density g/cm^3
if (_msis_atmosphere_file == None) or (reload is True):
temp = tempfile.NamedTemporaryFile(delete=False)
_msis_atmosphere_file = temp.name
addr = 'http://omniweb.gsfc.nasa.gov/cgi/vitmo/vitmo_model.cgi'
data = u'model=msis&year=' + str(t.year) + '&month=' + str(t.month).zfill(2)
data += '&day=' + str(t.day).zfill(2) + '&time_flag=0&hour='
data += str(t.hour).zfill(2) + '&geo_flag=0.&latitude'
data += str(latitude) + '&longitude=' + str(longitude)
data += u'&height=100.&profile=1&start=0.&stop=1000.&step=20.&f10_7=&f10_7_3=&ap=&format=0&'
data += 'vars=0' + str(vars[0]) + '&vars=0' + str(vars[1])
a = url.Request(addr, data)
req = url.urlopen(a)
with open(temp.name, 'wb') as fp:
shutil.copyfileobj(req, fp)
data = np.genfromtxt(_msis_atmosphere_file, skip_header = 18, skip_footer = 16, dtype='f8,f8', names=['x','y'])
return data
def atmosphere_density_fitdata(date = '2000/01/01 01:00:00', latitude=55, longitude=45):
data = get_msis_atmosphere_density(date=date, latitude=latitude, longitude=longitude)
f = Fit_data(1e5 * data['x'], data['y'], 'Height', 'density', 'MSIS', 'cm', 'g cm$^{-3}$', log = [0,1])
return f
def atmosphere_density(height_km, date = '2000/01/01 01:00:00', latitude=55, longitude=45):
'''
    Returns the atmospheric density (in g cm^-3) at a specific height (given in km).
Source
------
http://omniweb.gsfc.nasa.gov/vitmo/msis_vitmo.html
'''
fitdata = atmosphere_density_fitdata(date = date, latitude = latitude, longitude = longitude)
    # the fitted profile is tabulated against height in cm, so convert km to cm
    return fitdata.func(height_km * 1e5)
def atmosphere_mass(height_km):
'''Returns the amount of mass in a 1 sq cm column of air above a height given in km'''
mass_flux = quad(atmosphere_density_fitdata().func, height_km * 1e5, 1e8)[0]
return mass_flux
def xray_transmission_in_atmosphere(energy_kev, height_km, view_angle=90, data = None):
"""Find the total mass of atmosphere above a height given in km"""
co = mass_attenuation_coefficicent(energy_kev, material='air stp')
mass_flux = atmosphere_mass(height_km)
return np.exp(-co * mass_flux * np.sin(np.deg2rad(view_angle)) )
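# Illustrative chain of the atmospheric functions above (a sketch with assumed example
# inputs; it requires the MSIS download in get_msis_atmosphere_density to succeed):
# density profile -> overlying column mass -> X-ray transmission.
def _atmosphere_transmission_example():
    """Estimate the vertical transmission of 40 keV X-rays from an assumed balloon
    altitude of 40 km, together with the overlying column depth in g/cm^2."""
    altitude_km = 40.0
    energy_kev = 40.0
    column_g_cm2 = atmosphere_mass(altitude_km)
    transmission = xray_transmission_in_atmosphere(energy_kev, altitude_km, view_angle=90)
    return transmission, column_g_cm2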
def foxsi_effective_area_fitdata():
data = np.genfromtxt(data_dir + 'foxsi_effective_area.txt', skip_header = 1, delimiter = ',', dtype='f8,f8,f8', names=['x','y1','y2'])
f = Fit_data(data['x'], data['y1'], 'Energy', 'Effective Area', 'FOXSI', 'keV', 'cm$^{2}$', log = [0,0])
return f
def heroes_effective_area(num_shells=14):
data = np.genfromtxt(data_dir + 'heroes_aeff_' + str(num_shells) + 'shells.txt', skip_header = 2)
x = data[:,0]
y = np.arange(0,13)
    z = data[:, 1:]
    # NOTE: assumed return of the parsed columns; the original function did not return a value
    return x, y, z
def plot_foxsi_effarea_compare():
data = np.genfromtxt(data_dir + 'foxsi_effective_area.txt', skip_header = 1, delimiter = ',')
energy_kev = data[:,0]
foxsi1_cm2 = data[:,1]
foxsi2_cm2 = data[:,2]
ax = plt.subplot(111)
#ax.set_xscale('log')
#ax.set_yscale('log')
ax.set_xlabel('Energy [keV]')
#ax.set_title(material.replace('_', ' ').capitalize())
ax.set_ylabel(r'Effective Area [cm$^2$]')
ax.plot(energy_kev, foxsi1_cm2, color = 'blue')
ax.plot(energy_kev, foxsi2_cm2, color = 'red')
ax.legend((r'FOXSI-1', r'FOXSI-2'))
plt.show()
| en | 0.708918 | 2.377863 | 2 |
tfx/components/base/base_driver_test.py | NunoEdgarGFlowHub/tfx | 0 | 6630033 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.base.base_driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tensorflow as tf
from tfx.components.base import base_driver
from tfx.orchestration import data_types
from tfx.utils import channel
from tfx.utils import types
class BaseDriverTest(tf.test.TestCase):
def setUp(self):
self._mock_metadata = tf.test.mock.Mock()
self._input_dict = {
'input_data': [types.TfxArtifact(type_name='InputType')],
}
input_dir = os.path.join(
os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
self._testMethodName, 'input_dir')
# valid input artifacts must have a uri pointing to an existing directory.
for key, input_list in self._input_dict.items():
for index, artifact in enumerate(input_list):
artifact.id = index + 1
uri = os.path.join(input_dir, key, str(artifact.id), '')
artifact.uri = uri
tf.gfile.MakeDirs(uri)
self._output_dict = {
'output_data': [types.TfxArtifact(type_name='OutputType')],
}
self._exec_properties = {
'key': 'value',
}
self._base_output_dir = os.path.join(
os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
self._testMethodName, 'base_output_dir')
self._driver_args = data_types.DriverArgs(
worker_name='worker_name',
base_output_dir=self._base_output_dir,
enable_cache=True)
self._execution_id = 100
def _check_output(self, execution_decision):
output_dict = execution_decision.output_dict
self.assertEqual(self._output_dict.keys(), output_dict.keys())
for name, output_list in output_dict.items():
for (original_output, output) in zip(self._output_dict[name],
output_list):
if execution_decision.execution_id:
# Uncached results should have a newly created uri.
self.assertEqual(
os.path.join(self._base_output_dir, name,
str(execution_decision.execution_id), ''),
output.uri)
else:
# Cached results have a different set of uri.
self.assertEqual(
os.path.join(self._base_output_dir, name, str(self._execution_id),
''), output.uri)
self.assertEqual(original_output.split, output.split)
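  # Illustrative note on the uri layout checked above (added comment, not original test
  # logic): for the output name 'output_data', an uncached run expects
  # <base_output_dir>/output_data/<new_execution_id>/ while a cached run reuses
  # <base_output_dir>/output_data/100/ (the previous execution id set up in setUp).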
def test_prepare_execution(self):
input_dict = copy.deepcopy(self._input_dict)
output_dict = copy.deepcopy(self._output_dict)
exec_properties = copy.deepcopy(self._exec_properties)
self._mock_metadata.previous_run.return_value = None
self._mock_metadata.prepare_execution.return_value = self._execution_id
driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
execution_decision = driver.prepare_execution(input_dict, output_dict,
exec_properties,
self._driver_args)
self.assertEqual(self._execution_id, execution_decision.execution_id)
self._check_output(execution_decision)
def test_cached_execution(self):
input_dict = copy.deepcopy(self._input_dict)
output_dict = copy.deepcopy(self._output_dict)
exec_properties = copy.deepcopy(self._exec_properties)
cached_output_dict = copy.deepcopy(self._output_dict)
for key, artifact_list in cached_output_dict.items():
for artifact in artifact_list:
artifact.uri = os.path.join(self._base_output_dir, key,
str(self._execution_id), '')
# valid cached artifacts must have an existing uri.
tf.gfile.MakeDirs(artifact.uri)
self._mock_metadata.previous_run.return_value = self._execution_id
self._mock_metadata.fetch_previous_result_artifacts.return_value = cached_output_dict
driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
execution_decision = driver.prepare_execution(input_dict, output_dict,
exec_properties,
self._driver_args)
self.assertIsNone(execution_decision.execution_id)
self._check_output(execution_decision)
def test_artifact_missing(self):
input_dict = copy.deepcopy(self._input_dict)
input_dict['input_data'][0].uri = 'should/not/exist'
output_dict = copy.deepcopy(self._output_dict)
exec_properties = copy.deepcopy(self._exec_properties)
driver_options = copy.deepcopy(self._driver_args)
driver_options.enable_cache = False
cached_output_dict = copy.deepcopy(self._output_dict)
for key, artifact_list in cached_output_dict.items():
for artifact in artifact_list:
artifact.uri = os.path.join(self._base_output_dir, key,
str(self._execution_id), '')
# valid cached artifacts must have an existing uri.
tf.gfile.MakeDirs(artifact.uri)
self._mock_metadata.previous_run.return_value = self._execution_id
self._mock_metadata.fetch_previous_result_artifacts.return_value = cached_output_dict
driver = base_driver.BaseDriver(self._mock_metadata)
with self.assertRaises(RuntimeError):
driver.prepare_execution(input_dict, output_dict, exec_properties,
driver_options)
def test_no_cache_on_missing_uri(self):
input_dict = copy.deepcopy(self._input_dict)
output_dict = copy.deepcopy(self._output_dict)
exec_properties = copy.deepcopy(self._exec_properties)
cached_output_dict = copy.deepcopy(self._output_dict)
for key, artifact_list in cached_output_dict.items():
for artifact in artifact_list:
artifact.uri = os.path.join(self._base_output_dir, key,
str(self._execution_id), '')
# Non existing output uri will force a cache miss.
self.assertFalse(tf.gfile.Exists(artifact.uri))
self._mock_metadata.previous_run.return_value = self._execution_id
self._mock_metadata.fetch_previous_result_artifacts.return_value = cached_output_dict
actual_execution_id = self._execution_id + 1
self._mock_metadata.prepare_execution.return_value = actual_execution_id
driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
execution_decision = driver.prepare_execution(input_dict, output_dict,
exec_properties,
self._driver_args)
self.assertEqual(actual_execution_id, execution_decision.execution_id)
self._check_output(execution_decision)
def test_pre_execution_new_execution(self):
input_dict = {
'input_a':
channel.Channel(
type_name='input_a',
artifacts=[types.TfxArtifact(type_name='input_a')])
}
output_dict = {
'output_a':
channel.Channel(
type_name='output_a',
artifacts=[
types.TfxArtifact(type_name='output_a', split='split')
])
}
execution_id = 1
exec_properties = copy.deepcopy(self._exec_properties)
driver_args = data_types.DriverArgs(
worker_name='worker_name', base_output_dir='base', enable_cache=True)
pipeline_info = data_types.PipelineInfo(
pipeline_name='my_pipeline_name',
pipeline_root=os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
run_id='my_run_id')
component_info = data_types.ComponentInfo(
component_type='a.b.c', component_id='my_component_id')
self._mock_metadata.get_artifacts_by_info.side_effect = list(
input_dict['input_a'].get())
self._mock_metadata.register_execution.side_effect = [execution_id]
self._mock_metadata.previous_execution.side_effect = [None]
driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
execution_decision = driver.pre_execution(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
driver_args=driver_args,
pipeline_info=pipeline_info,
component_info=component_info)
self.assertFalse(execution_decision.use_cached_results)
self.assertEqual(execution_decision.execution_id, 1)
self.assertItemsEqual(execution_decision.exec_properties, exec_properties)
self.assertEqual(
execution_decision.output_dict['output_a'][0].uri,
os.path.join(pipeline_info.pipeline_root, component_info.component_id,
'output_a', str(execution_id), 'split', ''))
def test_pre_execution_cached(self):
input_dict = {
'input_a':
channel.Channel(
type_name='input_a',
artifacts=[types.TfxArtifact(type_name='input_a')])
}
output_dict = {
'output_a':
channel.Channel(
type_name='output_a',
artifacts=[
types.TfxArtifact(type_name='output_a', split='split')
])
}
execution_id = 1
exec_properties = copy.deepcopy(self._exec_properties)
driver_args = data_types.DriverArgs(
worker_name='worker_name', base_output_dir='base', enable_cache=True)
pipeline_info = data_types.PipelineInfo(
pipeline_name='my_pipeline_name',
pipeline_root=os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
run_id='my_run_id')
component_info = data_types.ComponentInfo(
component_type='a.b.c', component_id='my_component_id')
self._mock_metadata.get_artifacts_by_info.side_effect = list(
input_dict['input_a'].get())
self._mock_metadata.register_execution.side_effect = [execution_id]
self._mock_metadata.previous_execution.side_effect = [2]
self._mock_metadata.fetch_previous_result_artifacts.side_effect = [
self._output_dict
]
driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
execution_decision = driver.pre_execution(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
driver_args=driver_args,
pipeline_info=pipeline_info,
component_info=component_info)
self.assertTrue(execution_decision.use_cached_results)
self.assertEqual(execution_decision.execution_id, 1)
self.assertItemsEqual(execution_decision.exec_properties, exec_properties)
self.assertItemsEqual(execution_decision.output_dict, self._output_dict)
if __name__ == '__main__':
tf.test.main()
| en | 0.838823 | 1.881947 | 2 |
trace-creator/trace-creator.py | CSIRT-MU/trace-share | 1 | 6630034 | #!/usr/bin/env python
#
# BSD 3-Clause License
#
# Copyright (c) 2018, CSIRT-MU, Masaryk University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Script to run commands on attacker machine within the TraceCreator and capture packet trace and scripts output
based on given configuration.
Needs elevated privileges due to tshark ability to store files in a shared folder.
Requirements:
* tshark
* Python 3
* Python modules: termcolor, paramiko, YAML
Usage:
# ./trace-creator.py -c <configuration_file> -o <output_directory> -i <capture_interface>
-d <additional_capture_delay> -u <ssh_username> -p <ssh_password>
"""
# Common python modules
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import subprocess # Executes commands in shell
import time # Manipulates time values
import re # Regular expressions support
import shlex # Split the string s using shell-like syntax
import shutil # Copy files and directory trees
# Additional python modules
from termcolor import cprint # Colors in the console output
import paramiko # SSH connection module
import yaml # YAML configuration parser
def create_capture_directory(directory):
"""
Creates temporary capture directory (script requires other directory than virtually shared).
:param directory: capture directory path
"""
if not os.path.exists(directory):
os.makedirs(directory)
subprocess.call("chmod 777 " + directory, shell=True)
def get_task_id(task, timestamp):
"""
Generates task ID with format "<timestamp>-<task_name>".
:param task: parsed configuration of one task from the whole configuration file
:param timestamp: timestamp of the task
:return: normalized file name
"""
task_id = "{timestamp}-{name}".format(timestamp=timestamp, name=task["name"][:50].lower())
    # Remove invalid characters from the task name
return re.sub(r'[ @#$%^&*<>{}:|;\'\\\"/]', r'_', task_id)
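# For example (hypothetical values): get_task_id() for a task named "SSH Bruteforce #1"
# processed at timestamp "2018-06-01_12-00-00" returns
# "2018-06-01_12-00-00-ssh_bruteforce__1".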
def host_configure(task, host, command, timestamp, output_directory, username, password):
    """
    Run given command on the host via SSH connection.
    :param task: parsed configuration of one task from the whole configuration file
    :param host: IP address of the remote host
    :param command: command to run
    :param timestamp: timestamp of the task
    :param output_directory: directory path to store commands output
    :param username: SSH connection username
    :param password: SSH connection password
    """
cprint("[info] Configuration of host: " + host, "green")
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host, username=username, password=password)
stdin_handle, stdout_handle, stderr_handle = ssh_client.exec_command(command)
    stdout = stdout_handle.read().decode()
    stderr = stderr_handle.read().decode()
if stdout or stderr:
directory_name = "{path}/{task_id}/".format(path=output_directory, task_id=get_task_id(task, timestamp))
if not os.path.exists(directory_name):
os.makedirs(directory_name)
if stdout:
with open(directory_name + host + ".out", 'w') as out_file:
out_file.write(stdout)
cprint("[info] Command output: \n" + str(stdout), "green")
if stderr:
with open(directory_name + host + ".err", 'w') as err_file:
                err_file.write(stderr)
cprint("[warning] Command error output: \n" + str(stderr), "blue")
ssh_client.close()
def start_tshark(task, network_interface, capture_directory, timestamp):
"""
Starts tshark capture process based on task configuration.
:param task: parsed configuration of one task from the whole configuration file
:param network_interface: capture network interface
:param capture_directory: temporary directory to store generated data
:param timestamp: timestamp of the task
:return: initialized tshark process
"""
cprint("[info] Starting tshark capture...", "green")
capture_file_path = "{path}/{filename}.pcapng".format(path=capture_directory,
filename=get_task_id(task, timestamp))
tshark_command = "tshark -i {interface} -q -w {output_file} -F pcapng".format(interface=network_interface,
output_file=capture_file_path)
if "filter" in task:
tshark_command += " -f \"{filter}\"".format(filter=task["filter"])
# shlex.split splits into shell args list, alternatively use without shlex.split and add shell=True
tshark_process = subprocess.Popen(shlex.split(tshark_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return tshark_process
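# Illustrative command built by start_tshark() (hypothetical values): for interface
# "enp0s8", task id "2018-06-01_12-00-00-ssh_bruteforce__1" and filter
# "host 192.168.56.102", the spawned process is:
#   tshark -i enp0s8 -q -w /tmp/capture/2018-06-01_12-00-00-ssh_bruteforce__1.pcapng -F pcapng -f "host 192.168.56.102"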
def run_command(task, timestamp, output_directory):
"""
Run task command and provide its output.
:param task: parsed configuration of one task from the whole configuration file
:param timestamp: timestamp of the task
:param output_directory: directory for log and error files
"""
cprint("[info] Running command: " + task["command"], "green")
    process = subprocess.Popen(shlex.split(task["command"]), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               universal_newlines=True)
    stdout, stderr = process.communicate()
if stdout:
log_filename = "{path}/{filename}.out".format(path=output_directory, filename=get_task_id(task, timestamp))
with open(log_filename, 'w') as out_file:
out_file.write(stdout)
cprint("[info] Command output: \n" + str(stdout), "green")
if stderr:
err_filename = "{path}/{filename}.err".format(path=output_directory, filename=get_task_id(task, timestamp))
with open(err_filename, 'w') as err_file:
err_file.write(stderr)
cprint("[warning] Command error output: \n" + str(stderr), "blue")
def move_files(source_directory, destination_directory):
"""
Move all files within the source_directory to the destination_directory.
:param source_directory: source directory with files
:param destination_directory: destination directory
"""
for item in os.listdir(source_directory):
source = os.path.join(source_directory, item)
destination = os.path.join(destination_directory, item)
shutil.move(source, destination)
def process_creator_task(task, capture_directory, args):
"""
Process task in given configuration. Prepare hosts, start tshark capture with specified filter, run desired
command, and provide command outputs together with generated capture files.
:param task: parsed configuration of one task from the whole configuration file
:param capture_directory: temporary directory to store generated data
:param args: creator script arguments
"""
cprint("[info] Processing task: " + task["name"], "green")
task_timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
if "configuration" in task:
for host_configuration in task["configuration"]:
            host_configure(task, host_configuration["ip"], host_configuration["command"], task_timestamp,
                           args.output_directory, args.username, args.password)
tshark_process = start_tshark(task, args.interface, capture_directory, task_timestamp)
run_command(task, task_timestamp, args.output_directory)
time.sleep(args.delay)
tshark_process.terminate()
move_files(capture_directory, args.output_directory)
cprint("[info] Finished task: " + task["name"], "green")
if __name__ == "__main__":
# Argument parser automatically creates -h argument
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--configuration", help="Path to the configuration file.", type=argparse.FileType('r'),
required=False, default="/vagrant/configuration/trace-creator.yml")
parser.add_argument("-o", "--output_directory", help="Output directory for captured files.", type=str,
required=False, default="/vagrant/capture/")
parser.add_argument("-i", "--interface", help="Capture network interface.", type=str,
required=False, default="enp0s8")
parser.add_argument("-d", "--delay", help="Delay to stop capture after process finished (in seconds).", type=int,
required=False, default=3)
parser.add_argument("-u", "--username", help="Username for connection to remote host via SSH.", type=str,
required=False, default="vagrant")
parser.add_argument("-p", "--password", help="Username for connection to remote host via SSH.", type=str,
required=False, default="vagrant")
args = parser.parse_args()
try:
        configuration = yaml.safe_load(args.configuration)
except yaml.YAMLError as exc:
cprint("[error] YAML configuration not correctly loaded: " + str(exc), "red")
sys.exit(1)
# Create temporary capture directory (necessary for tshark)
capture_directory = "/tmp/capture/"
create_capture_directory(capture_directory)
# Create output directory if not exists
if not os.path.exists(args.output_directory):
os.makedirs(args.output_directory)
cprint("[info] Starting commands execution and packet capture...", "green")
for task in configuration:
process_creator_task(task, capture_directory, args)
cprint("[info] All data exported!", "green")
cprint("[info] Now you can destroy TraceCreator environment using \"vagrant destroy\" command.", "green")
| #!/usr/bin/env python
#
# BSD 3-Clause License
#
# Copyright (c) 2018, CSIRT-MU, Masaryk University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Script to run commands on attacker machine within the TraceCreator and capture packet trace and scripts output
based on given configuration.
Needs elevated privileges due to tshark ability to store files in a shared folder.
Requirements:
* tshark
* Python 3
* Python modules: termcolor, paramiko, YAML
Usage:
# ./trace-creator.py -c <configuration_file> -o <output_directory> -i <capture_interface>
-d <additional_capture_delay> -u <ssh_username> -p <ssh_password>
"""
# Common python modules
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import subprocess # Executes commands in shell
import time # Manipulates time values
import re # Regular expressions support
import shlex # Split the string s using shell-like syntax
import shutil # Copy files and directory trees
# Additional python modules
from termcolor import cprint # Colors in the console output
import paramiko # SSH connection module
import yaml # YAML configuration parser
def create_capture_directory(directory):
"""
Creates temporary capture directory (script requires other directory than virtually shared).
:param directory: capture directory path
"""
if not os.path.exists(directory):
os.makedirs(directory)
subprocess.call("chmod 777 " + directory, shell=True)
def get_task_id(task, timestamp):
"""
Generates task ID with format "<timestamp>-<task_name>".
:param task: parsed configuration of one task from the whole configuration file
:param timestamp: timestamp of the task
:return: normalized file name
"""
task_id = "{timestamp}-{name}".format(timestamp=timestamp, name=task["name"][:50].lower())
# Remove invalid characters from the tak name
return re.sub(r'[ @#$%^&*<>{}:|;\'\\\"/]', r'_', task_id)
def host_configure(host, command, timestamp, output_directory, username, password):
"""
Run given command on the host via SSH connection.
:param host: IP address of the remote host
:param command: command to run
:param timestamp: timestamp of the task
:param output_directory: directory path to store commands output
:param username: SSH connection username
:param password: SSH connection password
"""
cprint("[info] Configuration of host: " + host, "green")
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host, username=username, password=password)
stdin_handle, stdout_handle, stderr_handle = ssh_client.exec_command(command)
stdout = stdout_handle.read()
stderr = stderr_handle.read()
if stdout or stderr:
directory_name = "{path}/{task_id}/".format(path=output_directory, task_id=get_task_id(task, timestamp))
if not os.path.exists(directory_name):
os.makedirs(directory_name)
if stdout:
with open(directory_name + host + ".out", 'w') as out_file:
out_file.write(stdout)
cprint("[info] Command output: \n" + str(stdout), "green")
if stderr:
with open(directory_name + host + ".err", 'w') as err_file:
err_file.write(stdout)
cprint("[warning] Command error output: \n" + str(stderr), "blue")
ssh_client.close()
def start_tshark(task, network_interface, capture_directory, timestamp):
"""
Starts tshark capture process based on task configuration.
:param task: parsed configuration of one task from the whole configuration file
:param network_interface: capture network interface
:param capture_directory: temporary directory to store generated data
:param timestamp: timestamp of the task
:return: initialized tshark process
"""
cprint("[info] Starting tshark capture...", "green")
capture_file_path = "{path}/{filename}.pcapng".format(path=capture_directory,
filename=get_task_id(task, timestamp))
tshark_command = "tshark -i {interface} -q -w {output_file} -F pcapng".format(interface=network_interface,
output_file=capture_file_path)
if "filter" in task:
tshark_command += " -f \"{filter}\"".format(filter=task["filter"])
# shlex.split splits into shell args list, alternatively use without shlex.split and add shell=True
tshark_process = subprocess.Popen(shlex.split(tshark_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return tshark_process
def run_command(task, timestamp, output_directory):
"""
Run task command and provide its output.
:param task: parsed configuration of one task from the whole configuration file
:param timestamp: timestamp of the task
:param output_directory: directory for log and error files
"""
cprint("[info] Running command: " + task["command"], "green")
process = subprocess.Popen(shlex.split(task["command"]), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stdout:
log_filename = "{path}/{filename}.out".format(path=output_directory, filename=get_task_id(task, timestamp))
with open(log_filename, 'w') as out_file:
out_file.write(stdout)
cprint("[info] Command output: \n" + str(stdout), "green")
if stderr:
err_filename = "{path}/{filename}.err".format(path=output_directory, filename=get_task_id(task, timestamp))
with open(err_filename, 'w') as err_file:
err_file.write(stderr)
cprint("[warning] Command error output: \n" + str(stderr), "blue")
def move_files(source_directory, destination_directory):
"""
Move all files within the source_directory to the destination_directory.
:param source_directory: source directory with files
:param destination_directory: destination directory
"""
for item in os.listdir(source_directory):
source = os.path.join(source_directory, item)
destination = os.path.join(destination_directory, item)
shutil.move(source, destination)
def process_creator_task(task, capture_directory, args):
"""
Process task in given configuration. Prepare hosts, start tshark capture with specified filter, run desired
command, and provide command outputs together with generated capture files.
:param task: parsed configuration of one task from the whole configuration file
:param capture_directory: temporary directory to store generated data
:param args: creator script arguments
"""
cprint("[info] Processing task: " + task["name"], "green")
task_timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
if "configuration" in task:
for host_configuration in task["configuration"]:
host_configure(host_configuration["ip"], host_configuration["command"], task_timestamp,
args.output_directory, args.username, args.password)
tshark_process = start_tshark(task, args.interface, capture_directory, task_timestamp)
run_command(task, task_timestamp, args.output_directory)
time.sleep(args.delay)
tshark_process.terminate()
move_files(capture_directory, args.output_directory)
cprint("[info] Finished task: " + task["name"], "green")
if __name__ == "__main__":
# Argument parser automatically creates -h argument
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--configuration", help="Path to the configuration file.", type=argparse.FileType('r'),
required=False, default="/vagrant/configuration/trace-creator.yml")
parser.add_argument("-o", "--output_directory", help="Output directory for captured files.", type=str,
required=False, default="/vagrant/capture/")
parser.add_argument("-i", "--interface", help="Capture network interface.", type=str,
required=False, default="enp0s8")
parser.add_argument("-d", "--delay", help="Delay to stop capture after process finished (in seconds).", type=int,
required=False, default=3)
parser.add_argument("-u", "--username", help="Username for connection to remote host via SSH.", type=str,
required=False, default="vagrant")
parser.add_argument("-p", "--password", help="Username for connection to remote host via SSH.", type=str,
required=False, default="vagrant")
args = parser.parse_args()
try:
        configuration = yaml.safe_load(args.configuration)
except yaml.YAMLError as exc:
cprint("[error] YAML configuration not correctly loaded: " + str(exc), "red")
sys.exit(1)
# Create temporary capture directory (necessary for tshark)
capture_directory = "/tmp/capture/"
create_capture_directory(capture_directory)
# Create output directory if not exists
if not os.path.exists(args.output_directory):
os.makedirs(args.output_directory)
cprint("[info] Starting commands execution and packet capture...", "green")
for task in configuration:
process_creator_task(task, capture_directory, args)
cprint("[info] All data exported!", "green")
cprint("[info] Now you can destroy TraceCreator environment using \"vagrant destroy\" command.", "green")
| en | 0.692866 | #!/usr/bin/env python # # BSD 3-Clause License # # Copyright (c) 2018, CSIRT-MU, Masaryk University # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Script to run commands on attacker machine within the TraceCreator and capture packet trace and scripts output based on given configuration. Needs elevated privileges due to tshark ability to store files in a shared folder. Requirements: * tshark * Python 3 * Python modules: termcolor, paramiko, YAML Usage: # ./trace-creator.py -c <configuration_file> -o <output_directory> -i <capture_interface> -d <additional_capture_delay> -u <ssh_username> -p <ssh_password> # Common python modules # Common system functions # Common operating system functions # Arguments parser # Executes commands in shell # Manipulates time values # Regular expressions support # Split the string s using shell-like syntax # Copy files and directory trees # Additional python modules # Colors in the console output # SSH connection module # YAML configuration parser Creates temporary capture directory (script requires other directory than virtually shared). :param directory: capture directory path Generates task ID with format "<timestamp>-<task_name>". :param task: parsed configuration of one task from the whole configuration file :param timestamp: timestamp of the task :return: normalized file name # Remove invalid characters from the tak name #$%^&*<>{}:|;\'\\\"/]', r'_', task_id) Run given command on the host via SSH connection. :param host: IP address of the remote host :param command: command to run :param timestamp: timestamp of the task :param output_directory: directory path to store commands output :param username: SSH connection username :param password: SSH connection password Starts tshark capture process based on task configuration. 
:param task: parsed configuration of one task from the whole configuration file :param network_interface: capture network interface :param capture_directory: temporary directory to store generated data :param timestamp: timestamp of the task :return: initialized tshark process # shlex.split splits into shell args list, alternatively use without shlex.split and add shell=True Run task command and provide its output. :param task: parsed configuration of one task from the whole configuration file :param timestamp: timestamp of the task :param output_directory: directory for log and error files Move all files within the source_directory to the destination_directory. :param source_directory: source directory with files :param destination_directory: destination directory Process task in given configuration. Prepare hosts, start tshark capture with specified filter, run desired command, and provide command outputs together with generated capture files. :param task: parsed configuration of one task from the whole configuration file :param capture_directory: temporary directory to store generated data :param args: creator script arguments # Argument parser automatically creates -h argument # Create temporary capture directory (necessary for tshark) # Create output directory if not exists | 1.220829 | 1 |
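The script above is driven by a YAML list of tasks. Judging from the keys it reads (each task's "name" and "command", an optional tshark "filter", and an optional "configuration" list of per-host SSH commands with "ip" and "command"), a minimal task definition might look like the sketch below; the concrete values are illustrative only and do not come from the original repository.

import yaml  # PyYAML, the same dependency trace-creator.py already uses

# Illustrative only: mirrors the keys consumed by process_creator_task and host_configure.
example_tasks = [
    {
        "name": "ping_sweep",                    # used by get_task_id to build output file names
        "command": "ping -c 3 192.168.56.102",   # executed locally by run_command
        "filter": "icmp",                        # optional tshark capture filter
        "configuration": [                       # optional preparation commands run over SSH
            {"ip": "192.168.56.102", "command": "echo ready"},
        ],
    },
]

print(yaml.safe_dump(example_tasks, default_flow_style=False))  # YAML form expected in trace-creator.yml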
src/envoxy/tests/test_asserts.py | muzzley/envoxy | 2 | 6630035 | from ..asserts import *
from .fixtures import test_payload
import pytest
import datetime
from ..exceptions import ValidationException
##### assertz #####
def test_assertz_ok(test_payload):
assert assertz(True, "Error", 0, 500) == None
assert assertz(test_payload["username"] == "", "Error", 0, 500) == None
def test_assertz_nok(test_payload):
with pytest.raises(ValidationException):
assertz(False, "Error", 0, 500)
with pytest.raises(ValidationException):
assertz(type(test_payload["application_ids"]) is not list, "Error", 0, 500)
##### assertz_mandatory #####
def test_assertz_mandatory_ok(test_payload):
assert assertz_mandatory(test_payload, "password") == None
assert assertz_mandatory(test_payload["application_ids"], 1) == None
def test_assertz_mandatory_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "non_existent_key")
assert str(e.value) == "Mandatory: non_existent_key"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "")
assert str(e.value) == "Key must not be emtpy"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "username")
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload["user"], "last_name")
assert str(e.value) == "Mandatory: last_name"
with pytest.raises(ValidationException) as e:
assertz_mandatory({})
assert str(e.value) == "Mandatory: {}"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "features")
assert str(e.value) == "Mandatory: features"
with pytest.raises(ValidationException) as e:
null_value = None
assertz_mandatory(null_value)
assert str(e.value) == "Mandatory: None"
##### assertz_string #####
def test_assertz_string_ok(test_payload):
assert assertz_string(test_payload["user"]["name"]) == None
assert assertz_string(u"random unicode string") == None
assert assertz_string(None) == None
assert assertz_string(test_payload, "password") == None
def test_assertz_string_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_string(test_payload, "application_ids", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['application_ids']}"
with pytest.raises(ValidationException) as e:
assert assertz_string(test_payload, "age", 2000) is not None
assert str(e.value) == f"Invalid value type: {test_payload['age']}"
##### assertz_integer #####
def test_assertz_integer_ok(test_payload):
assert assertz_integer(test_payload["age"]) == None
assert assertz_integer(-40000) == None
assert assertz_integer(None) == None
def test_assertz_integer_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_integer(test_payload["user"], "name", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_integer(test_payload, "user", 2001) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_float #####
def test_assertz_float_ok(test_payload):
assert assertz_float(test_payload["value"]) == None
assert assertz_float(3.14159265359) == None
assert assertz_float(None) == None
def test_assertz_float_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_float(test_payload["user"], "name", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_float(test_payload, "user", 2001) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_timestamp #####
def test_assertz_timestamp_ok(test_payload):
assert assertz_timestamp(test_payload["created"]) == None
assert assertz_timestamp(datetime.datetime.now()) == None
assert assertz_timestamp(datetime.date.today()) == None
assert assertz_timestamp(None) == None
def test_assertz_timestamp_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_timestamp(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_timestamp(test_payload["user"])
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_boolean #####
def test_assertz_boolean_ok(test_payload):
assert assertz_boolean(test_payload["active"]) == None
assert assertz_boolean(False) == None
assert assertz_boolean(None) == None
def test_assertz_boolean_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_boolean(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_boolean(test_payload, "user", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_complex #####
def test_assertz_complex_ok(test_payload):
assert assertz_complex(test_payload, "user") == None
assert assertz_complex(test_payload["application_ids"]) == None
assert assertz_complex('{"key": "value", "key1": {"key2": "value"}}') == None
assert assertz_complex(None) == None
def test_assertz_complex_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_complex(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_complex(test_payload, "created", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['created']}"
with pytest.raises(ValidationException) as e:
assertz_complex('[]')
assert str(e.value) == "Invalid value type: []"
##### assertz_dict #####
def test_assertz_dict_ok(test_payload):
assert assertz_dict(test_payload, "user") == None
assert assertz_dict({"key": "value", "key1": {"key2": "value"}}) == None
assert assertz_dict(None) == None
def test_assertz_dict_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_dict(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_dict(test_payload, "password", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['password']}"
with pytest.raises(ValidationException) as e:
assertz_dict({})
assert str(e.value) == "Invalid value type: {}"
##### assertz_json #####
def test_assertz_json_ok(test_payload):
assert assertz_json(test_payload, "headers") == None
assert assertz_json('{"key": "value", "key1": {"key2": "value"}}') == None
assert assertz_json(None) == None
def test_assertz_json_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_json(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_json(test_payload, "features", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['features']}"
with pytest.raises(ValidationException) as e:
assertz_json('{}')
assert str(e.value) == "Invalid value type: {}"
##### assertz_array #####
def test_assertz_array_ok(test_payload):
assert assertz_array(test_payload, "application_ids") == None
assert assertz_array(["a", "b", "c"]) == None
assert assertz_array(None) == None
def test_assertz_array_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_array(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_array(test_payload, "user", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
with pytest.raises(ValidationException) as e:
assertz_array([])
assert str(e.value) == f"Invalid value type: []"
##### assertz_uuid #####
def test_assertz_uuid_ok(test_payload):
assert assertz_uuid(test_payload, "unique_id") == None
assert assertz_uuid("6912574d-988a-4b34-98c4-424c61d37fef") == None
assert assertz_uuid(None) == None
def test_assertz_uuid_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload["user"], "name")
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "user")
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "features")
assert str(e.value) == f"Invalid value type: {test_payload['features']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "age")
assert str(e.value) == f"Invalid value type: {test_payload['age']}"
with pytest.raises(ValidationException) as e:
assertz_uuid([])
assert str(e.value) == f"Invalid value type: []"
##### assertz_utf8 #####
def test_assertz_utf8_ok(test_payload):
assert assertz_utf8(test_payload["user"], "alias") == None
assert assertz_utf8(None) == None
def test_assertz_utf8_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload["user"], "icon")
assert str(e.value) == "Invalid utf-8 encoding"
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload["age"])
assert str(e.value) == "Invalid utf-8 encoding"
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload, "features")
assert str(e.value) == "Invalid utf-8 encoding"
##### assertz_ascii #####
def test_assertz_ascii_ok(test_payload):
assert assertz_ascii(test_payload["user"], "name") == None
assert assertz_ascii(test_payload["regex"]) == None
assert assertz_ascii(None) == None
def test_assertz_ascii_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload["user"], "icon")
assert str(e.value) == "Invalid ascii encoding"
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload["user"]["alias"])
assert str(e.value) == "Invalid ascii encoding"
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload, "features")
assert str(e.value) == "Invalid ascii encoding"
##### assertz_hash #####
def test_assertz_hash_ok(test_payload):
assert assertz_hash(test_payload, "hash") == None
assert assertz_hash("zc6kj0xrb27rs0mthfn9j4m8m8pchy0q8sewh7x0c9o9g") == None
assert assertz_hash(None) == None
def test_assertz_hash_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_hash(test_payload["user"], "icon")
assert str(e.value) == "Invalid hash"
with pytest.raises(ValidationException) as e:
assertz_hash(test_payload["user"]["alias"])
assert str(e.value) == "Invalid hash"
with pytest.raises(ValidationException) as e:
assertz_hash("b66hx5xqs6siakp6ne4dj6w9dms7ydgmoxdmgjy33x6wt0iz1efmuxxnfwx7tjsr")
assert str(e.value) == "Invalid hash"
##### assertz_uri #####
def test_assertz_url_ok(test_payload):
assert assertz_url(test_payload, "website") == None
assert assertz_url(test_payload["sample_uri"]) == None
assert assertz_url(None) == None
def test_assertz_url_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["user"], "icon")
assert str(e.value) == "Invalid url"
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["user"]["alias"])
assert str(e.value) == "Invalid url"
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["application_ids"])
assert str(e.value) == "Invalid url"
##### assertz_email #####
def test_assertz_email_ok(test_payload):
assert assertz_email(test_payload["user"], "email") == None
assert assertz_email(None) == None
def test_assertz_email_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_email(test_payload["user"], "icon")
assert str(e.value) == "Invalid email"
with pytest.raises(ValidationException) as e:
assertz_email("john@doe")
assert str(e.value) == "Invalid email"
##### assertz_location #####
def test_assertz_location_ok(test_payload):
assert assertz_location(test_payload["user"], "home") == None
assert assertz_location(test_payload["user"]["work"]) == None
assert assertz_location(None) == None
def test_assertz_location_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["user"], "icon")
assert str(e.value) == "Invalid location"
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["username"])
assert str(e.value) == "Invalid location"
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["location"])
assert str(e.value) == "Invalid location"
##### assertz_phone #####
def test_assertz_phone_ok(test_payload):
assert assertz_phone(test_payload["user"], "phone") == None
assert assertz_phone(None) == None
def test_assertz_phone_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["user"], "icon")
assert str(e.value) == "Invalid phone"
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["username"])
assert str(e.value) == "Invalid phone"
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["user"]["cellphone"])
assert str(e.value) == "Invalid phone"
##### assertz_unauthorized #####
def test_assertz_unauthorized_ok(test_payload):
assert assertz_unauthorized(test_payload["age"] > 18, "invalid age") == None
def test_assertz_unauthorized_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["active"] == False, "inactive")
assert str(e.value) == "inactive"
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["age"] < 25, "age must be under 25")
assert str(e.value) == "age must be under 25"
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["username"] and test_payload["password"], "username or password should not be empty")
assert str(e.value) == "username or password should not be empty"
def test_assertz_mandatory_reply_ok(test_payload):
_result = assertz_mandatory_reply(test_payload, "activated", 1000, 400)
assert 'payload' in _result
assert 'status' in _result
assert _result['status'] == 400
assert _result['payload']['code'] == 1000
assert _result['payload']['text'] == 'Mandatory: activated'
| from ..asserts import *
from .fixtures import test_payload
import pytest
import datetime
from ..exceptions import ValidationException
##### assertz #####
def test_assertz_ok(test_payload):
assert assertz(True, "Error", 0, 500) == None
assert assertz(test_payload["username"] == "", "Error", 0, 500) == None
def test_assertz_nok(test_payload):
with pytest.raises(ValidationException):
assertz(False, "Error", 0, 500)
with pytest.raises(ValidationException):
assertz(type(test_payload["application_ids"]) is not list, "Error", 0, 500)
##### assertz_mandatory #####
def test_assertz_mandatory_ok(test_payload):
assert assertz_mandatory(test_payload, "password") == None
assert assertz_mandatory(test_payload["application_ids"], 1) == None
def test_assertz_mandatory_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "non_existent_key")
assert str(e.value) == "Mandatory: non_existent_key"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "")
assert str(e.value) == "Key must not be emtpy"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "username")
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload["user"], "last_name")
assert str(e.value) == "Mandatory: last_name"
with pytest.raises(ValidationException) as e:
assertz_mandatory({})
assert str(e.value) == "Mandatory: {}"
with pytest.raises(ValidationException) as e:
assertz_mandatory(test_payload, "features")
assert str(e.value) == "Mandatory: features"
with pytest.raises(ValidationException) as e:
null_value = None
assertz_mandatory(null_value)
assert str(e.value) == "Mandatory: None"
##### assertz_string #####
def test_assertz_string_ok(test_payload):
assert assertz_string(test_payload["user"]["name"]) == None
assert assertz_string(u"random unicode string") == None
assert assertz_string(None) == None
assert assertz_string(test_payload, "password") == None
def test_assertz_string_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_string(test_payload, "application_ids", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['application_ids']}"
with pytest.raises(ValidationException) as e:
assert assertz_string(test_payload, "age", 2000) is not None
assert str(e.value) == f"Invalid value type: {test_payload['age']}"
##### assertz_integer #####
def test_assertz_integer_ok(test_payload):
assert assertz_integer(test_payload["age"]) == None
assert assertz_integer(-40000) == None
assert assertz_integer(None) == None
def test_assertz_integer_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_integer(test_payload["user"], "name", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_integer(test_payload, "user", 2001) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_float #####
def test_assertz_float_ok(test_payload):
assert assertz_float(test_payload["value"]) == None
assert assertz_float(3.14159265359) == None
assert assertz_float(None) == None
def test_assertz_float_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_float(test_payload["user"], "name", 2000, 400) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_float(test_payload, "user", 2001) is not None
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_timestamp #####
def test_assertz_timestamp_ok(test_payload):
assert assertz_timestamp(test_payload["created"]) == None
assert assertz_timestamp(datetime.datetime.now()) == None
assert assertz_timestamp(datetime.date.today()) == None
assert assertz_timestamp(None) == None
def test_assertz_timestamp_nok(test_payload):
with pytest.raises(ValidationException) as e:
assert assertz_timestamp(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assert assertz_timestamp(test_payload["user"])
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_boolean #####
def test_assertz_boolean_ok(test_payload):
assert assertz_boolean(test_payload["active"]) == None
assert assertz_boolean(False) == None
assert assertz_boolean(None) == None
def test_assertz_boolean_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_boolean(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_boolean(test_payload, "user", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
##### assertz_complex #####
def test_assertz_complex_ok(test_payload):
assert assertz_complex(test_payload, "user") == None
assert assertz_complex(test_payload["application_ids"]) == None
assert assertz_complex('{"key": "value", "key1": {"key2": "value"}}') == None
assert assertz_complex(None) == None
def test_assertz_complex_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_complex(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_complex(test_payload, "created", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['created']}"
with pytest.raises(ValidationException) as e:
assertz_complex('[]')
assert str(e.value) == "Invalid value type: []"
##### assertz_dict #####
def test_assertz_dict_ok(test_payload):
assert assertz_dict(test_payload, "user") == None
assert assertz_dict({"key": "value", "key1": {"key2": "value"}}) == None
assert assertz_dict(None) == None
def test_assertz_dict_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_dict(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_dict(test_payload, "password", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['password']}"
with pytest.raises(ValidationException) as e:
assertz_dict({})
assert str(e.value) == "Invalid value type: {}"
##### assertz_json #####
def test_assertz_json_ok(test_payload):
assert assertz_json(test_payload, "headers") == None
assert assertz_json('{"key": "value", "key1": {"key2": "value"}}') == None
assert assertz_json(None) == None
def test_assertz_json_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_json(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_json(test_payload, "features", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['features']}"
with pytest.raises(ValidationException) as e:
assertz_json('{}')
assert str(e.value) == "Invalid value type: {}"
##### assertz_array #####
def test_assertz_array_ok(test_payload):
assert assertz_array(test_payload, "application_ids") == None
assert assertz_array(["a", "b", "c"]) == None
assert assertz_array(None) == None
def test_assertz_array_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_array(test_payload["user"], "name", 2000, 400)
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_array(test_payload, "user", 2001)
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
with pytest.raises(ValidationException) as e:
assertz_array([])
assert str(e.value) == f"Invalid value type: []"
##### assertz_uuid #####
def test_assertz_uuid_ok(test_payload):
assert assertz_uuid(test_payload, "unique_id") == None
assert assertz_uuid("6912574d-988a-4b34-98c4-424c61d37fef") == None
assert assertz_uuid(None) == None
def test_assertz_uuid_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload["user"], "name")
assert str(e.value) == f"Invalid value type: {test_payload['user']['name']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "user")
assert str(e.value) == f"Invalid value type: {test_payload['user']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "features")
assert str(e.value) == f"Invalid value type: {test_payload['features']}"
with pytest.raises(ValidationException) as e:
assertz_uuid(test_payload, "age")
assert str(e.value) == f"Invalid value type: {test_payload['age']}"
with pytest.raises(ValidationException) as e:
assertz_uuid([])
assert str(e.value) == f"Invalid value type: []"
##### assertz_utf8 #####
def test_assertz_utf8_ok(test_payload):
assert assertz_utf8(test_payload["user"], "alias") == None
assert assertz_utf8(None) == None
def test_assertz_utf8_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload["user"], "icon")
assert str(e.value) == "Invalid utf-8 encoding"
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload["age"])
assert str(e.value) == "Invalid utf-8 encoding"
with pytest.raises(ValidationException) as e:
assertz_utf8(test_payload, "features")
assert str(e.value) == "Invalid utf-8 encoding"
##### assertz_ascii #####
def test_assertz_ascii_ok(test_payload):
assert assertz_ascii(test_payload["user"], "name") == None
assert assertz_ascii(test_payload["regex"]) == None
assert assertz_ascii(None) == None
def test_assertz_ascii_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload["user"], "icon")
assert str(e.value) == "Invalid ascii encoding"
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload["user"]["alias"])
assert str(e.value) == "Invalid ascii encoding"
with pytest.raises(ValidationException) as e:
assertz_ascii(test_payload, "features")
assert str(e.value) == "Invalid ascii encoding"
##### assertz_hash #####
def test_assertz_hash_ok(test_payload):
assert assertz_hash(test_payload, "hash") == None
assert assertz_hash("zc6kj0xrb27rs0mthfn9j4m8m8pchy0q8sewh7x0c9o9g") == None
assert assertz_hash(None) == None
def test_assertz_hash_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_hash(test_payload["user"], "icon")
assert str(e.value) == "Invalid hash"
with pytest.raises(ValidationException) as e:
assertz_hash(test_payload["user"]["alias"])
assert str(e.value) == "Invalid hash"
with pytest.raises(ValidationException) as e:
assertz_hash("b66hx5xqs6siakp6ne4dj6w9dms7ydgmoxdmgjy33x6wt0iz1efmuxxnfwx7tjsr")
assert str(e.value) == "Invalid hash"
##### assertz_uri #####
def test_assertz_url_ok(test_payload):
assert assertz_url(test_payload, "website") == None
assert assertz_url(test_payload["sample_uri"]) == None
assert assertz_url(None) == None
def test_assertz_url_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["user"], "icon")
assert str(e.value) == "Invalid url"
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["user"]["alias"])
assert str(e.value) == "Invalid url"
with pytest.raises(ValidationException) as e:
assertz_url(test_payload["application_ids"])
assert str(e.value) == "Invalid url"
##### assertz_email #####
def test_assertz_email_ok(test_payload):
assert assertz_email(test_payload["user"], "email") == None
assert assertz_email(None) == None
def test_assertz_email_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_email(test_payload["user"], "icon")
assert str(e.value) == "Invalid email"
with pytest.raises(ValidationException) as e:
assertz_email("john@doe")
assert str(e.value) == "Invalid email"
##### assertz_location #####
def test_assertz_location_ok(test_payload):
assert assertz_location(test_payload["user"], "home") == None
assert assertz_location(test_payload["user"]["work"]) == None
assert assertz_location(None) == None
def test_assertz_location_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["user"], "icon")
assert str(e.value) == "Invalid location"
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["username"])
assert str(e.value) == "Invalid location"
with pytest.raises(ValidationException) as e:
assertz_location(test_payload["location"])
assert str(e.value) == "Invalid location"
##### assertz_phone #####
def test_assertz_phone_ok(test_payload):
assert assertz_phone(test_payload["user"], "phone") == None
assert assertz_phone(None) == None
def test_assertz_phone_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["user"], "icon")
assert str(e.value) == "Invalid phone"
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["username"])
assert str(e.value) == "Invalid phone"
with pytest.raises(ValidationException) as e:
assertz_phone(test_payload["user"]["cellphone"])
assert str(e.value) == "Invalid phone"
##### assertz_unauthorized #####
def test_assertz_unauthorized_ok(test_payload):
assert assertz_unauthorized(test_payload["age"] > 18, "invalid age") == None
def test_assertz_unauthorized_nok(test_payload):
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["active"] == False, "inactive")
assert str(e.value) == "inactive"
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["age"] < 25, "age must be under 25")
assert str(e.value) == "age must be under 25"
with pytest.raises(ValidationException) as e:
assertz_unauthorized(test_payload["username"] and test_payload["password"], "username or password should not be empty")
assert str(e.value) == "username or password should not be empty"
def test_assertz_mandatory_reply_ok(test_payload):
_result = assertz_mandatory_reply(test_payload, "activated", 1000, 400)
assert 'payload' in _result
assert 'status' in _result
assert _result['status'] == 400
assert _result['payload']['code'] == 1000
assert _result['payload']['text'] == 'Mandatory: activated'
| de | 0.332463 | ##### assertz ##### ##### assertz_mandatory ##### ##### assertz_string ##### ##### assertz_integer ##### ##### assertz_float ##### ##### assertz_timestamp ##### ##### assertz_boolean ##### ##### assertz_complex ##### ##### assertz_dict ##### ##### assertz_json ##### ##### assertz_array ##### ##### assertz_uuid ##### ##### assertz_utf8 ##### ##### assertz_ascii ##### ##### assertz_hash ##### ##### assertz_uri ##### ##### assertz_email ##### ##### assertz_location ##### ##### assertz_phone ##### ##### assertz_unauthorized ##### | 2.46758 | 2 |
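The tests above all consume a test_payload fixture imported from tests/fixtures.py, which is not part of this excerpt. The fragment below is a hypothetical reconstruction of a few of its fields, inferred only from the assertions above (an empty username, an integer age of at least 25 so both assertz_unauthorized checks behave as tested, a nested user dict, and so on); the real fixture contains more keys and may differ in detail.

# Hypothetical fragment of tests/fixtures.py; values are illustrative, not taken from the project.
import datetime
import pytest

@pytest.fixture
def test_payload():
    return {
        "username": "",                       # empty, so assertz_mandatory raises for it
        "password": "secret",
        "application_ids": [10, 20, 30],
        "age": 30,                            # >= 25, so both assertz_unauthorized tests hold
        "value": 1.5,
        "created": datetime.datetime.now(),
        "active": True,
        "user": {
            "name": "John",
            "email": "john@doe.com",
        },
        # ... remaining keys (features, headers, unique_id, hash, website, regex, location, ...) omitted
    }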
anidbcli/anidbconnector.py | skyejonke/anidbcli | 32 | 6630036 | import socket
import hashlib
import time
import os
import json
import anidbcli.encryptors as encryptors
API_ADDRESS = "api.anidb.net"
API_PORT = 9000
SOCKET_TIMEOUT = 5
MAX_RECEIVE_SIZE = 65507 # Max size of an UDP packet is about 1400B anyway
RETRY_COUNT = 3
API_ENDPOINT_ENCRYPT = "ENCRYPT user=%s&type=1"
API_ENDPOINT_LOGIN = "AUTH user=%s&pass=%s&protover=3&client=anidbcli&clientver=1&enc=UTF8"
API_ENDPOINT_LOGOUT = "LOGOUT s=%s"
ENCRYPTION_ENABLED = 209
LOGIN_ACCEPTED = 200
LOGIN_ACCEPTED_NEW_VERSION_AVAILABLE = 201
class AnidbConnector:
def __init__(self, bind_addr = None):
"""For class initialization use class methods create_plain or create_secure."""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if bind_addr:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(tuple(bind_addr))
self.socket.connect((socket.gethostbyname_ex(API_ADDRESS)[2][0], API_PORT))
self.socket.settimeout(SOCKET_TIMEOUT)
self.crypto = encryptors.PlainTextCrypto()
self.salt = None
def _set_crypto(self, crypto):
self.crypto = crypto
@classmethod
def create_plain(cls, username, password):
"""Creates unencrypted UDP API connection using the provided credenitals."""
instance = cls()
instance._login(username, password)
return instance
@classmethod
def create_secure(cls, username, password, api_key):
"""Creates AES128 encrypted UDP API connection using the provided credenitals and users api key."""
instance = cls()
enc_res = instance.send_request(API_ENDPOINT_ENCRYPT % username, False)
if enc_res["code"] != ENCRYPTION_ENABLED:
raise Exception(enc_res["data"])
instance.salt = enc_res["data"].split(" ")[0]
md5 = hashlib.md5(bytes(api_key + instance.salt, "ascii"))
instance._set_crypto(encryptors.Aes128TextEncryptor(md5.digest()))
instance._login(username, password)
return instance
@classmethod
def create_from_session(cls, session_key, sock_addr, api_key, salt):
"""Crates instance from an existing session. If salt is not None, encrypted instance is created."""
instance = cls(sock_addr)
instance.session = session_key
if (salt != None):
instance.salt = salt
md5 = hashlib.md5(bytes(api_key + instance.salt, "ascii"))
instance._set_crypto(encryptors.Aes128TextEncryptor(md5.digest()))
return instance
def _login(self, username, password):
response = self.send_request(API_ENDPOINT_LOGIN % (username, password), False)
if response["code"] == LOGIN_ACCEPTED or response["code"] == LOGIN_ACCEPTED_NEW_VERSION_AVAILABLE:
self.session = response["data"].split(" ")[0]
else:
raise Exception(response["data"])
def close(self, persistent, persist_file):
"""Logs out the user from current session and closes the connection."""
if not self.session:
raise Exception("Cannot logout: No active session.")
if persistent:
try:
os.makedirs(os.path.dirname(persist_file))
except: pass # Exists
d = dict()
d["session_key"] = self.session
d["timestamp"] = time.time()
d["salt"] = None
d["sockaddr"] = self.socket.getsockname()
if (self.salt): d["salt"] = self.salt
with open(persist_file, "w") as file:
file.writelines(json.dumps(d))
else:
try:
os.remove(persist_file)
except: pass # does not exist
self.send_request(API_ENDPOINT_LOGOUT % self.session, False)
self.socket.close()
def send_request(self, content, appendSession=True):
"""Sends request to the API and returns a dictionary containing response code and data."""
if appendSession:
if not self.session:
raise Exception("No session was set")
content += "&s=%s" % self.session
res = None
for _ in range(RETRY_COUNT):
try:
self.socket.send(self.crypto.Encrypt(content))
res = self.socket.recv(MAX_RECEIVE_SIZE)
break
            except: # Socket timeout / UDP packet not sent
time.sleep(1)
pass
if not res:
raise Exception("Cound not connect to anidb UDP API: Socket timeout.")
res = self.crypto.Decrypt(res)
res = res.rstrip("\n")
response = dict()
response["code"] = int(res[:3])
response["data"] = res[4:]
return response | import socket
import hashlib
import time
import os
import json
import anidbcli.encryptors as encryptors
API_ADDRESS = "api.anidb.net"
API_PORT = 9000
SOCKET_TIMEOUT = 5
MAX_RECEIVE_SIZE = 65507 # Max size of an UDP packet is about 1400B anyway
RETRY_COUNT = 3
API_ENDPOINT_ENCRYPT = "ENCRYPT user=%s&type=1"
API_ENDPOINT_LOGIN = "AUTH user=%s&pass=%s&protover=3&client=anidbcli&clientver=1&enc=UTF8"
API_ENDPOINT_LOGOUT = "LOGOUT s=%s"
ENCRYPTION_ENABLED = 209
LOGIN_ACCEPTED = 200
LOGIN_ACCEPTED_NEW_VERSION_AVAILABLE = 201
class AnidbConnector:
def __init__(self, bind_addr = None):
"""For class initialization use class methods create_plain or create_secure."""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if bind_addr:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(tuple(bind_addr))
self.socket.connect((socket.gethostbyname_ex(API_ADDRESS)[2][0], API_PORT))
self.socket.settimeout(SOCKET_TIMEOUT)
self.crypto = encryptors.PlainTextCrypto()
self.salt = None
def _set_crypto(self, crypto):
self.crypto = crypto
@classmethod
def create_plain(cls, username, password):
"""Creates unencrypted UDP API connection using the provided credenitals."""
instance = cls()
instance._login(username, password)
return instance
@classmethod
def create_secure(cls, username, password, api_key):
"""Creates AES128 encrypted UDP API connection using the provided credenitals and users api key."""
instance = cls()
enc_res = instance.send_request(API_ENDPOINT_ENCRYPT % username, False)
if enc_res["code"] != ENCRYPTION_ENABLED:
raise Exception(enc_res["data"])
instance.salt = enc_res["data"].split(" ")[0]
md5 = hashlib.md5(bytes(api_key + instance.salt, "ascii"))
instance._set_crypto(encryptors.Aes128TextEncryptor(md5.digest()))
instance._login(username, password)
return instance
@classmethod
def create_from_session(cls, session_key, sock_addr, api_key, salt):
"""Crates instance from an existing session. If salt is not None, encrypted instance is created."""
instance = cls(sock_addr)
instance.session = session_key
if (salt != None):
instance.salt = salt
md5 = hashlib.md5(bytes(api_key + instance.salt, "ascii"))
instance._set_crypto(encryptors.Aes128TextEncryptor(md5.digest()))
return instance
def _login(self, username, password):
response = self.send_request(API_ENDPOINT_LOGIN % (username, password), False)
if response["code"] == LOGIN_ACCEPTED or response["code"] == LOGIN_ACCEPTED_NEW_VERSION_AVAILABLE:
self.session = response["data"].split(" ")[0]
else:
raise Exception(response["data"])
def close(self, persistent, persist_file):
"""Logs out the user from current session and closes the connection."""
if not self.session:
raise Exception("Cannot logout: No active session.")
if persistent:
try:
os.makedirs(os.path.dirname(persist_file))
except: pass # Exists
d = dict()
d["session_key"] = self.session
d["timestamp"] = time.time()
d["salt"] = None
d["sockaddr"] = self.socket.getsockname()
if (self.salt): d["salt"] = self.salt
with open(persist_file, "w") as file:
file.writelines(json.dumps(d))
else:
try:
os.remove(persist_file)
except: pass # does not exist
self.send_request(API_ENDPOINT_LOGOUT % self.session, False)
self.socket.close()
def send_request(self, content, appendSession=True):
"""Sends request to the API and returns a dictionary containing response code and data."""
if appendSession:
if not self.session:
raise Exception("No session was set")
content += "&s=%s" % self.session
res = None
for _ in range(RETRY_COUNT):
try:
self.socket.send(self.crypto.Encrypt(content))
res = self.socket.recv(MAX_RECEIVE_SIZE)
break
            except: # Socket timeout / UDP packet not sent
time.sleep(1)
pass
if not res:
raise Exception("Cound not connect to anidb UDP API: Socket timeout.")
res = self.crypto.Decrypt(res)
res = res.rstrip("\n")
response = dict()
response["code"] = int(res[:3])
response["data"] = res[4:]
        return response | en | 0.794067 | # Max size of an UDP packet is about 1400B anyway For class initialization use class methods create_plain or create_secure. Creates an unencrypted UDP API connection using the provided credentials. Creates an AES128-encrypted UDP API connection using the provided credentials and the user's API key. Creates an instance from an existing session. If salt is not None, an encrypted instance is created. Logs out the user from current session and closes the connection. # Exists # does not exist Sends request to the API and returns a dictionary containing response code and data. # Socket timeout / UDP packet not sent | 2.949848 | 3 |
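The class above supports a small create/request/close workflow. The snippet below is only a usage sketch: the credentials and the session file path are placeholders, and UPTIME is assumed here merely as a simple session-backed AniDB UDP command.

# Usage sketch for AnidbConnector; nothing here is part of the original module.
from anidbcli.anidbconnector import AnidbConnector

connector = AnidbConnector.create_plain("demo_user", "demo_password")
response = connector.send_request("UPTIME")  # any command that needs the session parameter
print(response["code"], response["data"])
# Persisting the session lets a later run rebuild the connector via create_from_session.
connector.close(persistent=True, persist_file="/tmp/anidbcli_session.json")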
RecoBTag/PerformanceDB/python/measure/Btag_mistag101220.py | ckamtsikis/cmssw | 852 | 6630037 | import FWCore.ParameterSet.Config as cms
BtagPerformanceESProducer_MISTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPLtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPLwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPLtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPLwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGSSVHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGSSVHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGSSVHEMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGSSVHEMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGSSVHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGSSVHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGSSVHPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGSSVHPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHEL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHEL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHELtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHELwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHEMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHEMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHPTwp_v4_offline')
)
| import FWCore.ParameterSet.Config as cms
BtagPerformanceESProducer_MISTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPLtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPLwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJBPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJBPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJBPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPLtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPLwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGJPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGJPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGJPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGJPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGSSVHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGSSVHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGSSVHEMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGSSVHEMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGSSVHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGSSVHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGSSVHPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGSSVHPTwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHEL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHEL'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHELtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHELwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHEM'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHEMtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHEMwp_v4_offline')
)
BtagPerformanceESProducer_MISTAGTCHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
ComponentName = cms.string('MISTAGTCHPT'),
# this is where it gets the payload from
PayloadName = cms.string('BTagMISTAGTCHPTtable_v4_offline'),
WorkingPointName = cms.string('BTagMISTAGTCHPTwp_v4_offline')
)
| en | 0.977839 | # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from # this is what it makes available # this is where it gets the payload from | 1.708547 | 2 |
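The producer blocks above differ only in the tagger name embedded in each string, so the same module contents can be generated in a loop. The sketch below is equivalent in effect but is not taken from the original repository, and it assumes the standard CMSSW FWCore environment in which this configuration fragment runs.

# Sketch: build the same MISTAG* ESProducers programmatically instead of copy-pasting blocks.
import FWCore.ParameterSet.Config as cms

for _tag in ("JBPL", "JBPM", "JBPT", "JPL", "JPM", "JPT",
             "SSVHEM", "SSVHPT", "TCHEL", "TCHEM", "TCHPT"):
    globals()["BtagPerformanceESProducer_MISTAG" + _tag] = cms.ESProducer(
        "BtagPerformanceESProducer",
        # this is what it makes available
        ComponentName=cms.string("MISTAG" + _tag),
        # this is where it gets the payload from
        PayloadName=cms.string("BTagMISTAG" + _tag + "table_v4_offline"),
        WorkingPointName=cms.string("BTagMISTAG" + _tag + "wp_v4_offline"),
    )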
tests/test_commands.py | kali-physi-hacker/pythonanywhere-cli | 0 | 6630038 | import os
from mock import patch
from pythonanywhere_wrapper.client import PythonAnywhereError
from pythonanywhere_cli.commands import Command, StaticFile, Webapps
class CommonTestFunctions(object):
def command_options(self, **kwargs):
options = {
"--api_key": None,
"--help": False,
"--path": None,
"--python_version": None,
"--url": None,
"--user": None,
"--version": False,
"--virtualenv_path": None,
"<domain_name>": None,
"<path>": None,
"<python_version>": None,
"<static_id>": None,
"<url>": None,
"create": False,
"delete": False,
"list": True,
"reload": False,
"static_mapping": True,
"update": False,
"webapps": False
}
options.update(kwargs)
return options
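    # Illustrative note (not an original comment): command_options() merges the supplied
    # overrides into the docopt-style defaults above, e.g.
    #   self.command_options(**{"create": True, "<domain_name>": "tests.domain.com"})
    # returns the full options dict with only those two entries changed.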
class TestCommand(CommonTestFunctions):
class CommandException(Command):
COMMANDS = [
"exception"
]
def exception(self):
raise PythonAnywhereError("Test Exception")
def test_get_client_if_options(self):
command = Command(self.command_options(**{
"--api_key": "Test Key", "--user": "Test User"
}))
assert command.client.api_key == "Test Key"
assert command.client.user == "Test User"
def test_get_client_if_not_options(self):
os.environ["PYTHONANYWHERE_CLI_API_KEY"] = "Test Key"
os.environ["PYTHONANYWHERE_CLI_USER"] = "Test User"
command = Command(self.command_options())
assert command.client.api_key == "Test Key"
assert command.client.user == "Test User"
@patch("pythonanywhere_cli.commands.snakesay")
def test_run_exception(self, snakesay):
command_exception = self.CommandException(
self.command_options(**{"exception": True})
)
command_exception.run()
snakesay.assert_called_with("Test Exception")
class TestStaticFile(CommonTestFunctions):
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_create(self, mock_client, snakesay):
static_file = StaticFile({
"create": True,
"--api_key": None,
"--user": None,
"<domain_name>": "tests.domain.com",
"<url>": "/url/",
"<path>": "/path/"
})
static_file.run()
client_call = static_file.client.webapps.static_files.create
client_call.assert_called_with(
data={"url": "/url/", "path": "/path/"},
domain_name="tests.domain.com"
)
snakesay.assert_called_with((
"Static File mapping created for domain"
" tests.domain.com with url /url/ and path /path/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_delete(self, mock_client, snakesay):
static_file = StaticFile({
"delete": True,
"--api_key": None,
"--user": None,
"<domain_name>": "tests.domain.com",
"<static_id>": "123"
})
static_file.run()
client_call = static_file.client.webapps.static_files.delete
client_call.assert_called_with(
domain_name="tests.domain.com", static_id="123"
)
snakesay.assert_called_with((
"Static File mapping 123 for domain"
" tests.domain.com has been removed."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_list(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"list": True,
"<domain_name>": "tests.domain.com",
}))
client_call = static_file.client.webapps.static_files
client_call.return_value.json.return_value = [
{u"url": u"/static/", u"path": u"/static/", u"id": 123},
]
static_file.run()
client_call.assert_called_with(
domain_name="tests.domain.com"
)
snakesay.assert_called_with((
"Static File mappings for domain tests.domain.com: 1. ID: 123"
" URL: /static/ Path: /static/"
))
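    # Clarifying note (not an original comment): the return_value.json.return_value chain
    # above emulates the wrapper client, whose call returns a response object that exposes
    # the decoded static-file mappings through .json().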
@patch("pythonanywhere_cli.commands.snakesay")
def test_update_if_not_url_if_not_path(self, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": None,
"--path": None,
}))
static_file.run()
snakesay.assert_called_with((
"You should supply a url or path to make"
" any updates."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_url(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": "/url/",
"--path": None,
}))
static_file.run()
client_call = static_file.client.webapps.static_files.update
client_call.assert_called_with(
domain_name="tests.domain.com",
static_id="123",
data={"url": "/url/"}
)
snakesay.assert_called_with((
"Static File 123 for domain"
" tests.domain.com was updated. URL: /url/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_path(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": None,
"--path": "/path/",
}))
static_file.run()
client_call = static_file.client.webapps.static_files.update
client_call.assert_called_with(
domain_name="tests.domain.com",
static_id="123",
data={"path": "/path/"}
)
snakesay.assert_called_with((
"Static File 123 for domain"
" tests.domain.com was updated. Path: /path/"
))
class TestWebapps(CommonTestFunctions):
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_create(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"create": True,
"<domain_name>": "tests.domain.com",
"<python_version>": "python27",
}))
webapp.run()
client_call = webapp.client.webapps.create
client_call.assert_called_with(
data={
"domain_name": "tests.domain.com", "python_version": "python27"
},
)
snakesay.assert_called_with((
"Webapp created with domain"
" tests.domain.com using python version python27."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_delete(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"delete": True,
"<domain_name>": "tests.domain.com",
}))
webapp.run()
client_call = webapp.client.webapps.delete
client_call.assert_called_with(domain_name="tests.domain.com")
snakesay.assert_called_with((
"Webapp with domain"
" tests.domain.com deleted."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_reload(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"reload": True,
"<domain_name>": "tests.domain.com",
}))
webapp.run()
client_call = webapp.client.webapps.reload
client_call.assert_called_with(domain_name="tests.domain.com")
snakesay.assert_called_with((
"Webapp with domain"
" tests.domain.com has been reloaded."
))
@patch("pythonanywhere_cli.commands.snakesay")
def test_update_if_not_virtualenv_path_if_not_python_vers(self, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": None,
"--python_version": None,
}))
webapp.run()
snakesay.assert_called_with((
"You should supply a virtualenv_path or"
" python_version to make any updates."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_virtualenv_path(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": "/path/",
"--python_version": None,
}))
webapp.run()
client_call = webapp.client.webapps.update
client_call.assert_called_with(
domain_name="tests.domain.com", data={"virtualenv_path": "/path/"}
)
snakesay.assert_called_with((
"Webapp with domain tests.domain.com"
" was updated. Virtualenv Path: /path/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_python_version(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": None,
"--python_version": "python27",
}))
webapp.run()
client_call = webapp.client.webapps.update
client_call.assert_called_with(
domain_name="tests.domain.com", data={"python_version": "python27"}
)
snakesay.assert_called_with((
"Webapp with domain tests.domain.com"
" was updated. Python Version: python27"
))
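# --- Illustrative sketch (not part of the original test module) ---
# The tests above imply a dispatch pattern in pythonanywhere_cli.commands:
# Command.run() finds whichever COMMANDS entry is truthy in the docopt options
# dict, calls the method of the same name, and routes PythonAnywhereError
# messages through snakesay. A hypothetical minimal version of that pattern
# (the attribute and method names here are assumptions, not the real source):
#
#     class Command(object):
#         COMMANDS = []
#
#         def run(self):
#             for name in self.COMMANDS:
#                 if self.options.get(name):
#                     try:
#                         getattr(self, name)()
#                     except PythonAnywhereError as error:
#                         snakesay(str(error))
#
# The real implementation may differ; the tests only pin down the observable
# behaviour (dispatch by option name plus snakesay on errors).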
| import os
from mock import patch
from pythonanywhere_wrapper.client import PythonAnywhereError
from pythonanywhere_cli.commands import Command, StaticFile, Webapps
class CommonTestFunctions(object):
def command_options(self, **kwargs):
options = {
"--api_key": None,
"--help": False,
"--path": None,
"--python_version": None,
"--url": None,
"--user": None,
"--version": False,
"--virtualenv_path": None,
"<domain_name>": None,
"<path>": None,
"<python_version>": None,
"<static_id>": None,
"<url>": None,
"create": False,
"delete": False,
"list": True,
"reload": False,
"static_mapping": True,
"update": False,
"webapps": False
}
options.update(kwargs)
return options
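# The helper above returns a complete docopt-style options dict with safe
# defaults; each test overrides only the keys it cares about. Illustrative use:
#   command_options(**{"create": True, "<domain_name>": "example.com"})
# yields the default dict with just those two entries replaced.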
class TestCommand(CommonTestFunctions):
class CommandException(Command):
COMMANDS = [
"exception"
]
def exception(self):
raise PythonAnywhereError("Test Exception")
def test_get_client_if_options(self):
command = Command(self.command_options(**{
"--api_key": "Test Key", "--user": "Test User"
}))
assert command.client.api_key == "Test Key"
assert command.client.user == "Test User"
def test_get_client_if_not_options(self):
os.environ["PYTHONANYWHERE_CLI_API_KEY"] = "Test Key"
os.environ["PYTHONANYWHERE_CLI_USER"] = "Test User"
command = Command(self.command_options())
assert command.client.api_key == "Test Key"
assert command.client.user == "Test User"
@patch("pythonanywhere_cli.commands.snakesay")
def test_run_exception(self, snakesay):
command_exception = self.CommandException(
self.command_options(**{"exception": True})
)
command_exception.run()
snakesay.assert_called_with("Test Exception")
class TestStaticFile(CommonTestFunctions):
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_create(self, mock_client, snakesay):
static_file = StaticFile({
"create": True,
"--api_key": None,
"--user": None,
"<domain_name>": "tests.domain.com",
"<url>": "/url/",
"<path>": "/path/"
})
static_file.run()
client_call = static_file.client.webapps.static_files.create
client_call.assert_called_with(
data={"url": "/url/", "path": "/path/"},
domain_name="tests.domain.com"
)
snakesay.assert_called_with((
"Static File mapping created for domain"
" tests.domain.com with url /url/ and path /path/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_delete(self, mock_client, snakesay):
static_file = StaticFile({
"delete": True,
"--api_key": None,
"--user": None,
"<domain_name>": "tests.domain.com",
"<static_id>": "123"
})
static_file.run()
client_call = static_file.client.webapps.static_files.delete
client_call.assert_called_with(
domain_name="tests.domain.com", static_id="123"
)
snakesay.assert_called_with((
"Static File mapping 123 for domain"
" tests.domain.com has been removed."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_list(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"list": True,
"<domain_name>": "tests.domain.com",
}))
client_call = static_file.client.webapps.static_files
client_call.return_value.json.return_value = [
{u"url": u"/static/", u"path": u"/static/", u"id": 123},
]
static_file.run()
client_call.assert_called_with(
domain_name="tests.domain.com"
)
snakesay.assert_called_with((
"Static File mappings for domain tests.domain.com: 1. ID: 123"
" URL: /static/ Path: /static/"
))
@patch("pythonanywhere_cli.commands.snakesay")
def test_update_if_not_url_if_not_path(self, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": None,
"--path": None,
}))
static_file.run()
snakesay.assert_called_with((
"You should supply a url or path to make"
" any updates."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_url(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": "/url/",
"--path": None,
}))
static_file.run()
client_call = static_file.client.webapps.static_files.update
client_call.assert_called_with(
domain_name="tests.domain.com",
static_id="123",
data={"url": "/url/"}
)
snakesay.assert_called_with((
"Static File 123 for domain"
" tests.domain.com was updated. URL: /url/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_path(self, mock_client, snakesay):
static_file = StaticFile(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"<static_id>": "123",
"--url": None,
"--path": "/path/",
}))
static_file.run()
client_call = static_file.client.webapps.static_files.update
client_call.assert_called_with(
domain_name="tests.domain.com",
static_id="123",
data={"path": "/path/"}
)
snakesay.assert_called_with((
"Static File 123 for domain"
" tests.domain.com was updated. Path: /path/"
))
class TestWebapps(CommonTestFunctions):
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_create(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"create": True,
"<domain_name>": "tests.domain.com",
"<python_version>": "python27",
}))
webapp.run()
client_call = webapp.client.webapps.create
client_call.assert_called_with(
data={
"domain_name": "tests.domain.com", "python_version": "python27"
},
)
snakesay.assert_called_with((
"Webapp created with domain"
" tests.domain.com using python version python27."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_delete(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"delete": True,
"<domain_name>": "tests.domain.com",
}))
webapp.run()
client_call = webapp.client.webapps.delete
client_call.assert_called_with(domain_name="tests.domain.com")
snakesay.assert_called_with((
"Webapp with domain"
" tests.domain.com deleted."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_reload(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"reload": True,
"<domain_name>": "tests.domain.com",
}))
webapp.run()
client_call = webapp.client.webapps.reload
client_call.assert_called_with(domain_name="tests.domain.com")
snakesay.assert_called_with((
"Webapp with domain"
" tests.domain.com has been reloaded."
))
@patch("pythonanywhere_cli.commands.snakesay")
def test_update_if_not_virtualenv_path_if_not_python_vers(self, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": None,
"--python_version": None,
}))
webapp.run()
snakesay.assert_called_with((
"You should supply a virtualenv_path or"
" python_version to make any updates."
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_virtualenv_path(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": "/path/",
"--python_version": None,
}))
webapp.run()
client_call = webapp.client.webapps.update
client_call.assert_called_with(
domain_name="tests.domain.com", data={"virtualenv_path": "/path/"}
)
snakesay.assert_called_with((
"Webapp with domain tests.domain.com"
" was updated. Virtualenv Path: /path/"
))
@patch("pythonanywhere_cli.commands.snakesay")
@patch("pythonanywhere_cli.commands.PythonAnywhere")
def test_update_if_python_version(self, mock_client, snakesay):
webapp = Webapps(self.command_options(**{
"update": True,
"<domain_name>": "tests.domain.com",
"--virtualenv_path": None,
"--python_version": "python27",
}))
webapp.run()
client_call = webapp.client.webapps.update
client_call.assert_called_with(
domain_name="tests.domain.com", data={"python_version": "python27"}
)
snakesay.assert_called_with((
"Webapp with domain tests.domain.com"
" was updated. Python Version: python27"
)) | none | 1 | 2.239162 | 2 |
|
labelbits_gpio_40_41.py | TheMindVirus/labelbits | 0 | 6630039 | class label:
def __init__(self, name = "[LABEL]",
shift = 0, mask = 0xFFFFFFFF, comment = "", *args, **kwargs):
self.name = name
self.shift = shift
self.mask = mask
self.comment = comment
class labelbits:
def __init__(self, labels = [], bits = 32, *args, **kwargs):
self.bits = bits
self.labels = labels
def cover(self, binary, item):
masked = ""
mask = item.mask << item.shift
for i in range(self.bits -1, -1, -1):
if ((mask >> i) & 1):
masked += str((binary >> i) & 1)
else:
masked += "-"
return masked
def mode(self, binary):
mapping = ["Input", "Output", "ALT5", "ALT4", "ALT0", "ALT1", "ALT2", "ALT3"]
if (binary >= 0) and (binary < len(mapping)):
return mapping[binary]
else:
return str(binary)
def info(self, binary, indent = 32):
print("<<<LabelBits>>>")
raw = format(binary, "#0" + str(self.bits + 2) + "b")[2:]
print(("Raw Data:\t").expandtabs(indent) + str(raw) + " [LSB]\n")
for item in self.labels:
data = (binary >> item.shift) & item.mask
line = str(item.name) + ":\t" + self.cover(binary, item)
comment = "//" + str(item.comment)
print(comment + "\n" + line.expandtabs(indent) + " [" +
self.mode(data) + "]\n")
ALT_LABELS = \
[
label("FSEL40", 0, 0b111, "GPIO 40"),
label("FSEL41", 3, 0b111, "GPIO 41"),
label("FSEL42", 6, 0b111, "GPIO 42"),
label("FSEL43", 9, 0b111, "GPIO 43"),
label("FSEL44", 12, 0b111, "GPIO 44"),
label("FSEL45", 15, 0b111, "GPIO 45"),
label("FSEL46", 18, 0b111, "GPIO 46"),
label("FSEL47", 21, 0b111, "GPIO 47"),
label("FSEL48", 24, 0b111, "GPIO 48"),
label("FSEL49", 27, 0b111, "GPIO 49"),
label("-", 30, 0b11, "Reserved"),
]
print("000 - Input")
print("001 - Output")
print("100 - ALT0")
print("101 - ALT1")
print("110 - ALT2")
print("111 - ALT3")
print("011 - ALT4")
print("010 - ALT5")
print()
GPIO = labelbits(ALT_LABELS)
GPIO.info(0x00000064)
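# Worked example (derived from the code above, added for clarity):
# 0x00000064 == 0b110_0100, so decoding three bits per FSEL field gives
#   FSEL40 = (0x64 >> 0) & 0b111 == 0b100 -> "ALT0"
#   FSEL41 = (0x64 >> 3) & 0b111 == 0b100 -> "ALT0"
#   FSEL42 = (0x64 >> 6) & 0b111 == 0b001 -> "Output"
# and all remaining fields are 0b000 -> "Input", which is what GPIO.info prints.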
| class label:
def __init__(self, name = "[LABEL]",
shift = 0, mask = 0xFFFFFFFF, comment = "", *args, **kwargs):
self.name = name
self.shift = shift
self.mask = mask
self.comment = comment
class labelbits:
def __init__(self, labels = [], bits = 32, *args, **kwargs):
self.bits = bits
self.labels = labels
def cover(self, binary, item):
masked = ""
mask = item.mask << item.shift
for i in range(self.bits -1, -1, -1):
if ((mask >> i) & 1):
masked += str((binary >> i) & 1)
else:
masked += "-"
return masked
def mode(self, binary):
mapping = ["Input", "Output", "ALT5", "ALT4", "ALT0", "ALT1", "ALT2", "ALT3"]
if (binary >= 0) and (binary < len(mapping)):
return mapping[binary]
else:
return str(binary)
def info(self, binary, indent = 32):
print("<<<LabelBits>>>")
raw = format(binary, "#0" + str(self.bits + 2) + "b")[2:]
print(("Raw Data:\t").expandtabs(indent) + str(raw) + " [LSB]\n")
for item in self.labels:
data = (binary >> item.shift) & item.mask
line = str(item.name) + ":\t" + self.cover(binary, item)
comment = "//" + str(item.comment)
print(comment + "\n" + line.expandtabs(indent) + " [" +
self.mode(data) + "]\n")
ALT_LABELS = \
[
label("FSEL40", 0, 0b111, "GPIO 40"),
label("FSEL41", 3, 0b111, "GPIO 41"),
label("FSEL42", 6, 0b111, "GPIO 42"),
label("FSEL43", 9, 0b111, "GPIO 43"),
label("FSEL44", 12, 0b111, "GPIO 44"),
label("FSEL45", 15, 0b111, "GPIO 45"),
label("FSEL46", 18, 0b111, "GPIO 46"),
label("FSEL47", 21, 0b111, "GPIO 47"),
label("FSEL48", 24, 0b111, "GPIO 48"),
label("FSEL49", 27, 0b111, "GPIO 49"),
label("-", 30, 0b11, "Reserved"),
]
print("000 - Input")
print("001 - Output")
print("100 - ALT0")
print("101 - ALT1")
print("110 - ALT2")
print("111 - ALT3")
print("011 - ALT4")
print("010 - ALT5")
print()
GPIO = labelbits(ALT_LABELS)
GPIO.info(0x00000064)
| none | 1 | 3.218371 | 3 |
|
automation_infra/utils/tunnelled_requestor.py | AnyVisionltd/automation-infra | 6 | 6630040 | <gh_stars>1-10
import requests
from automation_infra.utils.httprequestor import HTTPRequestor
class TunnelledRequestor(HTTPRequestor):
"""The goal of the requestor object is to enable communication with a service in different ways.
This requestor enables interacting with a service via an existing tunnel.
Other possible requestors are Kong requestor (to use for interacting with a service via kong),
or others as the need arises."""
def __init__(self, tunnel):
"""params: tunnel: infra.model.Tunnel object
A tunnel can be started with the help of the host.TunnelManager plugin start_tunnel() method"""
self.tunnel = tunnel
def build_url(self, route):
url = f"http://{self.tunnel._hostname}:{self.tunnel._local_bind_port}{route}"
return url
def get(self, route, params=None, **kwargs):
"""route is the path of the url without the domainname"""
formatted_url = self.build_url(route)
res = requests.get(formatted_url, params=params, **kwargs)
return res
def post(self, route, data=None, json=None, **kwargs):
formatted_url = self.build_url(route)
res = requests.post(formatted_url, data, json, **kwargs)
return res
def put(self, route, data=None, **kwargs):
formatted_url = self.build_url(route)
res = requests.put(formatted_url, data, **kwargs)
return res
def delete(self, route, **kwargs):
formatted_url = self.build_url(route)
res = requests.delete(formatted_url, **kwargs)
return res
def patch(self, route, data=None, **kwargs) -> requests.Response:
formatted_url = self.build_url(route)
res = requests.patch(formatted_url, data, **kwargs)
return res
# TODO: implement other CRUD methods if needed...
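# --- Usage sketch (illustrative, not part of the original module) ---
# Any object exposing `_hostname` and `_local_bind_port` can stand in for the
# infra.model.Tunnel that build_url() reads; the class below is a hypothetical
# test double, not a real tunnel.
#
#     class _FakeTunnel(object):
#         _hostname = "127.0.0.1"
#         _local_bind_port = 8080
#
#     requestor = TunnelledRequestor(_FakeTunnel())
#     # requestor.get("/api/v1/health") would issue
#     # GET http://127.0.0.1:8080/api/v1/health through the local tunnel port.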
| import requests
from automation_infra.utils.httprequestor import HTTPRequestor
class TunnelledRequestor(HTTPRequestor):
"""The goal of the requestor object is to enable communication with a service in different ways.
This requestor enables interacting with a service via an existing tunnel.
Other possible requestors are Kong requestor (to use for interacting with a service via kong),
or others as the need arises."""
def __init__(self, tunnel):
"""params: tunnel: infra.model.Tunnel object
A tunnel can be started with the help of the host.TunnelManager plugin start_tunnel() method"""
self.tunnel = tunnel
def build_url(self, route):
url = f"http://{self.tunnel._hostname}:{self.tunnel._local_bind_port}{route}"
return url
def get(self, route, params=None, **kwargs):
"""route is the path of the url without the domainname"""
formatted_url = self.build_url(route)
res = requests.get(formatted_url, params=params, **kwargs)
return res
def post(self, route, data=None, json=None, **kwargs):
formatted_url = self.build_url(route)
res = requests.post(formatted_url, data, json, **kwargs)
return res
def put(self, route, data=None, **kwargs):
formatted_url = self.build_url(route)
res = requests.put(formatted_url, data, **kwargs)
return res
def delete(self, route, **kwargs):
formatted_url = self.build_url(route)
res = requests.delete(formatted_url, **kwargs)
return res
def patch(self, route, data=None, **kwargs) -> requests.Response:
formatted_url = self.build_url(route)
res = requests.patch(formatted_url, data, **kwargs)
return res
# TODO: implement other CRUD methods if needed... | en | 0.858906 | The goal of the requestor object is to enable communication with a service in different ways. This requestor enables interacting with a service via an existing tunnel. Other possible requestors are Kong requestor (to use for interacting with a service via kong), or others as the need arises. params: tunnel: infra.model.Tunnel object A tunnel can be started with the help of the host.TunnelManager plugin start_tunnel() method route is the path of the url without the domainname # TODO: implement other CRUD methods if needed... | 3.005185 | 3 |
mayan/apps/documents/migrations/0043_auto_20180429_0759.py | Fourdee/mayan-edms | 4 | 6630041 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-29 07:59
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
import documents.models
class Migration(migrations.Migration):
dependencies = [
('documents', '0042_auto_20180403_0702'),
]
operations = [
migrations.AlterField(
model_name='documentversion',
name='encoding',
field=models.CharField(blank=True, editable=False, max_length=64, null=True, verbose_name='Encoding'),
),
migrations.AlterField(
model_name='documentversion',
name='file',
field=models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/home/rosarior/development/mayan-edms/mayan/media/document_storage'), upload_to=documents.models.UUID_FUNCTION, verbose_name='File'),
),
migrations.AlterField(
model_name='documentversion',
name='mimetype',
field=models.CharField(blank=True, editable=False, max_length=255, null=True, verbose_name='MIME type'),
),
]
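# Note (added for clarity): the FileSystemStorage location above was serialized
# from the settings of the machine that ran `makemigrations`, which is why an
# absolute /home/rosarior/... path is baked into the migration file. Applying
# it follows the usual Django flow, e.g. (typical command, not project-specific):
#     python manage.py migrate documents 0043_auto_20180429_0759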
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-29 07:59
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
import documents.models
class Migration(migrations.Migration):
dependencies = [
('documents', '0042_auto_20180403_0702'),
]
operations = [
migrations.AlterField(
model_name='documentversion',
name='encoding',
field=models.CharField(blank=True, editable=False, max_length=64, null=True, verbose_name='Encoding'),
),
migrations.AlterField(
model_name='documentversion',
name='file',
field=models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/home/rosarior/development/mayan-edms/mayan/media/document_storage'), upload_to=documents.models.UUID_FUNCTION, verbose_name='File'),
),
migrations.AlterField(
model_name='documentversion',
name='mimetype',
field=models.CharField(blank=True, editable=False, max_length=255, null=True, verbose_name='MIME type'),
),
] | en | 0.693727 | # -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-04-29 07:59 | 1.70499 | 2 |
src/utils/utils.py | elisaF/rst_discourse_parser | 1 | 6630042 | <reponame>elisaF/rst_discourse_parser
import re
from itertools import izip
from nltk.tree import Tree
from trees.parse_tree import ParseTree
import rst_lib
import string
def replace_words(text, word_dic):
"""
take a text and replace words that match a key in a dictionary with
the associated value, return the changed text
"""
rc = re.compile('|'.join(map(re.escape, word_dic)))
def translate(match):
return word_dic[match.group(0)]
return rc.sub(translate, text)
def unescape_penn_special_word(text):
penn_special_chars = {'-LRB-': '(', '-RRB-': ')', '-LAB-': '<', '-RAB-': '>',
'-LCB-': '{', '-RCB-': '}', '-LSB-': '[', '-RSB-':']',
'\\/' : '/', '\\*' : '*',
'``' : '"', "''" : '"', '`' : "'"}
return replace_words(text, penn_special_chars)
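# Example (illustrative): with the mapping above,
#   unescape_penn_special_word("-LRB- hello -RRB-") -> "( hello )"
# replace_words() itself is generic: it builds one regex alternation over the
# dictionary keys and substitutes each match with its mapped value.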
def sorted_dict_values_by_key(adict):
L = []
for i in sorted(adict.keys()):
L.append(adict[i])
return L
def sorted_dict_keys(adict):
keys = adict.keys()
return sorted(keys)
argmax = lambda array: max(izip(array, xrange(len(array))))[1]
argmin = lambda array: min(izip(array, xrange(len(array))))[1]
def permutation_indices(data):
return sorted(range(len(data)), key = data.__getitem__)
def argsmax (array, how_many):
L = permutation_indices(array)[-how_many:]
L.reverse()
return L
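# Example (illustrative): argsmax([1, 5, 3, 2], 2) -> [1, 2], i.e. the indices
# of the two largest values, largest first.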
def count_how_many(array, item):
tot = 0
for array_item in array:
if item == array_item:
tot += 1
return tot
def split_mrg_by_sentence(s):
result = []
cnt_par = 0
last_split_index = 0
for i in range(0, len(s)):
if s[i] == "(":
cnt_par = cnt_par + 1
elif s[i] == ")":
cnt_par = cnt_par - 1
if cnt_par == 0:
# Split
if last_split_index < i:
result.append(s[last_split_index:i].replace("\n","").strip()[1:])
last_split_index = i + 1
return result
def simplified_tag(t):
"""
Returns a simplified POS tag:
NP-SBJ -> NP
PP=4 -> PP
-RRB- -> -RRB-
"""
if t == None:
return None
if t[0:1] == "-":
return t
else:
caret_pos = t.find("-")
t_minus_caret = ""
if not caret_pos == -1:
t_minus_caret = t[0:caret_pos]
else:
t_minus_caret = t
equal_pos = t_minus_caret.find("=")
t_simplified = ""
if not equal_pos == -1:
t_simplified = t_minus_caret[0:equal_pos]
else:
t_simplified = t_minus_caret
return t_simplified
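# Examples matching the docstring above (illustrative):
#   simplified_tag("NP-SBJ") -> "NP"
#   simplified_tag("PP=4")   -> "PP"
#   simplified_tag("-RRB-")  -> "-RRB-"  (bracket tags are returned unchanged)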
def split_hilda_inputfile_by_sentence(f):
sents = []
for line in open(f).readlines():
line = line.strip()
if line == '':
continue
sents.append(line.split('<s>'))
return sents
def get_sent_dependencies(deps):
sent2dep_list = []
for (i, sent_deps) in enumerate(deps):
sent_dep_list = []
#tree = trees[i]
for dep_item in sent_deps.split('\r\n'):
dep_pattern = r'(.+?)\((.+?)-(\d+?), (.+?)-(\d+?)\)'
dep_m = re.match(dep_pattern, dep_item)
if dep_m is not None:
#dep_type = type2class[dep_m.group(1).split('_')[0]]
dep_type = dep_m.group(1)
governor_word = dep_m.group(2)
governor_word_number = int(dep_m.group(3)) - 1
dependent_word = dep_m.group(4)
dependent_word_number = int(dep_m.group(5)) - 1
dep_item_info = (dep_type, governor_word, governor_word_number, dependent_word, dependent_word_number)
# print
sent_dep_list.append(dep_item_info)
sent2dep_list.append(sent_dep_list)
return sent2dep_list
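# Input format assumed above (illustrative): Stanford-style typed dependency
# strings, one per line, e.g. "nsubj(likes-2, John-1)". That line parses to
# ("nsubj", "likes", 1, "John", 0) -- word numbers are shifted to 0-based.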
def print_SGML_tree(parse_tree, offset = 1, depth = 0, status = None, relation = None):
joty_script_mapping = {'textual-organization' : 'TextualOrganization',
'same-unit' : 'Same-Unit'}
out = ''
for i in range(depth):
out += ' '
if isinstance(parse_tree, basestring):
return out + '( %s (leaf %d) (rel2par %s) (text _!%s_!) )\n' % (status, offset,
relation, parse_tree)
out += '( %s (span %d %d)' % ('Root' if depth == 0 else status, offset, offset + len(parse_tree.leaves()) - 1)
if depth > 0:
out += ' (rel2par %s)' % relation
out += '\n'
left = parse_tree[0]
#print left
left_status = 'Nucleus' if parse_tree.label()[-5] == 'N' else 'Satellite'
right = parse_tree[1]
#print right
right_status = 'Nucleus' if parse_tree.label()[-2] == 'N' else 'Satellite'
if left_status[0] == 'S' and right_status[0] == 'N':
left_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
right_relation = 'span'
elif right_status[0] == 'S' and left_status[0] == 'N':
right_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
left_relation = 'span'
else:
left_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
right_relation = left_relation
out += print_SGML_tree(left, offset, depth + 1, left_status, left_relation)
out += print_SGML_tree(right, offset + (len(left.leaves()) if isinstance(left, Tree) else 1), depth + 1, right_status, right_relation)
for i in range(depth):
out += ' '
out += ')\n'
return out
def copy_subtree(subtree, detach = False):
if isinstance(subtree, Tree):
result = subtree.__deepcopy__()
if detach:
result._parent = None
else:
result = subtree
return result
def make_new_subtree(label, subtree1, subtree2, deepcopy = False):
if deepcopy:
stump1_clone = copy_subtree(subtree1, True)
stump2_clone = copy_subtree(subtree2, True)
else:
stump1_clone = subtree1
stump2_clone = subtree2
if isinstance(stump1_clone, ParseTree):
stump1_clone._parent = None
if isinstance(stump2_clone, ParseTree):
stump2_clone._parent = None
return ParseTree(label, [stump1_clone, stump2_clone])
# return ParseTree(label, [stump1_clone, stump2_clone])
def find_EDU_in_sentence_index(cuts, edu_index):
for (i, (sent_start_edu, sent_end_edu)) in enumerate(cuts):
if edu_index >= sent_start_edu and edu_index < sent_end_edu:
return i
def load_tree_from_file(filename, tokenize = False):
def preprocess_leaf(leaf):
leaf = re.sub('_!(.+?)!_', '\\1', leaf)
if tokenize:
return leaf.split(' ')
else:
return leaf
if filename.endswith('.dis'):
pt = rst_lib.load_tree(filename)
elif filename.endswith('.tree'):
pt = ParseTree.parse(open(filename).read(), leaf_pattern = '_!.+?!_', parse_leaf = preprocess_leaf)
return pt
def is_punctuation(word):
if not word or len(word) == 0:
return False
for i in range(len(word)):
if word[i] not in string.punctuation:
return False
return True
def simplify_tree(tree, start):
if not tree:
return None
# print 'before', tree
if not isinstance(tree, ParseTree):
t = ParseTree('leaf', [str(start + 1)])
else:
t = tree.__deepcopy__(None)
# print t
L = simplify_tree(tree[0], start)
R = simplify_tree(tree[1], start + len(L.leaves()))
t[0] = L
t[1] = R
# print 'end', t
# print
return t
def get_syntactic_subtrees(tree, start_word, end_word):
# print tree
# print 'start_word', start_word, 'end_word', end_word
# print
if tree.label() == 'ROOT':
tree = tree[0]
assert start_word >= 0 and end_word - start_word <= len(tree.leaves()) and start_word < end_word
if len(tree.leaves()) == end_word - start_word:
return [tree]
subtrees = []
start = 0
i = 0
# print len(tree)
while i < len(tree) - 1:
if start + len(tree[i].leaves()) > start_word:
break
start += len(tree[i].leaves())
i += 1
j = len(tree) - 1
end = len(tree.leaves())
while j > 0:
if end - len(tree[j].leaves()) < end_word:
break
end -= len(tree[j].leaves())
j -= 1
# print 'i', i, 'j', j
for k in range(i, j + 1):
subtree = tree[k]
if k == i:
if k == j:
end1 = end_word - start
else:
end1 = len(subtree.leaves())
subtrees.extend(get_syntactic_subtrees(subtree, start_word - start, end1))
elif k == j:
if k == i:
start1 = start_word
else:
start1 = 0
subtrees.extend(get_syntactic_subtrees(subtree, start1, end_word - (end - len(subtree.leaves()))))
else:
subtrees.append(subtree)
length = 0
for subtree in subtrees:
length += len(subtree.leaves())
assert length == end_word - start_word
return subtrees
def get_edu_entity_grid(entity_grid_filename):
grid = []
for line in open(entity_grid_filename).readlines()[1 : ]:
line = line.strip()
if line != '':
fields = line.split('\t')
grid.append(fields[1 : ])
return grid
def compute_edit_distance(sequence1, sequence2):
#print 'rst:' , rst_actions
#print 'pst:', pt_actions
m = len(sequence1)
n = len(sequence2)
matrix = {}
for i in range(m + 1):
#print matrix[i]
matrix[(i, 0)] = i
for j in range(n + 1):
matrix[(0, j)] = j
for j in range(1, n + 1):
for i in range(1, m + 1):
if sequence1[i - 1] == sequence2[j - 1]:
substitution_cost = 0
else:
substitution_cost = 1
matrix[(i, j)] = min(matrix[(i - 1, j - 1)] + substitution_cost,
matrix[(i - 1, j)] + 1,
matrix[(i, j - 1)] + 1)
if i > 1 and j > 1 and sequence1[i - 1] == sequence2[j - 2] and sequence1[i - 2] == sequence2[j - 1]:
matrix[(i, j)] = min(matrix[i - 2, j - 2] + substitution_cost,
matrix[(i, j)])
#for i in range(1, m + 1):
#print rst_actions[i - 1], pt_actions[i - 1], matrix[(i, i)]
return matrix[(m, n)]
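# Note (added for clarity): this is an optimal-string-alignment edit distance --
# unit-cost insert/delete/substitute plus adjacent transposition, in the spirit
# of Damerau-Levenshtein. Illustrative values:
#   compute_edit_distance("ab", "ba")           -> 1   (one transposition)
#   compute_edit_distance("kitten", "sitting")  -> 3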
| import re
from itertools import izip
from nltk.tree import Tree
from trees.parse_tree import ParseTree
import rst_lib
import string
def replace_words(text, word_dic):
"""
take a text and replace words that match a key in a dictionary with
the associated value, return the changed text
"""
rc = re.compile('|'.join(map(re.escape, word_dic)))
def translate(match):
return word_dic[match.group(0)]
return rc.sub(translate, text)
def unescape_penn_special_word(text):
penn_special_chars = {'-LRB-': '(', '-RRB-': ')', '-LAB-': '<', '-RAB-': '>',
'-LCB-': '{', '-RCB-': '}', '-LSB-': '[', '-RSB-':']',
'\\/' : '/', '\\*' : '*',
'``' : '"', "''" : '"', '`' : "'"}
return replace_words(text, penn_special_chars)
def sorted_dict_values_by_key(adict):
L = []
for i in sorted(adict.keys()):
L.append(adict[i])
return L
def sorted_dict_keys(adict):
keys = adict.keys()
return sorted(keys)
argmax = lambda array: max(izip(array, xrange(len(array))))[1]
argmin = lambda array: min(izip(array, xrange(len(array))))[1]
def permutation_indices(data):
return sorted(range(len(data)), key = data.__getitem__)
def argsmax (array, how_many):
L = permutation_indices(array)[-how_many:]
L.reverse()
return L
def count_how_many(array, item):
tot = 0
for array_item in array:
if item == array_item:
tot += 1
return tot
def split_mrg_by_sentence(s):
result = []
cnt_par = 0
last_split_index = 0
for i in range(0, len(s)):
if s[i] == "(":
cnt_par = cnt_par + 1
elif s[i] == ")":
cnt_par = cnt_par - 1
if cnt_par == 0:
# Split
if last_split_index < i:
result.append(s[last_split_index:i].replace("\n","").strip()[1:])
last_split_index = i + 1
return result
def simplified_tag(t):
"""
Returns a simplified POS tag:
NP-SBJ -> NP
PP=4 -> PP
-RRB- -> -RRB-
"""
if t == None:
return None
if t[0:1] == "-":
return t
else:
caret_pos = t.find("-")
t_minus_caret = ""
if not caret_pos == -1:
t_minus_caret = t[0:caret_pos]
else:
t_minus_caret = t
equal_pos = t_minus_caret.find("=")
t_simplified = ""
if not equal_pos == -1:
t_simplified = t_minus_caret[0:equal_pos]
else:
t_simplified = t_minus_caret
return t_simplified
def split_hilda_inputfile_by_sentence(f):
sents = []
for line in open(f).readlines():
line = line.strip()
if line == '':
continue
sents.append(line.split('<s>'))
return sents
def get_sent_dependencies(deps):
sent2dep_list = []
for (i, sent_deps) in enumerate(deps):
sent_dep_list = []
#tree = trees[i]
for dep_item in sent_deps.split('\r\n'):
dep_pattern = r'(.+?)\((.+?)-(\d+?), (.+?)-(\d+?)\)'
dep_m = re.match(dep_pattern, dep_item)
if dep_m is not None:
#dep_type = type2class[dep_m.group(1).split('_')[0]]
dep_type = dep_m.group(1)
governor_word = dep_m.group(2)
governor_word_number = int(dep_m.group(3)) - 1
dependent_word = dep_m.group(4)
dependent_word_number = int(dep_m.group(5)) - 1
dep_item_info = (dep_type, governor_word, governor_word_number, dependent_word, dependent_word_number)
# print
sent_dep_list.append(dep_item_info)
sent2dep_list.append(sent_dep_list)
return sent2dep_list
def print_SGML_tree(parse_tree, offset = 1, depth = 0, status = None, relation = None):
joty_script_mapping = {'textual-organization' : 'TextualOrganization',
'same-unit' : 'Same-Unit'}
out = ''
for i in range(depth):
out += ' '
if isinstance(parse_tree, basestring):
return out + '( %s (leaf %d) (rel2par %s) (text _!%s_!) )\n' % (status, offset,
relation, parse_tree)
out += '( %s (span %d %d)' % ('Root' if depth == 0 else status, offset, offset + len(parse_tree.leaves()) - 1)
if depth > 0:
out += ' (rel2par %s)' % relation
out += '\n'
left = parse_tree[0]
#print left
left_status = 'Nucleus' if parse_tree.label()[-5] == 'N' else 'Satellite'
right = parse_tree[1]
#print right
right_status = 'Nucleus' if parse_tree.label()[-2] == 'N' else 'Satellite'
if left_status[0] == 'S' and right_status[0] == 'N':
left_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
right_relation = 'span'
elif right_status[0] == 'S' and left_status[0] == 'N':
right_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
left_relation = 'span'
else:
left_relation = replace_words(parse_tree.label()[ : -6], joty_script_mapping)
right_relation = left_relation
out += print_SGML_tree(left, offset, depth + 1, left_status, left_relation)
out += print_SGML_tree(right, offset + (len(left.leaves()) if isinstance(left, Tree) else 1), depth + 1, right_status, right_relation)
for i in range(depth):
out += ' '
out += ')\n'
return out
def copy_subtree(subtree, detach = False):
if isinstance(subtree, Tree):
result = subtree.__deepcopy__()
if detach:
result._parent = None
else:
result = subtree
return result
def make_new_subtree(label, subtree1, subtree2, deepcopy = False):
if deepcopy:
stump1_clone = copy_subtree(subtree1, True)
stump2_clone = copy_subtree(subtree2, True)
else:
stump1_clone = subtree1
stump2_clone = subtree2
if isinstance(stump1_clone, ParseTree):
stump1_clone._parent = None
if isinstance(stump2_clone, ParseTree):
stump2_clone._parent = None
return ParseTree(label, [stump1_clone, stump2_clone])
# return ParseTree(label, [stump1_clone, stump2_clone])
def find_EDU_in_sentence_index(cuts, edu_index):
for (i, (sent_start_edu, sent_end_edu)) in enumerate(cuts):
if edu_index >= sent_start_edu and edu_index < sent_end_edu:
return i
def load_tree_from_file(filename, tokenize = False):
def preprocess_leaf(leaf):
leaf = re.sub('_!(.+?)!_', '\\1', leaf)
if tokenize:
return leaf.split(' ')
else:
return leaf
if filename.endswith('.dis'):
pt = rst_lib.load_tree(filename)
elif filename.endswith('.tree'):
pt = ParseTree.parse(open(filename).read(), leaf_pattern = '_!.+?!_', parse_leaf = preprocess_leaf)
return pt
def is_punctuation(word):
if not word or len(word) == 0:
return False
for i in range(len(word)):
if word[i] not in string.punctuation:
return False
return True
def simplify_tree(tree, start):
if not tree:
return None
# print 'before', tree
if not isinstance(tree, ParseTree):
t = ParseTree('leaf', [str(start + 1)])
else:
t = tree.__deepcopy__(None)
# print t
L = simplify_tree(tree[0], start)
R = simplify_tree(tree[1], start + len(L.leaves()))
t[0] = L
t[1] = R
# print 'end', t
# print
return t
def get_syntactic_subtrees(tree, start_word, end_word):
# print tree
# print 'start_word', start_word, 'end_word', end_word
# print
if tree.label() == 'ROOT':
tree = tree[0]
assert start_word >= 0 and end_word - start_word <= len(tree.leaves()) and start_word < end_word
if len(tree.leaves()) == end_word - start_word:
return [tree]
subtrees = []
start = 0
i = 0
# print len(tree)
while i < len(tree) - 1:
if start + len(tree[i].leaves()) > start_word:
break
start += len(tree[i].leaves())
i += 1
j = len(tree) - 1
end = len(tree.leaves())
while j > 0:
if end - len(tree[j].leaves()) < end_word:
break
end -= len(tree[j].leaves())
j -= 1
# print 'i', i, 'j', j
for k in range(i, j + 1):
subtree = tree[k]
if k == i:
if k == j:
end1 = end_word - start
else:
end1 = len(subtree.leaves())
subtrees.extend(get_syntactic_subtrees(subtree, start_word - start, end1))
elif k == j:
if k == i:
start1 = start_word
else:
start1 = 0
subtrees.extend(get_syntactic_subtrees(subtree, start1, end_word - (end - len(subtree.leaves()))))
else:
subtrees.append(subtree)
length = 0
for subtree in subtrees:
length += len(subtree.leaves())
assert length == end_word - start_word
return subtrees
def get_edu_entity_grid(entity_grid_filename):
grid = []
for line in open(entity_grid_filename).readlines()[1 : ]:
line = line.strip()
if line != '':
fields = line.split('\t')
grid.append(fields[1 : ])
return grid
def compute_edit_distance(sequence1, sequence2):
#print 'rst:' , rst_actions
#print 'pst:', pt_actions
m = len(sequence1)
n = len(sequence2)
matrix = {}
for i in range(m + 1):
#print matrix[i]
matrix[(i, 0)] = i
for j in range(n + 1):
matrix[(0, j)] = j
for j in range(1, n + 1):
for i in range(1, m + 1):
if sequence1[i - 1] == sequence2[j - 1]:
substitution_cost = 0
else:
substitution_cost = 1
matrix[(i, j)] = min(matrix[(i - 1, j - 1)] + substitution_cost,
matrix[(i - 1, j)] + 1,
matrix[(i, j - 1)] + 1)
if i > 1 and j > 1 and sequence1[i - 1] == sequence2[j - 2] and sequence1[i - 2] == sequence2[j - 1]:
matrix[(i, j)] = min(matrix[i - 2, j - 2] + substitution_cost,
matrix[(i, j)])
#for i in range(1, m + 1):
#print rst_actions[i - 1], pt_actions[i - 1], matrix[(i, i)]
return matrix[(m, n)] | en | 0.331073 | take a text and replace words that match a key in a dictionary with
the associated value, return the changed text # Split Returns a simplified POS tag:
NP-SBJ -> NP
PP=4 -> PP
-RRB- -> -RRB- #tree = trees[i] #dep_type = type2class[dep_m.group(1).split('_')[0]] # print #print left #print right # return ParseTree(label, [stump1_clone, stump2_clone]) # print 'before', tree # print t # print 'end', t # print # print tree # print 'start_word', start_word, 'end_word', end_word # print # print len(tree) # print 'i', i, 'j', j #print 'rst:' , rst_actions #print 'pst:', pt_actions #print matrix[i] #for i in range(1, m + 1): #print rst_actions[i - 1], pt_actions[i - 1], matrix[(i, i)] | 2.88718 | 3 |
src/qt_models/propeditormodel.py | facade-technologies-inc/facile | 2 | 6630043 | """
..
/------------------------------------------------------------------------------\
| -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- |
|------------------------------------------------------------------------------|
| |
| Copyright [2019] Facade Technologies Inc. |
| All Rights Reserved. |
| |
| NOTICE: All information contained herein is, and remains the property of |
| Facade Technologies Inc. and its suppliers if any. The intellectual and |
| and technical concepts contained herein are proprietary to Facade |
| Technologies Inc. and its suppliers and may be covered by U.S. and Foreign |
| Patents, patents in process, and are protected by trade secret or copyright |
| law. Dissemination of this information or reproduction of this material is |
| strictly forbidden unless prior written permission is obtained from Facade |
| Technologies Inc. |
| |
\------------------------------------------------------------------------------/
This module contains the PropModel() class.
"""
from enum import Enum
from PySide2.QtCore import QAbstractItemModel, QModelIndex, Qt
from PySide2.QtGui import QColor
class PropModel(QAbstractItemModel):
"""
A subclass that allows us to show the Data through QTreeView.
"""
def __init__(self, propData: object):
"""
Constructs a model for the Property Editor.
:param propData: The data from the properties.
:type propData: object
:return: The constructed model.
:rtype: QObject
"""
QAbstractItemModel.__init__(self)
self._propData = propData
def index(self, row: int, column: int, parent: QModelIndex) -> QModelIndex:
"""
Purpose of this function is to return a QModelIndex that maps to the appropriate data
:param row: Row of the index.
:type row: int
:param column: Column of the index.
:type column: int
:param parent: Parent of that row or column.
:type parent: QModelIndex
:return: The index for the data.
:rtype: QModelIndex
"""
if not self.hasIndex(row, column, parent):
return QModelIndex()
# referencing category
if not parent.isValid():
internalData = self._propData.getCategories()[row]
else:
parentData = parent.internalPointer()
if parentData in self._propData.getCategories():
internalData = self._propData.getCategoryProperties(parentData)[row]
else:
return QModelIndex()
return self.createIndex(row, column, internalData)
def parent(self, index: QModelIndex) -> QModelIndex:
"""
Purpose of this function is to return the parent index of the index that is provided
:param index: Index that is provided.
:type index: QModelIndex
:return: Returns the parent index of the index provided.
:rtype: QModelIndex
"""
if not index.isValid():
return QModelIndex()
data = index.internalPointer()
if data in self._propData.getCategories():
return QModelIndex()
category = self._propData.getPropertyCategory(data)
return self.createIndex(self._propData.getCategoryIndex(category), 0, category)
def columnCount(self, parent: QModelIndex) -> int:
"""
Purpose of this function is to return the number of columns for the children of a given parent
:param parent: Parent will tell us our column count.
:type parent: QModelIndex
:return: Number of columns.
:rtype: int
"""
return 2
def rowCount(self, parent: QModelIndex) -> int:
"""
Purpose of this function is to return the number of children of a given parent
:param parent: Parent will tell us our column count.
:type parent: QModelIndex
:return: Number of rows.
:rtype: int
"""
if not parent.isValid():
numCategories = self._propData.getNumCategories()
return numCategories
parentData = parent.internalPointer()
if parentData in self._propData.getCategories():
return self._propData.getNumPropertiesInCategory(parentData)
else:
return 0
def data(self, index: QModelIndex, role: int) -> object:
"""
Purpose of this function is to retrieve data stored under the given role for the item referred to by the
index
:param index: Index that is provided.
:type index: QModelIndex
:param role: The given role for item referred.
:type role: int
:return: Data of the given role from index.
:rtype: object
"""
if not index.isValid():
return QModelIndex()
row = index.row()
col = index.column()
data = index.internalPointer()
if role == Qt.DisplayRole:
if data in self._propData.getCategories():
if col == 0:
return data
else:
return None
else:
col = index.column()
if col == 0:
return data.getName()
elif col == 1:
t = data.getType()
if issubclass(t, Enum):
return data.getValue().name
if t == bool:
return None
return str(data.getValue())
else:
return None
elif role == Qt.BackgroundRole:
if data in self._propData.getCategories():
return QColor(Qt.darkRed)
else:
shade = row % 2 * 25
return QColor(100 + shade, 150 + shade, 200 + shade)
def headerData(self, section: int, orientation: Qt.Orientation, role: int) -> object:
"""
This method is used for displaying the header data for the given role
and orientation of that specific section.
:param section: Specific section for the header data.
:type section: int
:param orientation: Given orientation for the header data.
:type orientation: Qt.Orientation
:param role: The given role for the header data.
:type role: int
:return: Model of header data.
:rtype: object
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return ["Name", "Value"][section]
return None
def traverse(self) -> None:
"""
This method is used for debugging by mimicking how a view might query the model for data.
:return: None
:rtype: NoneType
"""
parent = QModelIndex()
work = [parent]
while len(work) > 0:
cur = work.pop()
rows = self.rowCount(cur)
cols = self.columnCount(cur)
for r in range(rows):
for c in range(cols):
work.append(self.index(r, c, cur))
def setData(self, index: QModelIndex, value: object, role: int) -> bool:
"""
Purpose of this function is to set the role data for the index to value
:param index: Index that is provided.
:type index: QModelIndex
:param value: Value that is set.
:type value: object
:param role: The given role data.
:type role: int
:return: Set data for index to a value.
:rtype: bool
"""
if role != Qt.EditRole:
return False
if not index.isValid():
return False
if not value:
return False
data = index.internalPointer()
if data in self._propData.getCategories():
return False
else:
if index.column() != 1:
return False
else:
valueWasSet = data.setValue(value)
return valueWasSet
def flags(self, index: QModelIndex) -> object:
"""
Purpose of this function is to determine what can be done with a given index
:param index: Index that is provided.
:type index: QModelIndex
:return: Returns the item flags for the given index.
:rtype: ItemFlags
"""
if not index.isValid():
return Qt.NoItemFlags
data = index.internalPointer()
if data in self._propData.getCategories() or (data.isReadOnly() and data.getType() != bool):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
if index.column() == 1:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
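# --- Usage sketch (illustrative, not part of this module) ---
# `prop_data` below stands for the Facile property-data object that PropModel
# expects (anything answering getCategories(), getCategoryProperties(), etc.).
#
#     from PySide2.QtWidgets import QApplication, QTreeView
#     app = QApplication([])
#     view = QTreeView()
#     view.setModel(PropModel(prop_data))
#     view.expandAll()
#     view.show()
#     app.exec_()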
| """
..
/------------------------------------------------------------------------------\
| -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- |
|------------------------------------------------------------------------------|
| |
| Copyright [2019] Facade Technologies Inc. |
| All Rights Reserved. |
| |
| NOTICE: All information contained herein is, and remains the property of |
| Facade Technologies Inc. and its suppliers if any. The intellectual and |
| and technical concepts contained herein are proprietary to Facade |
| Technologies Inc. and its suppliers and may be covered by U.S. and Foreign |
| Patents, patents in process, and are protected by trade secret or copyright |
| law. Dissemination of this information or reproduction of this material is |
| strictly forbidden unless prior written permission is obtained from Facade |
| Technologies Inc. |
| |
\------------------------------------------------------------------------------/
This module contains the PropModel() class.
"""
from enum import Enum
from PySide2.QtCore import QAbstractItemModel, QModelIndex, Qt
from PySide2.QtGui import QColor
class PropModel(QAbstractItemModel):
"""
A subclass that allows us to show the Data through QTreeView.
"""
def __init__(self, propData: object):
"""
Constructs a model for the Property Editor.
:param propData: The data from the properties.
:type propData: object
:return: The constructed model.
:rtype: QObject
"""
QAbstractItemModel.__init__(self)
self._propData = propData
def index(self, row: int, column: int, parent: QModelIndex) -> QModelIndex:
"""
Purpose of this function is to return a QModelIndex that maps to the appropriate data
:param row: Row of the index.
:type row: int
:param column: Column of the index.
:type column: int
:param parent: Parent of that row or column.
:type parent: QModelIndex
:return: The index for the data.
:rtype: QModelIndex
"""
if not self.hasIndex(row, column, parent):
return QModelIndex()
# referencing category
if not parent.isValid():
internalData = self._propData.getCategories()[row]
else:
parentData = parent.internalPointer()
if parentData in self._propData.getCategories():
internalData = self._propData.getCategoryProperties(parentData)[row]
else:
return QModelIndex()
return self.createIndex(row, column, internalData)
def parent(self, index: QModelIndex) -> QModelIndex:
"""
Purpose of this function is to return the parent index of the index that is provided
:param index: Index that is provided.
:type index: QModelIndex
:return: Returns the parent index of the index provided.
:rtype: QModelIndex
"""
if not index.isValid():
return QModelIndex()
data = index.internalPointer()
if data in self._propData.getCategories():
return QModelIndex()
category = self._propData.getPropertyCategory(data)
return self.createIndex(self._propData.getCategoryIndex(category), 0, category)
def columnCount(self, parent: QModelIndex) -> int:
"""
Purpose of this function is to return the number of columns for the children of a given parent
:param parent: Parent will tell us our column count.
:type parent: QModelIndex
:return: Number of columns.
:rtype: int
"""
return 2
def rowCount(self, parent: QModelIndex) -> int:
"""
Purpose of this function is to return the number of children of a given parent
:param parent: Parent will tell us our column count.
:type parent: QModelIndex
:return: Number of rows.
:rtype: int
"""
if not parent.isValid():
numCategories = self._propData.getNumCategories()
return numCategories
parentData = parent.internalPointer()
if parentData in self._propData.getCategories():
return self._propData.getNumPropertiesInCategory(parentData)
else:
return 0
def data(self, index: QModelIndex, role: int) -> object:
"""
Purpose of this function is to retrieve data stored under the given role for the item referred to by the
index
:param index: Index that is provided.
:type index: QModelIndex
:param role: The given role for item referred.
:type role: int
:return: Data of the given role from index.
:rtype: object
"""
if not index.isValid():
return QModelIndex()
row = index.row()
col = index.column()
data = index.internalPointer()
if role == Qt.DisplayRole:
if data in self._propData.getCategories():
if col == 0:
return data
else:
return None
else:
col = index.column()
if col == 0:
return data.getName()
elif col == 1:
t = data.getType()
if issubclass(t, Enum):
return data.getValue().name
if t == bool:
return None
return str(data.getValue())
else:
return None
elif role == Qt.BackgroundRole:
if data in self._propData.getCategories():
return QColor(Qt.darkRed)
else:
shade = row % 2 * 25
return QColor(100 + shade, 150 + shade, 200 + shade)
def headerData(self, section: int, orientation: Qt.Orientation, role: int) -> object:
"""
This method is used for displaying the header data for the given role
and orientation of that specific section.
:param section: Specific section for the header data.
:type section: int
:param orientation: Given orientation for the header data.
:type orientation: Qt.Orientation
:param role: The given role for the header data.
:type role: int
:return: Model of header data.
:rtype: object
"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return ["Name", "Value"][section]
return None
def traverse(self) -> None:
"""
This method is used for debugging by mimicking how a view might query the model for data.
:return: None
:rtype: NoneType
"""
parent = QModelIndex()
work = [parent]
while len(work) > 0:
cur = work.pop()
rows = self.rowCount(cur)
cols = self.columnCount(cur)
for r in range(rows):
for c in range(cols):
work.append(self.index(r, c, cur))
def setData(self, index: QModelIndex, value: object, role: int) -> bool:
"""
Purpose of this function is to set the role data for the index to value
:param index: Index that is provided.
:type index: QModelIndex
:param value: Value that is set.
:type value: object
:param role: The given role data.
:type role: int
:return: Set data for index to a value.
:rtype: bool
"""
if role != Qt.EditRole:
return False
if not index.isValid():
return False
if not value:
return False
data = index.internalPointer()
if data in self._propData.getCategories():
return False
else:
if index.column() != 1:
return False
else:
valueWasSet = data.setValue(value)
return valueWasSet
def flags(self, index: QModelIndex) -> object:
"""
Purpose of this function is to determine what can be done with a given index
:param index: Index that is provided.
:type index: QModelIndex
:return: Returns the item flags for the given index.
:rtype: ItemFlags
"""
if not index.isValid():
return Qt.NoItemFlags
data = index.internalPointer()
if data in self._propData.getCategories() or (data.isReadOnly() and data.getType() != bool):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
if index.column() == 1:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
| en | 0.715113 | .. /------------------------------------------------------------------------------\ | -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- | |------------------------------------------------------------------------------| | | | Copyright [2019] Facade Technologies Inc. | | All Rights Reserved. | | | | NOTICE: All information contained herein is, and remains the property of | | Facade Technologies Inc. and its suppliers if any. The intellectual and | | and technical concepts contained herein are proprietary to Facade | | Technologies Inc. and its suppliers and may be covered by U.S. and Foreign | | Patents, patents in process, and are protected by trade secret or copyright | | law. Dissemination of this information or reproduction of this material is | | strictly forbidden unless prior written permission is obtained from Facade | | Technologies Inc. | | | \------------------------------------------------------------------------------/ This module contains the PropModel() class. A subclass that allows us to show the Data through QTreeView. Constructs a model for the Property Editor. :param propData: The data from the properties. :type propData: object :return: The constructed model. :rtype: QObject Purpose of this function is to return a QModelIndex that maps to the appropriate data :param row: Row of the index. :type row: int :param column: Column of the index. :type column: int :param parent: Parent of that row or column. :type parent: QModelIndex :return: The index for the data. :rtype: QModelIndex # referencing category Purpose of this function is to return the parent index of the index that is provided :param index: Index that is provided. :type index: QModelIndex :return: Returns the parent index of the index provided. :rtype: QModelIndex Purpose of this function is to return the number of columns for the children of a given parent :param parent: Parent will tell us our column count. :type parent: QModelIndex :return: Number of columns. :rtype: int Purpose of this function is to return the number of children of a given parent :param parent: Parent will tell us our column count. :type parent: QModelIndex :return: Number of rows. :rtype: int Purpose of this function is to retrieve data stored under the given role for the item referred to by the index :param index: Index that is provided. :type index: QModelIndex :param role: The given role for item referred. :type role: int :return: Data of the given role from index. :rtype: object This method is used for displaying the header data for 'the given role and orientation of that specific section. :param section: Specific section for the header data. :type section: int :param orientation: Given orientation for the header data. :type orientation: Qt.Orientation :param role: The given role for the header data. :type role: int :return: Model of header data. :rtype: object This method is used for debugging by mimicking how a view might query the model for data. :return: None :rtype: NoneType Purpose of this function is to set the role data for the index to value :param index: Index that is provided. :type index: QModelIndex :param value: Value that is set. :type value: object :param role: The given role data. :type role: int :return: Set data for index to a value. :rtype: bool Purpose of this function is to determine what can be done with a given index :param index: Index that is provided. :type index: QModelIndex :return: Returns the item flags for the given index. :rtype: ItemFlags | 1.936207 | 2 |
Unit2/OnlineWorks/Draw2.py | yuhao1998/PythonStudy | 0 | 6630044 | <reponame>yuhao1998/PythonStudy
'''
turtle eight-pointed star drawing

Description
Use the turtle library to draw an eight-pointed star figure.
'''
import turtle as t
t.pensize(2)
for i in range(8):
t.fd(150)
t.left(135)
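# Eight segments with a 135-degree left turn each add up to 1080 degrees
# (three full revolutions), so the pen closes an eight-pointed star rather
# than a regular octagon (which would use 45-degree exterior turns).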
t.done() | '''
turtle eight-pointed star drawing

Description
Use the turtle library to draw an eight-pointed star figure.
'''
import turtle as t
t.pensize(2)
for i in range(8):
t.fd(150)
t.left(135)
t.done() | zh | 0.312434 | turtleๅ
ซ่งๅพๅฝข็ปๅถ โชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโชโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโญโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโชโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโญโฌโซโฌ ๆ่ฟฐ ไฝฟ็จturtleๅบ๏ผ็ปๅถไธไธชๅ
ซ่งๅพๅฝขใโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโชโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโญโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโชโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโซโฌโชโฌโชโฌโชโฌโชโฌโชโฌโชโฌโฎโฌโญโฌโซโฌ | 2.917784 | 3 |
core_tools/data/ds/data_set_DataMgr.py | opietx/core_tools | 0 | 6630045 | import numpy as np
import copy
import string
class m_param_origanizer():
def __init__(self, m_param_raw):
self.m_param_raw = m_param_raw
def get(self, key, nth_set):
items = self[key]
for i in items:
if i.nth_set == nth_set:
return i
raise ValueError('m_param with id {} and set {} not found in this data collection.'.format(key, nth_set))
def __getitem__(self, key):
'''
gets a list with parameters containing this key
Returns
list<m_param_raw> : raw parameters originating from this id.
'''
param_s = []
for m_param in self.m_param_raw:
if m_param.param_id == key:
param_s.append(m_param)
if len(param_s) != 0:
return param_s
raise ValueError('m_param with id {} not found in this data collection.'.format(key))
def get_m_param_id(self):
'''
get the measurement id's
'''
id_s = set()
for m_param in self.m_param_raw:
id_s.add(m_param.param_id_m_param)
return list(id_s)
def __copy__(self):
new_m_param = []
for i in self.m_param_raw:
new_m_param.append(copy.copy(i))
return m_param_origanizer(new_m_param)
class data_descriptor: #autogenerate parameter info
def __set_name__(self, owner, name): # from python 3.6 (super handy :) )
self.name = name
def __get__(self, obj, objtype):
return getattr(obj.__dict__.get("_dataset_data_description__raw_data"), self.name)
class dataset_data_description():
unit = data_descriptor()
label = data_descriptor()
name = data_descriptor()
def __init__(self, name, m_param_raw, m_params_raw_collection):
'''
Args:
m_param_raw (m_param_raw) : pointer to the raw parameter to add
m_params_raw_collection (m_param_origanizer) : object containing a representation of all the data in the dataset
'''
self.name = name
self.__raw_data = m_param_raw
self.__raw_data_org = m_params_raw_collection
self.__repr_attr_overview = []
self.__populate_data()
def __populate_data(self):
for i in range(len(self.__raw_data.dependency)):
repr_attr_overview = []
raw_data = self.__raw_data_org[self.__raw_data.dependency[i]]
for j in range(len(raw_data)): #this is not pretty, but it works..
dataDescription = dataset_data_description('', raw_data[j], self.__raw_data_org)
if self.ndim <= 2:
name = string.ascii_lowercase[23+i] + str(j+1)
self.__setattr__(name, dataDescription)
if j == 0:
self.__setattr__(string.ascii_lowercase[23+i], dataDescription)
if len(raw_data) == 1:
name = string.ascii_lowercase[23+i]
repr_attr_overview += [(name, dataDescription)]
if self.ndim > 2:
self.__setattr__(string.ascii_lowercase[8+i] + str(j+1), dataDescription)
if len(raw_data) == 1:
self.__setattr__(string.ascii_lowercase[8+i], dataDescription)
repr_attr_overview += [(string.ascii_lowercase[8+i], dataDescription)]
else:
repr_attr_overview += [(string.ascii_lowercase[8+i] + str(j+1), dataDescription)]
dataDescription.name = repr_attr_overview[-1][0]
self.__repr_attr_overview += [repr_attr_overview]
if self.ndim <= 2:
name = string.ascii_lowercase[23+self.ndim-1]
if len(self.__raw_data.dependency) != 0:
name = string.ascii_lowercase[23+self.ndim]
else:
name = string.ascii_lowercase[8+self.ndim-1]
self.__setattr__(name, self)
def __call__(self):
if self.__raw_data.setpoint is True or self.__raw_data.setpoint_local is True:
if self.__raw_data.data_buffer.data.ndim > 1: #over dimensioned
idx = [0] * self.__raw_data.data_buffer.data.ndim
idx[self.__raw_data.nth_dim] = slice(None)
return self.__raw_data.data_buffer.data[tuple(idx)]
return self.__raw_data.data_buffer.data
@property
def shape(self):
return self().shape
@property
def ndim(self):
return len(self.shape)
def full(self):
return self.__raw_data.data_buffer.data
def get_raw_content(self):
return self.__repr_attr_overview
def average(self, dim):
'''
average the array across 1 dimension
arg:
dim (str/int) : 0 ('x'), 1 ('y') , ...
'''
dim = self.dim_to_int(dim)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
raw_data_cpy.dependency.pop(dim)
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.averaging_lambda(dim)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def slice(self, dim, i):
'''
take the ith slice of dimension i
'''
dim = self.dim_to_int(dim)
if not isinstance(i, slice):
i = slice(int(i),int(i)+1)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
idx = [slice(None)]*self.ndim
idx[dim] = i
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
if i.start is not None and i.stop-i.start == 1:
idx[dim] = i.start
raw_data_cpy.dependency.pop(dim)
elif i.stop is not None:
id_to_slice = raw_data_cpy.dependency[dim]
items= raw_data_org_copy[id_to_slice]
for item in items:
# TODO this is not generic yet (I think, this has to be checked).
item.data_buffer.buffer_lambda = item.data_buffer.slice_lambda([idx[dim]])
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.slice_lambda(idx)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def __getitem__(self, args):
if not isinstance(args, tuple):
args = [args]
args = list(args)
to_slice = None
for i in range(len(args)):
if isinstance(args[i], int):
to_slice = (i, slice(args[i], args[i]+1))
elif isinstance(args[i], slice) and args[i] != slice(None):
to_slice = (i, args[i])
if to_slice is None:
return self
args.pop(to_slice[0])
return self.slice(to_slice[0], to_slice[1])[tuple(args)]
def __repr__(self):
output_print = ""
output_print += "| " + "{:<15}".format(self.name) + " | " + "{:<15}".format(self.label) + " | " + "{:<8}".format(self.unit)+ " | " + "{:<25}".format(str(self.shape)) + "|\n"
for i in self.__repr_attr_overview:
for j in i:
dataDescription = j[1]
if dataDescription.ndim == 1:
output_print += "| " + "{:<14}".format(j[0]) + " | " + "{:<15}".format(dataDescription.label) + " | " + "{:<8}".format(dataDescription.unit)+ " | " + "{:<25}".format(str(dataDescription.shape)) + "|\n"
return output_print
@staticmethod
def dim_to_int(dim):
'''
convert dim (if text) into a number on which axix of the array to performan a operation (e.g. x = 0, y=1)
'''
if isinstance(dim, str):
if dim in 'xyz':
dim = list(string.ascii_lowercase).index(dim) - 23
else:
dim = list(string.ascii_lowercase).index(dim) - 8
return dim
class data_set_property_intializer():
'''
mockup of dataclass for development purposes-- dont use this class.
'''
def __init__(self, m_params):
self.__repr_attr_overview = []
# m_meas_id's
m_id = m_params.get_m_param_id()
for i in range(len(m_id)): #this is not pretty.
n_sets = len(m_params[m_id[i]])
repr_attr_overview = []
for j in range(n_sets):
ds_descript = dataset_data_description('', m_params.get(m_id[i], j), m_params)
name = 'm' + str(i+1) + string.ascii_lowercase[j]
setattr(self, name, ds_descript)
if j == 0:
setattr(self, 'm' + str(i+1), ds_descript)
if j == 0 and n_sets==1: #consistent printing
repr_attr_overview += [('m' + str(i+1), ds_descript)]
ds_descript.name = 'm' + str(i+1)
else:
repr_attr_overview += [(name, ds_descript)]
ds_descript.name = name
self.__repr_attr_overview += [repr_attr_overview]
def __repr__(self):
output_print = "DataSet :: my_measurement_name\n\nid = 1256\nTrueID = 1225565471200\n\n"
output_print += "| idn | label | unit | size |\n"
output_print += "---------------------------------------------------------------------------\n"
for i in self.__repr_attr_overview:
for j in i:
output_print += j[1].__repr__()
output_print += "\n"
output_print += "database : vanderyspen\n"
output_print += "set_up : XLD\n"
output_print += "project : 6dot\n"
output_print += "sample_name : SQ19\n"
return output_print | import numpy as np
import copy
import string
class m_param_origanizer():
def __init__(self, m_param_raw):
self.m_param_raw = m_param_raw
def get(self, key, nth_set):
items = self[key]
for i in items:
if i.nth_set == nth_set:
return i
raise ValueError('m_param with id {} and set {} not found in this data collection.'.format(key, nth_set))
def __getitem__(self, key):
'''
gets a list with parameters containing this key
Returns
list<m_param_raw> : raw parameters originating from this id.
'''
param_s = []
for m_param in self.m_param_raw:
if m_param.param_id == key:
param_s.append(m_param)
if len(param_s) != 0:
return param_s
raise ValueError('m_param with id {} not found in this data collection.'.format(key))
def get_m_param_id(self):
'''
get the measurement id's
'''
id_s = set()
for m_param in self.m_param_raw:
id_s.add(m_param.param_id_m_param)
return list(id_s)
def __copy__(self):
new_m_param = []
for i in self.m_param_raw:
new_m_param.append(copy.copy(i))
return m_param_origanizer(new_m_param)
class data_descriptor: #autogenerate parameter info
def __set_name__(self, owner, name): # from python 3.6 (super handy :) )
self.name = name
def __get__(self, obj, objtype):
return getattr(obj.__dict__.get("_dataset_data_description__raw_data"), self.name)
class dataset_data_description():
unit = data_descriptor()
label = data_descriptor()
name = data_descriptor()
def __init__(self, name, m_param_raw, m_params_raw_collection):
'''
Args:
m_param_raw (m_param_raw) : pointer to the raw parameter to add
m_params_raw_collection (m_param_origanizer) : object containing a representation of all the data in the dataset
'''
self.name = name
self.__raw_data = m_param_raw
self.__raw_data_org = m_params_raw_collection
self.__repr_attr_overview = []
self.__populate_data()
def __populate_data(self):
for i in range(len(self.__raw_data.dependency)):
repr_attr_overview = []
raw_data = self.__raw_data_org[self.__raw_data.dependency[i]]
for j in range(len(raw_data)): #this is not pretty, but it works..
dataDescription = dataset_data_description('', raw_data[j], self.__raw_data_org)
if self.ndim <= 2:
name = string.ascii_lowercase[23+i] + str(j+1)
self.__setattr__(name, dataDescription)
if j == 0:
self.__setattr__(string.ascii_lowercase[23+i], dataDescription)
if len(raw_data) == 1:
name = string.ascii_lowercase[23+i]
repr_attr_overview += [(name, dataDescription)]
if self.ndim > 2:
self.__setattr__(string.ascii_lowercase[8+i] + str(j+1), dataDescription)
if len(raw_data) == 1:
self.__setattr__(string.ascii_lowercase[8+i], dataDescription)
repr_attr_overview += [(string.ascii_lowercase[8+i], dataDescription)]
else:
repr_attr_overview += [(string.ascii_lowercase[8+i] + str(j+1), dataDescription)]
dataDescription.name = repr_attr_overview[-1][0]
self.__repr_attr_overview += [repr_attr_overview]
if self.ndim <= 2:
name = string.ascii_lowercase[23+self.ndim-1]
if len(self.__raw_data.dependency) != 0:
name = string.ascii_lowercase[23+self.ndim]
else:
name = string.ascii_lowercase[8+self.ndim-1]
self.__setattr__(name, self)
def __call__(self):
if self.__raw_data.setpoint is True or self.__raw_data.setpoint_local is True:
if self.__raw_data.data_buffer.data.ndim > 1: #over dimensioned
idx = [0] * self.__raw_data.data_buffer.data.ndim
idx[self.__raw_data.nth_dim] = slice(None)
return self.__raw_data.data_buffer.data[tuple(idx)]
return self.__raw_data.data_buffer.data
@property
def shape(self):
return self().shape
@property
def ndim(self):
return len(self.shape)
def full(self):
return self.__raw_data.data_buffer.data
def get_raw_content(self):
return self.__repr_attr_overview
def average(self, dim):
'''
average the array across 1 dimension
arg:
dim (str/int) : 0 ('x'), 1 ('y') , ...
'''
dim = self.dim_to_int(dim)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
raw_data_cpy.dependency.pop(dim)
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.averaging_lambda(dim)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def slice(self, dim, i):
'''
take the ith slice of dimension i
'''
dim = self.dim_to_int(dim)
if not isinstance(i, slice):
i = slice(int(i),int(i)+1)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
idx = [slice(None)]*self.ndim
idx[dim] = i
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
if i.start is not None and i.stop-i.start == 1:
idx[dim] = i.start
raw_data_cpy.dependency.pop(dim)
elif i.stop is not None:
id_to_slice = raw_data_cpy.dependency[dim]
items= raw_data_org_copy[id_to_slice]
for item in items:
# TODO this is not generic yet (I think, this has to be checked).
item.data_buffer.buffer_lambda = item.data_buffer.slice_lambda([idx[dim]])
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.slice_lambda(idx)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def __getitem__(self, args):
if not isinstance(args, tuple):
args = [args]
args = list(args)
to_slice = None
for i in range(len(args)):
if isinstance(args[i], int):
to_slice = (i, slice(args[i], args[i]+1))
elif isinstance(args[i], slice) and args[i] != slice(None):
to_slice = (i, args[i])
if to_slice is None:
return self
args.pop(to_slice[0])
return self.slice(to_slice[0], to_slice[1])[tuple(args)]
def __repr__(self):
output_print = ""
output_print += "| " + "{:<15}".format(self.name) + " | " + "{:<15}".format(self.label) + " | " + "{:<8}".format(self.unit)+ " | " + "{:<25}".format(str(self.shape)) + "|\n"
for i in self.__repr_attr_overview:
for j in i:
dataDescription = j[1]
if dataDescription.ndim == 1:
output_print += "| " + "{:<14}".format(j[0]) + " | " + "{:<15}".format(dataDescription.label) + " | " + "{:<8}".format(dataDescription.unit)+ " | " + "{:<25}".format(str(dataDescription.shape)) + "|\n"
return output_print
@staticmethod
def dim_to_int(dim):
'''
convert dim (if text) into a number on which axix of the array to performan a operation (e.g. x = 0, y=1)
'''
if isinstance(dim, str):
if dim in 'xyz':
dim = list(string.ascii_lowercase).index(dim) - 23
else:
dim = list(string.ascii_lowercase).index(dim) - 8
return dim
class data_set_property_intializer():
'''
mockup of dataclass for development purposes-- dont use this class.
'''
def __init__(self, m_params):
self.__repr_attr_overview = []
# m_meas_id's
m_id = m_params.get_m_param_id()
for i in range(len(m_id)): #this is not pretty.
n_sets = len(m_params[m_id[i]])
repr_attr_overview = []
for j in range(n_sets):
ds_descript = dataset_data_description('', m_params.get(m_id[i], j), m_params)
name = 'm' + str(i+1) + string.ascii_lowercase[j]
setattr(self, name, ds_descript)
if j == 0:
setattr(self, 'm' + str(i+1), ds_descript)
if j == 0 and n_sets==1: #consistent printing
repr_attr_overview += [('m' + str(i+1), ds_descript)]
ds_descript.name = 'm' + str(i+1)
else:
repr_attr_overview += [(name, ds_descript)]
ds_descript.name = name
self.__repr_attr_overview += [repr_attr_overview]
def __repr__(self):
output_print = "DataSet :: my_measurement_name\n\nid = 1256\nTrueID = 1225565471200\n\n"
output_print += "| idn | label | unit | size |\n"
output_print += "---------------------------------------------------------------------------\n"
for i in self.__repr_attr_overview:
for j in i:
output_print += j[1].__repr__()
output_print += "\n"
output_print += "database : vanderyspen\n"
output_print += "set_up : XLD\n"
output_print += "project : 6dot\n"
output_print += "sample_name : SQ19\n"
return output_print | en | 0.595729 | gets a list with parameters containing this key Returns list<m_param_raw> : raw parameters originating from this id. get the measurement id's #autogenerate parameter info # from python 3.6 (super handy :) ) Args: m_param_raw (m_param_raw) : pointer to the raw parameter to add m_params_raw_collection (m_param_origanizer) : object containing a representation of all the data in the dataset #this is not pretty, but it works.. #over dimensioned average the array across 1 dimension arg: dim (str/int) : 0 ('x'), 1 ('y') , ... take the ith slice of dimension i # TODO this is not generic yet (I think, this has to be checked). convert dim (if text) into a number on which axix of the array to performan a operation (e.g. x = 0, y=1) mockup of dataclass for development purposes-- dont use this class. # m_meas_id's #this is not pretty. #consistent printing | 2.649228 | 3 |
DataStructure/codes/doublyLinkedList.py | yangdongjue5510/TIL | 0 | 6630046 | <reponame>yangdongjue5510/TIL
class Node:
def __init__(self, key = None):
self.key = key
self.next = self.prev = self
def __str__(self):
return str(self.key)
class DoublyLinkedList:
def __init__(self):
self.head = Node()
self.size = 0
def __iter__(self): #๋ฐ๋ณตํ๊ฒ ๋ง๋๋ ๋ฐ๋ณต์. yield๊ฐ ์๋๊ฒ generator
v = self.head
while v != None:
yield v #return๊ณผ ๋น์ท.
v = v.next
def __str__(self):
return "->".join(str(v) for v in self)
#splice
def splice(self, a, b, x):
if a == None or b == None or x == None :
return
ap = a.prev
bn = b.next
#cut
ap.next = bn
bn.prev = ap
#insert after x
xn = x.next
xn.prev = b
x.next = a
b.next = xn
a.prev = x
#search
def search(self, key):
v = self.head
while v.next != self.head:
if v.key == key:
return v
v = v.next
return None
#isEmpty
def isEmpty(self):
v = self.head
if v.next ==self.head:
return True
else:
return False
#first last
def first(self):
v = self.head
if v.next!=self.head:
return v.next
else:
return None
def last(self):
v = self.head
if v.prev != self.head:
return v.prev
else:
return None
def moveAfter(self, a, x): #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ๋ค๋ก ์ด๋
self.splice(a, a, x) #a๋ฅผ ๋ผ์ด๋ด์ด x๋ค๋ก ๋ถ์ธ ๊ฒ๊ณผ ๊ฐ๋ค
def moveBefore(self, a, x): #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ์์ผ๋ก ์ด๋
self.splice(a, a, x.prev)
def insertAfter(self, x, key): #key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x๋ค์ ์ฝ์
self.moveAfter(Node(key), x)
def insertBefore(self, x, key): #key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x์์ ์ฝ์
self.moveBefore(Node(key), x)
def pushFront(self, key): # key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ๋ค์ ์ฝ์
self.insertAfter(self.head, key)
def pushBack(self, key): # key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ์์ ์ฝ์
self.insertBefore(self.head, key)
#์ญ์
def remove(self, x):
if x == None or x == self.head: return
x.prev.next , x.next.prev = x.next, x.prev
def popFront(self):
if self.isEmpty(): return None
key = self.head.next.key
self.remove(self.head.next)
return key
def popBack(self):
if self.isEmpty(): return None
key = self.head.prev.key
self.remove(self.head.prev)
return key | class Node:
def __init__(self, key = None):
self.key = key
self.next = self.prev = self
def __str__(self):
return str(self.key)
class DoublyLinkedList:
def __init__(self):
self.head = Node()
self.size = 0
def __iter__(self): #๋ฐ๋ณตํ๊ฒ ๋ง๋๋ ๋ฐ๋ณต์. yield๊ฐ ์๋๊ฒ generator
v = self.head
while v != None:
yield v #return๊ณผ ๋น์ท.
v = v.next
def __str__(self):
return "->".join(str(v) for v in self)
#splice
def splice(self, a, b, x):
if a == None or b == None or x == None :
return
ap = a.prev
bn = b.next
#cut
ap.next = bn
bn.prev = ap
#insert after x
xn = x.next
xn.prev = b
x.next = a
b.next = xn
a.prev = x
#search
def search(self, key):
v = self.head
while v.next != self.head:
if v.key == key:
return v
v = v.next
return None
#isEmpty
def isEmpty(self):
v = self.head
if v.next ==self.head:
return True
else:
return False
#first last
def first(self):
v = self.head
if v.next!=self.head:
return v.next
else:
return None
def last(self):
v = self.head
if v.prev != self.head:
return v.prev
else:
return None
def moveAfter(self, a, x): #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ๋ค๋ก ์ด๋
self.splice(a, a, x) #a๋ฅผ ๋ผ์ด๋ด์ด x๋ค๋ก ๋ถ์ธ ๊ฒ๊ณผ ๊ฐ๋ค
def moveBefore(self, a, x): #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ์์ผ๋ก ์ด๋
self.splice(a, a, x.prev)
def insertAfter(self, x, key): #key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x๋ค์ ์ฝ์
self.moveAfter(Node(key), x)
def insertBefore(self, x, key): #key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x์์ ์ฝ์
self.moveBefore(Node(key), x)
def pushFront(self, key): # key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ๋ค์ ์ฝ์
self.insertAfter(self.head, key)
def pushBack(self, key): # key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ์์ ์ฝ์
self.insertBefore(self.head, key)
#์ญ์
def remove(self, x):
if x == None or x == self.head: return
x.prev.next , x.next.prev = x.next, x.prev
def popFront(self):
if self.isEmpty(): return None
key = self.head.next.key
self.remove(self.head.next)
return key
def popBack(self):
if self.isEmpty(): return None
key = self.head.prev.key
self.remove(self.head.prev)
return key | ko | 0.999582 | #๋ฐ๋ณตํ๊ฒ ๋ง๋๋ ๋ฐ๋ณต์. yield๊ฐ ์๋๊ฒ generator #return๊ณผ ๋น์ท. #splice #cut #insert after x #search #isEmpty #first last #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ๋ค๋ก ์ด๋ #a๋ฅผ ๋ผ์ด๋ด์ด x๋ค๋ก ๋ถ์ธ ๊ฒ๊ณผ ๊ฐ๋ค #๋
ธ๋ a๋ฅผ ๋
ธ๋ x ์์ผ๋ก ์ด๋ #key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x๋ค์ ์ฝ์
#key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ x์์ ์ฝ์
# key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ๋ค์ ์ฝ์
# key๊ฐ์ ๊ฐ๋ ๋
ธ๋๋ฅผ ํค๋ ์์ ์ฝ์
#์ญ์ | 3.611589 | 4 |
Chapter08/c8_06_noLonger_working.py | John-ye666/Python-for-Finance-Second-Edition | 236 | 6630047 | <reponame>John-ye666/Python-for-Finance-Second-Edition<filename>Chapter08/c8_06_noLonger_working.py
import pandas as pd
url='http://chart.yahoo.com/table.csv?s=IBM'
x=pd.read_csv(url,index_col=0,parse_dates=True)
print(x.head())
| import pandas as pd
url='http://chart.yahoo.com/table.csv?s=IBM'
x=pd.read_csv(url,index_col=0,parse_dates=True)
print(x.head()) | none | 1 | 2.865102 | 3 |
|
exeteracovid/scripts/export_diet_data_to_csv.py | deng113jie/ExeTeraCovid | 3 | 6630048 | import numpy as np
import pandas as pd
from exeteracovid.algorithms.healthy_diet_index import healthy_diet_index
def export_diet_data_to_csv(s, src_data, geo_data, dest_data, csv_file):
src_ptnts = src_data['patients']
src_diet = src_data['diet']
geo_ptnts = geo_data['patients']
ffq_questions = ('ffq_chips', 'ffq_crisps_snacks', 'ffq_eggs', 'ffq_fast_food',
'ffq_fibre_rich_breakfast', 'ffq_fizzy_pop', 'ffq_fruit',
'ffq_fruit_juice', 'ffq_ice_cream', 'ffq_live_probiotic_fermented',
'ffq_oily_fish', 'ffq_pasta', 'ffq_pulses', 'ffq_red_meat',
'ffq_red_processed_meat', 'ffq_refined_breakfast', 'ffq_rice',
'ffq_salad', 'ffq_sweets', 'ffq_vegetables', 'ffq_white_bread',
'ffq_white_fish', 'ffq_white_fish_battered_breaded', 'ffq_white_meat',
'ffq_white_processed_meat', 'ffq_wholemeal_bread')
ffq_dict = {k: s.get(src_diet[k]).data[:] for k in ffq_questions}
scores = healthy_diet_index(ffq_dict)
p_ids = s.get(src_ptnts['id']).data[:]
d_pids = s.get(src_diet['patient_id']).data[:]
g_pids = s.get(geo_ptnts['id']).data[:]
if not np.array_equal(p_ids, g_pids):
print("src_data and geo_data do not match")
exit()
unique_d_pids = set(d_pids)
p_filter = np.zeros(len(p_ids), np.bool)
for i in range(len(p_ids)):
p_filter[i] = p_ids[i] in unique_d_pids
patient_fields = ('110_to_220_cm', '15_to_55_bmi', '16_to_90_years', '40_to_200_kg',
'a1c_measurement_mmol', 'a1c_measurement_mmol_valid', 'a1c_measurement_percent', 'a1c_measurement_percent_valid',
'activity_change', 'age', 'age_filter', 'alcohol_change', 'already_had_covid',
'assessment_count', 'blood_group', 'bmi', 'bmi_clean', 'bmi_valid', 'cancer_clinical_trial_site',
'cancer_type', 'classic_symptoms', 'classic_symptoms_days_ago', 'classic_symptoms_days_ago_valid',
'clinical_study_institutions', 'clinical_study_names', 'clinical_study_nct_ids',
'contact_additional_studies', 'contact_health_worker', 'country_code', 'created_at',
'created_at_day', 'diabetes_diagnosis_year', 'diabetes_diagnosis_year_valid', 'diabetes_oral_biguanide',
'diabetes_oral_dpp4', 'diabetes_oral_meglitinides', 'diabetes_oral_other_medication',
'diabetes_oral_sglt2', 'diabetes_oral_sulfonylurea', 'diabetes_oral_thiazolidinediones',
'diabetes_treatment_basal_insulin', 'diabetes_treatment_insulin_pump', 'diabetes_treatment_lifestyle',
'diabetes_treatment_none', 'diabetes_treatment_other_injection', 'diabetes_treatment_other_oral',
'diabetes_treatment_pfnts', 'diabetes_treatment_rapid_insulin', 'diabetes_type',
'diabetes_uses_cgm', 'diet_change', 'diet_counts', 'does_chemotherapy', 'ethnicity', 'ever_had_covid_test',
'first_assessment_day', 'gender', 'has_asthma', 'has_cancer', 'has_diabetes', 'has_eczema', 'has_hayfever',
'has_heart_disease',
'has_kidney_disease', 'has_lung_disease', 'has_lung_disease_only',
'health_worker_with_contact',
'healthcare_professional', 'height_cm', 'height_cm_clean', 'height_cm_valid', 'help_available',
'housebound_problems', 'ht_combined_oral_contraceptive_pill', 'ht_depot_injection_or_implant', 'ht_hormone_treatment_therapy',
'ht_mirena_or_other_coil', 'ht_none', 'ht_oestrogen_hormone_therapy', 'ht_pfnts', 'ht_progestone_only_pill',
'ht_testosterone_hormone_therapy', 'id',
'interacted_patients_with_covid',
'interacted_with_covid', 'is_carer_for_community',
'is_pregnant',
'is_smoker',
'last_assessment_day', 'lifestyle_version', 'limited_activity',
'lsoa11cd',
'max_assessment_test_result',
'max_test_result', 'mobility_aid',
'need_inside_help', 'need_outside_help',
'needs_help', 'never_used_shortage', 'on_cancer_clinical_trial',
'period_frequency', 'period_status', 'period_stopped_age', 'period_stopped_age_valid', 'pregnant_weeks',
'pregnant_weeks_valid',
'race_is_other', 'race_is_prefer_not_to_say', 'race_is_uk_asian', 'race_is_uk_black', 'race_is_uk_chinese',
'race_is_uk_middle_eastern', 'race_is_uk_mixed_other', 'race_is_uk_mixed_white_black', 'race_is_uk_white',
'race_is_us_asian', 'race_is_us_black', 'race_is_us_hawaiian_pacific', 'race_is_us_indian_native', 'race_is_us_white',
'race_other', 'reported_by_another', 'same_household_as_reporter', 'se_postcode', 'smoked_years_ago',
'smoked_years_ago_valid', 'smoker_status', 'snacking_change', 'sometimes_used_shortage',
'still_have_past_symptoms', 'takes_any_blood_pressure_medications', 'takes_aspirin', 'takes_blood_pressure_medications_pril',
'takes_blood_pressure_medications_sartan', 'takes_corticosteroids', 'takes_immunosuppressants', 'test_count',
'vs_asked_at_set', 'vs_garlic', 'vs_multivitamins', 'vs_none', 'vs_omega_3', 'vs_other', 'vs_pftns',
'vs_probiotics', 'vs_vitamin_c', 'vs_vitamin_d', 'vs_zinc', 'weight_change', 'weight_change_kg',
'weight_change_kg_valid', 'weight_change_pounds', 'weight_change_pounds_valid', 'weight_kg',
'weight_kg_clean', 'weight_kg_valid', 'year_of_birth', 'year_of_birth_valid')
patient_geo_fields = ('has_imd_data', 'imd_rank', 'imd_decile', 'ruc11cd')
flt_ptnts = dest_data.create_group('patients')
for k in patient_fields:
r = s.get(src_ptnts[k])
w = r.create_like(flt_ptnts, k)
s.apply_filter(p_filter, r, w)
for k in patient_geo_fields:
r = s.get(geo_ptnts[k])
w = r.create_like(flt_ptnts, k)
s.apply_filter(p_filter, r, w)
p_dict = {'id': s.apply_filter(p_filter, p_ids)}
for k in flt_ptnts.keys():
if "weight" in k or "height" in k:
pkey = "patient_{}".format(k)
else:
pkey = k
p_dict[pkey] = s.get(flt_ptnts[k]).data[:]
pdf = pd.DataFrame(p_dict)
d_dict = {'diet_id': s.get(src_diet['id']).data[:],
'patient_id': s.get(src_diet['patient_id']).data[:]}
d_dict.update({
k: s.get(src_diet[k]).data[:] for k in src_diet.keys() if k not in ('id', 'patient_id')
})
d_dict.update({'scores': scores})
ddf = pd.DataFrame(d_dict)
tdf = pd.merge(left=ddf, right=pdf, left_on='patient_id', right_on='id')
for k in tdf.keys():
if k[-2:] == "_x" or k[-2:] == "_y":
print(k)
print(tdf.columns)
tdf.to_csv(csv_file, index=False)
| import numpy as np
import pandas as pd
from exeteracovid.algorithms.healthy_diet_index import healthy_diet_index
def export_diet_data_to_csv(s, src_data, geo_data, dest_data, csv_file):
src_ptnts = src_data['patients']
src_diet = src_data['diet']
geo_ptnts = geo_data['patients']
ffq_questions = ('ffq_chips', 'ffq_crisps_snacks', 'ffq_eggs', 'ffq_fast_food',
'ffq_fibre_rich_breakfast', 'ffq_fizzy_pop', 'ffq_fruit',
'ffq_fruit_juice', 'ffq_ice_cream', 'ffq_live_probiotic_fermented',
'ffq_oily_fish', 'ffq_pasta', 'ffq_pulses', 'ffq_red_meat',
'ffq_red_processed_meat', 'ffq_refined_breakfast', 'ffq_rice',
'ffq_salad', 'ffq_sweets', 'ffq_vegetables', 'ffq_white_bread',
'ffq_white_fish', 'ffq_white_fish_battered_breaded', 'ffq_white_meat',
'ffq_white_processed_meat', 'ffq_wholemeal_bread')
ffq_dict = {k: s.get(src_diet[k]).data[:] for k in ffq_questions}
scores = healthy_diet_index(ffq_dict)
p_ids = s.get(src_ptnts['id']).data[:]
d_pids = s.get(src_diet['patient_id']).data[:]
g_pids = s.get(geo_ptnts['id']).data[:]
if not np.array_equal(p_ids, g_pids):
print("src_data and geo_data do not match")
exit()
unique_d_pids = set(d_pids)
p_filter = np.zeros(len(p_ids), np.bool)
for i in range(len(p_ids)):
p_filter[i] = p_ids[i] in unique_d_pids
patient_fields = ('110_to_220_cm', '15_to_55_bmi', '16_to_90_years', '40_to_200_kg',
'a1c_measurement_mmol', 'a1c_measurement_mmol_valid', 'a1c_measurement_percent', 'a1c_measurement_percent_valid',
'activity_change', 'age', 'age_filter', 'alcohol_change', 'already_had_covid',
'assessment_count', 'blood_group', 'bmi', 'bmi_clean', 'bmi_valid', 'cancer_clinical_trial_site',
'cancer_type', 'classic_symptoms', 'classic_symptoms_days_ago', 'classic_symptoms_days_ago_valid',
'clinical_study_institutions', 'clinical_study_names', 'clinical_study_nct_ids',
'contact_additional_studies', 'contact_health_worker', 'country_code', 'created_at',
'created_at_day', 'diabetes_diagnosis_year', 'diabetes_diagnosis_year_valid', 'diabetes_oral_biguanide',
'diabetes_oral_dpp4', 'diabetes_oral_meglitinides', 'diabetes_oral_other_medication',
'diabetes_oral_sglt2', 'diabetes_oral_sulfonylurea', 'diabetes_oral_thiazolidinediones',
'diabetes_treatment_basal_insulin', 'diabetes_treatment_insulin_pump', 'diabetes_treatment_lifestyle',
'diabetes_treatment_none', 'diabetes_treatment_other_injection', 'diabetes_treatment_other_oral',
'diabetes_treatment_pfnts', 'diabetes_treatment_rapid_insulin', 'diabetes_type',
'diabetes_uses_cgm', 'diet_change', 'diet_counts', 'does_chemotherapy', 'ethnicity', 'ever_had_covid_test',
'first_assessment_day', 'gender', 'has_asthma', 'has_cancer', 'has_diabetes', 'has_eczema', 'has_hayfever',
'has_heart_disease',
'has_kidney_disease', 'has_lung_disease', 'has_lung_disease_only',
'health_worker_with_contact',
'healthcare_professional', 'height_cm', 'height_cm_clean', 'height_cm_valid', 'help_available',
'housebound_problems', 'ht_combined_oral_contraceptive_pill', 'ht_depot_injection_or_implant', 'ht_hormone_treatment_therapy',
'ht_mirena_or_other_coil', 'ht_none', 'ht_oestrogen_hormone_therapy', 'ht_pfnts', 'ht_progestone_only_pill',
'ht_testosterone_hormone_therapy', 'id',
'interacted_patients_with_covid',
'interacted_with_covid', 'is_carer_for_community',
'is_pregnant',
'is_smoker',
'last_assessment_day', 'lifestyle_version', 'limited_activity',
'lsoa11cd',
'max_assessment_test_result',
'max_test_result', 'mobility_aid',
'need_inside_help', 'need_outside_help',
'needs_help', 'never_used_shortage', 'on_cancer_clinical_trial',
'period_frequency', 'period_status', 'period_stopped_age', 'period_stopped_age_valid', 'pregnant_weeks',
'pregnant_weeks_valid',
'race_is_other', 'race_is_prefer_not_to_say', 'race_is_uk_asian', 'race_is_uk_black', 'race_is_uk_chinese',
'race_is_uk_middle_eastern', 'race_is_uk_mixed_other', 'race_is_uk_mixed_white_black', 'race_is_uk_white',
'race_is_us_asian', 'race_is_us_black', 'race_is_us_hawaiian_pacific', 'race_is_us_indian_native', 'race_is_us_white',
'race_other', 'reported_by_another', 'same_household_as_reporter', 'se_postcode', 'smoked_years_ago',
'smoked_years_ago_valid', 'smoker_status', 'snacking_change', 'sometimes_used_shortage',
'still_have_past_symptoms', 'takes_any_blood_pressure_medications', 'takes_aspirin', 'takes_blood_pressure_medications_pril',
'takes_blood_pressure_medications_sartan', 'takes_corticosteroids', 'takes_immunosuppressants', 'test_count',
'vs_asked_at_set', 'vs_garlic', 'vs_multivitamins', 'vs_none', 'vs_omega_3', 'vs_other', 'vs_pftns',
'vs_probiotics', 'vs_vitamin_c', 'vs_vitamin_d', 'vs_zinc', 'weight_change', 'weight_change_kg',
'weight_change_kg_valid', 'weight_change_pounds', 'weight_change_pounds_valid', 'weight_kg',
'weight_kg_clean', 'weight_kg_valid', 'year_of_birth', 'year_of_birth_valid')
patient_geo_fields = ('has_imd_data', 'imd_rank', 'imd_decile', 'ruc11cd')
flt_ptnts = dest_data.create_group('patients')
for k in patient_fields:
r = s.get(src_ptnts[k])
w = r.create_like(flt_ptnts, k)
s.apply_filter(p_filter, r, w)
for k in patient_geo_fields:
r = s.get(geo_ptnts[k])
w = r.create_like(flt_ptnts, k)
s.apply_filter(p_filter, r, w)
p_dict = {'id': s.apply_filter(p_filter, p_ids)}
for k in flt_ptnts.keys():
if "weight" in k or "height" in k:
pkey = "patient_{}".format(k)
else:
pkey = k
p_dict[pkey] = s.get(flt_ptnts[k]).data[:]
pdf = pd.DataFrame(p_dict)
d_dict = {'diet_id': s.get(src_diet['id']).data[:],
'patient_id': s.get(src_diet['patient_id']).data[:]}
d_dict.update({
k: s.get(src_diet[k]).data[:] for k in src_diet.keys() if k not in ('id', 'patient_id')
})
d_dict.update({'scores': scores})
ddf = pd.DataFrame(d_dict)
tdf = pd.merge(left=ddf, right=pdf, left_on='patient_id', right_on='id')
for k in tdf.keys():
if k[-2:] == "_x" or k[-2:] == "_y":
print(k)
print(tdf.columns)
tdf.to_csv(csv_file, index=False)
| none | 1 | 2.901014 | 3 |
|
cowsay/lib/cows/satanic.py | Ovlic/cowsay_py | 0 | 6630049 | <reponame>Ovlic/cowsay_py<filename>cowsay/lib/cows/satanic.py
def Satanic(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts} (__)
(\\/)
/-------\\/
/ | 666 ||{tongue}
* ||----||
~~ ~~
""" | def Satanic(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts} (__)
(\\/)
/-------\\/
/ | 666 ||{tongue}
* ||----||
~~ ~~
""" | en | 0.387643 | {thoughts} {thoughts} (__) (\\/) /-------\\/ / | 666 ||{tongue} * ||----|| ~~ ~~ | 1.938572 | 2 |
src/webapp/webapp.py | Somsubhra/Enrich | 1 | 6630050 | <reponame>Somsubhra/Enrich
# Headers
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# All imports
from flask import Flask, render_template, request, jsonify
from extras import Logger
from output import Tagger
# The Web App class
class WebApp:
# Constructor for the Web App class
def __init__(self, host, port, debug):
self.host = host
self.port = port
self.debug = debug
self.app = Flask(__name__)
# Index route
@self.app.route('/')
def index():
return render_template('index.html')
@self.app.route('/api/tag')
def tag_api():
text = request.args["text"]
_type = request.args["type"]
tagger = Tagger(_type)
result = tagger.tag(text)
return jsonify(success=True, result=result)
Logger.log_success('Server started successfully')
# Run the Web App
def run(self):
self.app.run(self.host, self.port, self.debug) | # Headers
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# All imports
from flask import Flask, render_template, request, jsonify
from extras import Logger
from output import Tagger
# The Web App class
class WebApp:
# Constructor for the Web App class
def __init__(self, host, port, debug):
self.host = host
self.port = port
self.debug = debug
self.app = Flask(__name__)
# Index route
@self.app.route('/')
def index():
return render_template('index.html')
@self.app.route('/api/tag')
def tag_api():
text = request.args["text"]
_type = request.args["type"]
tagger = Tagger(_type)
result = tagger.tag(text)
return jsonify(success=True, result=result)
Logger.log_success('Server started successfully')
# Run the Web App
def run(self):
self.app.run(self.host, self.port, self.debug) | en | 0.531977 | # Headers # All imports # The Web App class # Constructor for the Web App class # Index route # Run the Web App | 2.795882 | 3 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.