# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AdminKeyResult(msrest.serialization.Model):
"""Response containing the primary and secondary admin API keys for a given Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar primary_key: The primary admin API key of the search service.
:vartype primary_key: str
:ivar secondary_key: The secondary admin API key of the search service.
:vartype secondary_key: str
"""
_validation = {
'primary_key': {'readonly': True},
'secondary_key': {'readonly': True},
}
_attribute_map = {
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AdminKeyResult, self).__init__(**kwargs)
self.primary_key = None
self.secondary_key = None
class AsyncOperationResult(msrest.serialization.Model):
"""The details of a long running asynchronous shared private link resource operation.
:param status: The current status of the long running asynchronous shared private link resource
operation. Possible values include: "Running", "Succeeded", "Failed".
:type status: str or ~azure.mgmt.search.models.SharedPrivateLinkResourceAsyncOperationResult
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AsyncOperationResult, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class CheckNameAvailabilityInput(msrest.serialization.Model):
"""Input of check name availability API.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The search service name to validate. Search service names must contain
only lowercase letters, digits, or dashes, cannot use a dash as either of the first two
characters or as the last character, cannot contain consecutive dashes, and must be between 2
and 60 characters in length.
:type name: str
:ivar type: Required. The type of the resource whose name is to be validated. This value must
always be 'searchServices'. Default value: "searchServices".
:vartype type: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
type = "searchServices"
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityInput, self).__init__(**kwargs)
self.name = kwargs['name']
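# Illustrative usage sketch: 'name' is required and is read via kwargs['name'], so omitting it
# raises a KeyError; 'type' is a class-level constant fixed to "searchServices" and does not
# need to be passed explicitly.
#
#     check_input = CheckNameAvailabilityInput(name="my-search-service")
#     assert check_input.type == "searchServices"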
class CheckNameAvailabilityOutput(msrest.serialization.Model):
"""Output of check name availability API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar is_name_available: A value indicating whether the name is available.
:vartype is_name_available: bool
:ivar reason: The reason why the name is not available. 'Invalid' indicates the name provided
does not match the naming requirements (incorrect length, unsupported characters, etc.).
'AlreadyExists' indicates that the name is already in use and is therefore unavailable.
Possible values include: "Invalid", "AlreadyExists".
:vartype reason: str or ~azure.mgmt.search.models.UnavailableNameReason
:ivar message: A message that explains why the name is invalid and provides resource naming
requirements. Available only if 'Invalid' is returned in the 'reason' property.
:vartype message: str
"""
_validation = {
'is_name_available': {'readonly': True},
'reason': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'is_name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityOutput, self).__init__(**kwargs)
self.is_name_available = None
self.reason = None
self.message = None
class CloudErrorBody(msrest.serialization.Model):
"""Describes a particular API error with an error code and a message.
:param code: An error code that describes the error condition more precisely than an HTTP
status code. Can be used to programmatically handle specific error cases.
:type code: str
:param message: A message that describes the error in detail and provides debugging
information.
:type message: str
:param target: The target of the particular error (for example, the name of the property in
error).
:type target: str
:param details: Contains nested errors that are related to this error.
:type details: list[~azure.mgmt.search.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class Identity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The principal ID of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource.
:vartype tenant_id: str
:param type: Required. The identity type. Possible values include: "None", "SystemAssigned".
:type type: str or ~azure.mgmt.search.models.IdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs['type']
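# Illustrative usage sketch: 'type' is required and is read via kwargs['type'], so omitting it
# raises a KeyError; the value may be an IdentityType enum member or its string form.
#
#     identity = Identity(type="SystemAssigned")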
class IpRule(msrest.serialization.Model):
"""The IP restriction rule of the Azure Cognitive Search service.
:param value: Value corresponding to a single IPv4 address (e.g., 172.16.58.3) or an IP range in
CIDR format (e.g., 172.16.58.3/24) to be allowed.
:type value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpRule, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class ListQueryKeysResult(msrest.serialization.Model):
"""Response containing the query API keys for a given Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The query keys for the Azure Cognitive Search service.
:vartype value: list[~azure.mgmt.search.models.QueryKey]
:ivar next_link: Request URL that can be used to query the next page of query keys. Returned
when the total number of requested query keys exceeds the maximum page size.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[QueryKey]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListQueryKeysResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class NetworkRuleSet(msrest.serialization.Model):
"""Network specific rules that determine how the Azure Cognitive Search service may be reached.
:param ip_rules: A list of IP restriction rules that defines the inbound network(s) allowed to
access the search service endpoint, while all other public IP networks are blocked by the
firewall. These restriction rules are applied only when the 'publicNetworkAccess' of the
search service is 'enabled'; otherwise, traffic over the public interface is not allowed even
with any public IP rules, and private endpoint connections would be the exclusive access
method.
:type ip_rules: list[~azure.mgmt.search.models.IpRule]
"""
_attribute_map = {
'ip_rules': {'key': 'ipRules', 'type': '[IpRule]'},
}
def __init__(
self,
**kwargs
):
super(NetworkRuleSet, self).__init__(**kwargs)
self.ip_rules = kwargs.get('ip_rules', None)
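# Illustrative usage sketch: allow one public address and one CIDR range through the firewall.
# These rules only take effect while the service's 'publicNetworkAccess' is 'enabled'.
#
#     rules = NetworkRuleSet(ip_rules=[
#         IpRule(value="172.16.58.3"),
#         IpRule(value="172.16.58.3/24"),
#     ])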
class Operation(msrest.serialization.Model):
"""Describes a REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation. This name is of the form
{provider}/{resource}/{operation}.
:vartype name: str
:ivar display: The object that describes the operation.
:vartype display: ~azure.mgmt.search.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
'display': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = None
class OperationDisplay(msrest.serialization.Model):
"""The object that describes the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: The friendly name of the resource provider.
:vartype provider: str
:ivar operation: The operation type: read, write, delete, listKeys/action, etc.
:vartype operation: str
:ivar resource: The resource type on which the operation is performed.
:vartype resource: str
:ivar description: The friendly name of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'operation': {'readonly': True},
'resource': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.operation = None
self.resource = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""The result of the request to list REST API operations. It contains a list of operations and a URL to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of operations supported by the resource provider.
:vartype value: list[~azure.mgmt.search.models.Operation]
:ivar next_link: The URL to get the next set of operation list results, if any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class PrivateEndpointConnection(Resource):
"""Describes an existing Private Endpoint connection to the Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param properties: Describes the properties of an existing Private Endpoint connection to the
Azure Cognitive Search service.
:type properties: ~azure.mgmt.search.models.PrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateEndpointConnectionProperties'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""Response containing a list of Private Endpoint connections.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Private Endpoint connections.
:vartype value: list[~azure.mgmt.search.models.PrivateEndpointConnection]
:ivar next_link: Request URL that can be used to query the next page of private endpoint
connections. Returned when the total number of requested private endpoint connections exceeds
the maximum page size.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PrivateEndpointConnectionProperties(msrest.serialization.Model):
"""Describes the properties of an existing Private Endpoint connection to the Azure Cognitive Search service.
:param private_endpoint: The private endpoint resource from Microsoft.Network provider.
:type private_endpoint:
~azure.mgmt.search.models.PrivateEndpointConnectionPropertiesPrivateEndpoint
:param private_link_service_connection_state: Describes the current state of an existing
Private Link Service connection to the Azure Private Endpoint.
:type private_link_service_connection_state:
~azure.mgmt.search.models.PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState
"""
_attribute_map = {
'private_endpoint': {'key': 'privateEndpoint', 'type': 'PrivateEndpointConnectionPropertiesPrivateEndpoint'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionProperties, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
class PrivateEndpointConnectionPropertiesPrivateEndpoint(msrest.serialization.Model):
"""The private endpoint resource from Microsoft.Network provider.
:param id: The resource id of the private endpoint resource from Microsoft.Network provider.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionPropertiesPrivateEndpoint, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState(msrest.serialization.Model):
"""Describes the current state of an existing Private Link Service connection to the Azure Private Endpoint.
:param status: Status of the private link service connection. Can be Pending, Approved,
Rejected, or Disconnected. Possible values include: "Pending", "Approved", "Rejected",
"Disconnected".
:type status: str or ~azure.mgmt.search.models.PrivateLinkServiceConnectionStatus
:param description: The description for the private link service connection state.
:type description: str
:param actions_required: A description of any extra actions that may be required.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', "None")
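# Illustrative usage sketch: recording an approval decision for a private endpoint connection.
# Note that 'actions_required' defaults to the string "None" when omitted.
#
#     state = PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState(
#         status="Approved",
#         description="Approved by the service administrator",
#     )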
class PrivateLinkResource(Resource):
"""Describes a supported private link resource for the Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar properties: Describes the properties of a supported private link resource for the Azure
Cognitive Search service.
:vartype properties: ~azure.mgmt.search.models.PrivateLinkResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.properties = None
class PrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of a supported private link resource for the Azure Cognitive Search service. For a given API version, this represents the 'supported' groupIds when creating a shared private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The group ID of the private link resource.
:vartype group_id: str
:ivar required_members: The list of required members of the private link resource.
:vartype required_members: list[str]
:ivar required_zone_names: The list of required DNS zone names of the private link resource.
:vartype required_zone_names: list[str]
:ivar shareable_private_link_resource_types: The list of resources that are onboarded to
private link service, that are supported by Azure Cognitive Search.
:vartype shareable_private_link_resource_types:
list[~azure.mgmt.search.models.ShareablePrivateLinkResourceType]
"""
_validation = {
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'required_zone_names': {'readonly': True},
'shareable_private_link_resource_types': {'readonly': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
'shareable_private_link_resource_types': {'key': 'shareablePrivateLinkResourceTypes', 'type': '[ShareablePrivateLinkResourceType]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceProperties, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = None
self.shareable_private_link_resource_types = None
class PrivateLinkResourcesResult(msrest.serialization.Model):
"""Response containing a list of supported Private Link Resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of supported Private Link Resources.
:vartype value: list[~azure.mgmt.search.models.PrivateLinkResource]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourcesResult, self).__init__(**kwargs)
self.value = None
class QueryKey(msrest.serialization.Model):
"""Describes an API key for a given Azure Cognitive Search service that has permissions for query operations only.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the query API key; may be empty.
:vartype name: str
:ivar key: The value of the query API key.
:vartype key: str
"""
_validation = {
'name': {'readonly': True},
'key': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(QueryKey, self).__init__(**kwargs)
self.name = None
self.key = None
class SearchManagementRequestOptions(msrest.serialization.Model):
"""Parameter group.
:param client_request_id: A client-generated GUID value that identifies this request. If
specified, this will be included in response information as a way to track the request.
:type client_request_id: str
"""
_attribute_map = {
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchManagementRequestOptions, self).__init__(**kwargs)
self.client_request_id = kwargs.get('client_request_id', None)
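# Illustrative usage sketch: a client-generated GUID can be supplied for request tracing; it is
# serialized under the 'clientRequestId' key.
#
#     import uuid
#     options = SearchManagementRequestOptions(client_request_id=str(uuid.uuid4()))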
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class SearchService(TrackedResource):
"""Describes an Azure Cognitive Search service and its current state.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param sku: The SKU of the Search Service, which determines price tier and capacity limits.
This property is required when creating a new Search Service.
:type sku: ~azure.mgmt.search.models.Sku
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.search.models.Identity
:param replica_count: The number of replicas in the search service. If specified, it must be a
value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
:type replica_count: int
:param partition_count: The number of partitions in the search service; if specified, it can be
1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3'
services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
:type partition_count: int
:param hosting_mode: Applicable only for the standard3 SKU. You can set this property to enable
up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the
maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default'
or 'highDensity'. For all other SKUs, this value must be 'default'. Possible values include:
"default", "highDensity". Default value: "default".
:type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
:param public_network_access: This value can be set to 'enabled' to avoid breaking changes on
existing customer resources and templates. If set to 'disabled', traffic over the public
interface is not allowed, and private endpoint connections would be the exclusive access
method. Possible
values include: "enabled", "disabled". Default value: "enabled".
:type public_network_access: str or ~azure.mgmt.search.models.PublicNetworkAccess
:ivar status: The status of the search service. Possible values include: 'running': The search
service is running and no provisioning operations are underway. 'provisioning': The search
service is being provisioned or scaled up or down. 'deleting': The search service is being
deleted. 'degraded': The search service is degraded. This can occur when the underlying search
units are not healthy. The search service is most likely operational, but performance might be
slow and some requests might be dropped. 'disabled': The search service is disabled. In this
state, the service will reject all API requests. 'error': The search service is in an error
state. If your service is in the degraded, disabled, or error states, it means the Azure
Cognitive Search team is actively investigating the underlying issue. Dedicated services in
these states are still chargeable based on the number of search units provisioned. Possible
values include: "running", "provisioning", "deleting", "degraded", "disabled", "error".
:vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
:ivar status_details: The details of the search service status.
:vartype status_details: str
:ivar provisioning_state: The state of the last provisioning operation performed on the search
service. Provisioning is an intermediate state that occurs while service capacity is being
established. After capacity is set up, provisioningState changes to either 'succeeded' or
'failed'. Client applications can poll provisioning status (the recommended polling interval is
from 30 seconds to one minute) by using the Get Search Service operation to see when an
operation is completed. If you are using the free service, this value tends to come back as
'succeeded' directly in the call to Create search service. This is because the free service
uses capacity that is already set up. Possible values include: "succeeded", "provisioning",
"failed".
:vartype provisioning_state: str or ~azure.mgmt.search.models.ProvisioningState
:param network_rule_set: Network specific rules that determine how the Azure Cognitive Search
service may be reached.
:type network_rule_set: ~azure.mgmt.search.models.NetworkRuleSet
:ivar private_endpoint_connections: The list of private endpoint connections to the Azure
Cognitive Search service.
:vartype private_endpoint_connections:
list[~azure.mgmt.search.models.PrivateEndpointConnection]
:ivar shared_private_link_resources: The list of shared private link resources managed by the
Azure Cognitive Search service.
:vartype shared_private_link_resources:
list[~azure.mgmt.search.models.SharedPrivateLinkResource]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'replica_count': {'maximum': 12, 'minimum': 1},
'partition_count': {'maximum': 12, 'minimum': 1},
'status': {'readonly': True},
'status_details': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'shared_private_link_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
'hosting_mode': {'key': 'properties.hostingMode', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'network_rule_set': {'key': 'properties.networkRuleSet', 'type': 'NetworkRuleSet'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(SearchService, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.identity = kwargs.get('identity', None)
self.replica_count = kwargs.get('replica_count', 1)
self.partition_count = kwargs.get('partition_count', 1)
self.hosting_mode = kwargs.get('hosting_mode', "default")
self.public_network_access = kwargs.get('public_network_access', "enabled")
self.status = None
self.status_details = None
self.provisioning_state = None
self.network_rule_set = kwargs.get('network_rule_set', None)
self.private_endpoint_connections = None
self.shared_private_link_resources = None
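# Illustrative usage sketch of a minimal create payload: 'location' is required (read via
# kwargs['location']), 'sku' is required when creating a new service, and the remaining
# properties fall back to their defaults (replica_count=1, partition_count=1,
# hosting_mode="default", public_network_access="enabled").
#
#     service = SearchService(
#         location="westus",
#         sku=Sku(name="standard"),
#         replica_count=3,
#     )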
class SearchServiceListResult(msrest.serialization.Model):
"""Response containing a list of Azure Cognitive Search services.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of search services.
:vartype value: list[~azure.mgmt.search.models.SearchService]
:ivar next_link: Request URL that can be used to query the next page of search services.
Returned when the total number of requested search services exceeds the maximum page size.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SearchService]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchServiceListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SearchServiceUpdate(Resource):
"""The parameters used to update an Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param sku: The SKU of the Search Service, which determines price tier and capacity limits.
This property is required when creating a new Search Service.
:type sku: ~azure.mgmt.search.models.Sku
:param location: The geographic location of the resource. This must be one of the supported and
registered Azure Geo Regions (for example, West US, East US, Southeast Asia, and so forth).
This property is required when creating a new resource.
:type location: str
:param tags: A set of tags. Tags to help categorize the resource in the Azure portal.
:type tags: dict[str, str]
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.search.models.Identity
:param replica_count: The number of replicas in the search service. If specified, it must be a
value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
:type replica_count: int
:param partition_count: The number of partitions in the search service; if specified, it can be
1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3'
services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
:type partition_count: int
:param hosting_mode: Applicable only for the standard3 SKU. You can set this property to enable
up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the
maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default'
or 'highDensity'. For all other SKUs, this value must be 'default'. Possible values include:
"default", "highDensity". Default value: "default".
:type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
:param public_network_access: This value can be set to 'enabled' to avoid breaking changes on
existing customer resources and templates. If set to 'disabled', traffic over the public
interface is not allowed, and private endpoint connections would be the exclusive access
method. Possible
values include: "enabled", "disabled". Default value: "enabled".
:type public_network_access: str or ~azure.mgmt.search.models.PublicNetworkAccess
:ivar status: The status of the search service. Possible values include: 'running': The search
service is running and no provisioning operations are underway. 'provisioning': The search
service is being provisioned or scaled up or down. 'deleting': The search service is being
deleted. 'degraded': The search service is degraded. This can occur when the underlying search
units are not healthy. The search service is most likely operational, but performance might be
slow and some requests might be dropped. 'disabled': The search service is disabled. In this
state, the service will reject all API requests. 'error': The search service is in an error
state. If your service is in the degraded, disabled, or error states, it means the Azure
Cognitive Search team is actively investigating the underlying issue. Dedicated services in
these states are still chargeable based on the number of search units provisioned. Possible
values include: "running", "provisioning", "deleting", "degraded", "disabled", "error".
:vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
:ivar status_details: The details of the search service status.
:vartype status_details: str
:ivar provisioning_state: The state of the last provisioning operation performed on the search
service. Provisioning is an intermediate state that occurs while service capacity is being
established. After capacity is set up, provisioningState changes to either 'succeeded' or
'failed'. Client applications can poll provisioning status (the recommended polling interval is
from 30 seconds to one minute) by using the Get Search Service operation to see when an
operation is completed. If you are using the free service, this value tends to come back as
'succeeded' directly in the call to Create search service. This is because the free service
uses capacity that is already set up. Possible values include: "succeeded", "provisioning",
"failed".
:vartype provisioning_state: str or ~azure.mgmt.search.models.ProvisioningState
:param network_rule_set: Network specific rules that determine how the Azure Cognitive Search
service may be reached.
:type network_rule_set: ~azure.mgmt.search.models.NetworkRuleSet
:ivar private_endpoint_connections: The list of private endpoint connections to the Azure
Cognitive Search service.
:vartype private_endpoint_connections:
list[~azure.mgmt.search.models.PrivateEndpointConnection]
:ivar shared_private_link_resources: The list of shared private link resources managed by the
Azure Cognitive Search service.
:vartype shared_private_link_resources:
list[~azure.mgmt.search.models.SharedPrivateLinkResource]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'replica_count': {'maximum': 12, 'minimum': 1},
'partition_count': {'maximum': 12, 'minimum': 1},
'status': {'readonly': True},
'status_details': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'shared_private_link_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'Identity'},
'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
'hosting_mode': {'key': 'properties.hostingMode', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'network_rule_set': {'key': 'properties.networkRuleSet', 'type': 'NetworkRuleSet'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(SearchServiceUpdate, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.replica_count = kwargs.get('replica_count', 1)
self.partition_count = kwargs.get('partition_count', 1)
self.hosting_mode = kwargs.get('hosting_mode', "default")
self.public_network_access = kwargs.get('public_network_access', "enabled")
self.status = None
self.status_details = None
self.provisioning_state = None
self.network_rule_set = kwargs.get('network_rule_set', None)
self.private_endpoint_connections = None
self.shared_private_link_resources = None
class ShareablePrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of a resource type that has been onboarded to private link service, supported by Azure Cognitive Search.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The resource provider type for the resource that has been onboarded to private link
service, supported by Azure Cognitive Search.
:vartype type: str
:ivar group_id: The resource provider group id for the resource that has been onboarded to
private link service, supported by Azure Cognitive Search.
:vartype group_id: str
:ivar description: The description of the resource type that has been onboarded to private link
service, supported by Azure Cognitive Search.
:vartype description: str
"""
_validation = {
'type': {'readonly': True},
'group_id': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareablePrivateLinkResourceProperties, self).__init__(**kwargs)
self.type = None
self.group_id = None
self.description = None
class ShareablePrivateLinkResourceType(msrest.serialization.Model):
"""Describes an resource type that has been onboarded to private link service, supported by Azure Cognitive Search.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the resource type that has been onboarded to private link service,
supported by Azure Cognitive Search.
:vartype name: str
:ivar properties: Describes the properties of a resource type that has been onboarded to
private link service, supported by Azure Cognitive Search.
:vartype properties: ~azure.mgmt.search.models.ShareablePrivateLinkResourceProperties
"""
_validation = {
'name': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ShareablePrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(ShareablePrivateLinkResourceType, self).__init__(**kwargs)
self.name = None
self.properties = None
class SharedPrivateLinkResource(Resource):
"""Describes a Shared Private Link Resource managed by the Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param properties: Describes the properties of a Shared Private Link Resource managed by the
Azure Cognitive Search service.
:type properties: ~azure.mgmt.search.models.SharedPrivateLinkResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'SharedPrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class SharedPrivateLinkResourceListResult(msrest.serialization.Model):
"""Response containing a list of Shared Private Link Resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Shared Private Link Resources.
:vartype value: list[~azure.mgmt.search.models.SharedPrivateLinkResource]
:param next_link: The URL to get the next set of shared private link resources, if there are
any.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedPrivateLinkResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = None
self.next_link = kwargs.get('next_link', None)
class SharedPrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of an existing Shared Private Link Resource managed by the Azure Cognitive Search service.
:param private_link_resource_id: The resource id of the resource the shared private link
resource is for.
:type private_link_resource_id: str
:param group_id: The group id from the provider of the resource that the shared private link
resource is for.
:type group_id: str
:param request_message: The request message for requesting approval of the shared private link
resource.
:type request_message: str
:param resource_region: Optional. Can be used to specify the Azure Resource Manager location of
the resource to which a shared private link is to be created. This is only required for those
resources whose DNS configurations are regional (such as Azure Kubernetes Service).
:type resource_region: str
:param status: Status of the shared private link resource. Can be Pending, Approved, Rejected
or Disconnected. Possible values include: "Pending", "Approved", "Rejected", "Disconnected".
:type status: str or ~azure.mgmt.search.models.SharedPrivateLinkResourceStatus
:param provisioning_state: The provisioning state of the shared private link resource. Can be
Updating, Deleting, Failed, Succeeded or Incomplete. Possible values include: "Updating",
"Deleting", "Failed", "Succeeded", "Incomplete".
:type provisioning_state: str or
~azure.mgmt.search.models.SharedPrivateLinkResourceProvisioningState
"""
_attribute_map = {
'private_link_resource_id': {'key': 'privateLinkResourceId', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'request_message': {'key': 'requestMessage', 'type': 'str'},
'resource_region': {'key': 'resourceRegion', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResourceProperties, self).__init__(**kwargs)
self.private_link_resource_id = kwargs.get('private_link_resource_id', None)
self.group_id = kwargs.get('group_id', None)
self.request_message = kwargs.get('request_message', None)
self.resource_region = kwargs.get('resource_region', None)
self.status = kwargs.get('status', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
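# Illustrative usage sketch (the resource id and group id below are placeholders): properties
# for requesting a shared private link to a storage account's blob endpoint.
#
#     props = SharedPrivateLinkResourceProperties(
#         private_link_resource_id=(
#             "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
#             "/providers/Microsoft.Storage/storageAccounts/{storageAccountName}"
#         ),
#         group_id="blob",
#         request_message="Please approve this shared private link.",
#     )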
class Sku(msrest.serialization.Model):
"""Defines the SKU of an Azure Cognitive Search Service, which determines price tier and capacity limits.
:param name: The SKU of the search service. Valid values include: 'free': Shared service.
'basic': Dedicated service with up to 3 replicas. 'standard': Dedicated service with up to 12
partitions and 12 replicas. 'standard2': Similar to standard, but with more capacity per search
unit. 'standard3': The largest Standard offering with up to 12 partitions and 12 replicas (or
up to 3 partitions with more indexes if you also set the hostingMode property to
'highDensity'). 'storage_optimized_l1': Supports 1TB per partition, up to 12 partitions.
'storage_optimized_l2': Supports 2TB per partition, up to 12 partitions. Possible values
include: "free", "basic", "standard", "standard2", "standard3", "storage_optimized_l1",
"storage_optimized_l2".
:type name: str or ~azure.mgmt.search.models.SkuName
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
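# Illustrative usage sketch: the SKU name accepts a SkuName enum member or its string value,
# for example Sku(name="basic") or Sku(name="standard3").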
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param properties: Describes the properties of an existing Private Endpoint connection to the
Azure Cognitive Search service.
:type properties: ~azure.mgmt.search.models.PrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateEndpointConnectionProperties'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""Response containing a list of Private Endpoint connections.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Private Endpoint connections.
:vartype value: list[~azure.mgmt.search.models.PrivateEndpointConnection]
    :ivar next_link: Request URL that can be used to query the next page of private endpoint
     connections. Returned when the total number of requested private endpoint connections
     exceeds the maximum page size.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PrivateEndpointConnectionProperties(msrest.serialization.Model):
"""Describes the properties of an existing Private Endpoint connection to the Azure Cognitive Search service.
:param private_endpoint: The private endpoint resource from Microsoft.Network provider.
:type private_endpoint:
~azure.mgmt.search.models.PrivateEndpointConnectionPropertiesPrivateEndpoint
:param private_link_service_connection_state: Describes the current state of an existing
Private Link Service connection to the Azure Private Endpoint.
:type private_link_service_connection_state:
~azure.mgmt.search.models.PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState
"""
_attribute_map = {
'private_endpoint': {'key': 'privateEndpoint', 'type': 'PrivateEndpointConnectionPropertiesPrivateEndpoint'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionProperties, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
class PrivateEndpointConnectionPropertiesPrivateEndpoint(msrest.serialization.Model):
"""The private endpoint resource from Microsoft.Network provider.
:param id: The resource id of the private endpoint resource from Microsoft.Network provider.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionPropertiesPrivateEndpoint, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState(msrest.serialization.Model):
"""Describes the current state of an existing Private Link Service connection to the Azure Private Endpoint.
    :param status: Status of the private link service connection. Can be Pending, Approved,
Rejected, or Disconnected. Possible values include: "Pending", "Approved", "Rejected",
"Disconnected".
:type status: str or ~azure.mgmt.search.models.PrivateLinkServiceConnectionStatus
:param description: The description for the private link service connection state.
:type description: str
:param actions_required: A description of any extra actions that may be required.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', "None")
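
# Illustrative usage sketch (not part of the generated API surface): the nested helper models
# above combine into PrivateEndpointConnectionProperties (also defined above). The resource ID
# below keeps template placeholders and is purely hypothetical.
def _example_private_endpoint_connection_properties():
    """Assemble PrivateEndpointConnectionProperties from its nested models (illustrative only)."""
    endpoint = PrivateEndpointConnectionPropertiesPrivateEndpoint(
        id="/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Network/privateEndpoints/{name}"
    )
    state = PrivateEndpointConnectionPropertiesPrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the service administrator.",
    )
    return PrivateEndpointConnectionProperties(
        private_endpoint=endpoint,
        private_link_service_connection_state=state,
    )
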
class PrivateLinkResource(Resource):
"""Describes a supported private link resource for the Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar properties: Describes the properties of a supported private link resource for the Azure
Cognitive Search service.
:vartype properties: ~azure.mgmt.search.models.PrivateLinkResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.properties = None
class PrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of a supported private link resource for the Azure Cognitive Search service. For a given API version, this represents the 'supported' groupIds when creating a shared private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The group ID of the private link resource.
:vartype group_id: str
:ivar required_members: The list of required members of the private link resource.
:vartype required_members: list[str]
:ivar required_zone_names: The list of required DNS zone names of the private link resource.
:vartype required_zone_names: list[str]
:ivar shareable_private_link_resource_types: The list of resources that are onboarded to
private link service, that are supported by Azure Cognitive Search.
:vartype shareable_private_link_resource_types:
list[~azure.mgmt.search.models.ShareablePrivateLinkResourceType]
"""
_validation = {
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'required_zone_names': {'readonly': True},
'shareable_private_link_resource_types': {'readonly': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
'shareable_private_link_resource_types': {'key': 'shareablePrivateLinkResourceTypes', 'type': '[ShareablePrivateLinkResourceType]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceProperties, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = None
self.shareable_private_link_resource_types = None
class PrivateLinkResourcesResult(msrest.serialization.Model):
"""Response containing a list of supported Private Link Resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of supported Private Link Resources.
:vartype value: list[~azure.mgmt.search.models.PrivateLinkResource]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourcesResult, self).__init__(**kwargs)
self.value = None
class QueryKey(msrest.serialization.Model):
"""Describes an API key for a given Azure Cognitive Search service that has permissions for query operations only.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the query API key; may be empty.
:vartype name: str
:ivar key: The value of the query API key.
:vartype key: str
"""
_validation = {
'name': {'readonly': True},
'key': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(QueryKey, self).__init__(**kwargs)
self.name = None
self.key = None
class SearchManagementRequestOptions(msrest.serialization.Model):
"""Parameter group.
:param client_request_id: A client-generated GUID value that identifies this request. If
specified, this will be included in response information as a way to track the request.
:type client_request_id: str
"""
_attribute_map = {
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchManagementRequestOptions, self).__init__(**kwargs)
self.client_request_id = kwargs.get('client_request_id', None)
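
# Illustrative usage sketch (not part of the generated API surface): the parameter group only
# carries an optional client-generated GUID, which the service echoes back so a request can be
# correlated with its response.
def _example_request_options():
    """Create request options with a freshly generated client request id (illustrative only)."""
    import uuid

    return SearchManagementRequestOptions(client_request_id=str(uuid.uuid4()))
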
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class SearchService(TrackedResource):
"""Describes an Azure Cognitive Search service and its current state.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param sku: The SKU of the Search Service, which determines price tier and capacity limits.
This property is required when creating a new Search Service.
:type sku: ~azure.mgmt.search.models.Sku
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.search.models.Identity
:param replica_count: The number of replicas in the search service. If specified, it must be a
value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
:type replica_count: int
:param partition_count: The number of partitions in the search service; if specified, it can be
1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3'
services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
:type partition_count: int
:param hosting_mode: Applicable only for the standard3 SKU. You can set this property to enable
up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the
maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default'
or 'highDensity'. For all other SKUs, this value must be 'default'. Possible values include:
"default", "highDensity". Default value: "default".
:type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
:param public_network_access: This value can be set to 'enabled' to avoid breaking changes on
existing customer resources and templates. If set to 'disabled', traffic over public interface
is not allowed, and private endpoint connections would be the exclusive access method. Possible
values include: "enabled", "disabled". Default value: "enabled".
:type public_network_access: str or ~azure.mgmt.search.models.PublicNetworkAccess
:ivar status: The status of the search service. Possible values include: 'running': The search
service is running and no provisioning operations are underway. 'provisioning': The search
service is being provisioned or scaled up or down. 'deleting': The search service is being
deleted. 'degraded': The search service is degraded. This can occur when the underlying search
units are not healthy. The search service is most likely operational, but performance might be
slow and some requests might be dropped. 'disabled': The search service is disabled. In this
state, the service will reject all API requests. 'error': The search service is in an error
state. If your service is in the degraded, disabled, or error states, it means the Azure
Cognitive Search team is actively investigating the underlying issue. Dedicated services in
these states are still chargeable based on the number of search units provisioned. Possible
values include: "running", "provisioning", "deleting", "degraded", "disabled", "error".
:vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
:ivar status_details: The details of the search service status.
:vartype status_details: str
:ivar provisioning_state: The state of the last provisioning operation performed on the search
service. Provisioning is an intermediate state that occurs while service capacity is being
established. After capacity is set up, provisioningState changes to either 'succeeded' or
'failed'. Client applications can poll provisioning status (the recommended polling interval is
from 30 seconds to one minute) by using the Get Search Service operation to see when an
operation is completed. If you are using the free service, this value tends to come back as
'succeeded' directly in the call to Create search service. This is because the free service
uses capacity that is already set up. Possible values include: "succeeded", "provisioning",
"failed".
:vartype provisioning_state: str or ~azure.mgmt.search.models.ProvisioningState
:param network_rule_set: Network specific rules that determine how the Azure Cognitive Search
service may be reached.
:type network_rule_set: ~azure.mgmt.search.models.NetworkRuleSet
:ivar private_endpoint_connections: The list of private endpoint connections to the Azure
Cognitive Search service.
:vartype private_endpoint_connections:
list[~azure.mgmt.search.models.PrivateEndpointConnection]
:ivar shared_private_link_resources: The list of shared private link resources managed by the
Azure Cognitive Search service.
:vartype shared_private_link_resources:
list[~azure.mgmt.search.models.SharedPrivateLinkResource]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'replica_count': {'maximum': 12, 'minimum': 1},
'partition_count': {'maximum': 12, 'minimum': 1},
'status': {'readonly': True},
'status_details': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'shared_private_link_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
'hosting_mode': {'key': 'properties.hostingMode', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'network_rule_set': {'key': 'properties.networkRuleSet', 'type': 'NetworkRuleSet'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(SearchService, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.identity = kwargs.get('identity', None)
self.replica_count = kwargs.get('replica_count', 1)
self.partition_count = kwargs.get('partition_count', 1)
self.hosting_mode = kwargs.get('hosting_mode', "default")
self.public_network_access = kwargs.get('public_network_access', "enabled")
self.status = None
self.status_details = None
self.provisioning_state = None
self.network_rule_set = kwargs.get('network_rule_set', None)
self.private_endpoint_connections = None
self.shared_private_link_resources = None
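
# Illustrative usage sketch (not part of the generated API surface): a minimal create payload
# needs 'location' plus a Sku; the Sku class is defined later in this module and is resolved
# when this function is called. The concrete values shown here are assumptions.
def _example_search_service():
    """Build a SearchService create payload (illustrative only)."""
    return SearchService(
        location="eastus",
        sku=Sku(name="standard"),
        replica_count=3,    # 1-12 for standard SKUs, 1-3 for basic
        partition_count=1,  # 1, 2, 3, 4, 6, or 12
        public_network_access="enabled",
    )
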
class SearchServiceListResult(msrest.serialization.Model):
"""Response containing a list of Azure Cognitive Search services.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of search services.
:vartype value: list[~azure.mgmt.search.models.SearchService]
    :ivar next_link: Request URL that can be used to query the next page of search services.
     Returned when the total number of requested search services exceeds the maximum page size.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SearchService]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SearchServiceListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SearchServiceUpdate(Resource):
"""The parameters used to update an Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param sku: The SKU of the Search Service, which determines price tier and capacity limits.
This property is required when creating a new Search Service.
:type sku: ~azure.mgmt.search.models.Sku
:param location: The geographic location of the resource. This must be one of the supported and
registered Azure Geo Regions (for example, West US, East US, Southeast Asia, and so forth).
This property is required when creating a new resource.
:type location: str
:param tags: A set of tags. Tags to help categorize the resource in the Azure portal.
:type tags: dict[str, str]
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.search.models.Identity
:param replica_count: The number of replicas in the search service. If specified, it must be a
value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
:type replica_count: int
:param partition_count: The number of partitions in the search service; if specified, it can be
1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3'
services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
:type partition_count: int
:param hosting_mode: Applicable only for the standard3 SKU. You can set this property to enable
up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the
maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default'
or 'highDensity'. For all other SKUs, this value must be 'default'. Possible values include:
"default", "highDensity". Default value: "default".
:type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
:param public_network_access: This value can be set to 'enabled' to avoid breaking changes on
existing customer resources and templates. If set to 'disabled', traffic over public interface
is not allowed, and private endpoint connections would be the exclusive access method. Possible
values include: "enabled", "disabled". Default value: "enabled".
:type public_network_access: str or ~azure.mgmt.search.models.PublicNetworkAccess
:ivar status: The status of the search service. Possible values include: 'running': The search
service is running and no provisioning operations are underway. 'provisioning': The search
service is being provisioned or scaled up or down. 'deleting': The search service is being
deleted. 'degraded': The search service is degraded. This can occur when the underlying search
units are not healthy. The search service is most likely operational, but performance might be
slow and some requests might be dropped. 'disabled': The search service is disabled. In this
state, the service will reject all API requests. 'error': The search service is in an error
state. If your service is in the degraded, disabled, or error states, it means the Azure
Cognitive Search team is actively investigating the underlying issue. Dedicated services in
these states are still chargeable based on the number of search units provisioned. Possible
values include: "running", "provisioning", "deleting", "degraded", "disabled", "error".
:vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
:ivar status_details: The details of the search service status.
:vartype status_details: str
:ivar provisioning_state: The state of the last provisioning operation performed on the search
service. Provisioning is an intermediate state that occurs while service capacity is being
established. After capacity is set up, provisioningState changes to either 'succeeded' or
'failed'. Client applications can poll provisioning status (the recommended polling interval is
from 30 seconds to one minute) by using the Get Search Service operation to see when an
operation is completed. If you are using the free service, this value tends to come back as
'succeeded' directly in the call to Create search service. This is because the free service
uses capacity that is already set up. Possible values include: "succeeded", "provisioning",
"failed".
:vartype provisioning_state: str or ~azure.mgmt.search.models.ProvisioningState
:param network_rule_set: Network specific rules that determine how the Azure Cognitive Search
service may be reached.
:type network_rule_set: ~azure.mgmt.search.models.NetworkRuleSet
:ivar private_endpoint_connections: The list of private endpoint connections to the Azure
Cognitive Search service.
:vartype private_endpoint_connections:
list[~azure.mgmt.search.models.PrivateEndpointConnection]
:ivar shared_private_link_resources: The list of shared private link resources managed by the
Azure Cognitive Search service.
:vartype shared_private_link_resources:
list[~azure.mgmt.search.models.SharedPrivateLinkResource]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'replica_count': {'maximum': 12, 'minimum': 1},
'partition_count': {'maximum': 12, 'minimum': 1},
'status': {'readonly': True},
'status_details': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'shared_private_link_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'Identity'},
'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
'hosting_mode': {'key': 'properties.hostingMode', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'network_rule_set': {'key': 'properties.networkRuleSet', 'type': 'NetworkRuleSet'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(SearchServiceUpdate, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.replica_count = kwargs.get('replica_count', 1)
self.partition_count = kwargs.get('partition_count', 1)
self.hosting_mode = kwargs.get('hosting_mode', "default")
self.public_network_access = kwargs.get('public_network_access', "enabled")
self.status = None
self.status_details = None
self.provisioning_state = None
self.network_rule_set = kwargs.get('network_rule_set', None)
self.private_endpoint_connections = None
self.shared_private_link_resources = None
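
# Illustrative usage sketch (not part of the generated API surface): unlike SearchService,
# SearchServiceUpdate has no required kwargs, so a partial update can carry only the properties
# being changed (here, a hypothetical scale-out of replicas).
def _example_search_service_update():
    """Build a partial update that only changes the replica count (illustrative only)."""
    return SearchServiceUpdate(replica_count=6)
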
class ShareablePrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of a resource type that has been onboarded to private link service, supported by Azure Cognitive Search.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The resource provider type for the resource that has been onboarded to private link
service, supported by Azure Cognitive Search.
:vartype type: str
:ivar group_id: The resource provider group id for the resource that has been onboarded to
private link service, supported by Azure Cognitive Search.
:vartype group_id: str
:ivar description: The description of the resource type that has been onboarded to private link
service, supported by Azure Cognitive Search.
:vartype description: str
"""
_validation = {
'type': {'readonly': True},
'group_id': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareablePrivateLinkResourceProperties, self).__init__(**kwargs)
self.type = None
self.group_id = None
self.description = None
class ShareablePrivateLinkResourceType(msrest.serialization.Model):
"""Describes an resource type that has been onboarded to private link service, supported by Azure Cognitive Search.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the resource type that has been onboarded to private link service,
supported by Azure Cognitive Search.
:vartype name: str
:ivar properties: Describes the properties of a resource type that has been onboarded to
private link service, supported by Azure Cognitive Search.
:vartype properties: ~azure.mgmt.search.models.ShareablePrivateLinkResourceProperties
"""
_validation = {
'name': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ShareablePrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(ShareablePrivateLinkResourceType, self).__init__(**kwargs)
self.name = None
self.properties = None
class SharedPrivateLinkResource(Resource):
"""Describes a Shared Private Link Resource managed by the Azure Cognitive Search service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param properties: Describes the properties of a Shared Private Link Resource managed by the
Azure Cognitive Search service.
:type properties: ~azure.mgmt.search.models.SharedPrivateLinkResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'SharedPrivateLinkResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class SharedPrivateLinkResourceListResult(msrest.serialization.Model):
"""Response containing a list of Shared Private Link Resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Shared Private Link Resources.
:vartype value: list[~azure.mgmt.search.models.SharedPrivateLinkResource]
:param next_link: The URL to get the next set of shared private link resources, if there are
any.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedPrivateLinkResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = None
self.next_link = kwargs.get('next_link', None)
class SharedPrivateLinkResourceProperties(msrest.serialization.Model):
"""Describes the properties of an existing Shared Private Link Resource managed by the Azure Cognitive Search service.
:param private_link_resource_id: The resource id of the resource the shared private link
resource is for.
:type private_link_resource_id: str
    :param group_id: The group id from the provider of the resource that the shared private link
     resource is for.
:type group_id: str
:param request_message: The request message for requesting approval of the shared private link
resource.
:type request_message: str
:param resource_region: Optional. Can be used to specify the Azure Resource Manager location of
the resource to which a shared private link is to be created. This is only required for those
     resources whose DNS configuration is regional (such as Azure Kubernetes Service).
:type resource_region: str
:param status: Status of the shared private link resource. Can be Pending, Approved, Rejected
or Disconnected. Possible values include: "Pending", "Approved", "Rejected", "Disconnected".
:type status: str or ~azure.mgmt.search.models.SharedPrivateLinkResourceStatus
:param provisioning_state: The provisioning state of the shared private link resource. Can be
Updating, Deleting, Failed, Succeeded or Incomplete. Possible values include: "Updating",
"Deleting", "Failed", "Succeeded", "Incomplete".
:type provisioning_state: str or
~azure.mgmt.search.models.SharedPrivateLinkResourceProvisioningState
"""
_attribute_map = {
'private_link_resource_id': {'key': 'privateLinkResourceId', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'request_message': {'key': 'requestMessage', 'type': 'str'},
'resource_region': {'key': 'resourceRegion', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SharedPrivateLinkResourceProperties, self).__init__(**kwargs)
self.private_link_resource_id = kwargs.get('private_link_resource_id', None)
self.group_id = kwargs.get('group_id', None)
self.request_message = kwargs.get('request_message', None)
self.resource_region = kwargs.get('resource_region', None)
self.status = kwargs.get('status', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
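
# Illustrative usage sketch (not part of the generated API surface): a shared private link
# request typically carries the target resource id, its group id, and a request message for the
# resource owner to approve. The ids below keep template placeholders and are hypothetical.
def _example_shared_private_link_resource_properties():
    """Build SharedPrivateLinkResourceProperties for a blob storage target (illustrative only)."""
    return SharedPrivateLinkResourceProperties(
        private_link_resource_id="/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Storage/storageAccounts/{account}",
        group_id="blob",
        request_message="Please approve this connection for the search indexer.",
    )
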
class Sku(msrest.serialization.Model):
"""Defines the SKU of an Azure Cognitive Search Service, which determines price tier and capacity limits.
:param name: The SKU of the search service. Valid values include: 'free': Shared service.
'basic': Dedicated service with up to 3 replicas. 'standard': Dedicated service with up to 12
partitions and 12 replicas. 'standard2': Similar to standard, but with more capacity per search
unit. 'standard3': The largest Standard offering with up to 12 partitions and 12 replicas (or
up to 3 partitions with more indexes if you also set the hostingMode property to
'highDensity'). 'storage_optimized_l1': Supports 1TB per partition, up to 12 partitions.
     'storage_optimized_l2': Supports 2TB per partition, up to 12 partitions. Possible values
include: "free", "basic", "standard", "standard2", "standard3", "storage_optimized_l1",
"storage_optimized_l2".
:type name: str or ~azure.mgmt.search.models.SkuName
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
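
# Illustrative usage sketch (not part of the generated API surface): Sku only carries a name
# drawn from the documented SkuName values; "basic" is used here as an arbitrary example.
def _example_sku():
    """Create a basic-tier Sku (illustrative only)."""
    return Sku(name="basic")
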
Valid values include: 'free': Shared service. 'basic': Dedicated service with up to 3 replicas. 'standard': Dedicated service with up to 12 partitions and 12 replicas. 'standard2': Similar to standard, but with more capacity per search unit. 'standard3': The largest Standard offering with up to 12 partitions and 12 replicas (or up to 3 partitions with more indexes if you also set the hostingMode property to 'highDensity'). 'storage_optimized_l1': Supports 1TB per partition, up to 12 partitions. 'storage_optimized_l2': Supports 2TB per partition, up to 12 partitions.'. Possible values include: "free", "basic", "standard", "standard2", "standard3", "storage_optimized_l1", "storage_optimized_l2". :type name: str or ~azure.mgmt.search.models.SkuName | 1.977314 | 2 |
ENV/lib/python3.6/site-packages/plaster/interfaces.py | captain-c00keys/pyramid-stocks | 0 | 6630552 | import abc
from .compat import add_metaclass
@add_metaclass(abc.ABCMeta)
class ILoader(object):
"""
    An abstraction over a source of configuration settings.
It is required to implement ``get_sections``, ``get_settings`` and
``setup_logging``.
Optionally, it may also implement other :term:`loader protocol` interfaces
to provide extra functionality. For example,
:class:`plaster.protocols.IWSGIProtocol` which requires ``get_wsgi_app``,
and ``get_wsgi_server`` for loading WSGI configurations. Services that
depend on such functionality should document the required functionality
behind a particular :term:`loader protocol` which custom loaders can
implement.
:ivar uri: The :class:`plaster.PlasterURL` object used to find the
:class:`plaster.ILoaderFactory`.
"""
@abc.abstractmethod
def get_sections(self):
"""
Load the list of section names available.
"""
@abc.abstractmethod
def get_settings(self, section=None, defaults=None):
"""
Load the settings for the named ``section``.
:param section: The name of the section in the config file. If this is
``None`` then it is up to the loader to determine a sensible
default usually derived from the fragment in the ``path#name``
syntax of the ``config_uri``.
:param defaults: A ``dict`` of default values used to populate the
settings and support variable interpolation. Any values in
``defaults`` may be overridden by the loader prior to returning
the final configuration dictionary.
:returns: A ``dict`` of settings. This should return a dictionary
object even if the section is missing.
:raises ValueError: If a section name is missing and cannot be
determined from the ``config_uri``.
"""
@abc.abstractmethod
def setup_logging(self, defaults=None):
"""
Execute the logging configuration defined in the config file.
This function should, at least, configure the Python standard logging
module. However, it may also be used to configure any other logging
subsystems that serve a similar purpose.
:param defaults: A ``dict`` of default values used to populate the
settings and support variable interpolation. Any values in
``defaults`` may be overridden by the loader prior to returning
the final configuration dictionary.
"""
@add_metaclass(abc.ABCMeta)
class ILoaderFactory(object):
@abc.abstractmethod
def __call__(self, uri):
"""
A factory which accepts a :class:`plaster.PlasterURL` and returns a
:class:`plaster.ILoader` object.
"""
@add_metaclass(abc.ABCMeta)
class ILoaderInfo(object):
"""
An info object describing a specific :class:`plaster.ILoader`.
:ivar scheme: The full scheme of the loader.
:ivar protocols: Zero or more supported :term:`loader protocol`
identifiers.
:ivar factory: The :class:`plaster.ILoaderFactory`.
"""
@abc.abstractmethod
def load(self, config_uri):
"""
Create and return an :class:`plaster.ILoader` instance.
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
"""
| import abc
anthill/platform/api/rest/handlers/detail.py | 0x55AAh/anthill_gaming | 1 | 6630553 |
from anthill.framework.handlers import RequestHandler, JSONHandlerMixin
from anthill.framework.utils.asynchronous import thread_pool_exec
from anthill.framework.core.exceptions import ImproperlyConfigured
from anthill.framework.http import Http404
from anthill.platform.api.rest.handlers.base import MarshmallowMixin
class SingleObjectMixin:
"""
Provide the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
query_pk_and_slug = False
async def get_object(self, queryset=None):
"""
Return the object the handler is displaying.
Require `self.queryset` and a `pk` or `slug` argument in the url entry.
Subclasses can override this to return any object.
"""
# Use a custom queryset if provided.
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.path_kwargs.get(self.pk_url_kwarg)
slug = self.path_kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = await thread_pool_exec(queryset.filter_by, pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = await thread_pool_exec(queryset.filter_by, **{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError(
"Generic detail handler %s must be called with either an object "
"pk or a slug in the url." % self.__class__.__name__)
# Get the single item from the filtered queryset
obj = await thread_pool_exec(queryset.one_or_none)
if obj is None:
raise Http404
return obj
def get_queryset(self):
"""
Return the queryset that will be used to look up the object.
This method is called by the default implementation of get_object() and
may not be called if get_object() is overridden.
"""
if self.queryset is None:
if self.model:
return self.model.query
else:
raise ImproperlyConfigured(
"%(cls)s is missing a queryset. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset
def get_slug_field(self):
"""Get the name of a slug field to be used to look up by slug."""
return self.slug_field
class MarshmallowSingleObjectMixin(MarshmallowMixin):
def get_schema(self):
schema_class = self.get_schema_class()
return schema_class()
def get_schema_class(self):
if self.schema_class is None:
try:
return self.object.__marshmallow__
except AttributeError:
raise ImproperlyConfigured(
"No schema class for dumping data. Either provide a schema_class "
"or define schema on the Model.")
return super().get_schema_class()
class DetailMixin(SingleObjectMixin, MarshmallowSingleObjectMixin, JSONHandlerMixin):
def get_schema_class(self):
if self.schema_class is None:
try:
return self.object.__marshmallow__
except AttributeError:
raise ImproperlyConfigured(
"No schema class for dumping data. Either provide a schema_class "
"or define schema on the Model.")
return self.schema_class
async def get(self, *args, **kwargs):
# noinspection PyAttributeOutsideInit
self.object = await self.get_object()
self.write_json(data=self.serialize(self.object))
class DetailHandler(DetailMixin, RequestHandler):
pass
| from anthill.framework.handlers import RequestHandler, JSONHandlerMixin
homeassistant/components/camera/prefs.py | MrDelik/core | 22,481 | 6630554 | """Preference management for camera component."""
from __future__ import annotations
from typing import Final
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import UNDEFINED, UndefinedType
from .const import DOMAIN, PREF_PRELOAD_STREAM
STORAGE_KEY: Final = DOMAIN
STORAGE_VERSION: Final = 1
class CameraEntityPreferences:
"""Handle preferences for camera entity."""
def __init__(self, prefs: dict[str, bool]) -> None:
"""Initialize prefs."""
self._prefs = prefs
def as_dict(self) -> dict[str, bool]:
"""Return dictionary version."""
return self._prefs
@property
def preload_stream(self) -> bool:
"""Return if stream is loaded on hass start."""
return self._prefs.get(PREF_PRELOAD_STREAM, False)
class CameraPreferences:
"""Handle camera preferences."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize camera prefs."""
self._hass = hass
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._prefs: dict[str, dict[str, bool]] | None = None
async def async_initialize(self) -> None:
"""Finish initializing the preferences."""
if (prefs := await self._store.async_load()) is None:
prefs = {}
self._prefs = prefs
async def async_update(
self,
entity_id: str,
*,
preload_stream: bool | UndefinedType = UNDEFINED,
stream_options: dict[str, str] | UndefinedType = UNDEFINED,
) -> None:
"""Update camera preferences."""
# Prefs already initialized.
assert self._prefs is not None
if not self._prefs.get(entity_id):
self._prefs[entity_id] = {}
for key, value in ((PREF_PRELOAD_STREAM, preload_stream),):
if value is not UNDEFINED:
self._prefs[entity_id][key] = value
await self._store.async_save(self._prefs)
def get(self, entity_id: str) -> CameraEntityPreferences:
"""Get preferences for an entity."""
# Prefs are already initialized.
assert self._prefs is not None
return CameraEntityPreferences(self._prefs.get(entity_id, {}))
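# Illustrative only: how calling code might use CameraPreferences from within a
# coroutine. ``hass`` is assumed to be a configured HomeAssistant instance and
# the entity id is made up for the example.
async def _example_usage(hass: HomeAssistant) -> None:
    prefs = CameraPreferences(hass)
    await prefs.async_initialize()
    await prefs.async_update("camera.front_door", preload_stream=True)
    if prefs.get("camera.front_door").preload_stream:
        pass  # e.g. start the stream eagerly on startup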
dfwinreg/__init__.py | ict/dfwinreg | 0 | 6630555 | # -*- coding: utf-8 -*-
"""Digital Forensics Windows Registry (dfWinReg).
dfWinReg, or Digital Forensics Windows Registry, is a Python module
that provides read-only access to Windows Registry objects.
"""
__version__ = '20200211'
"""Digital Forensics Windows Registry (dfWinReg).
dfWinReg, or Digital Forensics Windows Registry, is a Python module
that provides read-only access to Windows Registry objects.
"""
__version__ = '20200211'
| en | 0.623631 | # -*- coding: utf-8 -*- Digital Forensics Windows Registry (dfWinReg). dfWinReg, or Digital Forensics Windows Registry, is a Python module that provides read-only access to Windows Registry objects. | 1.370818 | 1 |
plugins/ctags_generator/test_ctags_generator.py | likev/gauravssnl.github.io | 2 | 6630556 | # -*- coding: utf-8 -*-
import os, shutil
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer
from ctags_generator import generate_ctags
CUR_DIR = os.path.dirname(__file__)
TEST_CONTENT_DIR = os.path.join(CUR_DIR, 'test_content')
class CtagsGeneratorTest(unittest.TestCase):
def test_generate_ctags(self):
settings = get_settings(filenames={})
settings['GENERATE_CTAGS'] = True
context = settings.copy()
context['generated_content'] = dict()
context['static_links'] = set()
generator = ArticlesGenerator(
context=context, settings=settings,
path=TEST_CONTENT_DIR, theme=settings['THEME'], output_path=TEST_CONTENT_DIR)
generator.generate_context()
writer = Writer(TEST_CONTENT_DIR, settings=settings)
generate_ctags(generator, writer)
output_path = os.path.join(TEST_CONTENT_DIR, 'tags')
self.assertTrue(os.path.exists(output_path))
try:
# output content is correct
with open(output_path, 'r') as output_file:
ctags = [l.split('\t')[0] for l in output_file.readlines()]
self.assertEqual(['bar', 'bar', 'foo', 'foo', 'foobar', 'foobar', 'マック', 'パイソン'], ctags)
finally:
os.remove(output_path)
| # -*- coding: utf-8 -*-
import os, shutil
from pelican.generators import ArticlesGenerator
PublicConfig.py | CzechGlobe/MeteoDataMiner | 0 | 6630557 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
class Configuration(object):
LINKS_ARRAY = ['http://www.emsbrno.cz/p.axd/cs/%C5%BDab%C4%8Dice.PPS.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Vigantice.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Marchegg.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Kameni%C4%8Dky.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Jev%C3%AD%C4%8Dko.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Edelhof.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Doksany.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Byst%C5%99ice.n_u_.P._t_.doln%C3%AD.topoly.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Byst%C5%99ice.n_u_.P_u_._t_.horn%C3%AD.topoly.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Byst%C5%99ice.n_u_.P_u_._t_.horn%C3%AD.tr%C3%A1vn%C3%ADk.MZLUUAB.html',
'http://www.emsbrno.cz/p.axd/cs/Sumperalm.MZLUUAB.html'
]
USER_NAME = ''
PASSWORD = ''
EXPORT_DIR = ''
class MinerConfiguration (object):
START_DATE = '2016-11-28'
END_DATE = '2016-11-21'
    RUN_DATE = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
COUNT_OF_DAYS_IN_ONE_STEP = 8
FTP_HOST = ''
FTP_LOGIN = ''
FTP_PSWD = ''
FTP_DIR_PATH = ''
FTP_FILE_EXTENSION = '.mnd'
CZECH_GLOBE_FTP_ADDRESS = ''
CZECH_GLOBE_FTP_PORT = 21
CZECH_GLOBE_FTP_USER = ''
CZECH_GLOBE_FTP_PASSWORD = ''
CZECH_GLOBE_FTP_DIRS = ['/Scinti_Polk', '/Scinti_Doman']
CZECH_GLOBE_FTP_DIRS_FILE_EXTENSION = '.csv'
LOG_FILE_PATH = ''
LOGGER_START = '--START--'
LOGGER_ERROR = '--ERROR--'
LOGGER_WARNING = '--WARNING--'
LOGGER_INFO = '--INFO--'
LOGGER_FAILURE = '--FAILURE--'
LOGGER_SUCCESS = '--SUCCESS--'
LOGGER_END = '--END--'
LOCAL_DIR = ''
CONNECTED_FILES_EXTENSION = '.csv'
WEEK_FILE = str(START_DATE) + '_' + str(END_DATE) + '.csv'
WEEK_SUMMARY = str(START_DATE) + '_' + str(END_DATE) + '_week_sum.csv'
DATA_LINES_POINTER = 'PT'
CSV_DELIMITER = ';'
NO_DATA_VALUE = -9999
INDEXES_OF_NEEDED_DATA = [0, 1, 2, 3, 7, 10, 13, 16, 19, 22, 26, 27, 28, 43, 47, 51, 55, 59, 63, 67, 71, 75, 112]
HEADERS = ['Time',
'Structure Function Constant of Refractive Index Fluctuations at Instrument Wavelength',
'Structure Function Constant of Temperature Fluctuations',
'Heat Flux',
'Monin-Obukhov Length',
'Friction Velocity ',
'Turbulent Temperature Scale',
'Momentum Flux',
'Inner Scale Length ',
'Dissipation Rate of Turbulent Kinetic Energy',
'Latent Heat Flux',
'Water Evapotranspiration',
'Albedo',
'WSP Pressure',
'WSP Temperatur',
'WSP Relative Humidity A',
'WSP Relative Humidity B',
'WSP Relative Humidity C',
'WSP Temperature Upper',
'WSP Temperature Lower',
'WSP Net Radiation',
'WSP Global Radiation',
'WSP Reflected Global Radiation',
'WSP Soil Heat Flux A',
'WSP Soil Heat Flux B',
'Error Code']
DGN_COUNT_OF_DAYS_IN_ONE_STEP = 30
DGN_FTP_FILE_EXTENSION = '.dgn'
DGN_LOG_FILE_PATH = ''
DGN_LOCAL_DIR = ''
INDEXES_OF_NEEDED_DGN_DATA = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 99, 100, 101, 102, 103]
DGN_HEADERS = ['Time',
'Average XA (Corrected)',
'Average YA (Corrected)',
'Normalized Std.Dev. XA (Corrected)',
'Normalized Std.Dev. YA (Corrected)',
'Correlation XA/YA (Corrected)',
'Number of Samples',
'Average XA',
'Average YA',
'Average XB',
'Average YB',
'Std.Dev. XA',
'Std.Dev. YA',
'Std.Dev. XB',
'Std.Dev. YB',
'Minimum XA',
'Minimum YA',
'Minimum XB',
'Minimum YB',
'Maximum XA',
'Maximum YA',
'Maximum XB',
'Maximum YB',
'Correlation XA/YA',
'Correlation XB/YB',
'Correlation XA/XB',
'Correlation YA/YB',
'Correlation XA/YB',
'Correlation YA/XB',
'Channel Flags XA',
'Channel Flags YA',
'Channel Flags XB',
'Channel Flags YB',
'Error code']
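# Illustrative only: a tiny consumer of the settings above, showing how a
# downstream script might read them (attribute names taken from the classes
# defined in this module).
if __name__ == '__main__':
    print('Stations configured:', len(Configuration.LINKS_ARRAY))
    print('Run started at:', MinerConfiguration.RUN_DATE)
    print('Weekly output file:', MinerConfiguration.WEEK_FILE)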
tests/unit/test_utils.py | ITV/aws-scheduled-event-adjuster | 1 | 6630558 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import pytest
from lib import utils
def test_get_tag_by_key():
tags = [{'Key': 'foo', 'Value': 'bar'}, {'Key': 'baz', 'Value': 'quux'}]
assert utils.get_tag_by_key(tags, 'foo') == 'bar'
    assert utils.get_tag_by_key(tags, 'nope') is None
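# For illustration only: a minimal reference implementation of the helper
# exercised above. The real implementation lives in lib/utils.py and may differ.
def _reference_get_tag_by_key(tags, key):
    for tag in tags:
        if tag.get('Key') == key:
            return tag.get('Value')
    return None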
scripts/spring 2020/relax results/relax_results_cf_calibration.py | ronan-keane/hav-sim | 0 | 6630559 |
"""
@author: <EMAIL>
"""
import havsim.calibration.calibration as hc
import time
import scipy.optimize as sc
import matplotlib.pyplot as plt
import math
import pickle
import havsim.calibration.calibration_models as hm
# load data
try:
with open('C:/Users/rlk268/OneDrive - Cornell University/havsim/data/recon-ngsim.pkl', 'rb') as f:
meas, platooninfo = pickle.load(f) #load data
except:
with open('/home/rlk268/havsim/data/recon-ngsim.pkl', 'rb') as f:
meas, platooninfo = pickle.load(f) #load data
# categorize vehicles
veh_list = meas.keys()
merge_list = []
lc_list = []
nolc_list = []
for veh in veh_list:
t_nstar, t_n = platooninfo[veh][0:2]
if t_n > t_nstar and meas[veh][t_n-t_nstar-1,7]==7 and meas[veh][t_n-t_nstar,7]==6:
merge_list.append(veh)
elif len(platooninfo[veh][4]) > 1:
lc_list.append(veh)
elif len(platooninfo[veh][4]) == 1:
nolc_list.append(veh)
# define training loop
def training_ga(veh_id_list, bounds, meas, platooninfo, dt, workers = 2, kwargs = {}):
"""Runs differential evolution to fit parameters for a list of CalibrationVehicle's"""
#veh_id_list = list of float vehicle id, bounds = bounds for optimizer (list of tuples),
#kwargs = dictionary with keyword arguments for hc.make_calibration
out = []
for veh_id in veh_id_list:
cal = hc.make_calibration([veh_id], meas, platooninfo, dt, **kwargs)
ga = sc.differential_evolution(cal.simulate, bounds = bounds, workers = workers)
out.append(ga)
return out
def training(plist, veh_id_list, bounds, meas, platooninfo, dt, vehicle_object, cutoff = 6, kwargs = {}):
"""Runs bfgs with multiple initial guesses to fit parameters for a CalibrationVehicle"""
#veh_id = float vehicle id, plist = list of parameters, bounds = bounds for optimizer (list of tuples),
#cutoff = minimum mse required for multiple guesses
#kwargs = dictionary with keyword arguments for hc.make_calibration
out = []
for veh_id in veh_id_list:
cal = hc.make_calibration([veh_id], meas, platooninfo, dt, **kwargs)
bestmse = math.inf
best = None
for guess in plist:
bfgs = sc.fmin_l_bfgs_b(cal.simulate, guess, bounds = bounds, approx_grad=1)
if bfgs[1] < bestmse:
best = bfgs
bestmse = bfgs[1]
if bestmse < cutoff:
break
out.append(best)
return out
class NoRelaxIDM(hc.CalibrationVehicle):
def set_relax(self, *args):
pass
def initialize(self, parameters): # just need to set parameters correctly
super().initialize(parameters)
self.cf_parameters = parameters
class NoRelaxOVM(hm.OVMCalibrationVehicle):
def set_relax(self, *args):
pass
def initialize(self, parameters):
super().initialize(parameters)
self.cf_parameters = parameters
class NoRelaxNewell(hm.NewellCalibrationVehicle):
def set_relax(self, *args):
pass
def initialize(self, parameters):
super().initialize(parameters)
self.cf_parameters = parameters
#%% # updated, but not tested, after the 'refactored calibration + added calibration_models' commit
"""Used GA + ballistic update for paper results. Using euler update is probably better in terms of mse.
Can use BFGS instead of GA, which is significantly faster, but can have problems with local minima."""
"""
Run 1: IDM with no accident-free relax, no max speed bound, no acceleration bound (only for merge, lc)
"""
plist = [[40,1,1,3,10,25], [60,1,1,3,10,5], [80,1,15,1,1,35], [70,2,10,2,2,15]]
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,75)]
relax_lc_res = training_ga(lc_list, bounds, meas, platooninfo, .1)
relax_merge_res = training_ga(merge_list, bounds, meas, platooninfo, .1)
with open('IDMrelax.pkl','wb') as f:
pickle.dump((relax_lc_res,relax_merge_res), f)
# """
# Run 2: Like Run 1, but with relax disabled. (for all vehicles)
# """
# plist = [[40,1,1,3,10], [60,1,1,3,10], [80,1,15,1,1], [70,2,10,2,2]]
# bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20)]
# kwargs = {'vehicle_class': NoRelaxIDM}
# norelax_lc_res = training_ga(lc_list, bounds, meas, platooninfo, .1 , kwargs = kwargs)
# norelax_merge_res = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
# norelax_nolc_res = training_ga(nolc_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
# with open('IDMnorelax.pkl','wb') as f:
# pickle.dump((norelax_lc_res,norelax_merge_res,norelax_nolc_res),f)
"""
Run 3: OVM with no accident-free relax, no max speed bound, no acceleration bound (only for merge, lc)
"""
plist = [[10*3.3,.086/3.3, 1.545, 2, .175, 5 ], [20*3.3,.086/3.3/2, 1.545, .5, .175, 60 ],
[10*3.3,.086/3.3/2, .5, .5, .175, 60 ], [25,.05, 1,3, 1, 25]]
bounds = [(20,120),(.001,.1),(.1,2),(.1,5),(0,3), (.1,75)]
kwargs = {'vehicle_class': hm.OVMCalibrationVehicle}
relax_lc_res_ovm = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
relax_merge_res_ovm = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
with open('OVMrelax.pkl', 'wb') as f:
pickle.dump((relax_lc_res_ovm, relax_merge_res_ovm),f)
# """
# Run 4: Like Run 3, but with relax disabled. (for all vehicles)
# """
# plist = [[10*3.3,.086/3.3, 1.545, 2, .175], [20*3.3,.086/3.3/2, 1.545, .5, .175 ],
# [10*3.3,.086/3.3/2, .5, .5, .175 ], [25,.05, 1,3, 1]]
# bounds = [(20,120),(.001,.1),(.1,2),(.1,5),(0,3)]
# kwargs = {'vehicle_class': NoRelaxOVM}
# norelax_lc_res_ovm = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs)
# norelax_merge_res_ovm = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs)
# norelax_nolc_res_ovm = training_ga(nolc_list, bounds, meas, platooninfo, .1, kwargs)
# with open('OVMnorelax.pkl', 'wb') as f:
# pickle.dump((norelax_lc_res_ovm, norelax_merge_res_ovm, norelax_nolc_res_ovm),f)
"""
Run 7: Try existing Relaxation model due to Schakel, Knoop, <NAME> (2012)
"""
plist = [[40,1,1,3,10,1, 25], [60,1,1,3,10,1,5], [80,1,15,1,1,1,35], [70,2,10,2,2,2,15]]
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,5),(.101,75)]
kwargs = {'vehicle_class': hm.SKA_IDM}
relax_lc_res_ska = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_ska = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('SKArelax.pkl', 'wb') as f:
pickle.dump([relax_lc_res_ska, relax_merge_res_ska],f)
"""
2 Parameter positive/negative relax IDM
"""
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,5),(.1,75),(.1,75)]
kwargs = {'vehicle_class': hm.Relax2IDM}
relax_lc_res_2p = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_2p = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('2pIDM.pkl', 'wb') as f:
pickle.dump([relax_lc_res_2p, relax_merge_res_2p],f)
# """
# 2 parameter shape/time relax IDM
# """
# bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,5),(.1,75),(-1,1)]
# kwargs = {'vehicle_class': hm.RelaxShapeIDM}
# relax_lc_res_2ps = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
# relax_merge_res_2ps = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
# with open('2psIDM.pkl', 'wb') as f:
# pickle.dump([relax_lc_res_2ps, relax_merge_res_2ps],f)
"""
Run 5: Newell with no accident free
"""
bounds = [(.1,10),(0,100),(40,120),(.1,75)]
kwargs = {'vehicle_class': hm.NewellCalibrationVehicle}
relax_lc_res_newell = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_newell = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('Newellrelax.pkl','wb') as f:
pickle.dump([relax_lc_res_newell, relax_merge_res_newell], f)
"""
Run 6: Like Run 5, but with no relax
"""
bounds = [(.1,10),(0,100),(40,120)]
kwargs = {'vehicle_class': NoRelaxNewell}
norelax_lc_res_newell = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
norelax_merge_res_newell = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
norelax_nolc_res_newell = training_ga(nolc_list, bounds, meas, platooninfo, .1, kwargs = kwargs)
with open('Newellnorelax.pkl','wb') as f:
pickle.dump([norelax_lc_res_newell, norelax_merge_res_newell, norelax_nolc_res_newell], f)
#%%
"""
LL Relaxation Model
"""
bounds = [(1,100),(1,120),(40,120),(.5, 20)]
kwargs = {'vehicle_class': hm.NewellLL, 'event_maker':hm.make_ll_lc_event, 'lc_event_fun':hm.ll_lc_event}
relax_lc_res_ll = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_ll = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('NewellLL.pkl', 'wb') as f:
pickle.dump([relax_lc_res_ll, relax_merge_res_ll], f)
#%%
"""
Exponential Relaxation
"""
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,75)]
kwargs = {'vehicle_class': hm.RelaxExpIDM}
relax_lc_res_exp = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_exp = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('ExpIDM.pkl', 'wb') as f:
pickle.dump([relax_lc_res_exp, relax_merge_res_exp], f)
#%%
"""
"""
bounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20),(.1,5),(.1,75),(.1,75)]
kwargs = {'vehicle_class': hm.Relax2vhdIDM}
relax_lc_res_2p = training_ga(lc_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
relax_merge_res_2p = training_ga(merge_list, bounds, meas, platooninfo, .1, kwargs= kwargs)
with open('2pvhdIDM.pkl', 'wb') as f:
pickle.dump([relax_lc_res_2p, relax_merge_res_2p],f)
| <filename>scripts/spring 2020/relax results/relax_results_cf_calibration.py
"""
@author: <EMAIL>
"""
import havsim.calibration.calibration as hc
dropzone_backup_server/security.py | nagylzs/dropzone-backup-server | 0 | 6630560 | import time
import os
import re
import warnings
from argon2 import PasswordHasher
from argon2.exceptions import VerifyMismatchError
from .error import AbortRequest
VALID_PERM_CODES = "W"
HEADER = "# username:upload_dir_prefix:permission_flags:password_hash"
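# Illustrative example (added): following HEADER, a passwd file line could look like
#   alice:backups/alice:W:$argon2id$...
# i.e. login, upload prefix, permission flags (only "W" is valid) and an argon2 hash
# as produced by PasswordHasher().hash(); an empty hash field disables the account
# (see check_password below).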
class SecurityManager(object):
"""Manages a list of users.
Can only be used from a single thread (async server).
DO NOT USE FROM MULTIPLE THREADS OR PROCESSES.
You can write into the passwd file on the disk, and it will be reloaded within the number of seconds defined in the
TTL value below. Otherwise all updates should be done through a security manager object, with this pattern:
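    For example (illustrative sketch; the passwd path and values are arbitrary):
        manager = SecurityManager("/etc/dropzone/passwd")
        manager.save_user("alice", "backups/alice", "W", "secret-password")
        assert manager.check_password("alice", "secret-password")
        manager.delete_user("alice")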
"""
TTL = 10.0 # In seconds
def __init__(self, passwdfile):
self.passwdfile = passwdfile
self._last_loaded = 0
self._last_mtime = 0
        self._users = {}
self._passwords = {}
def get_users(self) -> dict:
self._load_all()
return self._users
def get_user(self, login):
users = self.get_users()
if login in users:
return users[login]
else:
return None
def _load_all(self):
now = time.time()
if self._last_loaded + self.TTL < now:
mtime = os.stat(self.passwdfile).st_mtime
if mtime != self._last_mtime:
self._load_users()
self._last_mtime = mtime
self._last_loaded = now
def _load_users(self):
# TODO: check permissions of the passwd file and issue a warning when not protected.
print("Reloading users from %s" % self.passwdfile)
self._users.clear()
self._passwords.clear()
lineno = 0
for line in open(self.passwdfile, "r"):
lineno += 1
line = line.strip()
if line and not line.startswith("#"):
login, prefix, perms, *parts = line.split(":")
pwd = ":".join(parts)
login = login.strip().lower()
prefix = prefix.strip()
login_ok = re.match("[a-z][a-z0-9]*", login)
prefix_ok = not prefix or re.match(
"[a-z][a-z0-9]*(/[a-z][a-z0-9]*)*", prefix) and not prefix.endswith("/")
if not login_ok:
warnings.warn("WARNING: invalid login name '%s' at line %d" % (login, lineno))
if not prefix_ok:
warnings.warn("WARNING: invalid prefix name '%s' at line %d" % (prefix, lineno))
if login_ok and prefix_ok:
self._users[login] = {
"name": login,
"prefix": prefix,
"perms": perms,
}
self._passwords[login] = pwd
def _dump_users(self):
print("Saving users to %s" % self.passwdfile)
usernames = sorted(self._users.keys())
with open(self.passwdfile + ".part", "w+") as fout:
fout.write(HEADER + "\n")
for username in usernames:
user = self._users[username]
# print("???",self._passwords)
# print("???",self._passwords[username])
line = "%s:%s:%s:%s" % (
username,
user["prefix"],
user["perms"],
self._passwords[username]
)
# print(repr(line))
fout.write(line + "\n")
bakfile = self.passwdfile + ".bak"
if os.path.isfile(bakfile):
os.unlink(bakfile)
os.rename(self.passwdfile, bakfile)
os.rename(fout.name, self.passwdfile)
def check_password(self, login, password) -> bool:
if not password:
return False
user = self.get_user(login)
if user:
if not self._passwords[login]:
                return False  # Null password -> disable user
else:
try:
PasswordHasher().verify(self._passwords[login], password)
return True
except VerifyMismatchError:
return False
else:
return False
def get_perms(self, login) -> str:
users = self.get_users()
if login in users:
return users[login]["perms"]
else:
return ""
def save_user(self, login, prefix, perms, password):
# Make sure that we have a fresh db
self.get_users()
# Validate parameters
login = login.strip().lower()
prefix = prefix.strip()
login_ok = re.match("[a-z][a-z0-9]*", login)
prefix_ok = not prefix or re.match(
"[a-z][a-z0-9]*(/[a-z][a-z0-9]*)*", prefix) and \
not prefix.endswith("/")
if not login_ok:
raise AbortRequest(400, "Invalid login name '%s'" % login)
if not prefix_ok:
raise AbortRequest(400, "Invalid prefix '%s'" % prefix)
perms = "".join([perm for perm in perms if perm in VALID_PERM_CODES])
if not password:
if login in self._passwords:
password = self._passwords[login]
else:
if password and len(password) < 6:
raise AbortRequest(403, "Minimum password length is 6.")
elif password == login:
raise AbortRequest(403, "Password and login must not match.")
password = PasswordHasher().hash(password)
if login_ok and prefix_ok:
# Save to memory
user = {
"name": login,
"prefix": prefix,
"perms": perms,
}
print("Saving user %s" % login)
self._users[login] = user
self._passwords[login] = password
self._dump_users()
def delete_user(self, login):
# Make sure that we have a fresh db
self.get_users()
# Validate parameters
login = login.strip().lower()
login_ok = re.match("[a-z][a-z0-9]*", login)
if not login_ok:
raise AbortRequest(400, "Invalid login name '%s'" % login)
if login in self._users:
print("Deleting user %s" % login)
del self._users[login]
self._dump_users()
else:
raise AbortRequest(404, "Cannot delete, user does not exist.")
| en | 0.761413 | Manages a list of users. Can only be used from a single thread (async server). DO NOT USE FROM MULTIPLE THREADS OR PROCESSES. You can write into the passwd file on the disk, and it will be reloaded within the number of seconds defined in the TTL value below. Otherwise all updates should be done through a security manager object, with this pattern: # In seconds # TODO: check permissions of the passwd file and issue a warning when not protected. # print("???",self._passwords) # print("???",self._passwords[username]) # print(repr(line)) # Null password -> disable usedr # Make sure that we have a fresh db # Validate parameters # Save to memory # Make sure that we have a fresh db # Validate parameters | 2.875267 | 3 |
MISP/cef_format.py | andradjp/tools | 0 | 6630561 | import datetime, send_data
cefmapping = {"ip-src": "src", "ip-dst": "dst", "hostname": "dhost", "domain": "dhost",
"md5": "fileHash", "sha1": "fileHash", "sha256": "fileHash",
"url": "request"}
mispattributes = {'input': list(cefmapping.keys())}
outputFileExtension = "cef"
responseType = "application/txt"
def export_data(request=False):
if request is False:
return False
if "config" in request:
config = request["config"]
else:
config = {"Default_Severity": 1, "Device_Vendor": "MISP", "Device_Product": "MISP", "Device_Version": 1,
'custom1':'deviceCustomDate1'}
if request["type"] in cefmapping:
send_data.send("{} host CEF:0|{}|{}|{}|{}|{}|{}|{}={} {}={}\n".format(
datetime.datetime.now().strftime("%b %d %H:%M:%S"),
config["Device_Vendor"],
config["Device_Product"],
config["Device_Version"],
request["category"],
request["category"],
config["Default_Severity"],
cefmapping[request["type"]],
request["value"],
config["custom1"],
datetime.datetime.fromtimestamp(int(request["timestamp"])).strftime("%b %d %H:%M:%S"),
))
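# Illustrative usage (added; attribute values are made up): a MISP attribute such as
#   {"type": "ip-src", "category": "Network activity", "value": "198.51.100.7",
#    "timestamp": "1577836800"}
# is rendered with the default config above roughly as
#   <now> host CEF:0|MISP|MISP|1|Network activity|Network activity|1|src=198.51.100.7 deviceCustomDate1=<attribute time>
# and pushed out through send_data.send().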
| none | 1 | 2.322717 | 2 |
|
autoload/SimplenoteList.py | Andrea-Augello/simplenote.vim | 208 | 6630562 | def SimplenoteList():
if (float(vim.eval("a:0"))>=1):
try:
# check for valid date string
datetime.datetime.strptime(vim.eval("a:1"), "%Y-%m-%d")
interface.list_note_index_in_scratch_buffer(since=vim.eval("a:1"))
except ValueError:
interface.list_note_index_in_scratch_buffer(tags=vim.eval("a:1").split(","))
else:
interface.list_note_index_in_scratch_buffer()
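# Note (added, illustrative): the single optional argument is first tried as a
# YYYY-MM-DD date ("list notes changed since"); anything that fails strptime is
# treated as a comma-separated tag list, e.g. "work,todo".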
try:
set_cred()
SimplenoteList()
except simplenote.SimplenoteLoginFailed:
# Note: error has to be caught here and not in __init__
reset_user_pass('Login Failed. Check token?')
# vim: expandtab
| en | 0.64254 | # check for valid date string # Note: error has to be caught here and not in __init__ # vim: expandtab | 2.272382 | 2 |
src/openbiolink/graph_creation/metadata_infile/mapping/inMetaMapString.py | jerryhluo/OpenBioLink | 97 | 6630563 | <filename>src/openbiolink/graph_creation/metadata_infile/mapping/inMetaMapString.py
from openbiolink.graph_creation.metadata_infile.infileMetadata import InfileMetadata
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.namespace import *
class InMetaMapString(InfileMetadata):
CSV_NAME = "DB_String_mapping_gene_ncbi_string.csv"
USE_COLS = ["ncbiID", "stringID"]
SOURCE_COL = 1
TARGET_COL = 0
TARGET_NAMESPACE = Namespace(Namespaces.NCBI, False)
MAPPING_SEP = "|"
INFILE_TYPE = InfileType.IN_MAP_STRING
def __init__(self):
super().__init__(csv_name=InMetaMapString.CSV_NAME, cols=self.USE_COLS, infileType=InMetaMapString.INFILE_TYPE)
| none | 1 | 2.146186 | 2 |
|
substructure.py | egepaksoy/python | 1 | 6630564 | def search(self):
response = []
z = 0
while z < len(self):
if self[z] == ' ':
response.append(self[:z])
self = self[z+1:]
z = 0
if z == len(self)-1:
response.append(self)
break
else:
z += 1
return response
def molds(self):
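    # Note (added): 'self' here is the raw input string, not a class instance. 'molds'
    # below holds canned greeting/response templates grouped by intent; the function
    # returns every template sharing at least one word with the input (using the
    # hand-rolled whitespace split in search() above), plus the input and the full list.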
response = []
z = 0
# 0 1 2 3 4 5
molds = [['hi', 'hello'],['how are you'] , ["how it's going", "whats up"], ["what are you doing"], ['im fine', 'thanks', 'fine', 'im good', 'i feel good', 'i feel great', 'i feel fine'], ['not a real answer', 'not an answer']]
for x in molds:
for y in x:
for a in search(self):
if a in search(y):
z = 0
response.append(y)
return response, self, molds
def search_algorithm(self):
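    # Note (added): lower-cases the input, gathers the matching templates via molds(),
    # then returns the template that was matched most often; the extra comparison against
    # control[0] and control[:3] nudges ties using the input's first characters.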
self = self.lower()
self = molds(self)
key_num = 0
key_val = ''
response = self[0]
control = self[1]
for a in response:
if a == control[0] and a[:3] == control[:3]:
num = response.count(a) + 1
else:
num = response.count(a)
if num > key_num:
key_num = num
key_val = a
return key_val
#! import substructure
#! print(substructure.search_algorithm('')) | en | 0.314001 | # 0 1 2 3 4 5 #! import substructure #! print(substructure.search_algorithm('')) | 3.6102 | 4 |
catch/filter/tests/test_adapter_filter.py | broadinstitute/catch | 58 | 6630565 | """Tests for adapter_filter module.
"""
import logging
import unittest
from catch.filter import adapter_filter as af
from catch.filter import candidate_probes as cp
from catch import genome
from catch import probe
from catch.utils import interval
__author__ = '<NAME> <<EMAIL>>'
class TestAdapterFilter(unittest.TestCase):
"""Tests the adapter filter output on contrived input.
"""
def setUp(self):
# Disable logging
logging.disable(logging.INFO)
# Specify default adapter sequences
self.ADAPTER_A_5END = 'ATACGCCATGCTGGGTCTCC'
self.ADAPTER_A_3END = 'CGTACTTGGGAGTCGGCCAT'
self.ADAPTER_B_5END = 'AGGCCCTGGCTGCTGATATG'
self.ADAPTER_B_3END = 'GACCTTTTGGGACAGCGGTG'
def get_filter_and_output(self, lcf_thres, mismatches, target_genomes,
input_probes, k, num_kmers_per_probe):
f = af.AdapterFilter((self.ADAPTER_A_5END, self.ADAPTER_A_3END),
(self.ADAPTER_B_5END, self.ADAPTER_B_3END),
mismatches=mismatches,
lcf_thres=lcf_thres,
kmer_probe_map_k=3)
output_probes = f.filter(input_probes, target_genomes)
return (f, output_probes)
def assert_has_adapter(self, probe, adapter):
"""Assert that probe has a particular adapter.
Args:
probe: probe to check
adapter: check if 'probe' has this adapter
        Raises:
            AssertionError: if 'probe' does not start and end with the
                given adapter's 5' and 3' sequences
"""
if adapter == 'A':
start = self.ADAPTER_A_5END
end = self.ADAPTER_A_3END
elif adapter == 'B':
start = self.ADAPTER_B_5END
end = self.ADAPTER_B_3END
else:
raise ValueError("Unknown adapter %s" % adapter)
        self.assertTrue(probe.seq_str.startswith(start))
        self.assertTrue(probe.seq_str.endswith(end))
def make_probes_with_adapters(self, probe_str_a, probe_str_b):
"""Make probes with both 'A' and 'B' adapters.
Args:
probe_str_a: list of strings of the sequences of probes that
should receive an 'A' adapter
probe_str_b: list of strings of the sequences of probes that
should receive a 'B' adapter
Returns:
list of probes (instances of probe.Probe) from the input
with the corresponding adapters added
"""
probes = []
for p_str in probe_str_a:
probes += [probe.Probe.from_str(p_str).with_prepended_str(
self.ADAPTER_A_5END).with_appended_str(self.ADAPTER_A_3END)]
for p_str in probe_str_b:
probes += [probe.Probe.from_str(p_str).with_prepended_str(
self.ADAPTER_B_5END).with_appended_str(self.ADAPTER_B_3END)]
return probes
def convert_target_genomes(self, target_genomes):
"""Convert genomes to instances of genome.Genome.
Args:
target_genomes: nested list of genomes, as strings, to be
converted
Returns:
nested list of genomes, with the same structure as the input,
in which each genome is an instance of genome.Genome instead
of a string
"""
r = []
for genomes_from_group in target_genomes:
rg = []
for g in genomes_from_group:
rg += [genome.Genome.from_one_seq(g)]
r += [rg]
return r
def test_one_genome(self):
target_genomes = [['ABCDEFGHIJKLMNOPQRSTUVWXYZ']]
target_genomes = self.convert_target_genomes(target_genomes)
# Create probes of length 6 bp with a stride of 3 bp
input = []
for genomes_from_group in target_genomes:
for g in genomes_from_group:
input += cp.make_candidate_probes_from_sequences(
g.seqs,
probe_length=6,
probe_stride=3)
f, output = self.get_filter_and_output(6, 0, target_genomes, input, 3,
10)
desired_output = self.make_probes_with_adapters(['ABCDEF', 'GHIJKL',
'MNOPQR', 'STUVWX'],
['DEFGHI', 'JKLMNO',
'PQRSTU', 'UVWXYZ'])
self.assertCountEqual(output, desired_output)
def test_two_genomes(self):
target_genomes = [['ABCDEFGHIJKLMNOPQRSTUVWXYZ'],
['ZYXWVUTSRQPONMLKJIHGFEDCBA']]
target_genomes = self.convert_target_genomes(target_genomes)
# Create probes of length 6 bp with a stride of 3 bp
input = []
for genomes_from_group in target_genomes:
for g in genomes_from_group:
input += cp.make_candidate_probes_from_sequences(
g.seqs,
probe_length=6,
probe_stride=3)
f, output = self.get_filter_and_output(6, 0, target_genomes, input, 3,
10)
desired_output = self.make_probes_with_adapters(
['ABCDEF', 'GHIJKL', 'MNOPQR', 'STUVWX', 'ZYXWVU', 'TSRQPO',
'NMLKJI', 'HGFEDC'], ['DEFGHI', 'JKLMNO', 'PQRSTU', 'UVWXYZ',
'WVUTSR', 'QPONML', 'KJIHGF', 'FEDCBA'])
self.assertCountEqual(output, desired_output)
def test_almost_identical_probe(self):
"""Test four probes that align like:
------ ------
------
------
where the bottom two are the same up to one mismatch. The top
two probes should be assigned adapter 'A' and the bottom two should
be assigned adapter 'B'.
"""
target_genomes = [['ABCDEFGHIJKLMNOP', 'ABCDEFGHXJKLMNOP']]
target_genomes = self.convert_target_genomes(target_genomes)
input = ['ABCDEF', 'FGHIJK', 'FGHXJK', 'KLMNOP']
input = [probe.Probe.from_str(s) for s in input]
for allowed_mismatches in [0, 1]:
f, output = self.get_filter_and_output(6, allowed_mismatches,
target_genomes, input, 3,
100)
desired_output = self.make_probes_with_adapters(['ABCDEF',
'KLMNOP'],
['FGHIJK',
'FGHXJK'])
self.assertCountEqual(output, desired_output)
# Check votes too
votes = f._make_votes_across_target_genomes(input, target_genomes)
if allowed_mismatches == 0:
# Each middle probe should align to one genome
self.assertEqual(votes, [(2, 0), (0, 1), (0, 1), (2, 0)])
if allowed_mismatches == 1:
# Both middle probes should align to both genomes
self.assertEqual(votes, [(2, 0), (0, 2), (0, 2), (2, 0)])
def test_misaligned(self):
"""Test probes that align to two genomes, but in which the ones
aligning to one genome are offset from the other.
"""
target_genomes = [['ABCDEFGHIJKLMNOPQR', 'XYZABCDEFGHIJKLMNOPQR']]
target_genomes = self.convert_target_genomes(target_genomes)
input = ['XYZABC', 'ABCDEF', 'DEFGHI', 'GHIJKL', 'JKLMNO', 'MNOPQR']
input = [probe.Probe.from_str(s) for s in input]
f, output = self.get_filter_and_output(6, 0, target_genomes, input, 3,
10)
# Assume 'ABCDEF' gets 'A' adapter and 'XYZABC' gets 'B' adapter,
# and so on. But flipping the 'A' and 'B' adapters would also
# be OK.
desired_output = self.make_probes_with_adapters(['ABCDEF', 'GHIJKL',
'MNOPQR'],
['XYZABC', 'DEFGHI',
'JKLMNO'])
self.assertCountEqual(output, desired_output)
# Check votes too
votes = f._make_votes_across_target_genomes(input, target_genomes)
self.assertEqual(votes, [(0, 1), (2, 0), (0, 2), (2, 0), (0, 2),
(2, 0)])
def test_three_genomes(self):
"""Test probes that align adjacent to each other in one genome,
but overlapping in two others. One should be assigned adapter 'A'
and the other should be assigned adapter 'B'.
"""
target_genomes = [['ABCDEFGHEFKLMN', 'ABCDEFKLMN', 'ABCDEFKLMNO']]
target_genomes = self.convert_target_genomes(target_genomes)
input = ['ABCDEF', 'EFKLMN']
input = [probe.Probe.from_str(s) for s in input]
f, output = self.get_filter_and_output(6, 0, target_genomes, input, 3,
10)
desired_output = self.make_probes_with_adapters(['ABCDEF'], ['EFKLMN'])
self.assertCountEqual(output, desired_output)
# Check votes too
votes = f._make_votes_across_target_genomes(input, target_genomes)
self.assertEqual(votes, [(3, 0), (1, 2)])
def test_with_mismatches(self):
target_genomes = [['ABCDEFGHIJKLMNO', 'ABCXEFGXIJKXMNO',
'ABCDEFGYYJKLMNO', 'ABCDEXGHIJKLXNO',
'ABCDEFGHIJKLMNX', 'AXCDEFGHIJKLMNO',
'ABCDEFGHIYYLMNO']]
target_genomes = self.convert_target_genomes(target_genomes)
input = ['ABCDEF', 'DEFGHI', 'GHIJKL', 'JKLMNO', 'DEFGYY', 'GYYJKL',
'IYYLMN']
input = [probe.Probe.from_str(s) for s in input]
f, output = self.get_filter_and_output(6, 1, target_genomes, input, 3,
10)
desired_output = self.make_probes_with_adapters(['ABCDEF', 'GHIJKL',
'GYYJKL', 'IYYLMN'],
['DEFGHI', 'JKLMNO',
'DEFGYY'])
self.assertCountEqual(output, desired_output)
def tearDown(self):
# Re-enable logging
logging.disable(logging.NOTSET)
| en | 0.880545 | Tests for adapter_filter module. Tests the adapter filter output on contrived input. # Disable logging # Specify default adapter sequences Assert that probe has a particular adapter. Args: probe: probe to check adapter: check if 'probe' has this adapter Returns: whether 'probe' starts and ends with either an 'A' or 'B' adapter, as specified by 'adapter' Make probes with both 'A' and 'B' adapters. Args: probe_str_a: list of strings of the sequences of probes that should receive an 'A' adapter probe_str_b: list of strings of the sequences of probes that should receive a 'B' adapter Returns: list of probes (instances of probe.Probe) from the input with the corresponding adapters added Convert genomes to instances of genome.Genome. Args: target_genomes: nested list of genomes, as strings, to be converted Returns: nested list of genomes, with the same structure as the input, in which each genome is an instance of genome.Genome instead of a string # Create probes of length 6 bp with a stride of 3 bp # Create probes of length 6 bp with a stride of 3 bp Test four probes that align like: ------ ------ ------ ------ where the bottom two are the same up to one mismatch. The top two probes should be assigned adapter 'A' and the bottom two should be assigned adapter 'B'. # Check votes too # Each middle probe should align to one genome # Both middle probes should align to both genomes Test probes that align to two genomes, but in which the ones aligning to one genome are offset from the other. # Assume 'ABCDEF' gets 'A' adapter and 'XYZABC' gets 'B' adapter, # and so on. But flipping the 'A' and 'B' adapters would also # be OK. # Check votes too Test probes that align adjacent to each other in one genome, but overlapping in two others. One should be assigned adapter 'A' and the other should be assigned adapter 'B'. # Check votes too # Re-enable logging | 2.907005 | 3 |
sdc/hiframes/split_impl.py | samir-nasibli/sdc | 0 | 6630566 | # *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import operator
import numpy
import pandas
import numba
import sdc
from numba import types
from numba.typing.templates import (infer_global, AbstractTemplate, infer,
signature, AttributeTemplate, infer_getattr, bound_function)
import numba.typing.typeof
from numba.datamodel import StructModel
from numba.errors import TypingError
from numba.extending import (typeof_impl, type_callable, models, register_model, NativeValue,
make_attribute_wrapper, lower_builtin, box, unbox,
lower_getattr, intrinsic, overload_method, overload, overload_attribute)
from numba import cgutils
from sdc.str_ext import string_type
from numba.targets.imputils import (impl_ret_new_ref, impl_ret_borrowed,
iternext_impl, RefType)
from sdc.str_arr_ext import (string_array_type, get_data_ptr,
is_str_arr_typ, pre_alloc_string_array, _memcpy)
from llvmlite import ir as lir
import llvmlite.binding as ll
from llvmlite.llvmpy.core import Type as LLType
from .. import hstr_ext
ll.add_symbol('array_setitem', hstr_ext.array_setitem)
ll.add_symbol('array_getptr1', hstr_ext.array_getptr1)
ll.add_symbol('dtor_str_arr_split_view', hstr_ext.dtor_str_arr_split_view)
ll.add_symbol('str_arr_split_view_impl', hstr_ext.str_arr_split_view_impl)
ll.add_symbol('str_arr_split_view_alloc', hstr_ext.str_arr_split_view_alloc)
char_typ = types.uint8
offset_typ = types.uint32
data_ctypes_type = types.ArrayCTypes(types.Array(char_typ, 1, 'C'))
offset_ctypes_type = types.ArrayCTypes(types.Array(offset_typ, 1, 'C'))
# nested offset structure to represent S.str.split()
# data_offsets array includes offsets to character data array
# index_offsets array includes offsets to data_offsets array to identify lists
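# Illustrative sketch (added; the exact sentinel/one-past conventions live in the C
# implementation in hstr_ext): for input strings ["a b", "cc"] split on ' ', the shared
# character buffer is the original string array's data, data_offsets delimits the tokens
# "a", "b" and "cc" inside it, and index_offsets records, per input string, which slice
# of data_offsets holds that string's token list.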
class StringArraySplitViewType(types.IterableType):
def __init__(self):
super(StringArraySplitViewType, self).__init__(
name='StringArraySplitViewType()')
@property
def dtype(self):
# TODO: optimized list type
return types.List(string_type)
# TODO
@property
def iterator_type(self):
return # StringArrayIterator()
def copy(self):
return StringArraySplitViewType()
string_array_split_view_type = StringArraySplitViewType()
class StringArraySplitViewPayloadType(types.Type):
def __init__(self):
super(StringArraySplitViewPayloadType, self).__init__(
name='StringArraySplitViewPayloadType()')
str_arr_split_view_payload_type = StringArraySplitViewPayloadType()
# XXX: C equivalent in _str_ext.cpp
@register_model(StringArraySplitViewPayloadType)
class StringArrayPayloadModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('index_offsets', types.CPointer(offset_typ)),
('data_offsets', types.CPointer(offset_typ)),
#('null_bitmap', types.CPointer(char_typ)),
]
models.StructModel.__init__(self, dmm, fe_type, members)
str_arr_model_members = [
('num_items', types.uint64), # number of lists
# ('num_total_strs', types.uint64), # number of strings total
#('num_total_chars', types.uint64),
('index_offsets', types.CPointer(offset_typ)),
('data_offsets', types.CPointer(offset_typ)),
('data', data_ctypes_type),
#('null_bitmap', types.CPointer(char_typ)),
('meminfo', types.MemInfoPointer(str_arr_split_view_payload_type)),
]
@register_model(StringArraySplitViewType)
class StringArrayModel(models.StructModel):
def __init__(self, dmm, fe_type):
models.StructModel.__init__(self, dmm, fe_type, str_arr_model_members)
make_attribute_wrapper(StringArraySplitViewType, 'num_items', '_num_items')
make_attribute_wrapper(StringArraySplitViewType, 'index_offsets', '_index_offsets')
make_attribute_wrapper(StringArraySplitViewType, 'data_offsets', '_data_offsets')
make_attribute_wrapper(StringArraySplitViewType, 'data', '_data')
class SplitViewStringMethodsType(types.IterableType):
"""
Type definition for pandas.core.strings.StringMethods functions handling.
Members
----------
_data: :class:`SeriesType`
input arg
"""
def __init__(self, data):
self.data = data
name = 'SplitViewStringMethodsType({})'.format(self.data)
super(SplitViewStringMethodsType, self).__init__(name)
@property
def iterator_type(self):
return None
@register_model(SplitViewStringMethodsType)
class SplitViewStringMethodsTypeModel(StructModel):
"""
Model for SplitViewStringMethodsType type
All members must be the same as main type for this model
"""
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.data)
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(SplitViewStringMethodsType, 'data', '_data')
def construct_str_arr_split_view(context, builder):
"""Creates meminfo and sets dtor.
"""
alloc_type = context.get_data_type(str_arr_split_view_payload_type)
alloc_size = context.get_abi_sizeof(alloc_type)
llvoidptr = context.get_value_type(types.voidptr)
llsize = context.get_value_type(types.uintp)
dtor_ftype = lir.FunctionType(lir.VoidType(),
[llvoidptr, llsize, llvoidptr])
dtor_fn = builder.module.get_or_insert_function(
dtor_ftype, name="dtor_str_arr_split_view")
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
dtor_fn,
)
meminfo_data_ptr = context.nrt.meminfo_data(builder, meminfo)
meminfo_data_ptr = builder.bitcast(meminfo_data_ptr,
alloc_type.as_pointer())
# Nullify all data
# builder.store( cgutils.get_null_value(alloc_type),
# meminfo_data_ptr)
return meminfo, meminfo_data_ptr
@intrinsic
def compute_split_view(typingctx, str_arr_typ, sep_typ=None):
assert str_arr_typ == string_array_type and isinstance(sep_typ, types.StringLiteral)
def codegen(context, builder, sig, args):
str_arr, _ = args
meminfo, meminfo_data_ptr = construct_str_arr_split_view(
context, builder)
in_str_arr = context.make_helper(
builder, string_array_type, str_arr)
# (str_arr_split_view_payload* out_view, int64_t n_strs,
# uint32_t* offsets, char* data, char sep)
fnty = lir.FunctionType(lir.VoidType(),
[meminfo_data_ptr.type,
lir.IntType(64),
lir.IntType(32).as_pointer(),
lir.IntType(8).as_pointer(),
lir.IntType(8)])
fn_impl = builder.module.get_or_insert_function(
fnty, name="str_arr_split_view_impl")
sep_val = context.get_constant(types.int8, ord(sep_typ.literal_value))
builder.call(fn_impl,
[meminfo_data_ptr, in_str_arr.num_items,
in_str_arr.offsets, in_str_arr.data, sep_val])
view_payload = cgutils.create_struct_proxy(
str_arr_split_view_payload_type)(
context, builder, value=builder.load(meminfo_data_ptr))
out_view = context.make_helper(builder, string_array_split_view_type)
out_view.num_items = in_str_arr.num_items
out_view.index_offsets = view_payload.index_offsets
out_view.data_offsets = view_payload.data_offsets
# TODO: incref?
out_view.data = context.compile_internal(
builder, lambda S: get_data_ptr(S),
data_ctypes_type(string_array_type), [str_arr])
# out_view.null_bitmap = view_payload.null_bitmap
out_view.meminfo = meminfo
ret = out_view._getvalue()
#context.nrt.decref(builder, ty, ret)
return impl_ret_new_ref(
context, builder, string_array_split_view_type, ret)
return string_array_split_view_type(
string_array_type, sep_typ), codegen
@box(StringArraySplitViewType)
def box_str_arr_split_view(typ, val, c):
context = c.context
builder = c.builder
sp_view = context.make_helper(builder, string_array_split_view_type, val)
# create array of objects with num_items shape
mod_name = c.context.insert_const_string(c.builder.module, "numpy")
np_class_obj = c.pyapi.import_module_noblock(mod_name)
dtype = c.pyapi.object_getattr_string(np_class_obj, 'object_')
l_num_items = builder.sext(sp_view.num_items, c.pyapi.longlong)
num_items_obj = c.pyapi.long_from_longlong(l_num_items)
out_arr = c.pyapi.call_method(
np_class_obj, "ndarray", (num_items_obj, dtype))
# Array setitem call
arr_get_fnty = LLType.function(
lir.IntType(8).as_pointer(), [c.pyapi.pyobj, c.pyapi.py_ssize_t])
arr_get_fn = c.pyapi._get_function(arr_get_fnty, name="array_getptr1")
arr_setitem_fnty = LLType.function(
lir.VoidType(),
[c.pyapi.pyobj, lir.IntType(8).as_pointer(), c.pyapi.pyobj])
arr_setitem_fn = c.pyapi._get_function(
arr_setitem_fnty, name="array_setitem")
# for each string
with cgutils.for_range(builder, sp_view.num_items) as loop:
str_ind = loop.index
# start and end offset of string's list in index_offsets
# sp_view.index_offsets[str_ind]
list_start_offset = builder.sext(builder.load(builder.gep(sp_view.index_offsets, [str_ind])), lir.IntType(64))
# sp_view.index_offsets[str_ind+1]
list_end_offset = builder.sext(
builder.load(
builder.gep(
sp_view.index_offsets, [
builder.add(
str_ind, str_ind.type(1))])), lir.IntType(64))
# cgutils.printf(builder, "%d %d\n", list_start, list_end)
# Build a new Python list
nitems = builder.sub(list_end_offset, list_start_offset)
nitems = builder.sub(nitems, nitems.type(1))
# cgutils.printf(builder, "str %lld n %lld\n", str_ind, nitems)
list_obj = c.pyapi.list_new(nitems)
with c.builder.if_then(cgutils.is_not_null(c.builder, list_obj),
likely=True):
with cgutils.for_range(c.builder, nitems) as loop:
# data_offsets of current list
start_index = builder.add(list_start_offset, loop.index)
data_start = builder.load(builder.gep(sp_view.data_offsets, [start_index]))
# add 1 since starts from -1
data_start = builder.add(data_start, data_start.type(1))
data_end = builder.load(
builder.gep(
sp_view.data_offsets, [
builder.add(
start_index, start_index.type(1))]))
# cgutils.printf(builder, "ind %lld %lld\n", data_start, data_end)
data_ptr = builder.gep(builder.extract_value(sp_view.data, 0), [data_start])
str_size = builder.sext(builder.sub(data_end, data_start), lir.IntType(64))
str_obj = c.pyapi.string_from_string_and_size(data_ptr, str_size)
c.pyapi.list_setitem(list_obj, loop.index, str_obj)
arr_ptr = builder.call(arr_get_fn, [out_arr, str_ind])
builder.call(arr_setitem_fn, [out_arr, arr_ptr, list_obj])
c.pyapi.decref(np_class_obj)
return out_arr
@intrinsic
def pre_alloc_str_arr_view(typingctx, num_items_t, num_offsets_t, data_t=None):
assert num_items_t == types.intp and num_offsets_t == types.intp
def codegen(context, builder, sig, args):
num_items, num_offsets, data_ptr = args
meminfo, meminfo_data_ptr = construct_str_arr_split_view(
context, builder)
# (str_arr_split_view_payload* out_view, int64_t num_items,
# int64_t num_offsets)
fnty = lir.FunctionType(
lir.VoidType(),
[meminfo_data_ptr.type, lir.IntType(64), lir.IntType(64)])
fn_impl = builder.module.get_or_insert_function(
fnty, name="str_arr_split_view_alloc")
builder.call(fn_impl,
[meminfo_data_ptr, num_items, num_offsets])
view_payload = cgutils.create_struct_proxy(
str_arr_split_view_payload_type)(
context, builder, value=builder.load(meminfo_data_ptr))
out_view = context.make_helper(builder, string_array_split_view_type)
out_view.num_items = num_items
out_view.index_offsets = view_payload.index_offsets
out_view.data_offsets = view_payload.data_offsets
# TODO: incref?
out_view.data = data_ptr
if context.enable_nrt:
context.nrt.incref(builder, data_t, data_ptr)
# out_view.null_bitmap = view_payload.null_bitmap
out_view.meminfo = meminfo
ret = out_view._getvalue()
return impl_ret_new_ref(
context, builder, string_array_split_view_type, ret)
return string_array_split_view_type(
types.intp, types.intp, data_t), codegen
@intrinsic
def get_c_arr_ptr(typingctx, c_arr, ind_t=None):
assert isinstance(c_arr, (types.CPointer, types.ArrayCTypes))
def codegen(context, builder, sig, args):
in_arr, ind = args
if isinstance(sig.args[0], types.ArrayCTypes):
in_arr = builder.extract_value(in_arr, 0)
return builder.bitcast(
builder.gep(in_arr, [ind]), lir.IntType(8).as_pointer())
return types.voidptr(c_arr, ind_t), codegen
@intrinsic
def getitem_c_arr(typingctx, c_arr, ind_t=None):
def codegen(context, builder, sig, args):
in_arr, ind = args
return builder.load(builder.gep(in_arr, [ind]))
return c_arr.dtype(c_arr, ind_t), codegen
@intrinsic
def setitem_c_arr(typingctx, c_arr, ind_t, item_t=None):
def codegen(context, builder, sig, args):
in_arr, ind, item = args
ptr = builder.gep(in_arr, [ind])
builder.store(item, ptr)
return types.void(c_arr, ind_t, c_arr.dtype), codegen
@intrinsic
def get_array_ctypes_ptr(typingctx, arr_ctypes_t, ind_t=None):
def codegen(context, builder, sig, args):
in_arr_ctypes, ind = args
arr_ctypes = context.make_helper(
builder, arr_ctypes_t, in_arr_ctypes)
out = context.make_helper(builder, arr_ctypes_t)
out.data = builder.gep(arr_ctypes.data, [ind])
out.meminfo = arr_ctypes.meminfo
res = out._getvalue()
return impl_ret_borrowed(context, builder, arr_ctypes_t, res)
return arr_ctypes_t(arr_ctypes_t, ind_t), codegen
@numba.njit(no_cpython_wrapper=True)
def get_split_view_index(arr, item_ind, str_ind):
start_index = getitem_c_arr(arr._index_offsets, item_ind)
# TODO: check num strings and support NAN
# end_index = getitem_c_arr(arr._index_offsets, item_ind+1)
data_start = getitem_c_arr(
arr._data_offsets, start_index + str_ind)
data_start += 1
# get around -1 storage in uint32 problem
if start_index + str_ind == 0:
data_start = 0
data_end = getitem_c_arr(
arr._data_offsets, start_index + str_ind + 1)
return data_start, (data_end - data_start)
@numba.njit(no_cpython_wrapper=True)
def get_split_view_data_ptr(arr, data_start):
return get_array_ctypes_ptr(arr._data, data_start)
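# Illustrative use of the two helpers above (added): callers first locate a token and
# then materialize it from the shared character buffer, e.g.
#   start, length = get_split_view_index(arr, item_ind, str_ind)
#   ptr = get_split_view_data_ptr(arr, start)
# which is essentially the computation the getitem overload below performs inline.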
@overload(len)
def str_arr_split_view_len_overload(arr):
if arr == string_array_split_view_type:
return lambda arr: arr._num_items
@overload_method(SplitViewStringMethodsType, 'len')
def hpat_pandas_spliview_stringmethods_len(self):
"""
Pandas Series method :meth:`pandas.core.strings.StringMethods.len()` implementation.
Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split_filter
Parameters
----------
self: :class:`pandas.core.strings.StringMethods`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
if not isinstance(self, SplitViewStringMethodsType):
msg = 'Method len(). The object must be a pandas.core.strings. Given: {}'
raise TypingError(msg.format(self))
def hpat_pandas_spliview_stringmethods_len_impl(self):
item_count = len(self._data)
result = numpy.empty(item_count, numba.types.int64)
local_data = self._data._data
for i in range(len(local_data)):
result[i] = len(local_data[i])
return pandas.Series(result, self._data._index, name=self._data._name)
return hpat_pandas_spliview_stringmethods_len_impl
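# Illustrative usage (added; assumes the split-view fast path inside a jitted function):
#   pandas.Series(['a b', 'c d e']).str.split(' ').str.len()
# is expected to produce the per-row token counts [2, 3].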
# @infer_global(operator.getitem)
class GetItemStringArraySplitView(AbstractTemplate):
key = operator.getitem
def generic(self, args, kws):
assert not kws
[ary, idx] = args
if ary == string_array_split_view_type:
if isinstance(idx, types.SliceType):
return signature(string_array_split_view_type, *args)
elif isinstance(idx, types.Integer):
return signature(types.List(string_type), *args)
elif idx == types.Array(types.bool_, 1, 'C'):
return signature(string_array_split_view_type, *args)
elif idx == types.Array(types.intp, 1, 'C'):
return signature(string_array_split_view_type, *args)
@overload(operator.getitem)
def str_arr_split_view_getitem_overload(A, ind):
if A == string_array_split_view_type and isinstance(ind, types.Integer):
kind = numba.unicode.PY_UNICODE_1BYTE_KIND
def _impl(A, ind):
start_index = getitem_c_arr(A._index_offsets, ind)
end_index = getitem_c_arr(A._index_offsets, ind + 1)
n = end_index - start_index - 1
str_list = sdc.str_ext.alloc_str_list(n)
for i in range(n):
data_start = getitem_c_arr(
A._data_offsets, start_index + i)
data_start += 1
# get around -1 storage in uint32 problem
if start_index + i == 0:
data_start = 0
data_end = getitem_c_arr(
A._data_offsets, start_index + i + 1)
length = data_end - data_start
_str = numba.unicode._empty_string(kind, length)
ptr = get_array_ctypes_ptr(A._data, data_start)
_memcpy(_str._data, ptr, length, 1)
str_list[i] = _str
return str_list
return _impl
if A == string_array_split_view_type and ind == types.Array(types.bool_, 1, 'C'):
def _impl(A, ind):
n = len(A)
if n != len(ind):
raise IndexError("boolean index did not match indexed array"
" along dimension 0")
num_items = 0
num_offsets = 0
for i in range(n):
if ind[i]:
num_items += 1
start_index = getitem_c_arr(A._index_offsets, i)
end_index = getitem_c_arr(A._index_offsets, i + 1)
num_offsets += end_index - start_index
out_arr = pre_alloc_str_arr_view(num_items, num_offsets, A._data)
item_ind = 0
offset_ind = 0
for i in range(n):
if ind[i]:
start_index = getitem_c_arr(A._index_offsets, i)
end_index = getitem_c_arr(A._index_offsets, i + 1)
n_offsets = end_index - start_index
setitem_c_arr(out_arr._index_offsets, item_ind, offset_ind)
ptr = get_c_arr_ptr(A._data_offsets, start_index)
out_ptr = get_c_arr_ptr(out_arr._data_offsets, offset_ind)
_memcpy(out_ptr, ptr, n_offsets, 4)
item_ind += 1
offset_ind += n_offsets
# last item
setitem_c_arr(out_arr._index_offsets, item_ind, offset_ind)
return out_arr
return _impl
| # *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import operator
import numpy
import pandas
import numba
import sdc
from numba import types
from numba.typing.templates import (infer_global, AbstractTemplate, infer,
signature, AttributeTemplate, infer_getattr, bound_function)
import numba.typing.typeof
from numba.datamodel import StructModel
from numba.errors import TypingError
from numba.extending import (typeof_impl, type_callable, models, register_model, NativeValue,
make_attribute_wrapper, lower_builtin, box, unbox,
lower_getattr, intrinsic, overload_method, overload, overload_attribute)
from numba import cgutils
from sdc.str_ext import string_type
from numba.targets.imputils import (impl_ret_new_ref, impl_ret_borrowed,
iternext_impl, RefType)
from sdc.str_arr_ext import (string_array_type, get_data_ptr,
is_str_arr_typ, pre_alloc_string_array, _memcpy)
from llvmlite import ir as lir
import llvmlite.binding as ll
from llvmlite.llvmpy.core import Type as LLType
from .. import hstr_ext
ll.add_symbol('array_setitem', hstr_ext.array_setitem)
ll.add_symbol('array_getptr1', hstr_ext.array_getptr1)
ll.add_symbol('dtor_str_arr_split_view', hstr_ext.dtor_str_arr_split_view)
ll.add_symbol('str_arr_split_view_impl', hstr_ext.str_arr_split_view_impl)
ll.add_symbol('str_arr_split_view_alloc', hstr_ext.str_arr_split_view_alloc)
char_typ = types.uint8
offset_typ = types.uint32
data_ctypes_type = types.ArrayCTypes(types.Array(char_typ, 1, 'C'))
offset_ctypes_type = types.ArrayCTypes(types.Array(offset_typ, 1, 'C'))
# nested offset structure to represent S.str.split()
# data_offsets array includes offsets to character data array
# index_offsets array includes offsets to data_offsets array to identify lists
class StringArraySplitViewType(types.IterableType):
def __init__(self):
super(StringArraySplitViewType, self).__init__(
name='StringArraySplitViewType()')
@property
def dtype(self):
# TODO: optimized list type
return types.List(string_type)
# TODO
@property
def iterator_type(self):
return # StringArrayIterator()
def copy(self):
return StringArraySplitViewType()
string_array_split_view_type = StringArraySplitViewType()
class StringArraySplitViewPayloadType(types.Type):
def __init__(self):
super(StringArraySplitViewPayloadType, self).__init__(
name='StringArraySplitViewPayloadType()')
str_arr_split_view_payload_type = StringArraySplitViewPayloadType()
# XXX: C equivalent in _str_ext.cpp
@register_model(StringArraySplitViewPayloadType)
class StringArrayPayloadModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('index_offsets', types.CPointer(offset_typ)),
('data_offsets', types.CPointer(offset_typ)),
#('null_bitmap', types.CPointer(char_typ)),
]
models.StructModel.__init__(self, dmm, fe_type, members)
str_arr_model_members = [
('num_items', types.uint64), # number of lists
# ('num_total_strs', types.uint64), # number of strings total
#('num_total_chars', types.uint64),
('index_offsets', types.CPointer(offset_typ)),
('data_offsets', types.CPointer(offset_typ)),
('data', data_ctypes_type),
#('null_bitmap', types.CPointer(char_typ)),
('meminfo', types.MemInfoPointer(str_arr_split_view_payload_type)),
]
@register_model(StringArraySplitViewType)
class StringArrayModel(models.StructModel):
def __init__(self, dmm, fe_type):
models.StructModel.__init__(self, dmm, fe_type, str_arr_model_members)
make_attribute_wrapper(StringArraySplitViewType, 'num_items', '_num_items')
make_attribute_wrapper(StringArraySplitViewType, 'index_offsets', '_index_offsets')
make_attribute_wrapper(StringArraySplitViewType, 'data_offsets', '_data_offsets')
make_attribute_wrapper(StringArraySplitViewType, 'data', '_data')
class SplitViewStringMethodsType(types.IterableType):
"""
Type definition for pandas.core.strings.StringMethods functions handling.
Members
----------
_data: :class:`SeriesType`
input arg
"""
def __init__(self, data):
self.data = data
name = 'SplitViewStringMethodsType({})'.format(self.data)
super(SplitViewStringMethodsType, self).__init__(name)
@property
def iterator_type(self):
return None
@register_model(SplitViewStringMethodsType)
class SplitViewStringMethodsTypeModel(StructModel):
"""
Model for SplitViewStringMethodsType type
All members must be the same as main type for this model
"""
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.data)
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(SplitViewStringMethodsType, 'data', '_data')
def construct_str_arr_split_view(context, builder):
"""Creates meminfo and sets dtor.
"""
alloc_type = context.get_data_type(str_arr_split_view_payload_type)
alloc_size = context.get_abi_sizeof(alloc_type)
llvoidptr = context.get_value_type(types.voidptr)
llsize = context.get_value_type(types.uintp)
dtor_ftype = lir.FunctionType(lir.VoidType(),
[llvoidptr, llsize, llvoidptr])
dtor_fn = builder.module.get_or_insert_function(
dtor_ftype, name="dtor_str_arr_split_view")
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
dtor_fn,
)
meminfo_data_ptr = context.nrt.meminfo_data(builder, meminfo)
meminfo_data_ptr = builder.bitcast(meminfo_data_ptr,
alloc_type.as_pointer())
# Nullify all data
# builder.store( cgutils.get_null_value(alloc_type),
# meminfo_data_ptr)
return meminfo, meminfo_data_ptr
@intrinsic
def compute_split_view(typingctx, str_arr_typ, sep_typ=None):
assert str_arr_typ == string_array_type and isinstance(sep_typ, types.StringLiteral)
def codegen(context, builder, sig, args):
str_arr, _ = args
meminfo, meminfo_data_ptr = construct_str_arr_split_view(
context, builder)
in_str_arr = context.make_helper(
builder, string_array_type, str_arr)
# (str_arr_split_view_payload* out_view, int64_t n_strs,
# uint32_t* offsets, char* data, char sep)
fnty = lir.FunctionType(lir.VoidType(),
[meminfo_data_ptr.type,
lir.IntType(64),
lir.IntType(32).as_pointer(),
lir.IntType(8).as_pointer(),
lir.IntType(8)])
fn_impl = builder.module.get_or_insert_function(
fnty, name="str_arr_split_view_impl")
sep_val = context.get_constant(types.int8, ord(sep_typ.literal_value))
builder.call(fn_impl,
[meminfo_data_ptr, in_str_arr.num_items,
in_str_arr.offsets, in_str_arr.data, sep_val])
view_payload = cgutils.create_struct_proxy(
str_arr_split_view_payload_type)(
context, builder, value=builder.load(meminfo_data_ptr))
out_view = context.make_helper(builder, string_array_split_view_type)
out_view.num_items = in_str_arr.num_items
out_view.index_offsets = view_payload.index_offsets
out_view.data_offsets = view_payload.data_offsets
# TODO: incref?
out_view.data = context.compile_internal(
builder, lambda S: get_data_ptr(S),
data_ctypes_type(string_array_type), [str_arr])
# out_view.null_bitmap = view_payload.null_bitmap
out_view.meminfo = meminfo
ret = out_view._getvalue()
#context.nrt.decref(builder, ty, ret)
return impl_ret_new_ref(
context, builder, string_array_split_view_type, ret)
return string_array_split_view_type(
string_array_type, sep_typ), codegen
@box(StringArraySplitViewType)
def box_str_arr_split_view(typ, val, c):
context = c.context
builder = c.builder
sp_view = context.make_helper(builder, string_array_split_view_type, val)
# create array of objects with num_items shape
mod_name = c.context.insert_const_string(c.builder.module, "numpy")
np_class_obj = c.pyapi.import_module_noblock(mod_name)
dtype = c.pyapi.object_getattr_string(np_class_obj, 'object_')
l_num_items = builder.sext(sp_view.num_items, c.pyapi.longlong)
num_items_obj = c.pyapi.long_from_longlong(l_num_items)
out_arr = c.pyapi.call_method(
np_class_obj, "ndarray", (num_items_obj, dtype))
# Array setitem call
arr_get_fnty = LLType.function(
lir.IntType(8).as_pointer(), [c.pyapi.pyobj, c.pyapi.py_ssize_t])
arr_get_fn = c.pyapi._get_function(arr_get_fnty, name="array_getptr1")
arr_setitem_fnty = LLType.function(
lir.VoidType(),
[c.pyapi.pyobj, lir.IntType(8).as_pointer(), c.pyapi.pyobj])
arr_setitem_fn = c.pyapi._get_function(
arr_setitem_fnty, name="array_setitem")
# for each string
with cgutils.for_range(builder, sp_view.num_items) as loop:
str_ind = loop.index
# start and end offset of string's list in index_offsets
# sp_view.index_offsets[str_ind]
list_start_offset = builder.sext(builder.load(builder.gep(sp_view.index_offsets, [str_ind])), lir.IntType(64))
# sp_view.index_offsets[str_ind+1]
list_end_offset = builder.sext(
builder.load(
builder.gep(
sp_view.index_offsets, [
builder.add(
str_ind, str_ind.type(1))])), lir.IntType(64))
# cgutils.printf(builder, "%d %d\n", list_start, list_end)
# Build a new Python list
nitems = builder.sub(list_end_offset, list_start_offset)
nitems = builder.sub(nitems, nitems.type(1))
# cgutils.printf(builder, "str %lld n %lld\n", str_ind, nitems)
list_obj = c.pyapi.list_new(nitems)
with c.builder.if_then(cgutils.is_not_null(c.builder, list_obj),
likely=True):
with cgutils.for_range(c.builder, nitems) as loop:
# data_offsets of current list
start_index = builder.add(list_start_offset, loop.index)
data_start = builder.load(builder.gep(sp_view.data_offsets, [start_index]))
# add 1 since starts from -1
data_start = builder.add(data_start, data_start.type(1))
data_end = builder.load(
builder.gep(
sp_view.data_offsets, [
builder.add(
start_index, start_index.type(1))]))
# cgutils.printf(builder, "ind %lld %lld\n", data_start, data_end)
data_ptr = builder.gep(builder.extract_value(sp_view.data, 0), [data_start])
str_size = builder.sext(builder.sub(data_end, data_start), lir.IntType(64))
str_obj = c.pyapi.string_from_string_and_size(data_ptr, str_size)
c.pyapi.list_setitem(list_obj, loop.index, str_obj)
arr_ptr = builder.call(arr_get_fn, [out_arr, str_ind])
builder.call(arr_setitem_fn, [out_arr, arr_ptr, list_obj])
c.pyapi.decref(np_class_obj)
return out_arr
@intrinsic
def pre_alloc_str_arr_view(typingctx, num_items_t, num_offsets_t, data_t=None):
assert num_items_t == types.intp and num_offsets_t == types.intp
def codegen(context, builder, sig, args):
num_items, num_offsets, data_ptr = args
meminfo, meminfo_data_ptr = construct_str_arr_split_view(
context, builder)
# (str_arr_split_view_payload* out_view, int64_t num_items,
# int64_t num_offsets)
fnty = lir.FunctionType(
lir.VoidType(),
[meminfo_data_ptr.type, lir.IntType(64), lir.IntType(64)])
fn_impl = builder.module.get_or_insert_function(
fnty, name="str_arr_split_view_alloc")
builder.call(fn_impl,
[meminfo_data_ptr, num_items, num_offsets])
view_payload = cgutils.create_struct_proxy(
str_arr_split_view_payload_type)(
context, builder, value=builder.load(meminfo_data_ptr))
out_view = context.make_helper(builder, string_array_split_view_type)
out_view.num_items = num_items
out_view.index_offsets = view_payload.index_offsets
out_view.data_offsets = view_payload.data_offsets
# TODO: incref?
out_view.data = data_ptr
if context.enable_nrt:
context.nrt.incref(builder, data_t, data_ptr)
# out_view.null_bitmap = view_payload.null_bitmap
out_view.meminfo = meminfo
ret = out_view._getvalue()
return impl_ret_new_ref(
context, builder, string_array_split_view_type, ret)
return string_array_split_view_type(
types.intp, types.intp, data_t), codegen
@intrinsic
def get_c_arr_ptr(typingctx, c_arr, ind_t=None):
assert isinstance(c_arr, (types.CPointer, types.ArrayCTypes))
def codegen(context, builder, sig, args):
in_arr, ind = args
if isinstance(sig.args[0], types.ArrayCTypes):
in_arr = builder.extract_value(in_arr, 0)
return builder.bitcast(
builder.gep(in_arr, [ind]), lir.IntType(8).as_pointer())
return types.voidptr(c_arr, ind_t), codegen
@intrinsic
def getitem_c_arr(typingctx, c_arr, ind_t=None):
def codegen(context, builder, sig, args):
in_arr, ind = args
return builder.load(builder.gep(in_arr, [ind]))
return c_arr.dtype(c_arr, ind_t), codegen
@intrinsic
def setitem_c_arr(typingctx, c_arr, ind_t, item_t=None):
def codegen(context, builder, sig, args):
in_arr, ind, item = args
ptr = builder.gep(in_arr, [ind])
builder.store(item, ptr)
return types.void(c_arr, ind_t, c_arr.dtype), codegen
@intrinsic
def get_array_ctypes_ptr(typingctx, arr_ctypes_t, ind_t=None):
def codegen(context, builder, sig, args):
in_arr_ctypes, ind = args
arr_ctypes = context.make_helper(
builder, arr_ctypes_t, in_arr_ctypes)
out = context.make_helper(builder, arr_ctypes_t)
out.data = builder.gep(arr_ctypes.data, [ind])
out.meminfo = arr_ctypes.meminfo
res = out._getvalue()
return impl_ret_borrowed(context, builder, arr_ctypes_t, res)
return arr_ctypes_t(arr_ctypes_t, ind_t), codegen
@numba.njit(no_cpython_wrapper=True)
def get_split_view_index(arr, item_ind, str_ind):
start_index = getitem_c_arr(arr._index_offsets, item_ind)
# TODO: check num strings and support NAN
# end_index = getitem_c_arr(arr._index_offsets, item_ind+1)
data_start = getitem_c_arr(
arr._data_offsets, start_index + str_ind)
data_start += 1
# get around -1 storage in uint32 problem
if start_index + str_ind == 0:
data_start = 0
data_end = getitem_c_arr(
arr._data_offsets, start_index + str_ind + 1)
return data_start, (data_end - data_start)
@numba.njit(no_cpython_wrapper=True)
def get_split_view_data_ptr(arr, data_start):
return get_array_ctypes_ptr(arr._data, data_start)
@overload(len)
def str_arr_split_view_len_overload(arr):
if arr == string_array_split_view_type:
return lambda arr: arr._num_items
@overload_method(SplitViewStringMethodsType, 'len')
def hpat_pandas_spliview_stringmethods_len(self):
"""
Pandas Series method :meth:`pandas.core.strings.StringMethods.len()` implementation.
    Note: Only Unicode list elements are supported. numpy.NaN elements are not supported.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split_filter
Parameters
----------
self: :class:`pandas.core.strings.StringMethods`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
if not isinstance(self, SplitViewStringMethodsType):
        msg = 'Method len(). The object must be a pandas.core.strings.StringMethods. Given: {}'
raise TypingError(msg.format(self))
def hpat_pandas_spliview_stringmethods_len_impl(self):
item_count = len(self._data)
result = numpy.empty(item_count, numba.types.int64)
local_data = self._data._data
for i in range(len(local_data)):
result[i] = len(local_data[i])
return pandas.Series(result, self._data._index, name=self._data._name)
return hpat_pandas_spliview_stringmethods_len_impl
# @infer_global(operator.getitem)
class GetItemStringArraySplitView(AbstractTemplate):
key = operator.getitem
def generic(self, args, kws):
assert not kws
[ary, idx] = args
if ary == string_array_split_view_type:
if isinstance(idx, types.SliceType):
return signature(string_array_split_view_type, *args)
elif isinstance(idx, types.Integer):
return signature(types.List(string_type), *args)
elif idx == types.Array(types.bool_, 1, 'C'):
return signature(string_array_split_view_type, *args)
elif idx == types.Array(types.intp, 1, 'C'):
return signature(string_array_split_view_type, *args)
@overload(operator.getitem)
def str_arr_split_view_getitem_overload(A, ind):
if A == string_array_split_view_type and isinstance(ind, types.Integer):
kind = numba.unicode.PY_UNICODE_1BYTE_KIND
def _impl(A, ind):
start_index = getitem_c_arr(A._index_offsets, ind)
end_index = getitem_c_arr(A._index_offsets, ind + 1)
n = end_index - start_index - 1
str_list = sdc.str_ext.alloc_str_list(n)
for i in range(n):
data_start = getitem_c_arr(
A._data_offsets, start_index + i)
data_start += 1
# get around -1 storage in uint32 problem
if start_index + i == 0:
data_start = 0
data_end = getitem_c_arr(
A._data_offsets, start_index + i + 1)
length = data_end - data_start
_str = numba.unicode._empty_string(kind, length)
ptr = get_array_ctypes_ptr(A._data, data_start)
_memcpy(_str._data, ptr, length, 1)
str_list[i] = _str
return str_list
return _impl
if A == string_array_split_view_type and ind == types.Array(types.bool_, 1, 'C'):
def _impl(A, ind):
n = len(A)
if n != len(ind):
raise IndexError("boolean index did not match indexed array"
" along dimension 0")
num_items = 0
num_offsets = 0
for i in range(n):
if ind[i]:
num_items += 1
start_index = getitem_c_arr(A._index_offsets, i)
end_index = getitem_c_arr(A._index_offsets, i + 1)
num_offsets += end_index - start_index
out_arr = pre_alloc_str_arr_view(num_items, num_offsets, A._data)
item_ind = 0
offset_ind = 0
for i in range(n):
if ind[i]:
start_index = getitem_c_arr(A._index_offsets, i)
end_index = getitem_c_arr(A._index_offsets, i + 1)
n_offsets = end_index - start_index
setitem_c_arr(out_arr._index_offsets, item_ind, offset_ind)
ptr = get_c_arr_ptr(A._data_offsets, start_index)
out_ptr = get_c_arr_ptr(out_arr._data_offsets, offset_ind)
_memcpy(out_ptr, ptr, n_offsets, 4)
item_ind += 1
offset_ind += n_offsets
# last item
setitem_c_arr(out_arr._index_offsets, item_ind, offset_ind)
return out_arr
return _impl
| en | 0.519849 | # ***************************************************************************** # Copyright (c) 2019, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** # nested offset structure to represent S.str.split() # data_offsets array includes offsets to character data array # index_offsets array includes offsets to data_offsets array to identify lists # TODO: optimized list type # TODO # StringArrayIterator() # XXX: C equivalent in _str_ext.cpp #('null_bitmap', types.CPointer(char_typ)), # number of lists # ('num_total_strs', types.uint64), # number of strings total #('num_total_chars', types.uint64), #('null_bitmap', types.CPointer(char_typ)), Type definition for pandas.core.strings.StringMethods functions handling. Members ---------- _data: :class:`SeriesType` input arg Model for SplitViewStringMethodsType type All members must be the same as main type for this model Creates meminfo and sets dtor. # Nullify all data # builder.store( cgutils.get_null_value(alloc_type), # meminfo_data_ptr) # (str_arr_split_view_payload* out_view, int64_t n_strs, # uint32_t* offsets, char* data, char sep) # TODO: incref? # out_view.null_bitmap = view_payload.null_bitmap #context.nrt.decref(builder, ty, ret) # create array of objects with num_items shape # Array setitem call # for each string # start and end offset of string's list in index_offsets # sp_view.index_offsets[str_ind] # sp_view.index_offsets[str_ind+1] # cgutils.printf(builder, "%d %d\n", list_start, list_end) # Build a new Python list # cgutils.printf(builder, "str %lld n %lld\n", str_ind, nitems) # data_offsets of current list # add 1 since starts from -1 # cgutils.printf(builder, "ind %lld %lld\n", data_start, data_end) # (str_arr_split_view_payload* out_view, int64_t num_items, # int64_t num_offsets) # TODO: incref? # out_view.null_bitmap = view_payload.null_bitmap # TODO: check num strings and support NAN # end_index = getitem_c_arr(arr._index_offsets, item_ind+1) # get around -1 storage in uint32 problem Pandas Series method :meth:`pandas.core.strings.StringMethods.len()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. 
only:: developer Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split_filter Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object # @infer_global(operator.getitem) # get around -1 storage in uint32 problem # last item | 1.510955 | 2 |
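The module above represents the result of S.str.split() as one flat character buffer plus two offset arrays: data_offsets marks token boundaries inside the characters, and index_offsets marks, per input string, which slice of data_offsets belongs to it. Below is a rough pure-Python sketch of that layout for illustration only; the function names and the simplified separator handling are assumptions, not the exact C layout built by hstr_ext.

def build_split_view(strings, sep=' '):
    # Flat character buffer shared by all strings.
    data = ''.join(strings)
    data_offsets = []      # token boundaries inside `data`
    index_offsets = [0]    # one slice of data_offsets per input string
    pos = 0
    for s in strings:
        data_offsets.append(pos)                  # start of the first token
        for i, ch in enumerate(s):
            if ch == sep:
                data_offsets.append(pos + i)      # boundary between two tokens
        data_offsets.append(pos + len(s))         # end of the last token
        index_offsets.append(len(data_offsets))
        pos += len(s)
    return data, index_offsets, data_offsets

def get_token_list(data, index_offsets, data_offsets, i):
    # Adjacent offset pairs delimit the tokens of string i; the leading separator is stripped.
    bounds = data_offsets[index_offsets[i]:index_offsets[i + 1]]
    return [data[lo:hi].lstrip(' ') for lo, hi in zip(bounds, bounds[1:])]

data, index_offsets, data_offsets = build_split_view(["a b c", "a a b"])
print(get_token_list(data, index_offsets, data_offsets, 1))  # ['a', 'a', 'b']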
explore.py | GouthamShiv/ML-salaryPrediction | 0 | 6630567 | <gh_stars>0
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
# Function to group countries which are having records less than provided cut-off
def shorten_categories(categories, cutoff):
categorical_map = {}
for i in range(len(categories)):
if categories.values[i] >= cutoff:
categorical_map[categories.index[i]] = categories.index[i]
else:
categorical_map[categories.index[i]] = 'Other'
return categorical_map
# Function to convert the YearsCode to float value and map
# TEXT values ('More than 50 years' & 'Less than 1 year') to float as well
def clean_experience(exp):
if exp == 'More than 50 years':
return 50
if exp == 'Less than 1 year':
return 0.5
return float(exp)
# Function to convert the EdLevel to simpler category
def clean_education(ed):
if 'Bachelor’s degree' in ed:
return 'Bachelor’s degree'
if 'Master’s degree' in ed:
return 'Master’s degree'
if 'Professional degree' in ed or 'Other doctoral' in ed:
return 'Post graduation'
return 'Less than Bachelor’s'
@st.cache
def loadData():
df = pd.read_csv('survey_results_public.csv')
# Filter to required columns only
df = df[["Country", "EdLevel", "YearsCode", "Employment", "ConvertedCompYearly"]]
# Rename the column ConvertedCompYearly to Salary
df = df.rename({"ConvertedCompYearly": "Salary"}, axis=1)
    # Filter data frame to eliminate rows without a salary value
    df = df[df["Salary"].notnull()]
    # Drop rows where any field is missing (NA)
df = df.dropna()
# Filter for full-time employed records and drop 'Employment' column
df = df[df['Employment'] == 'Employed full-time']
df = df.drop('Employment', axis=1)
# Function call to group the countries to 'Other' category if count of records for that country is less than 400
country_map = shorten_categories(df.Country.value_counts(), 400)
df['Country'] = df['Country'].map(country_map)
# Filter the data frame [salary <= 250000 and salary >= 10000]
df = df[df['Salary'] <= 250000]
df = df[df['Salary'] >= 10000]
df = df[df['Country'] != 'Other']
# Clean experience and education
df['YearsCode'] = df['YearsCode'].apply(clean_experience)
df['EdLevel'] = df['EdLevel'].apply(clean_education)
return df
df = loadData()
def showExplorePage():
st.title('Explore Software Engineer Salaries')
st.write("""### Stack Overflow Developer Survey 2021""")
# PiChart
data = df['Country'].value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(data, labels=data.index, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that the PIE is drawn as a circle.
st.write("""#### Number of data from different countries""")
st.pyplot(fig1)
# BarChart
st.write("""#### Mean Salary Based on Country""")
data = df.groupby(['Country'])['Salary'].mean().sort_values(ascending=True)
st.bar_chart(data)
# LineChart
st.write("""#### Mean Salary Based on Experience""")
data = df.groupby(['YearsCode'])['Salary'].mean().sort_values(ascending=True)
st.line_chart(data)
| import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
# Function to group countries which are having records less than provided cut-off
def shorten_categories(categories, cutoff):
categorical_map = {}
for i in range(len(categories)):
if categories.values[i] >= cutoff:
categorical_map[categories.index[i]] = categories.index[i]
else:
categorical_map[categories.index[i]] = 'Other'
return categorical_map
# Function to convert the YearsCode to float value and map
# TEXT values ('More than 50 years' & 'Less than 1 year') to float as well
def clean_experience(exp):
if exp == 'More than 50 years':
return 50
if exp == 'Less than 1 year':
return 0.5
return float(exp)
# Function to convert the EdLevel to simpler category
def clean_education(ed):
if 'Bachelor’s degree' in ed:
return 'Bachelor’s degree'
if 'Master’s degree' in ed:
return 'Master’s degree'
if 'Professional degree' in ed or 'Other doctoral' in ed:
return 'Post graduation'
return 'Less than Bachelor’s'
@st.cache
def loadData():
df = pd.read_csv('survey_results_public.csv')
# Filter to required columns only
df = df[["Country", "EdLevel", "YearsCode", "Employment", "ConvertedCompYearly"]]
# Rename the column ConvertedCompYearly to Salary
df = df.rename({"ConvertedCompYearly": "Salary"}, axis=1)
    # Filter data frame to eliminate rows without a salary value
    df = df[df["Salary"].notnull()]
    # Drop rows where any field is missing (NA)
df = df.dropna()
# Filter for full-time employed records and drop 'Employment' column
df = df[df['Employment'] == 'Employed full-time']
df = df.drop('Employment', axis=1)
# Function call to group the countries to 'Other' category if count of records for that country is less than 400
country_map = shorten_categories(df.Country.value_counts(), 400)
df['Country'] = df['Country'].map(country_map)
# Filter the data frame [salary <= 250000 and salary >= 10000]
df = df[df['Salary'] <= 250000]
df = df[df['Salary'] >= 10000]
df = df[df['Country'] != 'Other']
# Clean experience and education
df['YearsCode'] = df['YearsCode'].apply(clean_experience)
df['EdLevel'] = df['EdLevel'].apply(clean_education)
return df
df = loadData()
def showExplorePage():
st.title('Explore Software Engineer Salaries')
st.write("""### Stack Overflow Developer Survey 2021""")
# PiChart
data = df['Country'].value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(data, labels=data.index, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that the PIE is drawn as a circle.
st.write("""#### Number of data from different countries""")
st.pyplot(fig1)
# BarChart
st.write("""#### Mean Salary Based on Country""")
data = df.groupby(['Country'])['Salary'].mean().sort_values(ascending=True)
st.bar_chart(data)
# LineChart
st.write("""#### Mean Salary Based on Experience""")
data = df.groupby(['YearsCode'])['Salary'].mean().sort_values(ascending=True)
st.line_chart(data) | en | 0.836853 | # Function to group countries which are having records less than provided cut-off # Function to convert the YearsCode to float value and map # TEXT values ('More than 50 years' & 'Less than 1 year') to float as well # Function to convert the EdLevel to simpler category # Filter to required columns only # Rename the column ConvertedCompYearly to Salary # Filter data frame to eleminate rows not having salary # Drop rows if any of it's fields are not having value (NA) # Filter for full-time employed records and drop 'Employment' column # Function call to group the countries to 'Other' category if count of records for that country is less than 400 # Filter the data frame [salary <= 250000 and salary >= 10000] # Clean experience and education ### Stack Overflow Developer Survey 2021 # PiChart # Equal aspect ratio ensures that the PIE is drawn as a circle. #### Number of data from different countries # BarChart #### Mean Salary Based on Country # LineChart #### Mean Salary Based on Experience | 4.049232 | 4 |
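A quick sanity-check sketch for the cleaning helpers defined above, using made-up values (run it with the three helper functions in scope; importing the whole module as-is would also execute loadData() and therefore need the survey CSV):

import pandas as pd

counts = pd.Series({'United States': 900, 'India': 450, 'Iceland': 12})
print(shorten_categories(counts, cutoff=400))
# {'United States': 'United States', 'India': 'India', 'Iceland': 'Other'}
print(clean_experience('Less than 1 year'))  # 0.5
print(clean_experience('7'))                 # 7.0
print(clean_education('Master’s degree (MA, MS, M.Eng., MBA)'))  # 'Master’s degree'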
utils/scripts/end2end_iterator_test.py | TeeKay53/Dialogue-systems-for-language-learning | 1 | 6630568 | import os
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from utils import end2end_iterator_utils
from utils import vocab_utils
def iterator_split():
text_file_path = os.path.abspath("test_file.txt")
dataset = tf.contrib.data.TextLineDataset(text_file_path)
dataset = dataset.map(lambda line: tf.cast(tf.string_split([line], "</s>").values, tf.string))
# Split into words
dataset = dataset.map(lambda line: tf.sparse_tensor_to_dense(tf.string_split(line),
default_value='<pad>'))
dataset = dataset.map(lambda dialogue: (dialogue, tf.equal(dialogue, '<pad>')))
# dataset = dataset.map(lambda indices, shape, values: (tf.sparse_to_dense(sparse_indices=indices,
# output_shape=shape,
# sparse_values=values,
# default_value='</pad>'),
# shape))
# dataset = dataset.map(lambda dialogue: (dialogue, tf.cast(tf.constant(not dialogue == '<pad>'), tf.int32)))
print("mapped")
print_dataset(dataset)
def print_dataset(dataset):
tf.InteractiveSession()
iterator = dataset.make_one_shot_iterator()
(tens, weights) = iterator.get_next()
# tens = iterator.get_next()
tens = tens.eval()
weights = weights.eval()
print(tens, tens.shape)
print(weights, weights.shape)
def infer_iter():
file_path = os.path.abspath("test_files/en2end_iterator.txt")
dataset = tf.contrib.data.TextLineDataset(file_path)
eou = '</u>'
eos = '</s>'
src_reverse = False
batch_size = 1
utt_max_len = 20
dialogue_max_len = 20
vocab_table = lookup_ops.index_table_from_tensor(
tf.constant([""])
)
dataset = tf.contrib.data.Dataset.from_tensor_slices(
tf.constant(["a b c </u> a a b </u>", "c a b c a </u> c b c a a </u>"])
)
iterator = end2end_iterator_utils.get_infer_iterator(dataset, vocab_table, batch_size, src_reverse,
eos, eou, utt_max_len, dialogue_max_len)
with tf.Session() as sess:
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
for i in range(2):
source, lengths = sess.run([iterator.source, iterator.source_sequence_length])
print(source)
print(lengths)
infer_iter()
| import os
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from utils import end2end_iterator_utils
from utils import vocab_utils
def iterator_split():
text_file_path = os.path.abspath("test_file.txt")
dataset = tf.contrib.data.TextLineDataset(text_file_path)
dataset = dataset.map(lambda line: tf.cast(tf.string_split([line], "</s>").values, tf.string))
# Split into words
dataset = dataset.map(lambda line: tf.sparse_tensor_to_dense(tf.string_split(line),
default_value='<pad>'))
dataset = dataset.map(lambda dialogue: (dialogue, tf.equal(dialogue, '<pad>')))
# dataset = dataset.map(lambda indices, shape, values: (tf.sparse_to_dense(sparse_indices=indices,
# output_shape=shape,
# sparse_values=values,
# default_value='</pad>'),
# shape))
# dataset = dataset.map(lambda dialogue: (dialogue, tf.cast(tf.constant(not dialogue == '<pad>'), tf.int32)))
print("mapped")
print_dataset(dataset)
def print_dataset(dataset):
tf.InteractiveSession()
iterator = dataset.make_one_shot_iterator()
(tens, weights) = iterator.get_next()
# tens = iterator.get_next()
tens = tens.eval()
weights = weights.eval()
print(tens, tens.shape)
print(weights, weights.shape)
def infer_iter():
file_path = os.path.abspath("test_files/en2end_iterator.txt")
dataset = tf.contrib.data.TextLineDataset(file_path)
eou = '</u>'
eos = '</s>'
src_reverse = False
batch_size = 1
utt_max_len = 20
dialogue_max_len = 20
vocab_table = lookup_ops.index_table_from_tensor(
tf.constant([""])
)
dataset = tf.contrib.data.Dataset.from_tensor_slices(
tf.constant(["a b c </u> a a b </u>", "c a b c a </u> c b c a a </u>"])
)
iterator = end2end_iterator_utils.get_infer_iterator(dataset, vocab_table, batch_size, src_reverse,
eos, eou, utt_max_len, dialogue_max_len)
with tf.Session() as sess:
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
for i in range(2):
source, lengths = sess.run([iterator.source, iterator.source_sequence_length])
print(source)
print(lengths)
infer_iter()
| en | 0.192425 | # Split into words # dataset = dataset.map(lambda indices, shape, values: (tf.sparse_to_dense(sparse_indices=indices, # output_shape=shape, # sparse_values=values, # default_value='</pad>'), # shape)) # dataset = dataset.map(lambda dialogue: (dialogue, tf.cast(tf.constant(not dialogue == '<pad>'), tf.int32))) # tens = iterator.get_next() | 2.683466 | 3 |
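For reference, the dialogue strings passed to get_infer_iterator above mark the end of each utterance with '</u>' and separate tokens with whitespace. A plain-Python sketch of that parsing, for illustration only (the real splitting is done with TensorFlow ops inside end2end_iterator_utils):

def split_dialogue(line, eou='</u>'):
    utterances = [u.strip() for u in line.split(eou) if u.strip()]
    return [u.split() for u in utterances]

print(split_dialogue("a b c </u> a a b </u>"))
# [['a', 'b', 'c'], ['a', 'a', 'b']]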
Automation/NeonOcean.S4.Order/Mod_NeonOcean_S4_Order/Mod.py | NeonOcean/Order | 0 | 6630569 | <filename>Automation/NeonOcean.S4.Order/Mod_NeonOcean_S4_Order/Mod.py
import os
import typing
from json import decoder
from Mod_NeonOcean_S4_Order import Paths
# noinspection PyTypeChecker
_mod = None # type: Mod
_modData = dict() # type: typing.Dict[str, typing.Any]
class Mod:
def __init__ (self, informationDictionary: typing.Dict[str, typing.Any]):
self.Namespace = informationDictionary["Namespace"] # type: str
self.Name = informationDictionary["Name"] # type: str
self.ChangesFilePath = os.path.join(Paths.RootPath, "Changes.md") # type: str
self.PlansFilePath = os.path.join(Paths.RootPath, "Plans.md") # type: str
self.GithubName = informationDictionary["Name"] # type: str
self.Packages = list() # type: typing.List[Package]
self.InformationRelativeFilePath = informationDictionary["Information"] # type: str
self.InformationBuildFilePath = os.path.join(Paths.InformationBuildPath, self.InformationRelativeFilePath) # type: str
self.InformationSourceFilePath = os.path.join(Paths.InformationSourcesPath, self.InformationRelativeFilePath) # type: str
self.Version = self.GetModVersion() # type: str
for packageInformation in informationDictionary["Packages"]: # type: typing.Dict[str, str]
            self.Packages.append(Package(Paths.RootPath, Paths.BuildPath, packageInformation["FileName"], packageInformation["MergeRoot"]))
self.DistributionInstallerFilePath = os.path.join(Paths.PublishingDistributionInstallerPath, informationDictionary["Publishing"]["InstallerFileName"]) # type: str
self.DistributionInstallerFilePath = self.DistributionInstallerFilePath.format(self.Version)
self.DistributionFilesFilePath = os.path.join(Paths.PublishingDistributionFilesPath, informationDictionary["Publishing"]["FilesFileName"]) # type: str
self.DistributionFilesFilePath = self.DistributionFilesFilePath.format(self.Version)
self.SourcesFileName = informationDictionary["Publishing"]["SourcesFileName"] # type: str
self.SourcesFileName = self.SourcesFileName.format(self.Version)
self.PythonBuildArchiveFileName = informationDictionary["Python"]["ArchiveFileName"] # type: str
self.PythonBuildArchiveFilePath = os.path.join(Paths.PythonBuildArchivePath, self.PythonBuildArchiveFileName) # type: str
self.PythonSourcePath = os.path.join(Paths.PythonPath, self.Namespace) # type: str
self.PythonSourceRootPath = os.path.normpath(os.path.join(self.PythonSourcePath, informationDictionary["Python"]["SourceRoot"]))
self.PythonSourceTargetPath = os.path.normpath(os.path.join(self.PythonSourcePath, informationDictionary["Python"]["SourceTarget"]))
self.PythonSourceExcludedFiles = informationDictionary["Python"]["SourceExcluded"] # type: typing.List[str]
self.PythonMergeRelativeRoot = informationDictionary["Python"]["MergeRoot"] # type: str
self.PythonMergeRoot = os.path.join(Paths.BuildPath, self.PythonMergeRelativeRoot) # type: str
for pythonExcludedFileIndex in range(0, len(self.PythonSourceExcludedFiles)): # type: int
self.PythonSourceExcludedFiles[pythonExcludedFileIndex] = os.path.normpath(os.path.join(self.PythonSourceTargetPath, self.PythonSourceExcludedFiles[pythonExcludedFileIndex]))
self.UninstallPath = os.path.join(Paths.S4ModsPath, informationDictionary["Uninstall"]) # type: str
self.UninstallFilesFilePath = os.path.join(Paths.S4ModsPath, informationDictionary["UninstallFiles"]) # type: str
def GetModVersion (self) -> str:
with open(self.InformationSourceFilePath) as informationFile:
informationDictionary = decoder.JSONDecoder().decode(informationFile.read()) # type: typing.Dict[str, typing.Any]
if not "Version" in informationDictionary:
raise ValueError("Entry 'Version' does not exist.")
version = informationDictionary["Version"]
if not isinstance(version, str):
raise TypeError("Entry 'Version' is not a string.")
return version
class Package:
def __init__ (self, modPath: str, modBuildPath: str, fileName: str, mergeRoot: str):
self.Name = os.path.splitext(fileName)[0] # type: str
self.FileName = fileName # type: str
self.PackagePath = os.path.join(modPath, "Packages", self.Name) # type: str
self.BuildPath = os.path.join(self.PackagePath, "Build") # type: str
self.BuildFilePath = os.path.join(self.BuildPath, self.FileName) # type: str
self.BuildManifestFilePath = os.path.join(self.BuildPath, self.Name + "_Manifest.json") # type: str
self.MergeRoot = os.path.join(modBuildPath, mergeRoot) # type: str
self.SourcePath = os.path.join(self.PackagePath, "Sources") # type: str
self.SourceLoosePath = os.path.join(self.SourcePath, "Loose") # type: str
self.SourceBaseFilePath = os.path.join(self.SourcePath, "Base", self.FileName) # type: str
self.STBLPath = os.path.join(modPath, "STBL", self.Name) # type: str
self.STBLBuildPath = os.path.join(self.STBLPath, "Build") # type: str
self.STBLSourcePath = os.path.join(self.STBLPath, "Sources") # type: str
def GetCurrentMod () -> Mod:
return _mod
def GetModData () -> typing.Dict[str, typing.Any]:
return _modData
def _Setup () -> None:
global _mod, _modData
informationFilePath = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), "Mod.json") # type: str
try:
with open(os.path.join(informationFilePath)) as informationFile:
_mod = Mod(decoder.JSONDecoder().decode(informationFile.read()))
except Exception as e:
raise Exception("Failed to read mod information for '" + informationFilePath + "'. \n") from e
_modData = {
"Namespace": GetCurrentMod().Namespace,
"Name": GetCurrentMod().Name,
"Version": GetCurrentMod().Version,
"ChangesFilePath": GetCurrentMod().ChangesFilePath,
"PlansFilePath": GetCurrentMod().PlansFilePath,
"InstallerFilePath": GetCurrentMod().DistributionInstallerFilePath,
"FilesFilePath": GetCurrentMod().DistributionFilesFilePath,
"SourcesFileName": GetCurrentMod().SourcesFileName,
"GithubName": GetCurrentMod().GithubName
} # type: typing.Dict[str, typing.Any]
_Setup()
| <filename>Automation/NeonOcean.S4.Order/Mod_NeonOcean_S4_Order/Mod.py
import os
import typing
from json import decoder
from Mod_NeonOcean_S4_Order import Paths
# noinspection PyTypeChecker
_mod = None # type: Mod
_modData = dict() # type: typing.Dict[str, typing.Any]
class Mod:
def __init__ (self, informationDictionary: typing.Dict[str, typing.Any]):
self.Namespace = informationDictionary["Namespace"] # type: str
self.Name = informationDictionary["Name"] # type: str
self.ChangesFilePath = os.path.join(Paths.RootPath, "Changes.md") # type: str
self.PlansFilePath = os.path.join(Paths.RootPath, "Plans.md") # type: str
self.GithubName = informationDictionary["Name"] # type: str
self.Packages = list() # type: typing.List[Package]
self.InformationRelativeFilePath = informationDictionary["Information"] # type: str
self.InformationBuildFilePath = os.path.join(Paths.InformationBuildPath, self.InformationRelativeFilePath) # type: str
self.InformationSourceFilePath = os.path.join(Paths.InformationSourcesPath, self.InformationRelativeFilePath) # type: str
self.Version = self.GetModVersion() # type: str
for packageInformation in informationDictionary["Packages"]: # type: typing.Dict[str, str]
            self.Packages.append(Package(Paths.RootPath, Paths.BuildPath, packageInformation["FileName"], packageInformation["MergeRoot"]))
self.DistributionInstallerFilePath = os.path.join(Paths.PublishingDistributionInstallerPath, informationDictionary["Publishing"]["InstallerFileName"]) # type: str
self.DistributionInstallerFilePath = self.DistributionInstallerFilePath.format(self.Version)
self.DistributionFilesFilePath = os.path.join(Paths.PublishingDistributionFilesPath, informationDictionary["Publishing"]["FilesFileName"]) # type: str
self.DistributionFilesFilePath = self.DistributionFilesFilePath.format(self.Version)
self.SourcesFileName = informationDictionary["Publishing"]["SourcesFileName"] # type: str
self.SourcesFileName = self.SourcesFileName.format(self.Version)
self.PythonBuildArchiveFileName = informationDictionary["Python"]["ArchiveFileName"] # type: str
self.PythonBuildArchiveFilePath = os.path.join(Paths.PythonBuildArchivePath, self.PythonBuildArchiveFileName) # type: str
self.PythonSourcePath = os.path.join(Paths.PythonPath, self.Namespace) # type: str
self.PythonSourceRootPath = os.path.normpath(os.path.join(self.PythonSourcePath, informationDictionary["Python"]["SourceRoot"]))
self.PythonSourceTargetPath = os.path.normpath(os.path.join(self.PythonSourcePath, informationDictionary["Python"]["SourceTarget"]))
self.PythonSourceExcludedFiles = informationDictionary["Python"]["SourceExcluded"] # type: typing.List[str]
self.PythonMergeRelativeRoot = informationDictionary["Python"]["MergeRoot"] # type: str
self.PythonMergeRoot = os.path.join(Paths.BuildPath, self.PythonMergeRelativeRoot) # type: str
for pythonExcludedFileIndex in range(0, len(self.PythonSourceExcludedFiles)): # type: int
self.PythonSourceExcludedFiles[pythonExcludedFileIndex] = os.path.normpath(os.path.join(self.PythonSourceTargetPath, self.PythonSourceExcludedFiles[pythonExcludedFileIndex]))
self.UninstallPath = os.path.join(Paths.S4ModsPath, informationDictionary["Uninstall"]) # type: str
self.UninstallFilesFilePath = os.path.join(Paths.S4ModsPath, informationDictionary["UninstallFiles"]) # type: str
def GetModVersion (self) -> str:
with open(self.InformationSourceFilePath) as informationFile:
informationDictionary = decoder.JSONDecoder().decode(informationFile.read()) # type: typing.Dict[str, typing.Any]
if not "Version" in informationDictionary:
raise ValueError("Entry 'Version' does not exist.")
version = informationDictionary["Version"]
if not isinstance(version, str):
raise TypeError("Entry 'Version' is not a string.")
return version
class Package:
def __init__ (self, modPath: str, modBuildPath: str, fileName: str, mergeRoot: str):
self.Name = os.path.splitext(fileName)[0] # type: str
self.FileName = fileName # type: str
self.PackagePath = os.path.join(modPath, "Packages", self.Name) # type: str
self.BuildPath = os.path.join(self.PackagePath, "Build") # type: str
self.BuildFilePath = os.path.join(self.BuildPath, self.FileName) # type: str
self.BuildManifestFilePath = os.path.join(self.BuildPath, self.Name + "_Manifest.json") # type: str
self.MergeRoot = os.path.join(modBuildPath, mergeRoot) # type: str
self.SourcePath = os.path.join(self.PackagePath, "Sources") # type: str
self.SourceLoosePath = os.path.join(self.SourcePath, "Loose") # type: str
self.SourceBaseFilePath = os.path.join(self.SourcePath, "Base", self.FileName) # type: str
self.STBLPath = os.path.join(modPath, "STBL", self.Name) # type: str
self.STBLBuildPath = os.path.join(self.STBLPath, "Build") # type: str
self.STBLSourcePath = os.path.join(self.STBLPath, "Sources") # type: str
def GetCurrentMod () -> Mod:
return _mod
def GetModData () -> typing.Dict[str, typing.Any]:
return _modData
def _Setup () -> None:
global _mod, _modData
informationFilePath = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), "Mod.json") # type: str
try:
with open(os.path.join(informationFilePath)) as informationFile:
_mod = Mod(decoder.JSONDecoder().decode(informationFile.read()))
except Exception as e:
raise Exception("Failed to read mod information for '" + informationFilePath + "'. \n") from e
_modData = {
"Namespace": GetCurrentMod().Namespace,
"Name": GetCurrentMod().Name,
"Version": GetCurrentMod().Version,
"ChangesFilePath": GetCurrentMod().ChangesFilePath,
"PlansFilePath": GetCurrentMod().PlansFilePath,
"InstallerFilePath": GetCurrentMod().DistributionInstallerFilePath,
"FilesFilePath": GetCurrentMod().DistributionFilesFilePath,
"SourcesFileName": GetCurrentMod().SourcesFileName,
"GithubName": GetCurrentMod().GithubName
} # type: typing.Dict[str, typing.Any]
_Setup()
| en | 0.288695 | # noinspection PyTypeChecker # type: Mod # type: typing.Dict[str, typing.Any] # type: str # type: str # type: str # type: str # type: str # type: typing.List[Package] # type: str # type: str # type: str # type: str # type: typing.Dict[str, str] # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: typing.List[str] # type: str # type: str # type: int # type: str # type: str # type: typing.Dict[str, typing.Any] # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: str # type: typing.Dict[str, typing.Any] | 2.244375 | 2 |
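Mod.__init__ above is driven entirely by a Mod.json file next to the package. A hedged sketch of the shape it expects is shown below; every value is a placeholder inferred from the keys the constructor reads, not the project's real configuration:

example_mod_json = {
    "Namespace": "NeonOcean.S4.Order",
    "Name": "Order",
    "Information": "NeonOcean.S4.Order.json",      # relative path; that file must contain a "Version" string
    "Packages": [
        {"FileName": "NeonOcean.S4.Order.package", "MergeRoot": "Packages"}
    ],
    "Publishing": {
        "InstallerFileName": "Installer_{0}.exe",   # "{0}" is filled with the mod version
        "FilesFileName": "Files_{0}.zip",
        "SourcesFileName": "Sources_{0}.zip"
    },
    "Python": {
        "ArchiveFileName": "NeonOcean.S4.Order.ts4script",
        "SourceRoot": ".",
        "SourceTarget": "NeonOcean/S4/Order",
        "SourceExcluded": [],
        "MergeRoot": "Mods"
    },
    "Uninstall": "NeonOcean.S4.Order",
    "UninstallFiles": "NeonOcean.S4.Order.Files.json"
}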
examples/flask_threaded_rpc_client.py | cloudamqp/amqpstorm | 0 | 6630570 | <gh_stars>0
import threading
from time import sleep
from flask import Flask
import amqpstorm
from amqpstorm import Message
app = Flask(__name__)
class RpcClient(object):
"""Asynchronous Rpc client."""
def __init__(self, host, username, password, rpc_queue):
self.queue = {}
self.host = host
self.username = username
self.password = password
self.channel = None
self.connection = None
self.callback_queue = None
self.rpc_queue = rpc_queue
self.open()
def open(self):
"""Open Connection."""
self.connection = amqpstorm.Connection(self.host, self.username,
self.password)
self.channel = self.connection.channel()
self.channel.queue.declare(self.rpc_queue)
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
self._create_process_thread()
def _create_process_thread(self):
"""Create a thread responsible for consuming messages in response
to RPC requests.
"""
thread = threading.Thread(target=self._process_data_events)
thread.setDaemon(True)
thread.start()
def _process_data_events(self):
"""Process Data Events using the Process Thread."""
self.channel.start_consuming()
def _on_response(self, message):
"""On Response store the message with the correlation id in a local
dictionary.
"""
self.queue[message.correlation_id] = message.body
def send_request(self, payload):
# Create the Message object.
message = Message.create(self.channel, payload)
message.reply_to = self.callback_queue
# Create an entry in our local dictionary, using the automatically
# generated correlation_id as our key.
self.queue[message.correlation_id] = None
# Publish the RPC request.
message.publish(routing_key=self.rpc_queue)
# Return the Unique ID used to identify the request.
return message.correlation_id
@app.route('/rpc_call/<payload>')
def rpc_call(payload):
"""Simple Flask implementation for making asynchronous Rpc calls. """
# Send the request and store the requests Unique ID.
corr_id = RPC_CLIENT.send_request(payload)
# Wait until we have received a response.
while RPC_CLIENT.queue[corr_id] is None:
sleep(0.1)
# Return the response to the user.
return RPC_CLIENT.queue[corr_id]
if __name__ == '__main__':
RPC_CLIENT = RpcClient('127.0.0.1', 'guest', 'guest', 'rpc_queue')
app.run()
| import threading
from time import sleep
from flask import Flask
import amqpstorm
from amqpstorm import Message
app = Flask(__name__)
class RpcClient(object):
"""Asynchronous Rpc client."""
def __init__(self, host, username, password, rpc_queue):
self.queue = {}
self.host = host
self.username = username
self.password = password
self.channel = None
self.connection = None
self.callback_queue = None
self.rpc_queue = rpc_queue
self.open()
def open(self):
"""Open Connection."""
self.connection = amqpstorm.Connection(self.host, self.username,
self.password)
self.channel = self.connection.channel()
self.channel.queue.declare(self.rpc_queue)
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
self._create_process_thread()
def _create_process_thread(self):
"""Create a thread responsible for consuming messages in response
to RPC requests.
"""
thread = threading.Thread(target=self._process_data_events)
thread.setDaemon(True)
thread.start()
def _process_data_events(self):
"""Process Data Events using the Process Thread."""
self.channel.start_consuming()
def _on_response(self, message):
"""On Response store the message with the correlation id in a local
dictionary.
"""
self.queue[message.correlation_id] = message.body
def send_request(self, payload):
# Create the Message object.
message = Message.create(self.channel, payload)
message.reply_to = self.callback_queue
# Create an entry in our local dictionary, using the automatically
# generated correlation_id as our key.
self.queue[message.correlation_id] = None
# Publish the RPC request.
message.publish(routing_key=self.rpc_queue)
# Return the Unique ID used to identify the request.
return message.correlation_id
@app.route('/rpc_call/<payload>')
def rpc_call(payload):
"""Simple Flask implementation for making asynchronous Rpc calls. """
# Send the request and store the requests Unique ID.
corr_id = RPC_CLIENT.send_request(payload)
# Wait until we have received a response.
while RPC_CLIENT.queue[corr_id] is None:
sleep(0.1)
# Return the response to the user.
return RPC_CLIENT.queue[corr_id]
if __name__ == '__main__':
RPC_CLIENT = RpcClient('127.0.0.1', 'guest', 'guest', 'rpc_queue')
app.run() | en | 0.837218 | Asynchronous Rpc client. Open Connection. Create a thread responsible for consuming messages in response to RPC requests. Process Data Events using the Process Thread. On Response store the message with the correlation id in a local dictionary. # Create the Message object. # Create an entry in our local dictionary, using the automatically # generated correlation_id as our key. # Publish the RPC request. # Return the Unique ID used to identify the request. Simple Flask implementation for making asynchronous Rpc calls. # Send the request and store the requests Unique ID. # Wait until we have received a response. # Return the response to the user. | 2.911928 | 3 |
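The client above expects a worker that consumes from 'rpc_queue' and publishes its answer to message.reply_to with the same correlation_id. A minimal companion server sketch follows; it simply echoes the payload back, and connection error handling is omitted on purpose:

import amqpstorm
from amqpstorm import Message

def on_request(message):
    # Echo the request body back on the client's exclusive callback queue.
    response = Message.create(message.channel, message.body,
                              properties={'correlation_id': message.correlation_id})
    response.publish(message.reply_to)
    message.ack()

if __name__ == '__main__':
    connection = amqpstorm.Connection('127.0.0.1', 'guest', 'guest')
    channel = connection.channel()
    channel.queue.declare('rpc_queue')
    channel.basic.consume(on_request, queue='rpc_queue', no_ack=False)
    channel.start_consuming()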
csgo_gsi_arduino_lcd/main.py | Darkness4/csgo-gsi-arduino | 5 | 6630571 | # -*- coding: utf-8 -*-
"""
CSGO's informations displayed on an Arduino featuring a bomb timer.
@auteur: tsuriga, Darkness4
"""
import sys
from qtpy.QtWidgets import QApplication
from csgo_gsi_arduino_lcd.ui.csgo_window import CsgoWindow
def main():
global w
app = QApplication(sys.argv)
w = CsgoWindow()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""
CSGO's informations displayed on an Arduino featuring a bomb timer.
@auteur: tsuriga, Darkness4
"""
import sys
from qtpy.QtWidgets import QApplication
from csgo_gsi_arduino_lcd.ui.csgo_window import CsgoWindow
def main():
global w
app = QApplication(sys.argv)
w = CsgoWindow()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| en | 0.289986 | # -*- coding: utf-8 -*- CSGO's informations displayed on an Arduino featuring a bomb timer. @auteur: tsuriga, Darkness4 | 1.571026 | 2 |
drdown/medicalrecords/urls.py | fga-gpp-mds/2018.1-Cris-Down | 11 | 6630572 | <reponame>fga-gpp-mds/2018.1-Cris-Down<gh_stars>10-100
from django.conf.urls import url
from drdown.medicalrecords.forms.exam_forms import ExamForm
from drdown.medicalrecords.forms.medicalrecords_forms import MedicalRecordForm
from drdown.medicalrecords.forms.static_data_forms import StaticDataForm
from drdown.medicalrecords.forms.medicines_forms import MedicineForm
from drdown.medicalrecords.forms.complaint_forms import ComplaintForm
from drdown.medicalrecords.forms.risk_forms import RiskForm
from drdown.medicalrecords.views import (
view_medical_record, view_static_data,
view_medicines, view_complaint, view_exams,
view_pdf, view_risk, view_curves,
)
app_name = 'medicalrecords'
urlpatterns = [
url(
regex=r'list/(?P<username>[\w.@+-]+)/$',
view=view_medical_record.MedicalRecordsList.as_view(),
name='list_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new/$',
view=view_medical_record.MedicalRecordsCreateView.as_view(
form_class=MedicalRecordForm),
name='create_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-static-data/$',
view=view_static_data.StaticDataCreateView.as_view(
form_class=StaticDataForm),
name='create_static_data_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-medicine/$',
view=view_medicines.MedicinesCreateView.as_view(
form_class=MedicineForm),
name='create_medicine_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-complaint/$',
view=view_complaint.ComplaintCreateView.as_view(
form_class=ComplaintForm),
name='create_complaint_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-exam/$',
view=view_exams.ExamCreateView.as_view(
form_class=ExamForm),
name='create_exam_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/update-static-data/$',
view=view_static_data.StaticDataUpdateView.as_view(
form_class=StaticDataForm),
name='update_static_data_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-medicine/$',
view=view_medicines.MedicinesCreateView.as_view(
form_class=MedicineForm),
name='create_medicine'
),
url(
regex=r'(?P<username>[\w.@+-]+)/update-medicine/(?P<pk>\d+)/$',
view=view_medicines.MedicinesUpdateView.as_view(
form_class=MedicineForm),
name='update_medicine'
),
url(
regex=r'(?P<username>[\w.@+-]+)/risk/edit/$',
view=view_risk.RiskUpdateView.as_view(),
name='update_risk'
),
url(
regex=r'(?P<username>[\w.@+-]+)/pdf/$',
view=view_pdf.PDFView.as_view(),
name='pdf'
),
url(
regex=r'(?P<username>[\w.@+-]+)/curves/create-height/$',
view=view_curves.CurvesCreateView.as_view(),
name='create_curve'
),
url(
regex=r'(?P<username>[\w.@+-]+)/curves/update-height/(?P<pk>\d+)/$',
view=view_curves.CurvesUpdateView.as_view(),
name='update_curve'
),
url(
regex=r'curves/ajax/$',
view=view_curves.CurveDataParser.as_view(),
name='curve_ajax'
),
]
| from django.conf.urls import url
from drdown.medicalrecords.forms.exam_forms import ExamForm
from drdown.medicalrecords.forms.medicalrecords_forms import MedicalRecordForm
from drdown.medicalrecords.forms.static_data_forms import StaticDataForm
from drdown.medicalrecords.forms.medicines_forms import MedicineForm
from drdown.medicalrecords.forms.complaint_forms import ComplaintForm
from drdown.medicalrecords.forms.risk_forms import RiskForm
from drdown.medicalrecords.views import (
view_medical_record, view_static_data,
view_medicines, view_complaint, view_exams,
view_pdf, view_risk, view_curves,
)
app_name = 'medicalrecords'
urlpatterns = [
url(
regex=r'list/(?P<username>[\w.@+-]+)/$',
view=view_medical_record.MedicalRecordsList.as_view(),
name='list_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new/$',
view=view_medical_record.MedicalRecordsCreateView.as_view(
form_class=MedicalRecordForm),
name='create_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-static-data/$',
view=view_static_data.StaticDataCreateView.as_view(
form_class=StaticDataForm),
name='create_static_data_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-medicine/$',
view=view_medicines.MedicinesCreateView.as_view(
form_class=MedicineForm),
name='create_medicine_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-complaint/$',
view=view_complaint.ComplaintCreateView.as_view(
form_class=ComplaintForm),
name='create_complaint_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-exam/$',
view=view_exams.ExamCreateView.as_view(
form_class=ExamForm),
name='create_exam_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/update-static-data/$',
view=view_static_data.StaticDataUpdateView.as_view(
form_class=StaticDataForm),
name='update_static_data_medicalrecords'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/new-medicine/$',
view=view_medicines.MedicinesCreateView.as_view(
form_class=MedicineForm),
name='create_medicine'
),
url(
regex=r'(?P<username>[\w.@+-]+)/update-medicine/(?P<pk>\d+)/$',
view=view_medicines.MedicinesUpdateView.as_view(
form_class=MedicineForm),
name='update_medicine'
),
url(
regex=r'(?P<username>[\w.@+-]+)/risk/edit/$',
view=view_risk.RiskUpdateView.as_view(),
name='update_risk'
),
url(
regex=r'(?P<username>[\w.@+-]+)/pdf/$',
view=view_pdf.PDFView.as_view(),
name='pdf'
),
url(
regex=r'(?P<username>[\w.@+-]+)/curves/create-height/$',
view=view_curves.CurvesCreateView.as_view(),
name='create_curve'
),
url(
regex=r'(?P<username>[\w.@+-]+)/curves/update-height/(?P<pk>\d+)/$',
view=view_curves.CurvesUpdateView.as_view(),
name='update_curve'
),
url(
regex=r'curves/ajax/$',
view=view_curves.CurveDataParser.as_view(),
name='curve_ajax'
),
] | none | 1 | 1.865036 | 2 |
|
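# A minimal usage sketch, not from the original urls.py above: it shows how the
# named routes in that URLconf could be resolved with Django's reverse(). It
# assumes the URLconf is included in a configured project under the
# 'medicalrecords' namespace set by app_name, and the username/pk values are
# placeholders.
from django.urls import reverse
def build_medicalrecords_links(username, medicine_pk):
    # reverse() looks patterns up by "<namespace>:<name>" and fills the named
    # regex groups (username, pk) captured in the patterns above.
    return {
        "list": reverse("medicalrecords:list_medicalrecords", kwargs={"username": username}),
        "new_exam": reverse("medicalrecords:create_exam_medicalrecords", kwargs={"username": username}),
        "update_medicine": reverse("medicalrecords:update_medicine",
                                   kwargs={"username": username, "pk": medicine_pk}),
    }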
PyMemAPI/schema/schema.py | josephquang97/pymemapi | 1 | 6630573 | from pydantic import BaseModel, HttpUrl
from typing import List
class Category(BaseModel):
name: str
photo: HttpUrl
class Language(BaseModel):
"""Memrise course language."""
id: int
slug: str
name: str
photo: HttpUrl
parent_id: int
index: int
language_code: str
class CourseSchema(BaseModel):
"""Memrise course schema."""
id: int
name: str
slug: str
url: str
description: str
photo: HttpUrl
photo_small: HttpUrl
photo_large: HttpUrl
num_things: int
num_levels: int
num_learners: int
source: Language
target: Language
learned: int
review: int
ignored: int
ltm: int
difficult: int
category: Category
percent_complete: int
class CourseList(BaseModel):
courses: List[CourseSchema]
to_review_total: int
has_more_courses: bool
class EditLevel(BaseModel):
"""Learnable is present for vocabulary"""
success: bool
rendered: str
class LevelSchema(BaseModel):
"""Level schema"""
id: int
index: int
kind: int
title: str
pool_id: int
course_id: int
learnable_ids: List[int]
class LevelList(BaseModel):
"""List of level schema"""
levels: List[LevelSchema]
version: str
| from pydantic import BaseModel, HttpUrl
from typing import List
class Category(BaseModel):
name: str
photo: HttpUrl
class Language(BaseModel):
"""Memrise course language."""
id: int
slug: str
name: str
photo: HttpUrl
parent_id: int
index: int
language_code: str
class CourseSchema(BaseModel):
"""Memrise course schema."""
id: int
name: str
slug: str
url: str
description: str
photo: HttpUrl
photo_small: HttpUrl
photo_large: HttpUrl
num_things: int
num_levels: int
num_learners: int
source: Language
target: Language
learned: int
review: int
ignored: int
ltm: int
difficult: int
category: Category
percent_complete: int
class CourseList(BaseModel):
courses: List[CourseSchema]
to_review_total: int
has_more_courses: bool
class EditLevel(BaseModel):
"""Learnable is present for vocabulary"""
success: bool
rendered: str
class LevelSchema(BaseModel):
"""Level schema"""
id: int
index: int
kind: int
title: str
pool_id: int
course_id: int
learnable_ids: List[int]
class LevelList(BaseModel):
"""List of level schema"""
levels: List[LevelSchema]
version: str
| en | 0.722169 | Memrise course language. Memrise course schema. Learnable is present for vocabulary Level schema List of level schema | 2.707675 | 3 |
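# A minimal usage sketch, not from the original schema.py above: pydantic
# validates and coerces plain dicts into the models defined in that file.
# The field values here are made-up placeholders, not real Memrise data.
payload = {
    "levels": [
        {"id": 1, "index": 1, "kind": 1, "title": "Basics",
         "pool_id": 10, "course_id": 100, "learnable_ids": [1, 2, 3]}
    ],
    "version": "1",
}
level_list = LevelList(**payload)
print(level_list.levels[0].title)          # -> Basics
print(level_list.levels[0].learnable_ids)  # -> [1, 2, 3], each value validated as int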
20210825.py | alexholft/TIL | 0 | 6630574 | <filename>20210825.py
# Today I learned - Database
print('Oracle')
| <filename>20210825.py
# Today I learned - Database
print('Oracle')
| none | 1 | 1.02237 | 1 |
|
news/news3k.py | lofmat/history_now | 0 | 6630575 | <filename>news/news3k.py
import datetime
import newspaper
from newspaper.article import ArticleException
from newspaper import Config
import logging
from newspaper import Article
# url = 'https://www.berliner-zeitung.de/'
# url_meduza = "https://meduza.io/"
cfg = Config()
cfg.request_timeout = 15
logging.basicConfig(level=logging.INFO)
# SEP = '#'
# sep_line = SEP * 30
# class NewspaperData:
# def __init__(self, source):
# self.source = source
def get_np3k_data(url: str) -> list:
# list of tuples
rs = []
data = newspaper.build(url, memoize_articles=False, config=cfg)
for art in data.articles:
try:
art.download()
art.parse()
# Sometimes publish date is empty
rs.append((art.publish_date if art.publish_date else datetime.datetime.now(),
art.meta_keywords,
art.authors,
art.title,
url, art.text))
except ArticleException as e:
logging.exception(f'Exception -> {e}')
logging.exception(f"Cannot download url content -> {url}")
continue
return rs
#
# for art in x.articles:
# logging.info(sep_line)
# logging.info(f'URL -> {art.url}')
# try:
# art.download()
# art.parse()
# except ArticleException:
# logging.info('00000000000000000000000000000000 ')
# logging.exception(f"Cannot download url content -> {url}")
# continue
#
# | <filename>news/news3k.py
import datetime
import newspaper
from newspaper.article import ArticleException
from newspaper import Config
import logging
from newspaper import Article
# url = 'https://www.berliner-zeitung.de/'
# url_meduza = "https://meduza.io/"
cfg = Config()
cfg.request_timeout = 15
logging.basicConfig(level=logging.INFO)
# SEP = '#'
# sep_line = SEP * 30
# class NewspaperData:
# def __init__(self, source):
# self.source = source
def get_np3k_data(url: str) -> list:
# list of tuples
rs = []
data = newspaper.build(url, memoize_articles=False, config=cfg)
for art in data.articles:
try:
art.download()
art.parse()
# Sometimes publish date is empty
rs.append((art.publish_date if art.publish_date else datetime.datetime.now(),
art.meta_keywords,
art.authors,
art.title,
url, art.text))
except ArticleException as e:
logging.exception(f'Exception -> {e}')
logging.exception(f"Cannot download url content -> {url}")
continue
return rs
#
# for art in x.articles:
# logging.info(sep_line)
# logging.info(f'URL -> {art.url}')
# try:
# art.download()
# art.parse()
# except ArticleException:
# logging.info('00000000000000000000000000000000 ')
# logging.exception(f"Cannot download url content -> {url}")
# continue
#
# | en | 0.541841 | # url = 'https://www.berliner-zeitung.de/' # url_meduza = "https://meduza.io/" # SEP = '#' # sep_line = SEP * 30 # class NewspaperData: # def __init__(self, source): # self.source = source # list of tuples # Sometimes publish date is empty # # for art in x.articles: # logging.info(sep_line) # logging.info(f'URL -> {art.url}') # try: # art.download() # art.parse() # except ArticleException: # logging.info('00000000000000000000000000000000 ') # logging.exception(f"Cannot download url content -> {url}") # continue # # | 3.002727 | 3 |
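# A minimal usage sketch for get_np3k_data() defined in the file above, not part
# of the original script. The URL is a placeholder; newspaper.build() crawls the
# whole source, so this needs network access and can be slow.
if __name__ == "__main__":
    articles = get_np3k_data("https://example-news-site.test/")
    for published, keywords, authors, title, source, text in articles[:3]:
        logging.info(f"{title} | {published} | authors={authors}")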
ptp/components/publishers/global_variable_publisher.py | aasseman/pytorchpipe | 232 | 6630576 | # -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
from ptp.components.component import Component
from ptp.configuration.configuration_error import ConfigurationError
class GlobalVariablePublisher(Component):
"""
Component responsible for publishing variables set in configuration file as globals.
"""
def __init__(self, name, config):
"""
Initializes object. Loads keys and values of variables and adds them to globals.
        :param name: Name of the component.
:type name: str
:param config: Dictionary of parameters (read from the configuration ``.yaml`` file).
:type config: :py:class:`ptp.configuration.ConfigInterface`
"""
# Call constructors of parent classes.
Component.__init__(self, name, GlobalVariablePublisher, config)
        # Get list of keys of global variables - can be either a list of strings or a single string with comma-separated values.
keys = self.config["keys"]
if type(keys) is str:
keys = keys.replace(" ","").split(",")
# Get list of values - must be a single value or a list.
values = self.config["values"]
if type(values) is list:
# Make sure that both are lists.
if type(keys) is not list or len(keys) != len(values):
raise ConfigurationError("Number of parameters indicated by provided 'keys' must be equal to number of provided 'values'")
# Publish globals one by one.
for (key, value) in zip(keys, values):
self.globals[key] = value
elif keys != '':
# Publish single global.
self.globals[keys[0]] = values
def input_data_definitions(self):
"""
Function returns a dictionary with definitions of input data that are required by the component.
:return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return { }
def output_data_definitions(self):
"""
        Function returns an empty dictionary with definitions of output data produced by the component.
:return: Empty dictionary.
"""
return { }
def __call__(self, data_streams):
"""
Empty method.
:param data_streams: :py:class:`ptp.utils.DataStreams` object.
"""
pass | # -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
from ptp.components.component import Component
from ptp.configuration.configuration_error import ConfigurationError
class GlobalVariablePublisher(Component):
"""
Component responsible for publishing variables set in configuration file as globals.
"""
def __init__(self, name, config):
"""
Initializes object. Loads keys and values of variables and adds them to globals.
        :param name: Name of the component.
:type name: str
:param config: Dictionary of parameters (read from the configuration ``.yaml`` file).
:type config: :py:class:`ptp.configuration.ConfigInterface`
"""
# Call constructors of parent classes.
Component.__init__(self, name, GlobalVariablePublisher, config)
        # Get list of keys of global variables - can be either a list of strings or a single string with comma-separated values.
keys = self.config["keys"]
if type(keys) is str:
keys = keys.replace(" ","").split(",")
# Get list of values - must be a single value or a list.
values = self.config["values"]
if type(values) is list:
# Make sure that both are lists.
if type(keys) is not list or len(keys) != len(values):
raise ConfigurationError("Number of parameters indicated by provided 'keys' must be equal to number of provided 'values'")
# Publish globals one by one.
for (key, value) in zip(keys, values):
self.globals[key] = value
elif keys != '':
# Publish single global.
self.globals[keys[0]] = values
def input_data_definitions(self):
"""
Function returns a dictionary with definitions of input data that are required by the component.
:return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return { }
def output_data_definitions(self):
"""
        Function returns an empty dictionary with definitions of output data produced by the component.
:return: Empty dictionary.
"""
return { }
def __call__(self, data_streams):
"""
Empty method.
:param data_streams: :py:class:`ptp.utils.DataStreams` object.
"""
pass | en | 0.753225 | # -*- coding: utf-8 -*- # # Copyright (C) tkornuta, IBM Corporation 2019 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Component responsible for publishing variables set in configuration file as globals. Initializes object. Loads keys and values of variables and adds them to globals. :param name: Loss name. :type name: str :param config: Dictionary of parameters (read from the configuration ``.yaml`` file). :type config: :py:class:`ptp.configuration.ConfigInterface` # Call constructors of parent classes. # Get list of keys of global variables - can be both list of strings or a single string with comma-separated values. # Get list of values - must be a single value or a list. # Make sure that both are lists. # Publish globals one by one. # Publish single global. Function returns a dictionary with definitions of input data that are required by the component. :return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`). Function returns a empty dictionary with definitions of output data produced the component. :return: Empty dictionary. Empty method. :param data_streams: :py:class:`ptp.utils.DataStreams` object. | 2.517421 | 3 |
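# A standalone sketch, not from the ptp package, of the key/value handling the
# constructor above performs, so the accepted config shapes are easier to see.
# The example keys and values are hypothetical.
def parse_globals(keys, values):
    published = {}
    if type(keys) is str:
        keys = keys.replace(" ", "").split(",")
    if type(values) is list:
        if type(keys) is not list or len(keys) != len(values):
            raise ValueError("'keys' and 'values' must have the same length")
        for key, value in zip(keys, values):
            published[key] = value
    elif keys != '':
        published[keys[0]] = values
    return published
# "keys" may be a comma-separated string or a list; "values" a list or a single value.
print(parse_globals("bow_size, num_classes", [100, 10]))  # {'bow_size': 100, 'num_classes': 10}
print(parse_globals("embedding_dim", 300))                # {'embedding_dim': 300}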
minette/datastore/mysqlstores.py | uezo/minette-python | 31 | 6630577 | <reponame>uezo/minette-python
import MySQLdb
from MySQLdb.cursors import DictCursor
from MySQLdb.connections import Connection
from .connectionprovider import ConnectionProvider
from .contextstore import ContextStore
from .userstore import UserStore
from .messagelogstore import MessageLogStore
from .storeset import StoreSet
class MySQLConnection(Connection):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class MySQLConnectionProvider(ConnectionProvider):
"""
Connection provider for MySQL
Attributes
----------
connection_str : str
Connection string
connection_params : dict
Parameters for connection
"""
def __init__(self, connection_str, **kwargs):
"""
Parameters
----------
connection_str : str
Connection string
"""
self.connection_str = connection_str
self.connection_params = {"cursorclass": DictCursor, "charset": "utf8"}
param_values = self.connection_str.split(";")
for pv in param_values:
if "=" in pv:
p, v = list(map(str.strip, pv.split("=")))
self.connection_params[p] = v
def get_connection(self):
"""
Get connection
Returns
-------
connection : Connection
Database connection
"""
return MySQLConnection(**self.connection_params)
def get_prepare_params(self):
"""
Get parameters for preparing tables
Returns
-------
prepare_params : tuple or None
Parameters for preparing tables
"""
return (self.connection_params["db"], )
class MySQLContextStore(ContextStore):
def get_sqls(self):
"""
Get SQLs used in ContextStore
Returns
-------
sqls : dict
            SQLs used in ContextStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": "create table {0} (channel VARCHAR(20), channel_user_id VARCHAR(100), timestamp DATETIME, topic_name VARCHAR(100), topic_status VARCHAR(100), topic_previous VARCHAR(500), topic_priority INT, data JSON, primary key(channel, channel_user_id))".format(self.table_name),
"get_context": "select channel, channel_user_id, timestamp, topic_name, topic_status, topic_previous, topic_priority, data from {0} where channel=%s and channel_user_id=%s limit 1".format(self.table_name),
"save_context": "replace into {0} (channel, channel_user_id, timestamp, topic_name, topic_status, topic_previous, topic_priority, data) values (%s,%s,%s,%s,%s,%s,%s,%s)".format(self.table_name),
}
class MySQLUserStore(UserStore):
def get_sqls(self):
"""
Get SQLs used in UserStore
Returns
-------
sqls : dict
            SQLs used in UserStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": "create table {0} (channel VARCHAR(20), channel_user_id VARCHAR(100), user_id VARCHAR(100), timestamp DATETIME, name VARCHAR(100), nickname VARCHAR(100), profile_image_url VARCHAR(500), data JSON, primary key(channel, channel_user_id))".format(self.table_name),
"get_user": "select channel, channel_user_id, user_id, timestamp, name, nickname, profile_image_url, data from {0} where channel=%s and channel_user_id=%s limit 1".format(self.table_name),
"add_user": "insert into {0} (channel, channel_user_id, user_id, timestamp, name, nickname, profile_image_url, data) values (%s,%s,%s,%s,%s,%s,%s,%s)".format(self.table_name),
"save_user": "update {0} set timestamp=%s, name=%s, nickname=%s, profile_image_url=%s, data=%s where channel=%s and channel_user_id=%s".format(self.table_name),
}
class MySQLMessageLogStore(MessageLogStore):
def get_sqls(self):
"""
Get SQLs used in MessageLogStore
Returns
-------
sqls : dict
            SQLs used in MessageLogStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": """
create table {0} (
id INT PRIMARY KEY AUTO_INCREMENT,
channel VARCHAR(20),
channel_detail VARCHAR(100),
channel_user_id VARCHAR(100),
request_timestamp DATETIME,
request_id VARCHAR(100),
request_type VARCHAR(100),
request_text VARCHAR(4000),
request_payloads JSON,
request_intent VARCHAR(100),
request_is_adhoc BOOLEAN,
response_type VARCHAR(100),
response_text VARCHAR(4000),
response_payloads JSON,
response_milliseconds INT,
context_is_new BOOLEAN,
context_topic_name TEXT,
context_topic_status TEXT,
context_topic_is_new BOOLEAN,
context_topic_keep_on BOOLEAN,
context_topic_priority INT,
context_error JSON,
request_json JSON,
response_json JSON,
context_json JSON)
""".format(self.table_name),
"write": """
insert into {0} (
channel,
channel_detail,
channel_user_id,
request_timestamp,
request_id,
request_type,
request_text,
request_payloads,
request_intent,
request_is_adhoc,
response_type,
response_text,
response_payloads,
response_milliseconds,
context_is_new,
context_topic_name,
context_topic_status,
context_topic_is_new,
context_topic_keep_on,
context_topic_priority,
context_error,
request_json, response_json, context_json)
values (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
""".format(self.table_name),
}
class MySQLStores(StoreSet):
connection_provider = MySQLConnectionProvider
context_store = MySQLContextStore
user_store = MySQLUserStore
messagelog_store = MySQLMessageLogStore
| import MySQLdb
from MySQLdb.cursors import DictCursor
from MySQLdb.connections import Connection
from .connectionprovider import ConnectionProvider
from .contextstore import ContextStore
from .userstore import UserStore
from .messagelogstore import MessageLogStore
from .storeset import StoreSet
class MySQLConnection(Connection):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class MySQLConnectionProvider(ConnectionProvider):
"""
Connection provider for MySQL
Attributes
----------
connection_str : str
Connection string
connection_params : dict
Parameters for connection
"""
def __init__(self, connection_str, **kwargs):
"""
Parameters
----------
connection_str : str
Connection string
"""
self.connection_str = connection_str
self.connection_params = {"cursorclass": DictCursor, "charset": "utf8"}
param_values = self.connection_str.split(";")
for pv in param_values:
if "=" in pv:
p, v = list(map(str.strip, pv.split("=")))
self.connection_params[p] = v
def get_connection(self):
"""
Get connection
Returns
-------
connection : Connection
Database connection
"""
return MySQLConnection(**self.connection_params)
def get_prepare_params(self):
"""
Get parameters for preparing tables
Returns
-------
prepare_params : tuple or None
Parameters for preparing tables
"""
return (self.connection_params["db"], )
class MySQLContextStore(ContextStore):
def get_sqls(self):
"""
Get SQLs used in ContextStore
Returns
-------
sqls : dict
            SQLs used in ContextStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": "create table {0} (channel VARCHAR(20), channel_user_id VARCHAR(100), timestamp DATETIME, topic_name VARCHAR(100), topic_status VARCHAR(100), topic_previous VARCHAR(500), topic_priority INT, data JSON, primary key(channel, channel_user_id))".format(self.table_name),
"get_context": "select channel, channel_user_id, timestamp, topic_name, topic_status, topic_previous, topic_priority, data from {0} where channel=%s and channel_user_id=%s limit 1".format(self.table_name),
"save_context": "replace into {0} (channel, channel_user_id, timestamp, topic_name, topic_status, topic_previous, topic_priority, data) values (%s,%s,%s,%s,%s,%s,%s,%s)".format(self.table_name),
}
class MySQLUserStore(UserStore):
def get_sqls(self):
"""
Get SQLs used in UserStore
Returns
-------
sqls : dict
            SQLs used in UserStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": "create table {0} (channel VARCHAR(20), channel_user_id VARCHAR(100), user_id VARCHAR(100), timestamp DATETIME, name VARCHAR(100), nickname VARCHAR(100), profile_image_url VARCHAR(500), data JSON, primary key(channel, channel_user_id))".format(self.table_name),
"get_user": "select channel, channel_user_id, user_id, timestamp, name, nickname, profile_image_url, data from {0} where channel=%s and channel_user_id=%s limit 1".format(self.table_name),
"add_user": "insert into {0} (channel, channel_user_id, user_id, timestamp, name, nickname, profile_image_url, data) values (%s,%s,%s,%s,%s,%s,%s,%s)".format(self.table_name),
"save_user": "update {0} set timestamp=%s, name=%s, nickname=%s, profile_image_url=%s, data=%s where channel=%s and channel_user_id=%s".format(self.table_name),
}
class MySQLMessageLogStore(MessageLogStore):
def get_sqls(self):
"""
Get SQLs used in MessageLogStore
Returns
-------
sqls : dict
            SQLs used in MessageLogStore
"""
return {
"prepare_check": "select * from information_schema.TABLES where TABLE_NAME='{0}' and TABLE_SCHEMA=%s".format(self.table_name),
"prepare_create": """
create table {0} (
id INT PRIMARY KEY AUTO_INCREMENT,
channel VARCHAR(20),
channel_detail VARCHAR(100),
channel_user_id VARCHAR(100),
request_timestamp DATETIME,
request_id VARCHAR(100),
request_type VARCHAR(100),
request_text VARCHAR(4000),
request_payloads JSON,
request_intent VARCHAR(100),
request_is_adhoc BOOLEAN,
response_type VARCHAR(100),
response_text VARCHAR(4000),
response_payloads JSON,
response_milliseconds INT,
context_is_new BOOLEAN,
context_topic_name TEXT,
context_topic_status TEXT,
context_topic_is_new BOOLEAN,
context_topic_keep_on BOOLEAN,
context_topic_priority INT,
context_error JSON,
request_json JSON,
response_json JSON,
context_json JSON)
""".format(self.table_name),
"write": """
insert into {0} (
channel,
channel_detail,
channel_user_id,
request_timestamp,
request_id,
request_type,
request_text,
request_payloads,
request_intent,
request_is_adhoc,
response_type,
response_text,
response_payloads,
response_milliseconds,
context_is_new,
context_topic_name,
context_topic_status,
context_topic_is_new,
context_topic_keep_on,
context_topic_priority,
context_error,
request_json, response_json, context_json)
values (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
""".format(self.table_name),
}
class MySQLStores(StoreSet):
connection_provider = MySQLConnectionProvider
context_store = MySQLContextStore
user_store = MySQLUserStore
messagelog_store = MySQLMessageLogStore | en | 0.50507 | Connection provider for MySQL Attributes ---------- connection_str : str Connection string connection_params : dict Parameters for connection Parameters ---------- connection_str : str Connection string Get connection Returns ------- connection : Connection Database connection Get parameters for preparing tables Returns ------- prepare_params : tuple or None Parameters for preparing tables Get SQLs used in ContextStore Returns ------- sqls : dict SQLs used in SessionStore Get SQLs used in UserStore Returns ------- sqls : dict SQLs used in UserRepository Get SQLs used in MessageLogStore Returns ------- sqls : dict SQLs used in MessageLogger create table {0} ( id INT PRIMARY KEY AUTO_INCREMENT, channel VARCHAR(20), channel_detail VARCHAR(100), channel_user_id VARCHAR(100), request_timestamp DATETIME, request_id VARCHAR(100), request_type VARCHAR(100), request_text VARCHAR(4000), request_payloads JSON, request_intent VARCHAR(100), request_is_adhoc BOOLEAN, response_type VARCHAR(100), response_text VARCHAR(4000), response_payloads JSON, response_milliseconds INT, context_is_new BOOLEAN, context_topic_name TEXT, context_topic_status TEXT, context_topic_is_new BOOLEAN, context_topic_keep_on BOOLEAN, context_topic_priority INT, context_error JSON, request_json JSON, response_json JSON, context_json JSON) insert into {0} ( channel, channel_detail, channel_user_id, request_timestamp, request_id, request_type, request_text, request_payloads, request_intent, request_is_adhoc, response_type, response_text, response_payloads, response_milliseconds, context_is_new, context_topic_name, context_topic_status, context_topic_is_new, context_topic_keep_on, context_topic_priority, context_error, request_json, response_json, context_json) values ( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) | 2.622166 | 3 |
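# A minimal usage sketch, not from the original mysqlstores.py above. The
# semicolon-separated connection string keys map straight to MySQLdb connect()
# arguments; the credentials below are placeholders and a reachable MySQL server
# with the mysqlclient package installed is required.
connection_str = "host=127.0.0.1;user=minette;passwd=secret;db=minette"
provider = MySQLConnectionProvider(connection_str)
with provider.get_connection() as connection:
    with connection.cursor() as cursor:     # DictCursor, per connection_params above
        cursor.execute("select 1 as ok")
        print(cursor.fetchone())            # {'ok': 1}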
toolkit/plsa.py | addgene/Research | 7 | 6630578 | import pandas as pd
import os, argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--inputfile',
type=str,
help='File with raw sequencing reads')
parser.add_argument('--column',
type=int,
help='Column for which data has to be plotted')
parser.add_argument('--outputfile',
type=str,
default='read_frac_plot.pdf',
help='Path to outputfile')
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
inputfile = args.inputfile
fig1,ax1 = plt.subplots()
input_df = pd.read_table(inputfile, sep=',')
c = args.column-1
    col_sum = float(sum(input_df.iloc[:, c]))
    input_df['read_frac'] = [x/col_sum for x in input_df.iloc[:, c]]
input_df = input_df.sort_values(by='read_frac',ascending=False)
input_df['Cumulative_sum'] = np.cumsum(input_df.read_frac)
x_axis = [x/float(len(input_df)) for x in range(0,len(input_df))]
y_axis = list(input_df.Cumulative_sum)
ax1.plot(x_axis,y_axis,linewidth=1)
ax1.set_xlim(0.0,1.0)
ax1.set_ylim(0.0,1.0)
auc = metrics.auc(x_axis,y_axis)
ax1.text(0.6,0.2,'AUC = '+str(round(auc,2)),fontsize=14,fontweight='bold')
ax1.tick_params(axis='both',labelsize=14,)
ax1.set_xlabel('sgRNAs ranked by abundance',fontsize=14,fontweight='bold')
ax1.set_ylabel('Cumulative fraction of total represented',fontsize=14,fontweight='bold')
fig1.savefig(args.outputfile,format='pdf')
| import pandas as pd
import os, argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--inputfile',
type=str,
help='File with raw sequencing reads')
parser.add_argument('--column',
type=int,
help='Column for which data has to be plotted')
parser.add_argument('--outputfile',
type=str,
default='read_frac_plot.pdf',
help='Path to outputfile')
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
inputfile = args.inputfile
fig1,ax1 = plt.subplots()
input_df = pd.read_table(inputfile, sep=',')
c = args.column-1
    col_sum = float(sum(input_df.iloc[:, c]))
    input_df['read_frac'] = [x/col_sum for x in input_df.iloc[:, c]]
input_df = input_df.sort_values(by='read_frac',ascending=False)
input_df['Cumulative_sum'] = np.cumsum(input_df.read_frac)
x_axis = [x/float(len(input_df)) for x in range(0,len(input_df))]
y_axis = list(input_df.Cumulative_sum)
ax1.plot(x_axis,y_axis,linewidth=1)
ax1.set_xlim(0.0,1.0)
ax1.set_ylim(0.0,1.0)
auc = metrics.auc(x_axis,y_axis)
ax1.text(0.6,0.2,'AUC = '+str(round(auc,2)),fontsize=14,fontweight='bold')
ax1.tick_params(axis='both',labelsize=14,)
ax1.set_xlabel('sgRNAs ranked by abundance',fontsize=14,fontweight='bold')
ax1.set_ylabel('Cumulative fraction of total represented',fontsize=14,fontweight='bold')
fig1.savefig(args.outputfile,format='pdf')
| none | 1 | 3.005572 | 3 |
|
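# A toy illustration, not from the original script above, of the same computation:
# rank read counts, take the cumulative fraction represented, and summarise the
# curve with its AUC. The counts are made up; a perfectly even library gives a
# near-diagonal curve with AUC close to 0.5, while skew pushes the AUC toward 1.
import numpy as np
from sklearn import metrics
counts = np.array([500.0, 300.0, 120.0, 50.0, 20.0, 10.0])
read_frac = np.sort(counts / counts.sum())[::-1]   # fractions ranked by abundance
cumulative = np.cumsum(read_frac)
x = np.arange(len(counts)) / float(len(counts))
print(round(metrics.auc(x, cumulative), 3))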
server.py | JeffreyUrban/pi-relay-controller | 0 | 6630579 | <gh_stars>0
# Raspberry Pi Relay Controller
from __future__ import print_function
import sys
import time
import json
from flask import Flask
from flask import make_response
from flask import render_template
from flask_bootstrap import Bootstrap
from relay_lib import *
from digital_in_lib import *
error_msg = '{msg:"error"}'
success_msg = '{msg:"success"}'
# Initialize these from channels.json
RELAY_PORTS = {}
root_dir = '/home/pi/pi-relay-controller'
with open('{}/channels.json'.format(root_dir)) as json_file:
channel_config = json.load(json_file)
RELAY_PORTS = {ch['channel']: ch['pin'] for ch in channel_config['channels'] if ch['type'] == "relay"}
DIGITAL_IN_PORTS = [ch['pin'] for ch in channel_config['channels'] if ch['type'] == "digital-in"]
RELAY_NAME = 'Generic Relay Controller'
# initialize the relay library with the system's port configuration
relay_control = RelayControl(RELAY_PORTS)
digital_in_control = DigitalInControl(DIGITAL_IN_PORTS)
app = Flask(__name__)
bootstrap = Bootstrap(app)
@app.route('/')
def index():
print("Loading app Main page")
return render_template('index.html', relay_name=RELAY_NAME, channel_info=channel_config['channels'])
@app.route('/state/<int:digital_in>')
def api_get_state(digital_in):
res = digital_in_control.input_get_state(digital_in)
if res:
print("State is HIGH")
return make_response("1", 200)
else:
print("State is LOW")
return make_response("0", 200)
@app.route('/status/<int:relay>')
def api_get_status(relay):
res = relay_control.relay_get_port_status(relay)
if res:
print("Relay is ON")
return make_response("1", 200)
else:
print("Relay is OFF")
return make_response("0", 200)
@app.route('/toggle/<int:relay>')
def api_toggle_relay(relay):
print("Executing api_relay_toggle:", relay)
relay_control.relay_toggle_port(relay)
return make_response(success_msg, 200)
@app.route('/on/<int:relay>')
def api_relay_on(relay):
print("Executing api_relay_on:", relay)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.route('/off/<int:relay>')
def api_relay_off(relay):
print("Executing api_relay_off:", relay)
relay_control.relay_off(relay)
return make_response(success_msg, 200)
@app.route('/all_toggle/')
def api_relay_all_toggle():
print("Executing api_relay_all_toggle")
relay_control.relay_toggle_all_port()
return make_response(success_msg, 200)
@app.route('/all_on/')
def api_relay_all_on():
print("Executing api_relay_all_on")
relay_control.relay_all_on()
return make_response(success_msg, 200)
@app.route('/all_off/')
def api_all_relay_off():
print("Executing api_relay_all_off")
relay_control.relay_all_off()
return make_response(success_msg, 200)
@app.route('/reboot/<int:relay>')
def api_relay_reboot(relay, sleep_time=3):
print("Executing api_relay_reboot:", relay)
relay_control.relay_off(relay)
time.sleep(sleep_time)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.errorhandler(404)
def page_not_found(e):
print("ERROR: 404")
return render_template('404.html', the_error=e), 404
@app.errorhandler(500)
def internal_server_error(e):
print("ERROR: 500")
return render_template('500.html', the_error=e), 500
if __name__ == "__main__":
# On the Pi, you need to run the app using this command to make sure it
# listens for requests outside of the device.
app.run(host='0.0.0.0', port=8080)
| # Raspberry Pi Relay Controller
from __future__ import print_function
import sys
import time
import json
from flask import Flask
from flask import make_response
from flask import render_template
from flask_bootstrap import Bootstrap
from relay_lib import *
from digital_in_lib import *
error_msg = '{msg:"error"}'
success_msg = '{msg:"success"}'
# Initialize these from channels.json
RELAY_PORTS = {}
root_dir = '/home/pi/pi-relay-controller'
with open('{}/channels.json'.format(root_dir)) as json_file:
channel_config = json.load(json_file)
RELAY_PORTS = {ch['channel']: ch['pin'] for ch in channel_config['channels'] if ch['type'] == "relay"}
DIGITAL_IN_PORTS = [ch['pin'] for ch in channel_config['channels'] if ch['type'] == "digital-in"]
RELAY_NAME = 'Generic Relay Controller'
# initialize the relay library with the system's port configuration
relay_control = RelayControl(RELAY_PORTS)
digital_in_control = DigitalInControl(DIGITAL_IN_PORTS)
app = Flask(__name__)
bootstrap = Bootstrap(app)
@app.route('/')
def index():
print("Loading app Main page")
return render_template('index.html', relay_name=RELAY_NAME, channel_info=channel_config['channels'])
@app.route('/state/<int:digital_in>')
def api_get_state(digital_in):
res = digital_in_control.input_get_state(digital_in)
if res:
print("State is HIGH")
return make_response("1", 200)
else:
print("State is LOW")
return make_response("0", 200)
@app.route('/status/<int:relay>')
def api_get_status(relay):
res = relay_control.relay_get_port_status(relay)
if res:
print("Relay is ON")
return make_response("1", 200)
else:
print("Relay is OFF")
return make_response("0", 200)
@app.route('/toggle/<int:relay>')
def api_toggle_relay(relay):
print("Executing api_relay_toggle:", relay)
relay_control.relay_toggle_port(relay)
return make_response(success_msg, 200)
@app.route('/on/<int:relay>')
def api_relay_on(relay):
print("Executing api_relay_on:", relay)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.route('/off/<int:relay>')
def api_relay_off(relay):
print("Executing api_relay_off:", relay)
relay_control.relay_off(relay)
return make_response(success_msg, 200)
@app.route('/all_toggle/')
def api_relay_all_toggle():
print("Executing api_relay_all_toggle")
relay_control.relay_toggle_all_port()
return make_response(success_msg, 200)
@app.route('/all_on/')
def api_relay_all_on():
print("Executing api_relay_all_on")
relay_control.relay_all_on()
return make_response(success_msg, 200)
@app.route('/all_off/')
def api_all_relay_off():
print("Executing api_relay_all_off")
relay_control.relay_all_off()
return make_response(success_msg, 200)
@app.route('/reboot/<int:relay>')
def api_relay_reboot(relay, sleep_time=3):
print("Executing api_relay_reboot:", relay)
relay_control.relay_off(relay)
time.sleep(sleep_time)
relay_control.relay_on(relay)
return make_response(success_msg, 200)
@app.errorhandler(404)
def page_not_found(e):
print("ERROR: 404")
return render_template('404.html', the_error=e), 404
@app.errorhandler(500)
def internal_server_error(e):
print("ERROR: 500")
return render_template('500.html', the_error=e), 500
if __name__ == "__main__":
# On the Pi, you need to run the app using this command to make sure it
# listens for requests outside of the device.
app.run(host='0.0.0.0', port=8080) | en | 0.861314 | # Raspberry Pi Relay Controller # Initialize these from channels.json # initialize the relay library with the system's port configuration # On the Pi, you need to run the app using this command to make sure it # listens for requests outside of the device. | 2.778015 | 3 |
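# A client-side sketch, not part of the original server above, showing the routes
# being driven over HTTP with the requests library. The base address is an assumed
# placeholder for the Pi, and the Flask app must already be running on port 8080.
import requests
BASE = "http://raspberrypi.local:8080"          # assumed Pi address
print(requests.get(f"{BASE}/status/1").text)    # "0" or "1" for relay channel 1
requests.get(f"{BASE}/on/1")                    # switch relay 1 on
requests.get(f"{BASE}/reboot/2")                # off, wait 3 s, back on (api_relay_reboot)
print(requests.get(f"{BASE}/state/17").text)    # digital input state; channel value comes from channels.json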
experimental/roofit_expert.py | jjacob/DailyPythonScripts | 0 | 6630580 | from __future__ import division
from optparse import OptionParser
from math import sqrt
import sys
# rootpy
from rootpy.io import File
from ROOT import RooFit, RooRealVar, RooDataHist, RooArgList, RooHistPdf, RooArgSet, RooAddPdf
from ROOT import RooChi2Var, RooFormulaVar, RooMinuit, TCanvas, RooPlot, RooGaussian, RooProdPdf, RooLinkedList
from config.variable_binning import variable_bins_ROOT
from tools.Calculation import decombine_result
from uncertainties import ufloat
from config import XSectionConfig
from config.summations_common import b_tag_summations
# copied from 01_get_fit_results.py
def get_histogram(input_file, histogram_path, b_tag_bin=''):
b_tag_bin_sum_rules = b_tag_summations
histogram = None
if b_tag_bin in b_tag_bin_sum_rules.keys(): # summing needed
b_tag_bins_to_sum = b_tag_bin_sum_rules[b_tag_bin]
histogram = input_file.Get(histogram_path + '_' + b_tag_bins_to_sum[0]).Clone()
for bin_i in b_tag_bins_to_sum[1:]:
histogram += input_file.Get(histogram_path + '_' + bin_i)
else:
if b_tag_bin == '':
histogram = input_file.Get(histogram_path)
else:
histogram = input_file.Get(histogram_path + '_' + b_tag_bin)
return histogram.Clone()
def get_histograms(channel, input_files, variable, met_type, variable_bin, b_tag_bin, rebin=1):
global b_tag_bin_VJets
global electron_control_region, muon_control_region
histograms = {}
if not variable in measurement_config.histogram_path_templates.keys():
print 'Fatal Error: unknown variable ', variable
sys.exit()
abs_eta = ''
abs_eta_data = ''
abs_eta_template = measurement_config.histogram_path_templates[variable]
if variable == 'HT':
abs_eta = abs_eta_template % (analysis_type[channel], variable_bin, channel)
abs_eta_data = abs_eta
else:
if measurement_config.centre_of_mass == 8:
abs_eta = abs_eta_template % (analysis_type[channel], met_type, variable_bin, channel)
else: # hot fix for 2011 data. Needs reprocessing for nicer paths
lepton = channel.title()
abs_eta = abs_eta_template % (analysis_type[channel], lepton, met_type, variable_bin, channel)
if 'JetRes' in met_type:
abs_eta_data = abs_eta.replace('JetResDown', '')
abs_eta_data = abs_eta_data.replace('JetResUp', '')
if 'patPFMet' in met_type:
abs_eta = abs_eta.replace('patPFMet', 'PFMET')
else:
abs_eta_data = abs_eta
for sample, file_name in input_files.iteritems():
h_abs_eta = None
if sample == 'data':
h_abs_eta = get_histogram(file_name, abs_eta_data, b_tag_bin)
elif sample == 'V+Jets':
# extracting the V+Jets template from its specific b-tag bin (>=0 by default) and scaling it to analysis b-tag bin
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta_VJets_specific_b_tag_bin = get_histogram(file_name, abs_eta, b_tag_bin_VJets)
try:
h_abs_eta_VJets_specific_b_tag_bin.Scale(h_abs_eta.Integral() / h_abs_eta_VJets_specific_b_tag_bin.Integral())
h_abs_eta = h_abs_eta_VJets_specific_b_tag_bin
except:
print 'WARNING: V+Jets template from ' + str(file_name) + ', histogram ' + abs_eta + ' in ' + b_tag_bin_VJets + \
' b-tag bin is empty. Using central bin (' + b_tag_bin + '), integral = ' + str(h_abs_eta.Integral())
else:
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta.Rebin(rebin)
histograms[sample] = h_abs_eta
if channel == 'electron':
global electron_QCD_MC_file
h_abs_eta_mc = get_histogram(electron_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
# data-driven QCD template extracted from all-inclusive eta distributions
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Electron/electron_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', electron_control_region)
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
electron_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
electron_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if electron_QCD_normalisation_factor == 0:
electron_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
# scaling to 10% of data
electron_QCD_normalisation_factor = 0.1 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(electron_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
if channel == 'muon':
# data-driven QCD template extracted from all-inclusive eta distributions
global muon_QCD_file, muon_QCD_MC_file
h_abs_eta_mc = get_histogram(muon_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Muon/muon_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', muon_control_region)
# abs_eta = measurement_config.special_muon_histogram
# h_abs_eta = get_histogram(muon_QCD_file, abs_eta, '')
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
muon_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
muon_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if muon_QCD_normalisation_factor == 0:
muon_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
muon_QCD_normalisation_factor = 0.05 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(muon_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
return histograms
def get_fitted_normalisation_from_ROOT(channel, input_files, variable, met_type, b_tag_bin):
results = {}
initial_values = {}
templates = {}
for variable_bin in variable_bins_ROOT[variable]:
histograms = get_histograms(channel,
input_files,
variable=variable,
met_type=met_type,
variable_bin=variable_bin,
b_tag_bin=b_tag_bin,
rebin=measurement_config.rebin
)
# create signal histograms
h_eta_signal = histograms['TTJet'] + histograms['SingleTop']
N_ttbar_before_fit = histograms['TTJet'].Integral()
N_SingleTop_before_fit = histograms['SingleTop'].Integral()
N_vjets_before_fit = histograms['V+Jets'].Integral()
N_qcd_before_fit = histograms['QCD'].Integral()
N_signal_before_fit = N_ttbar_before_fit + N_SingleTop_before_fit
N_ttbar_error_before_fit = sum(histograms['TTJet'].errors())
N_SingleTop_error_before_fit = sum(histograms['SingleTop'].errors())
N_vjets_error_before_fit = sum(histograms['V+Jets'].errors())
N_QCD_error_before_fit = sum(histograms['QCD'].errors())
if (N_SingleTop_before_fit != 0):
TTJet_SingleTop_ratio = N_ttbar_before_fit / N_SingleTop_before_fit
else:
print 'Bin ', variable_bin, ': ttbar/singleTop ratio undefined for %s channel! Setting to 0.' % channel
TTJet_SingleTop_ratio = 0
leptonAbsEta = RooRealVar("leptonAbsEta", "leptonAbsEta", 0., 2.4)
# this has to move to tools/Fitting.py
vars = RooArgList()
vars.add(leptonAbsEta)
vars_set = RooArgSet()
vars_set.add(leptonAbsEta)
n_event_obs = histograms['data'].Integral()
lowerBound = 0.
upperBound = n_event_obs + 10 * sqrt(n_event_obs)
n_init = n_event_obs / 2.
data = RooDataHist("data", "dataset with leptonAbsEta", vars, histograms['data'])
rh_vj = RooDataHist("rh_vj", "vj", vars, histograms['V+Jets'])
rh_qcd = RooDataHist("rh_qcd", "qcd", vars, histograms['QCD'])
rh_signal = RooDataHist("rh_signal", "signal", vars, h_eta_signal)
pdf_vj = RooHistPdf ("pdf_vj", "V+Jets pdf", vars_set, rh_vj, 0)
pdf_qcd = RooHistPdf("pdf_qcd", "QCD pdf ", vars_set, rh_qcd, 0)
pdf_signal = RooHistPdf("pdf_signal", "single top pdf", vars_set, rh_signal, 0)
# RooRealVar(const char *name, const char *title, Double_t value, Double_t minValue, Double_t maxValue, const char *unit) :
nSignal = RooRealVar("nSignal", "number of single top + ttbar events", N_signal_before_fit, lowerBound, upperBound, "event")
nvj = RooRealVar ("nvj", "number of V+Jets bgnd events", N_vjets_before_fit, lowerBound, upperBound, "event")
        nqcd = RooRealVar("nqcd", "number of QCD bgnd events", N_qcd_before_fit, lowerBound, upperBound, "event")
model = RooAddPdf("model", "sig+vj+qcd",
RooArgList(pdf_signal, pdf_vj, pdf_qcd),
RooArgList(nSignal, nvj, nqcd)
)
vj_constraint = RooGaussian("nvj_constraint", "nvj_constraint", nvj, RooFit.RooConst(N_vjets_before_fit), RooFit.RooConst(0.5 * N_vjets_before_fit))
qcd_constraint = RooGaussian("nqcd_constraint", "nqcd_constraint", nqcd, RooFit.RooConst(N_qcd_before_fit), RooFit.RooConst(2 * N_qcd_before_fit))
model_with_constraints = RooProdPdf("model_with_constraints", "model with gaussian constraints",
RooArgSet(model, vj_constraint, qcd_constraint), RooLinkedList())
model_with_constraints.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad")) #WARNING: number of cores changes the results!!!
# nll = model.createNLL(data, RooFit.NumCPU(2))
# RooMinuit(nll).migrad()
# frame1 = nSignal.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nSignal"))
# nll.plotOn(frame1, RooFit.ShiftToZero())
# frame2 = nvj.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nvj"))
# nll.plotOn(frame2, RooFit.ShiftToZero())
# frame3 = nqcd.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nqcd"))
# nll.plotOn(frame3, RooFit.ShiftToZero())
#
# pll_nSignal = nll.createProfile(nSignal)
# pll_nSignal.plotOn(frame1, RooFit.LineColor(2))
# frame1.SetMinimum(0)
# frame1.SetMaximum(3)
#
# pll_nvj = nll.createProfile(nvj)
# pll_nvj.plotOn(frame2, RooFit.LineColor(2))
# frame2.SetMinimum(0)
# frame2.SetMaximum(3)
#
# pll_nqcd = nll.createProfile(nqcd)
# pll_nqcd.plotOn(frame3, RooFit.LineColor(2))
# frame3.SetMinimum(0)
# frame3.SetMaximum(3)
# c = TCanvas("profilell","profilell",1200, 400)
# c.Divide(3)
# c.cd(1)
# frame1.Draw()
# c.cd(2)
# frame2.Draw()
# c.cd(3)
# frame3.Draw()
# c.SaveAs('profileLL.png')
# model.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad"), RooFit.NumCPU(1))#WARNING: number of cores changes the results!!!
fit_results = {}
fit_results['signal'] = (nSignal.getVal(), nSignal.getError())
fit_results['QCD'] = ufloat(nqcd.getVal(), nqcd.getError())
fit_results['V+Jets'] = ufloat(nvj.getVal(), nvj.getError())
N_ttbar, N_SingleTop = decombine_result(fit_results['signal'], TTJet_SingleTop_ratio)
fit_results['signal'] = ufloat(nSignal.getVal(), nSignal.getError())
fit_results['TTJet'] = ufloat(N_ttbar)
fit_results['SingleTop'] = ufloat(N_SingleTop)
if results == {}: # empty
for sample in fit_results.keys():
results[sample] = [fit_results[sample]]
else:
for sample in fit_results.keys():
results[sample].append(fit_results[sample])
return results, None, None
if __name__ == '__main__':
# setup
parser = OptionParser()
parser.add_option("-p", "--path", dest="path", default='data',
help="set output path for JSON files")
parser.add_option("-v", "--variable", dest="variable", default='MET',
help="set the variable to analyse (MET, HT, ST, MT)")
parser.add_option("-b", "--bjetbin", dest="bjetbin", default='2m',
help="set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("--bjetbin-vjets", dest="bjetbin_VJets", default='0m',
help="set b-jet multiplicity for V+Jets samples. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("-m", "--metType", dest="metType", default='type1',
help="set MET type for analysis of MET, ST or MT")
parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=8, type=int,
help="set the centre of mass energy for analysis. Default = 8 [TeV]")
(options, args) = parser.parse_args()
measurement_config = XSectionConfig(options.CoM)
# caching of variables for shorter access
ttbar_theory_systematic_prefix = measurement_config.ttbar_theory_systematic_prefix
vjets_theory_systematic_prefix = measurement_config.vjets_theory_systematic_prefix
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = measurement_config.met_systematics_suffixes
analysis_types = measurement_config.analysis_types
translate_options = measurement_config.translate_options
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = met_systematics_suffixes
analysis_type = analysis_types
variable = options.variable
met_type = translate_options[options.metType]
b_tag_bin = translate_options[options.bjetbin]
b_tag_bin_VJets = translate_options[options.bjetbin_VJets]
path_to_files = measurement_config.path_to_files
output_path = options.path
# possible options:
# --continue : continue from saved - skips ROOT files, reads from JSON?
# get data from histograms or JSON files
# data and muon_QCD file with SFs are the same for central measurement and all systematics
data_file_electron = File(measurement_config.data_file_electron)
data_file_muon = File(measurement_config.data_file_muon)
muon_QCD_file = File(measurement_config.muon_QCD_file)
SingleTop_file = File(measurement_config.SingleTop_file)
muon_QCD_MC_file = File(measurement_config.muon_QCD_MC_file)
electron_QCD_MC_file = File(measurement_config.electron_QCD_MC_file)
TTJet_file = File(measurement_config.ttbar_category_templates['central'])
VJets_file = File(measurement_config.VJets_category_templates['central'])
electron_control_region = measurement_config.electron_control_region
muon_control_region = measurement_config.muon_control_region
input_files = {
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
}
fit_results_electron, initial_values_electron, templates_electron = get_fitted_normalisation_from_ROOT('electron',
input_files={
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
},
variable=variable,
met_type=met_type,
b_tag_bin=b_tag_bin,
)
print 'TTJet:', fit_results_electron['TTJet']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['TTJet']))
print
print 'SingleTop:', fit_results_electron['SingleTop']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['SingleTop']))
print
print 'V+Jets:', fit_results_electron['V+Jets']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['V+Jets']))
print
print 'QCD:', fit_results_electron['QCD']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['QCD']))
| from __future__ import division
from optparse import OptionParser
from math import sqrt
import sys
# rootpy
from rootpy.io import File
from ROOT import RooFit, RooRealVar, RooDataHist, RooArgList, RooHistPdf, RooArgSet, RooAddPdf
from ROOT import RooChi2Var, RooFormulaVar, RooMinuit, TCanvas, RooPlot, RooGaussian, RooProdPdf, RooLinkedList
from config.variable_binning import variable_bins_ROOT
from tools.Calculation import decombine_result
from uncertainties import ufloat
from config import XSectionConfig
from config.summations_common import b_tag_summations
# copied from 01_get_fit_results.py
def get_histogram(input_file, histogram_path, b_tag_bin=''):
b_tag_bin_sum_rules = b_tag_summations
histogram = None
if b_tag_bin in b_tag_bin_sum_rules.keys(): # summing needed
b_tag_bins_to_sum = b_tag_bin_sum_rules[b_tag_bin]
histogram = input_file.Get(histogram_path + '_' + b_tag_bins_to_sum[0]).Clone()
for bin_i in b_tag_bins_to_sum[1:]:
histogram += input_file.Get(histogram_path + '_' + bin_i)
else:
if b_tag_bin == '':
histogram = input_file.Get(histogram_path)
else:
histogram = input_file.Get(histogram_path + '_' + b_tag_bin)
return histogram.Clone()
def get_histograms(channel, input_files, variable, met_type, variable_bin, b_tag_bin, rebin=1):
global b_tag_bin_VJets
global electron_control_region, muon_control_region
histograms = {}
if not variable in measurement_config.histogram_path_templates.keys():
print 'Fatal Error: unknown variable ', variable
sys.exit()
abs_eta = ''
abs_eta_data = ''
abs_eta_template = measurement_config.histogram_path_templates[variable]
if variable == 'HT':
abs_eta = abs_eta_template % (analysis_type[channel], variable_bin, channel)
abs_eta_data = abs_eta
else:
if measurement_config.centre_of_mass == 8:
abs_eta = abs_eta_template % (analysis_type[channel], met_type, variable_bin, channel)
else: # hot fix for 2011 data. Needs reprocessing for nicer paths
lepton = channel.title()
abs_eta = abs_eta_template % (analysis_type[channel], lepton, met_type, variable_bin, channel)
if 'JetRes' in met_type:
abs_eta_data = abs_eta.replace('JetResDown', '')
abs_eta_data = abs_eta_data.replace('JetResUp', '')
if 'patPFMet' in met_type:
abs_eta = abs_eta.replace('patPFMet', 'PFMET')
else:
abs_eta_data = abs_eta
for sample, file_name in input_files.iteritems():
h_abs_eta = None
if sample == 'data':
h_abs_eta = get_histogram(file_name, abs_eta_data, b_tag_bin)
elif sample == 'V+Jets':
# extracting the V+Jets template from its specific b-tag bin (>=0 by default) and scaling it to analysis b-tag bin
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta_VJets_specific_b_tag_bin = get_histogram(file_name, abs_eta, b_tag_bin_VJets)
try:
h_abs_eta_VJets_specific_b_tag_bin.Scale(h_abs_eta.Integral() / h_abs_eta_VJets_specific_b_tag_bin.Integral())
h_abs_eta = h_abs_eta_VJets_specific_b_tag_bin
except:
print 'WARNING: V+Jets template from ' + str(file_name) + ', histogram ' + abs_eta + ' in ' + b_tag_bin_VJets + \
' b-tag bin is empty. Using central bin (' + b_tag_bin + '), integral = ' + str(h_abs_eta.Integral())
else:
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta.Rebin(rebin)
histograms[sample] = h_abs_eta
if channel == 'electron':
global electron_QCD_MC_file
h_abs_eta_mc = get_histogram(electron_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
# data-driven QCD template extracted from all-inclusive eta distributions
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Electron/electron_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', electron_control_region)
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
electron_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
electron_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if electron_QCD_normalisation_factor == 0:
electron_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
# scaling to 10% of data
electron_QCD_normalisation_factor = 0.1 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(electron_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
if channel == 'muon':
# data-driven QCD template extracted from all-inclusive eta distributions
global muon_QCD_file, muon_QCD_MC_file
h_abs_eta_mc = get_histogram(muon_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Muon/muon_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', muon_control_region)
# abs_eta = measurement_config.special_muon_histogram
# h_abs_eta = get_histogram(muon_QCD_file, abs_eta, '')
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
muon_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
muon_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if muon_QCD_normalisation_factor == 0:
muon_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
muon_QCD_normalisation_factor = 0.05 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(muon_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
return histograms
def get_fitted_normalisation_from_ROOT(channel, input_files, variable, met_type, b_tag_bin):
results = {}
initial_values = {}
templates = {}
for variable_bin in variable_bins_ROOT[variable]:
histograms = get_histograms(channel,
input_files,
variable=variable,
met_type=met_type,
variable_bin=variable_bin,
b_tag_bin=b_tag_bin,
rebin=measurement_config.rebin
)
# create signal histograms
h_eta_signal = histograms['TTJet'] + histograms['SingleTop']
N_ttbar_before_fit = histograms['TTJet'].Integral()
N_SingleTop_before_fit = histograms['SingleTop'].Integral()
N_vjets_before_fit = histograms['V+Jets'].Integral()
N_qcd_before_fit = histograms['QCD'].Integral()
N_signal_before_fit = N_ttbar_before_fit + N_SingleTop_before_fit
N_ttbar_error_before_fit = sum(histograms['TTJet'].errors())
N_SingleTop_error_before_fit = sum(histograms['SingleTop'].errors())
N_vjets_error_before_fit = sum(histograms['V+Jets'].errors())
N_QCD_error_before_fit = sum(histograms['QCD'].errors())
if (N_SingleTop_before_fit != 0):
TTJet_SingleTop_ratio = N_ttbar_before_fit / N_SingleTop_before_fit
else:
print 'Bin ', variable_bin, ': ttbar/singleTop ratio undefined for %s channel! Setting to 0.' % channel
TTJet_SingleTop_ratio = 0
leptonAbsEta = RooRealVar("leptonAbsEta", "leptonAbsEta", 0., 2.4)
# this has to move to tools/Fitting.py
vars = RooArgList()
vars.add(leptonAbsEta)
vars_set = RooArgSet()
vars_set.add(leptonAbsEta)
n_event_obs = histograms['data'].Integral()
lowerBound = 0.
upperBound = n_event_obs + 10 * sqrt(n_event_obs)
n_init = n_event_obs / 2.
data = RooDataHist("data", "dataset with leptonAbsEta", vars, histograms['data'])
rh_vj = RooDataHist("rh_vj", "vj", vars, histograms['V+Jets'])
rh_qcd = RooDataHist("rh_qcd", "qcd", vars, histograms['QCD'])
rh_signal = RooDataHist("rh_signal", "signal", vars, h_eta_signal)
pdf_vj = RooHistPdf ("pdf_vj", "V+Jets pdf", vars_set, rh_vj, 0)
pdf_qcd = RooHistPdf("pdf_qcd", "QCD pdf ", vars_set, rh_qcd, 0)
pdf_signal = RooHistPdf("pdf_signal", "single top pdf", vars_set, rh_signal, 0)
# RooRealVar(const char *name, const char *title, Double_t value, Double_t minValue, Double_t maxValue, const char *unit) :
nSignal = RooRealVar("nSignal", "number of single top + ttbar events", N_signal_before_fit, lowerBound, upperBound, "event")
nvj = RooRealVar ("nvj", "number of V+Jets bgnd events", N_vjets_before_fit, lowerBound, upperBound, "event")
nqcd = RooRealVar("nqcd", "number of QCD bgnd events", N_qcd_before_fit, lowerBound, upperBound, "event")
model = RooAddPdf("model", "sig+vj+qcd",
RooArgList(pdf_signal, pdf_vj, pdf_qcd),
RooArgList(nSignal, nvj, nqcd)
)
vj_constraint = RooGaussian("nvj_constraint", "nvj_constraint", nvj, RooFit.RooConst(N_vjets_before_fit), RooFit.RooConst(0.5 * N_vjets_before_fit))
qcd_constraint = RooGaussian("nqcd_constraint", "nqcd_constraint", nqcd, RooFit.RooConst(N_qcd_before_fit), RooFit.RooConst(2 * N_qcd_before_fit))
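# Gaussian constraints on the background yields: nvj is tied to its pre-fit estimate with a
# width of 0.5 * N_vjets_before_fit, while nqcd gets a width of 2 * N_qcd_before_fit, so the
# QCD normalisation is only loosely constrained in the fit below.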
model_with_constraints = RooProdPdf("model_with_constraints", "model with gaussian constraints",
RooArgSet(model, vj_constraint, qcd_constraint), RooLinkedList())
model_with_constraints.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad")) #WARNING: number of cores changes the results!!!
# nll = model.createNLL(data, RooFit.NumCPU(2))
# RooMinuit(nll).migrad()
# frame1 = nSignal.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nSignal"))
# nll.plotOn(frame1, RooFit.ShiftToZero())
# frame2 = nvj.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nvj"))
# nll.plotOn(frame2, RooFit.ShiftToZero())
# frame3 = nqcd.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nqcd"))
# nll.plotOn(frame3, RooFit.ShiftToZero())
#
# pll_nSignal = nll.createProfile(nSignal)
# pll_nSignal.plotOn(frame1, RooFit.LineColor(2))
# frame1.SetMinimum(0)
# frame1.SetMaximum(3)
#
# pll_nvj = nll.createProfile(nvj)
# pll_nvj.plotOn(frame2, RooFit.LineColor(2))
# frame2.SetMinimum(0)
# frame2.SetMaximum(3)
#
# pll_nqcd = nll.createProfile(nqcd)
# pll_nqcd.plotOn(frame3, RooFit.LineColor(2))
# frame3.SetMinimum(0)
# frame3.SetMaximum(3)
# c = TCanvas("profilell","profilell",1200, 400)
# c.Divide(3)
# c.cd(1)
# frame1.Draw()
# c.cd(2)
# frame2.Draw()
# c.cd(3)
# frame3.Draw()
# c.SaveAs('profileLL.png')
# model.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad"), RooFit.NumCPU(1))#WARNING: number of cores changes the results!!!
fit_results = {}
fit_results['signal'] = (nSignal.getVal(), nSignal.getError())
fit_results['QCD'] = ufloat(nqcd.getVal(), nqcd.getError())
fit_results['V+Jets'] = ufloat(nvj.getVal(), nvj.getError())
N_ttbar, N_SingleTop = decombine_result(fit_results['signal'], TTJet_SingleTop_ratio)
fit_results['signal'] = ufloat(nSignal.getVal(), nSignal.getError())
fit_results['TTJet'] = ufloat(N_ttbar)
fit_results['SingleTop'] = ufloat(N_SingleTop)
if results == {}: # empty
for sample in fit_results.keys():
results[sample] = [fit_results[sample]]
else:
for sample in fit_results.keys():
results[sample].append(fit_results[sample])
return results, None, None
if __name__ == '__main__':
# setup
parser = OptionParser()
parser.add_option("-p", "--path", dest="path", default='data',
help="set output path for JSON files")
parser.add_option("-v", "--variable", dest="variable", default='MET',
help="set the variable to analyse (MET, HT, ST, MT)")
parser.add_option("-b", "--bjetbin", dest="bjetbin", default='2m',
help="set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("--bjetbin-vjets", dest="bjetbin_VJets", default='0m',
help="set b-jet multiplicity for V+Jets samples. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("-m", "--metType", dest="metType", default='type1',
help="set MET type for analysis of MET, ST or MT")
parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=8, type=int,
help="set the centre of mass energy for analysis. Default = 8 [TeV]")
(options, args) = parser.parse_args()
measurement_config = XSectionConfig(options.CoM)
# caching of variables for shorter access
ttbar_theory_systematic_prefix = measurement_config.ttbar_theory_systematic_prefix
vjets_theory_systematic_prefix = measurement_config.vjets_theory_systematic_prefix
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = measurement_config.met_systematics_suffixes
analysis_types = measurement_config.analysis_types
translate_options = measurement_config.translate_options
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = met_systematics_suffixes
analysis_type = analysis_types
variable = options.variable
met_type = translate_options[options.metType]
b_tag_bin = translate_options[options.bjetbin]
b_tag_bin_VJets = translate_options[options.bjetbin_VJets]
path_to_files = measurement_config.path_to_files
output_path = options.path
# possible options:
# --continue : continue from saved - skips ROOT files, reads from JSON?
# get data from histograms or JSON files
# data and muon_QCD file with SFs are the same for central measurement and all systematics
data_file_electron = File(measurement_config.data_file_electron)
data_file_muon = File(measurement_config.data_file_muon)
muon_QCD_file = File(measurement_config.muon_QCD_file)
SingleTop_file = File(measurement_config.SingleTop_file)
muon_QCD_MC_file = File(measurement_config.muon_QCD_MC_file)
electron_QCD_MC_file = File(measurement_config.electron_QCD_MC_file)
TTJet_file = File(measurement_config.ttbar_category_templates['central'])
VJets_file = File(measurement_config.VJets_category_templates['central'])
electron_control_region = measurement_config.electron_control_region
muon_control_region = measurement_config.muon_control_region
input_files = {
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
}
fit_results_electron, initial_values_electron, templates_electron = get_fitted_normalisation_from_ROOT('electron',
input_files={
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
},
variable=variable,
met_type=met_type,
b_tag_bin=b_tag_bin,
)
print 'TTJet:', fit_results_electron['TTJet']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['TTJet']))
print
print 'SingleTop:', fit_results_electron['SingleTop']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['SingleTop']))
print
print 'V+Jets:', fit_results_electron['V+Jets']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['V+Jets']))
print
print 'QCD:', fit_results_electron['QCD']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['QCD']))
| en | 0.45467 | # rootpy # copied from 01_get_fit_results.py # summing needed # hot fix for 2011 data. Needs reprocessing for nicer paths # extracting the V+Jets template from its specific b-tag bin (>=0 by default) and scaling it to analysis b-tag bin # data-driven QCD template extracted from all-inclusive eta distributions # scaling to 10% of data # data-driven QCD template extracted from all-inclusive eta distributions # abs_eta = measurement_config.special_muon_histogram # h_abs_eta = get_histogram(muon_QCD_file, abs_eta, '') # create signal histograms # this has to move to tools/Fitting.py # RooRealVar(const char *name, const char *title, Double_t value, Double_t minValue, Double_t maxValue, const char *unit) : #WARNING: number of cores changes the results!!! # nll = model.createNLL(data, RooFit.NumCPU(2)) # RooMinuit(nll).migrad() # frame1 = nSignal.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nSignal")) # nll.plotOn(frame1, RooFit.ShiftToZero()) # frame2 = nvj.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nvj")) # nll.plotOn(frame2, RooFit.ShiftToZero()) # frame3 = nqcd.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nqcd")) # nll.plotOn(frame3, RooFit.ShiftToZero()) # # pll_nSignal = nll.createProfile(nSignal) # pll_nSignal.plotOn(frame1, RooFit.LineColor(2)) # frame1.SetMinimum(0) # frame1.SetMaximum(3) # # pll_nvj = nll.createProfile(nvj) # pll_nvj.plotOn(frame2, RooFit.LineColor(2)) # frame2.SetMinimum(0) # frame2.SetMaximum(3) # # pll_nqcd = nll.createProfile(nqcd) # pll_nqcd.plotOn(frame3, RooFit.LineColor(2)) # frame3.SetMinimum(0) # frame3.SetMaximum(3) # c = TCanvas("profilell","profilell",1200, 400) # c.Divide(3) # c.cd(1) # frame1.Draw() # c.cd(2) # frame2.Draw() # c.cd(3) # frame3.Draw() # c.SaveAs('profileLL.png') # model.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad"), RooFit.NumCPU(1))#WARNING: number of cores changes the results!!! # empty # setup # caching of variables for shorter access # possible options: # --continue : continue from saved - skips ROOT files, reads from JSON? # get data from histograms or JSON files # data and muon_QCD file with SFs are the same for central measurement and all systematics | 2.040224 | 2 |
var/spack/repos/builtin/packages/r-tidyverse/package.py | kkauder/spack | 2 | 6630581 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTidyverse(RPackage):
"""Easily Install and Load the 'Tidyverse'
The 'tidyverse' is a set of packages that work in harmony because they
share common data representations and 'API' design. This package is
designed to make it easy to install and load multiple 'tidyverse' packages
in a single step. Learn more about the 'tidyverse' at
<https://tidyverse.org>."""
homepage = "http://tidyverse.tidyverse.org/"
url = "https://cloud.r-project.org/src/contrib/tidyverse_1.2.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/tidyverse"
version('1.3.0', sha256='6d8acb81e994f9bef5e4dcf908bcea3786d108adcf982628235b6c8c80f6fe09')
version('1.2.1', sha256='ad67a27bb4e89417a15338fe1a40251a7b5dedba60e9b72637963d3de574c37b')
depends_on('r+X', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.3.0:', type=('build', 'run'))
| en | 0.811442 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Easily Install and Load the 'Tidyverse' The 'tidyverse' is a set of packages that work in harmony because they share common data representations and 'API' design. This package is designed to make it easy to install and load multiple 'tidyverse' packages in a single step. Learn more about the 'tidyverse' at <https://tidyverse.org>. | 1.643444 | 2 |
tests/e2e/workloads/couchbase/test_couchbase_pod_respin.py | keemano/ocs-ci | 0 | 6630582 | import logging
import pytest
from ocs_ci.framework.testlib import E2ETest, workloads, ignore_leftovers
from ocs_ci.helpers.disruption_helpers import Disruptions
from ocs_ci.ocs import flowtest
from ocs_ci.helpers.sanity_helpers import Sanity
log = logging.getLogger(__name__)
@workloads
@ignore_leftovers
@pytest.mark.skip(reason="ocs-ci issue: 4488, cb-example pod readiness probe fail")
class TestCouchBasePodRespin(E2ETest):
"""
Deploy a CouchBase workload using the operator
"""
@pytest.fixture()
def cb_setup(self, couchbase_factory_fixture):
"""
Creates couchbase workload
"""
self.cb = couchbase_factory_fixture(
replicas=3, run_in_bg=True, skip_analyze=True
)
self.sanity_helpers = Sanity()
@pytest.mark.parametrize(
argnames=["pod_name"],
argvalues=[
pytest.param(*["osd"], marks=pytest.mark.polarion_id("OCS-780")),
pytest.param(*["mon"], marks=pytest.mark.polarion_id("OCS-779")),
pytest.param(*["mgr"], marks=pytest.mark.polarion_id("OCS-781")),
pytest.param(*["couchbase"], marks=pytest.mark.polarion_id("OCS-786")),
],
)
def test_run_couchbase_respin_pod(self, cb_setup, pod_name):
log.info(f"Respin Ceph pod {pod_name}")
if pod_name == "couchbase":
self.cb.respin_couchbase_app_pod()
else:
disruption = Disruptions()
disruption.set_resource(resource=f"{pod_name}")
disruption.delete_resource()
bg_handler = flowtest.BackgroundOps()
bg_ops = [self.cb.result]
bg_handler.wait_for_bg_operations(bg_ops, timeout=3600)
self.sanity_helpers.health_check(tries=40)
| en | 0.26387 | Deploy an CouchBase workload using operator Creates couchbase workload | 2.061501 | 2 |
external_apps/docutils-snapshot/test/test_readers/test_python/test_token_parser.py | spreeker/democracygame | 2 | 6630583 | <reponame>spreeker/democracygame
#! /usr/bin/env python
# $Id: test_token_parser.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils/readers/python/moduleparser.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.PythonModuleParserTestSuite()
s.generateTests(totest, testmethod='test_token_parser_rhs')
return s
totest = {}
totest['expressions'] = [
['''a = 1''', '''1'''],
['''a = b = 1''', '''1'''],
['''\
a = (
1 + 2
+ 3
)
''',
'''(1 + 2 + 3)'''],
['''\
a = """\\
line one
line two"""
''',
'''"""\\\nline one\nline two"""'''],
['''a = `1`''', '''`1`'''],
['''a = `1`+`2`''', '''`1` + `2`'''],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
unittest.main(defaultTest='suite') | en | 0.606656 | #! /usr/bin/env python # $Id: test_token_parser.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: <NAME> <<EMAIL>> # Copyright: This module has been placed in the public domain. Tests for docutils/readers/python/moduleparser.py. a = 1 1 a = b = 1 1 \ a = ( 1 + 2 + 3 ) (1 + 2 + 3) \ a = """\\ line one line two""" """\\\nline one\nline two""" a = `1` `1` a = `1`+`2` `1` + `2` | 2.731875 | 3 |
tools/kaldi_to_mace.py | TonyMou/kaldi-onnx | 0 | 6630584 | <reponame>TonyMou/kaldi-onnx
# Copyright 2019 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import sys
import argparse
def save_to_txt(data, shape, out_path):
header = 'utterance-id1 [\n'
with open(out_path, 'w') as f:
f.write(header)
for n in range(shape[0]):
d = data[n, :]
d_str = " ".join(str(x) for x in d)
if n < shape[0] - 1:
d_str = d_str + '\n'
else:
d_str = d_str + ' ]\n'
f.write(d_str)
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, type=str,
help="kaldi data file path")
parser.add_argument("--output", required=True, type=str,
help="mace data file path")
args = parser.parse_args()
return args
def read_kaldi_output(file_path):
data_lines = []
with open(file_path, 'r') as f:
lines = f.readlines()
for l in lines:
if '[' not in l:
tmp = l.split()
if ']' in l:
del tmp[-1]
data_line = [float(x) for x in tmp]
data_lines.append(data_line)
return np.array(data_lines)
def main():
args = get_args()
kaldi_data = read_kaldi_output(args.input)
kaldi_data.astype(np.float32).tofile(args.output)
if __name__ == "__main__":
main()
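# Example invocation (file names are illustrative, not part of the repo):
#   python tools/kaldi_to_mace.py --input nnet_output.txt --output nnet_output.bin
# The input is a Kaldi text-format matrix ("utterance-id [ ... ]"); read_kaldi_output strips
# the brackets and the values are written out as a flat float32 binary for MACE tooling.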
main() | en | 0.849856 | # Copyright 2019 Xiaomi, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Parse commandline. | 2.412524 | 2 |
frille-lang/lib/python3.6/site-packages/thinc/config.py | frillecode/CDS-spring-2021-language | 10 | 6630585 | from __future__ import unicode_literals
import configparser
import json
from pathlib import Path
class Config(dict):
def __init__(self, data=None):
dict.__init__(self)
if data is None:
data = {}
self.update(data)
def interpret_config(self, config):
for section, values in config.items():
parts = section.split(".")
node = self
for part in parts:
node = node.setdefault(part, {})
for key, value in values.items():
node[key] = json.loads(config.get(section, key))
def from_str(self, text):
config = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation())
config.read_string(text)
for key in list(self.keys()):
self.pop(key)
self.interpret_config(config)
return self
def from_bytes(self, byte_string):
return self.from_str(byte_string.decode("utf8"))
def from_disk(self, path):
with Path(path).open("r", encoding="utf8") as file_:
text = file_.read()
return self.from_str(text)
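# Rough usage sketch (values are illustrative): dotted section names become nested dicts and
# values go through json.loads, so strings must be quoted in the config text.
#   cfg = Config().from_str('[model]\nwidth = 128\n\n[model.embed]\nrows = 5000\n')
#   assert cfg["model"]["width"] == 128
#   assert cfg["model"]["embed"]["rows"] == 5000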
| none | 1 | 2.83742 | 3 |
|
iommi/form.py | tltx/iommi | 0 | 6630586 | import re
import warnings
from contextlib import contextmanager
from datetime import datetime
from decimal import (
Decimal,
InvalidOperation,
)
from functools import reduce
from operator import or_
from typing import (
Any,
Callable,
Dict,
List,
Set,
Tuple,
Type,
Union,
Optional
)
from django.db.models import (
Case,
IntegerField,
Model,
Q,
QuerySet,
When,
)
from django.utils.translation import gettext
from iommi.datetime_parsing import (
parse_relative_date,
parse_relative_datetime,
)
from iommi.debug import iommi_debug_on
from tri_declarative import (
class_shortcut,
declarative,
dispatch,
EMPTY,
flatten,
getattr_path,
Namespace,
Refinable,
refinable,
setattr_path,
setdefaults_path,
Shortcut,
with_meta,
)
from tri_struct import Struct
from iommi._db_compat import field_defaults_factory
from iommi._web_compat import (
csrf,
format_html,
HttpResponseRedirect,
render_template,
Template,
URLValidator,
validate_email,
ValidationError,
)
from iommi.action import (
Action,
Actions,
group_actions,
)
from iommi.attrs import Attrs
from iommi.base import (
build_as_view_wrapper,
get_display_name,
items,
MISSING,
capitalize,
values,
)
from iommi.error import Errors
from iommi.evaluate import (
evaluate,
evaluate_strict,
)
from iommi.from_model import (
AutoConfig,
create_members_from_model,
get_search_fields,
member_from_model,
NoRegisteredSearchFieldException,
)
from iommi.member import (
bind_members,
collect_members,
)
from iommi.page import (
Page,
)
from iommi.fragment import (
Fragment,
Header,
)
from iommi.part import (
Part,
request_data,
)
from iommi.traversable import (
EvaluatedRefinable,
evaluated_refinable,
)
from iommi.reinvokable import reinvokable
# Prevent django templates from calling That Which Must Not Be Called
Namespace.do_not_call_in_templates = True
FULL_FORM_FROM_REQUEST = 'full_form_from_request' # pragma: no mutate The string is just to make debugging nice
INITIALS_FROM_GET = 'initials_from_get' # pragma: no mutate The string is just to make debugging nice
@contextmanager
def validation_errors_reported_on(obj):
try:
yield
except ValidationError as e:
for msg in e.messages:
obj.add_error(msg)
def bool_parse(string_value, **_):
s = string_value.lower()
if s in ('1', 'true', 't', 'yes', 'y', 'on'):
return True
elif s in ('0', 'false', 'f', 'no', 'n', 'off'):
return False
else:
raise ValueError('%s is not a valid boolean value' % string_value)
def many_to_many_factory_read_from_instance(field, instance):
return getattr_path(instance, field.attr).all()
def many_to_many_factory_write_to_instance(field, instance, value):
getattr_path(instance, field.attr).set(value)
_field_factory_by_field_type = {}
def register_field_factory(django_field_class, *, shortcut_name=MISSING, factory=MISSING):
assert shortcut_name is not MISSING or factory is not MISSING
if factory is MISSING:
factory = Shortcut(call_target__attribute=shortcut_name)
_field_factory_by_field_type[django_field_class] = factory
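# Illustrative registration (MyTrimmedCharField is a hypothetical model field class):
#   register_field_factory(MyTrimmedCharField, shortcut_name='text')
# maps that Django field type to the Field.text shortcut when members are generated from a model.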
def create_object__post_handler(*, form, **kwargs):
return create_or_edit_object__post_handler(form=form, is_create=True, **kwargs)
def edit_object__post_handler(*, form, **kwargs):
return create_or_edit_object__post_handler(form=form, is_create=False, **kwargs)
def find_unique_prefixes(attributes):
result = set()
for attribute in attributes:
prefix, _, _ = attribute.rpartition('__')
parts = prefix.split('__')
for i in range(len(parts)):
result.add(tuple(parts[:i + 1]))
return ['__'.join(p) for p in sorted(sorted(result), key=len)]
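# For example, attrs ['name', 'address__street', 'address__city__zip'] give
# ['', 'address', 'address__city']: every prefix that needs its own save() call,
# ordered shortest-first so parent objects are saved before nested ones.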
def create_or_edit_object__post_handler(*, form, is_create, **_):
if is_create:
assert form.instance is None
form.instance = evaluate(form.extra.new_instance, **form.iommi_evaluate_parameters())
for field in values(form.fields): # two phase save for creation in django, have to save main object before related stuff
if not field.extra.get('django_related_field', False):
form.apply_field(field=field, instance=form.instance)
with validation_errors_reported_on(form):
form.instance.validate_unique()
if not form.is_valid():
return
if is_create: # two phase save for creation in django...
form.extra.pre_save_all_but_related_fields(instance=form.instance, **form.iommi_evaluate_parameters())
form.instance.save()
form.extra.on_save_all_but_related_fields(instance=form.instance, **form.iommi_evaluate_parameters())
form.apply(form.instance)
if not is_create:
with validation_errors_reported_on(form):
form.instance.validate_unique()
if form.is_valid():
attributes = filter(None, [f.attr for f in form.fields.values()])
form.extra.pre_save(instance=form.instance, **form.iommi_evaluate_parameters())
for prefix in find_unique_prefixes(attributes):
model_object = form.instance
if prefix: # Might be ''
model_object = getattr_path(model_object, prefix)
model_object.save()
form.extra.on_save(instance=form.instance, **form.iommi_evaluate_parameters())
return create_or_edit_object_redirect(is_create, form.extra.redirect_to, form.get_request(), form.extra.redirect, form)
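# Flow of the handler above: apply field values to the instance, run validate_unique, save the
# instance and each related prefix from find_unique_prefixes, fire the extra.pre_save/on_save
# hooks, then redirect via create_or_edit_object_redirect.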
def default_endpoints__config(field: 'Field', **_) -> dict:
return dict(
name=field._name,
)
def default_endpoints__validate(field: 'Field', **_) -> dict:
return dict(
valid=not bool(field._errors),
errors=list(field._errors),
)
def float_parse(string_value: str, **_):
try:
return float(string_value)
except ValueError:
# Acrobatics so we get equal formatting in python 2/3
raise ValueError("could not convert string to float: %s" % string_value)
def int_parse(string_value, **_):
return int(string_value)
def choice_is_valid(field, parsed_data, **_):
return parsed_data in field.choices, f'{parsed_data} not in available choices'
def choice_parse(form, field, string_value):
for c in field.choices:
option = field._choice_to_option_shim(form=form, field=field, choice=c)
if option[1] == string_value:
return option[0]
if string_value in [None, '']:
return None
return string_value
def choice_queryset__is_valid(field, parsed_data, **_):
return field.choices.filter(pk=parsed_data.pk).exists(), f'{", ".join(field.raw_data) if field.is_list else field.raw_data} not in available choices'
def choice_queryset__endpoint_handler(*, form, field, value, page_size=40, **_):
from django.core.paginator import (
EmptyPage,
Paginator,
)
page = int(form.get_request().GET.get('page', 1))
choices = field.extra.filter_and_sort(form=form, field=field, value=value)
try:
paginator = Paginator(choices, page_size)
result = paginator.page(page)
has_more = result.has_next()
except EmptyPage:
result = []
has_more = False
return dict(
results=field.extra.model_from_choices(form, field, result),
page=page,
pagination=dict(
more=has_more,
),
)
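# The payload above (results plus pagination.more) is shaped for a select2-style ajax widget;
# `page` is read from the querystring and page_size defaults to 40 choices per request.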
def choice_queryset__extra__model_from_choices(form, field, choices):
def traverse():
for choice in choices:
option = field._choice_to_option_shim(form=form, field=field, choice=choice)
yield Struct(
id=option[1],
text=option[2],
)
return list(traverse())
def choice_queryset__extra__filter_and_sort(field, value, **_):
if not value:
return field.choices.order_by(*field.search_fields)
q_objects = []
def create_q_objects(suffix):
q_objects.extend([
Q(**{search_field + suffix: value})
for search_field in field.search_fields]
)
create_q_objects(suffix='')
create_q_objects(suffix='__istartswith')
create_q_objects(suffix='__icontains')
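# Rank matches by how exact they are: equality first, then prefix, then substring; the
# Case/When annotation below turns that priority into an integer used for ordering.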
when_clauses = [When(q, then=rank) for rank, q in enumerate(q_objects)]
choices = field.choices.annotate(iommi_ranking=Case(*when_clauses, default=len(q_objects) + 1, output_field=IntegerField()))
return choices.filter(reduce(or_, q_objects)).order_by('iommi_ranking', *field.search_fields)
def choice_queryset__parse(field, string_value, **_):
try:
return field.choices.get(pk=string_value) if string_value else None
except field.model.DoesNotExist as e:
raise ValidationError(str(e))
datetime_iso_formats = [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H',
]
def datetime_parse(string_value, **_):
for iso_format in datetime_iso_formats:
try:
return datetime.strptime(string_value, iso_format)
except ValueError:
pass
result = parse_relative_datetime(string_value)
if result is None:
formats = ', '.join('"%s"' % x for x in datetime_iso_formats)
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now", {formats}, and is not a relative date like "2d" or "2 weeks ago"')
return result
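# Accepted inputs mirror the error message above: ISO-style values such as "2021-05-01 13:45",
# the keyword "now", or relative spellings like "2d" and "2 weeks ago" via parse_relative_datetime.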
def datetime_render_value(value, **_):
return value.strftime(datetime_iso_formats[0]) if value else ''
date_iso_format = '%Y-%m-%d'
def date_parse(string_value, **_):
extra_information = ''
try:
return datetime.strptime(string_value, date_iso_format).date()
except ValueError as e:
if 'out of range' in str(e) or 'unconverted data remains' in str(e):
extra_information = ' (out of range)'
result = parse_relative_date(string_value)
if result is None:
formats = ', '.join('"%s"' % x for x in datetime_iso_formats)
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now", {formats}, and is not a relative date like "2d" or "2 weeks ago"{extra_information}')
return result
def date_render_value(value, **_):
return value.strftime(date_iso_format) if value else ''
time_iso_formats = [
'%H:%M:%S',
'%H:%M',
'%H',
]
def time_parse(string_value, **_):
if string_value.lower() == 'now':
return datetime.now().time()
for time_iso_format in time_iso_formats:
try:
return datetime.strptime(string_value, time_iso_format).time()
except ValueError:
pass
formats = ', '.join('"%s"' % x for x in time_iso_formats)
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now" or {formats}')
def time_render_value(value, **_):
return value.strftime(time_iso_formats[0]) if value else ''
def decimal_parse(string_value, **_):
try:
return Decimal(string_value)
except InvalidOperation:
raise ValidationError(f"Invalid literal for Decimal: '{string_value}'")
def url_parse(string_value, **_):
return URLValidator()(string_value) or string_value
def file_write_to_instance(field, instance, value):
if value:
Field.write_to_instance(field=field, instance=instance, value=value)
def email_parse(string_value, **_):
return validate_email(string_value) or string_value
def phone_number_is_valid(parsed_data, **_):
return re.match(r'^\+\d{1,3}(([ \-])?\(\d+\))?(([ \-])?\d+)+$', parsed_data, re.IGNORECASE), 'Please use format +<country code> (XX) XX XX. Example of US number: +1 (212) 123 4567 or +1 212 123 4567'
def default_input_id(field, **_):
return f'id_{field.iommi_path.replace("/", "__")}'
def file__raw_data(form, field, **_):
request = form.get_request()
if field.iommi_path not in request.FILES:
return None
return request.FILES[field.iommi_path]
def boolean_tristate__parse(string_value, **_):
if not string_value:
return None
return bool_parse(string_value)
def render_fragment(fragment):
if fragment is None:
return ''
return str(fragment)
@with_meta
class Field(Part):
"""
Class that describes a field, i.e. what input controls to render, the label, etc.
See :doc:`Form` for more complete examples.
The life cycle of the data is:
1. `raw_data`: will be set if the corresponding key is present in the HTTP request
2. `parsed_data`: set if parsing is successful, which only happens if the previous step succeeded
3. `value`: set if validation is successful, which only happens if the previous step succeeded
"""
tag: str = EvaluatedRefinable()
attr: str = EvaluatedRefinable()
display_name: str = EvaluatedRefinable()
# raw_data/raw_data contains the strings grabbed directly from the request data
# It is useful that they are evaluated for example when doing file upload. In that case the data is on request.FILES, not request.POST so we can use this to grab it from there
raw_data: str = Refinable() # raw_data is evaluated, but in a special way
parse_empty_string_as_none: bool = EvaluatedRefinable()
# parsed_data/parsed_data contains data that has been interpreted, but not checked for validity or access control
parsed_data: Any = Refinable() # parsed_data is evaluated, but in a special way so gets no EvaluatedRefinable type
initial: Any = Refinable() # initial is evaluated, but in a special way so gets no EvaluatedRefinable type
template: Union[str, Template] = EvaluatedRefinable()
attrs: Attrs = Refinable() # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type
required: bool = EvaluatedRefinable()
input: Fragment = Refinable()
label: Fragment = Refinable()
non_editable_input: Fragment = Refinable()
help: Fragment = Refinable()
is_list: bool = EvaluatedRefinable()
is_boolean: bool = EvaluatedRefinable()
model: Type[Model] = Refinable() # model is evaluated, but in a special way so gets no EvaluatedRefinable type
model_field = Refinable()
model_field_name = Refinable()
editable: bool = EvaluatedRefinable()
strip_input: bool = EvaluatedRefinable()
# choices is evaluated, but in a special way so gets no EvaluatedRefinable type
choices: Callable[..., List[Any]] = Refinable()
choice_to_option: Callable[..., Tuple[Any, str, str, bool]] = Refinable() # deprecated, replaced by the two below:
choice_id_formatter: Callable[..., str] = Refinable()
choice_display_name_formatter: Callable[..., str] = Refinable()
choice_to_optgroup: Optional[Callable[..., Optional[str]]] = Refinable()
# deprecated: the formatters should be able to handle None
empty_choice_tuple: Tuple[Any, str, str, bool] = EvaluatedRefinable()
search_fields = Refinable()
errors: Errors = Refinable()
empty_label: str = EvaluatedRefinable()
@reinvokable
@dispatch(
tag=None,
attr=MISSING,
display_name=MISSING,
attrs__class=EMPTY,
attrs__style=EMPTY,
parse_empty_string_as_none=True,
required=True,
is_list=False,
is_boolean=False,
editable=True,
strip_input=True,
endpoints__config__func=default_endpoints__config,
endpoints__validate__func=default_endpoints__validate,
errors=EMPTY,
label__call_target=Fragment,
label__attrs__for=default_input_id,
help__call_target=Fragment,
input__call_target=Fragment,
input__attrs__id=default_input_id,
input__attrs__name=lambda field, **_: field.iommi_path,
input__extra__placeholder='',
non_editable_input__call_target=Fragment,
non_editable_input__attrs__type=None,
initial=MISSING,
choice_to_optgroup=None,
choice_id_formatter=lambda choice, **_: '%s' % choice,
choice_display_name_formatter=lambda choice, **_: '%s' % choice,
)
def __init__(self, **kwargs):
"""
Note that, in addition to the parameters with the defined behavior below, you can pass in any keyword argument you need yourself, including callables that conform to the protocol, and they will be added and evaluated as members.
All these parameters can be callables, and if they are, will be evaluated with the keyword arguments form and field. The only exceptions are `is_valid` (which gets `form`, `field` and `parsed_data`), `render_value` (which takes `form`, `field` and `value`) and `parse` (which gets `form`, `field`, `string_value`). Example of using a lambda to specify a value:
.. code:: python
Field(attrs__id=lambda form, field: 'my_id_%s' % field._name)
:param after: Set the order of columns, see the `howto <https://docs.iommi.rocks/en/latest/howto.html#how-do-i-change-the-order-of-the-fields>`_ for an example.
:param is_valid: validation function. Should return a tuple of `(bool, reason_for_failure_if_bool_is_false)` or raise ValidationError. Default: `lambda form, field, parsed_data: (True, '')`
:param parse: Parse function. Default just returns the string input unchanged: `lambda form, field, string_value: string_value`. This function can raise `ValueError` or `ValidationError` to produce a field error message.
:param initial: Initial value of the field
:param attr: The attribute path to apply or get the data from. For example using `foo__bar__baz` will result in `your_instance.foo.bar.baz` will be set by the `apply()` function. Defaults to same as name
:param attrs: A dict containing any custom html attributes to be sent to the `input__template`.
:param display_name: The text in the HTML label tag. Default: `capitalize(name).replace('_', ' ')`
:param template: django template filename for the entire row. Normally you shouldn't need to override at this level. Prefer overriding `input__template`, `label__template` or `error__template` as needed.
:param template_string: You can inline a template string here if it's more convenient than creating a file. Default: `None`
:param input__template: Django template filename for the template for just the input control.
:param label__template: Django template filename for the template for just the label tab.
:param required: If the field is a required field. Default: `True`
:param help_text: The help text will be grabbed from the django model if specified and available.
:param editable: Default: `True`
:param strip_input: Runs the input data through standard python .strip() before passing it to the parse function (can NOT be callable). Default: `True`
:param render_value: Render the parsed and validated value into a string. Default just converts to unicode: `lambda form, field, value: unicode(value)`
:param is_list: Interpret request data as a list (can NOT be a callable). Default: `False`
:param read_from_instance: Callback to retrieve value from edited instance. Invoked with parameters field and instance.
:param write_to_instance: Callback to write value to instance. Invoked with parameters field, instance and value.
:param choice_to_option: DEPRECATED: Callback to generate the choice data given a choice value. It will get the keyword arguments `form`, `field` and `choice`. It should return a 4-tuple: `(choice, internal_value, display_name, is_selected)`
:param choice_to_optgroup: Callback to generate the optgroup for the given choice. It will get the keyword argument `choice`. It should return None if the choice should not be grouped.
"""
model_field = kwargs.get('model_field')
if model_field and model_field.remote_field:
kwargs['model'] = model_field.remote_field.model
super(Field, self).__init__(**kwargs)
# value/value_data_list is the final step that contains parsed and valid data
self.value = None
self.non_editable_input = Namespace({
**flatten(self.input),
**self.non_editable_input,
'_name': 'non_editable_input',
})()
self.input = self.input(_name='input')
self.label = self.label(_name='label')
self.help = self.help(_name='help')
self._errors: Set[str] = set()
@property
def form(self):
return self.iommi_evaluate_parameters()['form']
# noinspection PyUnusedLocal
@staticmethod
@refinable
def is_valid(form: 'Form', field: 'Field', parsed_data: Any, **_) -> Tuple[bool, str]:
return True, ''
# noinspection PyUnusedLocal
@staticmethod
@refinable
def parse(form: 'Form', field: 'Field', string_value: str, **_) -> Any:
del form, field
return string_value
@staticmethod
@refinable
def post_validation(form: 'Form', field: 'Field', **_) -> None:
pass
@staticmethod
@refinable
def render_value(form: 'Form', field: 'Field', value: Any) -> str:
if isinstance(value, (list, QuerySet)):
return ', '.join(field.render_value(form=form, field=field, value=v) for v in value)
else:
return f'{value}' if value is not None else ''
# grab help_text from model if applicable
# noinspection PyProtectedMember
@staticmethod
@evaluated_refinable
def help_text(field, **_):
if field.model_field is None:
return ''
return field.model_field.help_text or ''
@staticmethod
@refinable
def read_from_instance(field: 'Field', instance: Any) -> Any:
return getattr_path(instance, field.attr)
@staticmethod
@refinable
def write_to_instance(field: 'Field', instance: Any, value: Any) -> None:
setattr_path(instance, field.attr, value)
def add_error(self, msg):
assert msg
self._errors.add(msg)
self.form._valid = False
def on_bind(self) -> None:
form = self.form
assert form is not None, "Each field needs a form."
form.all_fields[self._name] = self
if self.attr is MISSING:
self.attr = self._name
if self.display_name is MISSING:
self.display_name = get_display_name(self)
self.errors = Errors(parent=self, **self.errors)
if form.editable is False:
self.editable = False
# Not strict evaluate on purpose
self.model = evaluate(self.model, **self.iommi_evaluate_parameters())
self.choices = evaluate_strict(self.choices, **self.iommi_evaluate_parameters())
self.initial = evaluate_strict(self.initial, **self.iommi_evaluate_parameters())
self._read_initial()
if not self.editable:
self.value = self.initial
else:
self._read_raw_data()
self.parsed_data = evaluate_strict(self.parsed_data, **self.iommi_evaluate_parameters())
self._parse()
self._validate()
self.input = self.input.bind(parent=self)
self.label = self.label.bind(parent=self)
if self.label is not None:
assert not self.label.children
self.label.children = dict(text=evaluate_strict(self.display_name, **self.iommi_evaluate_parameters()))
if self.display_name is None:
self.label = None
self.non_editable_input = self.non_editable_input.bind(parent=self)
self.help = self.help.bind(parent=self)
if self.help is not None:
help_text = evaluate_strict(self.help_text, **self.iommi_evaluate_parameters())
self.help.children = dict(text=help_text)
else:
help_text = ''
if not help_text:
# To render cleanly in templates:
self.help = ''
if self.model and self.include:
try:
self.search_fields = get_search_fields(model=self.model)
except NoRegisteredSearchFieldException:
self.search_fields = ['pk']
if iommi_debug_on():
print(f'Warning: falling back to primary key as lookup and sorting on {self._name}. \nTo get rid of this warning and get a nicer lookup and sorting use register_search_fields for model {self.model}.')
def _parse(self):
if self.parsed_data is not None:
return
if self.form.mode is INITIALS_FROM_GET and self.raw_data is None:
return
if self.is_list:
if self.raw_data is not None:
self.parsed_data = [self._parse_raw_value(x) for x in self.raw_data]
else:
self.parsed_data = None
elif self.is_boolean:
self.parsed_data = self._parse_raw_value('0' if self.raw_data is None else self.raw_data)
else:
if self.raw_data == '' and self.parse_empty_string_as_none:
self.parsed_data = None
elif self.raw_data is not None:
self.parsed_data = self._parse_raw_value(self.raw_data)
else:
self.parsed_data = None
def _parse_raw_value(self, raw_data):
with validation_errors_reported_on(self):
try:
return self.parse(form=self.form, field=self, string_value=raw_data)
except ValueError as e:
msg = str(e)
assert msg != ''
self.add_error(msg)
def _validate(self):
form = self.form
if form.mode is INITIALS_FROM_GET and (self.raw_data is None or (self.raw_data == [] and self.is_list)):
self.value = self.initial
return
value = None
if self.is_list:
if self.parsed_data is not None:
value = [self._validate_parsed_data(x) for x in self.parsed_data if x is not None]
else:
if self.parsed_data is not None:
value = self._validate_parsed_data(self.parsed_data)
if not self.errors:
if form.mode is FULL_FORM_FROM_REQUEST and self.required and value in [None, '']:
self.add_error('This field is required')
else:
self.value = value
def _validate_parsed_data(self, value):
is_valid, error = self.is_valid(
form=self.form,
field=self,
parsed_data=value)
if is_valid and not self.errors and self.parsed_data is not None and not self.is_list:
value = self.parsed_data
elif not is_valid and self.form.mode:
if not isinstance(error, set):
error = {error}
for e in error:
self.add_error(e)
return value
def _read_initial(self):
form = self.form
if self.initial is MISSING and self.include and form.instance is not None:
if self.attr:
initial = self.read_from_instance(self, form.instance)
self.initial = initial
if self.initial is MISSING:
self.initial = None
def _read_raw_data(self):
# The client might have refined raw_data. If so evaluate it.
if self.raw_data is not None:
self.raw_data = evaluate_strict(self.raw_data, **self.iommi_evaluate_parameters())
return
# Otherwise get it from the request
form = self.form
if self.is_list:
try:
# django and similar
# noinspection PyUnresolvedReferences
raw_data = form._request_data.getlist(self.iommi_path)
except AttributeError: # pragma: no cover
# werkzeug and similar
raw_data = form._request_data.get(self.iommi_path)
if raw_data and self.strip_input:
raw_data = [x.strip() for x in raw_data]
if raw_data is not None:
self.raw_data = raw_data
else:
self.raw_data = form._request_data.get(self.iommi_path)
if self.raw_data and self.strip_input:
self.raw_data = self.raw_data.strip()
def own_evaluate_parameters(self):
return dict(field=self)
def get_errors(self):
return self._errors
@property
def rendered_value(self):
if self.errors:
return self.raw_data
return self.render_value(form=self.form, field=self, value=self.value)
def _choice_to_option_shim(self, form, field, choice):
if self.choice_to_option is not None:
warnings.warn('Field.choice_to_option is deprecated. It was too complicated and did too much, and has been replaced with choice_id_formatter, choice_display_name_formatter, and choice_is_selected. You normally just want to override choice_display_name_formatter and leave the others as their default.', category=DeprecationWarning)
return self.choice_to_option(form=form, field=field, choice=choice)
if not field.is_list:
is_selected = choice == field.value
else:
is_selected = field.value is not None and choice in field.value
# The legacy structure is `(choice, id, display_name, is_selected)`
return (
choice,
self.choice_id_formatter(choice=choice, **self.iommi_evaluate_parameters()),
self.choice_display_name_formatter(choice=choice, **self.iommi_evaluate_parameters()),
is_selected,
)
@property
def choice_to_options_selected(self):
if self.value is None:
return []
if self.is_list:
return [
self._choice_to_option_shim(form=self.form, field=self, choice=v)
for v in self.value
]
else:
return [self._choice_to_option_shim(form=self.form, field=self, choice=self.value)]
@property
def choice_tuples(self):
result = []
if not self.required and not self.is_list:
result.append(self.empty_choice_tuple + (0,))
for i, choice in enumerate(self.choices):
result.append(self._choice_to_option_shim(form=self.form, field=self, choice=choice) + (i + 1,))
return result
@property
def grouped_choice_tuples(self):
if self.choice_to_optgroup is None:
return [(None, self.choice_tuples)]
else:
groups = []
current_group_name = None
current_group = []
groups.append((current_group_name, current_group))
for choice_tuple in self.choice_tuples:
choice = choice_tuple[0]
group_name = self.choice_to_optgroup(choice=choice, **self.iommi_evaluate_parameters())
if current_group_name != group_name:
current_group_name = group_name
current_group = []
groups.append((current_group_name, current_group))
current_group.append(choice_tuple)
return groups
@classmethod
def from_model(cls, model, model_field_name=None, model_field=None, **kwargs):
return member_from_model(
cls=cls,
model=model,
factory_lookup=_field_factory_by_field_type,
factory_lookup_register_function=register_field_factory,
defaults_factory=field_defaults_factory,
model_field_name=model_field_name,
model_field=model_field,
**kwargs)
@dispatch(
render=EMPTY,
)
def __html__(self, *, render=None):
assert not render
if self.is_boolean:
if 'checked' not in self.input.attrs and self.value:
self.input.attrs.checked = ''
else:
if 'value' not in self.input.attrs:
self.input.attrs.value = self.rendered_value
if not self.editable:
self.non_editable_input.children['text'] = self.rendered_value
self.input = self.non_editable_input
if self.template:
return render_template(self.get_request(), self.template, self.iommi_evaluate_parameters())
return Fragment(
_name=self._name,
tag=self.tag,
attrs=self.attrs,
children=dict(
label=render_fragment(self.label),
input=render_fragment(self.input),
help=render_fragment(self.help),
errors=render_fragment(self.errors),
),
).bind(parent=self._parent).__html__()
@classmethod
@class_shortcut(
input__attrs__type='hidden',
attrs__style__display='none',
)
def hidden(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='text',
)
def text(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__tag='textarea',
input__attrs__type=None,
input__attrs__value=None,
input__children__text=lambda field, **_: field.rendered_value,
input__attrs__readonly=lambda field, **_: True if field.editable is False else None,
)
def textarea(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=int_parse,
)
def integer(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=float_parse,
)
def float(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='password',
)
def password(cls, call_target=None, **kwargs):
return call_target(**kwargs)
# Boolean field. Tries hard to parse a boolean value from its input.
@classmethod
@class_shortcut(
parse=bool_parse,
required=False,
is_boolean=True,
)
def boolean(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
required=True,
is_list=False,
empty_label='---',
is_valid=choice_is_valid,
input__attrs__multiple=lambda field, **_: True if field.is_list else None,
parse=choice_parse,
)
def choice(cls, call_target=None, **kwargs):
"""
Shortcut for a single choice field. If required is False, it will automatically add a first option with the value '' and the title '---'. To override that text, pass in the parameter empty_label.
:param choice_to_option: DEPRECATED. Callable with three arguments: form, field, choice. Converts a choice object to a tuple of (choice, value, label, selected), the last three for the <option> element
"""
assert 'choices' in kwargs, 'To use Field.choice, you must pass the choices list'
setdefaults_path(
kwargs,
empty_choice_tuple=(None, '', kwargs['empty_label'], True),
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute="choice",
choices=[True, False],
choice_id_formatter=lambda choice, **_: 'true' if choice else 'false',
choice_display_name_formatter=lambda choice, **_: 'Yes' if choice else 'No',
parse=boolean_tristate__parse,
required=False,
)
def boolean_tristate(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute="choice",
parse=choice_queryset__parse,
choice_id_formatter=lambda choice, **_: choice.pk,
endpoints__choices__func=choice_queryset__endpoint_handler,
is_valid=choice_queryset__is_valid,
extra__filter_and_sort=choice_queryset__extra__filter_and_sort,
extra__model_from_choices=choice_queryset__extra__model_from_choices,
)
def choice_queryset(cls, choices, call_target=None, **kwargs):
if 'model' not in kwargs:
if isinstance(choices, QuerySet):
kwargs['model'] = choices.model
elif 'model_field' in kwargs:
kwargs['model'] = kwargs['model_field'].remote_field.model
else:
assert False, 'The convenience feature to automatically set the model parameter only works for QuerySet instances, or if you specify model_field'
setdefaults_path(
kwargs,
choices=(lambda form, **_: choices.all()) if isinstance(choices, QuerySet) else choices,  # clone the QuerySet if needed
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice',
is_list=True,
)
def multi_choice(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice_queryset',
is_list=True,
)
def multi_choice_queryset(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice',
input__attrs__id=None,
extra_evaluated__id=default_input_id,
)
def radio(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=datetime_parse,
render_value=datetime_render_value,
)
def datetime(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=date_parse,
render_value=date_render_value,
)
def date(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=time_parse,
render_value=time_render_value,
)
def time(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=decimal_parse,
)
def decimal(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='url',
parse=url_parse,
)
def url(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='file',
raw_data=file__raw_data,
write_to_instance=file_write_to_instance,
)
def file(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='file',
template='iommi/form/image_row.html',
)
def image(cls, call_target=None, **kwargs):
return call_target(**kwargs)
# Shortcut to create a fake input that performs no parsing but is useful to separate sections of a form.
@classmethod
@class_shortcut(
editable=False,
attr=None,
)
def heading(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
editable=False,
attr=None,
)
def info(cls, value, call_target=None, **kwargs):
"""
Shortcut to create an info entry.
"""
setdefaults_path(
kwargs,
initial=value,
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='email',
parse=email_parse,
)
def email(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
is_valid=phone_number_is_valid,
)
def phone_number(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice_queryset',
)
def foreign_key(cls, model_field, model, call_target, **kwargs):
del model
setdefaults_path(
kwargs,
choices=model_field.foreign_related_fields[0].model.objects.all(),
)
return call_target(model_field=model_field, **kwargs)
@classmethod
@class_shortcut(
call_target__attribute='multi_choice_queryset',
)
def many_to_many(cls, call_target, model_field, **kwargs):
setdefaults_path(
kwargs,
choices=model_field.remote_field.model.objects.all(),
read_from_instance=many_to_many_factory_read_from_instance,
write_to_instance=many_to_many_factory_write_to_instance,
extra__django_related_field=True,
)
return call_target(model_field=model_field, **kwargs)
def create_or_edit_object_redirect(is_create, redirect_to, request, redirect, form):
if redirect_to is None:
if is_create:
redirect_to = "../"
else:
redirect_to = "../../" # We guess here that the path ends with '<pk>/edit/' so this should end up at a good place
return redirect(request=request, redirect_to=redirect_to, form=form)
def delete_object__post_handler(form, **_):
instance = form.instance
form.extra.on_delete(instance=form.instance, **form.iommi_evaluate_parameters())
if instance.pk is not None: # Check if already deleted by the callback
instance.delete()
return HttpResponseRedirect('../..')
# noinspection PyUnreachableCode
if False:
# These are needed to make makemessages collect these strings
gettext('create')
gettext('edit')
gettext('delete')
class FormAutoConfig(AutoConfig):
instance = Refinable()
type = Refinable() # one of 'create', 'edit', 'delete'
@declarative(Part, '_fields_dict')
@with_meta
class Form(Part):
"""
Describe a Form. Example:
.. code:: python
class MyForm(Form):
a = Field()
b = Field.email()
form = MyForm().bind(request=request)
You can also create an instance of a form with this syntax if it's more convenient:
.. code:: python
form = Form(
fields=dict(
a=Field(),
b=Field.email(),
),
).bind(request=request)
See tri.declarative docs for more on this dual style of declaration.
In the common case the fields namespace will contain only instances of `Field`, but
iommi actually supports arbitrary `Part`s (except other `Form`s). For example:
.. code:: python
form = Form(
fields = dict(
# Display a and b inside a box
box = html.div(
attrs__class__box=True,
children__a = Field(),
children__b = Field.email()
),
# And c regularly
c = Field()
)
)
To keep the application logic (e.g. validation and post handlers) independent
of minor changes to the layout, after bind the `fields` namespace of the form will contain
only instances of `Field`, keyed by their `_name`, regardless of how deep they are in the
hierarchy. Given the above, an appropriate post_handler would be:
.. code:: python
def post_handler(form, **_):
if not form.is_valid():
return
print(form.fields.a.value, form.fields.b.value, form.fields.c.value)
# And not:
# print(form.fields.box.a.value, form.fields.box.b.value, form.fields.c.value)
"""
actions: Namespace = Refinable()
actions_template: Union[str, Template] = Refinable()
attrs: Attrs = Refinable() # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type
editable: bool = Refinable()
h_tag: Union[Fragment, str] = Refinable() # h_tag is evaluated, but in a special way so gets no EvaluatedRefinable type
title: Fragment = Refinable() # title is evaluated, but in a special way so gets no EvaluatedRefinable type
template: Union[str, Template] = EvaluatedRefinable()
errors: Errors = Refinable()
model: Type[Model] = Refinable() # model is evaluated, but in a special way so gets no EvaluatedRefinable type
member_class: Type[Field] = Refinable()
action_class: Type[Action] = Refinable()
page_class: Type[Page] = Refinable()
class Meta:
member_class = Field
action_class = Action
page_class = Page
@reinvokable
@dispatch(
model=None,
editable=True,
fields=EMPTY,
attrs__action='',
attrs__method='post',
attrs__enctype='multipart/form-data',
actions=EMPTY,
auto=EMPTY,
errors=EMPTY,
h_tag__call_target=Header,
)
def __init__(self, *, instance=None, fields: Dict[str, Field] = None, _fields_dict: Dict[str, Field] = None, actions: Dict[str, Any] = None, model=None, auto=None, title=MISSING, **kwargs):
if auto:
auto = FormAutoConfig(**auto)
assert not _fields_dict, "You can't have an auto generated Form AND a declarative Form at the same time"
assert not model, "You can't use the auto feature and explicitly pass model. Either pass auto__model, or we will set the model for you from auto__instance"
assert not instance, "You can't use the auto feature and explicitly pass instance. Pass auto__instance (None in the create case)"
if auto.model is None:
auto.model = auto.instance.__class__
model, fields = self._from_model(
model=auto.model,
fields=fields,
include=auto.include,
exclude=auto.exclude,
)
instance = auto.instance
if title is MISSING and auto.type is not None:
title = capitalize(gettext('%(crud_type)s %(model_name)s') % dict(
crud_type=gettext(auto.type), model_name=model._meta.verbose_name))
setdefaults_path(
actions,
submit__display_name=gettext('Save') if auto.type == 'edit' else capitalize(gettext(auto.type)),
)
# Submit is special.
# We used to have an automatic submit action button. Now, if something is in
# the actions submit namespace, we assume you want to define it as a primary button (unless you
# explicitly specify otherwise). That way we get no button if you don't explicitly opt
# into it, by either directly defining something inside the submit namespace or using
# Form.edit/delete/...
if 'submit' in actions:
setdefaults_path(
actions,
submit__call_target__attribute='primary'
)
super(Form, self).__init__(model=model, title=title, **kwargs)
assert isinstance(fields, dict)
self.fields = None
self._errors: Set[str] = set()
self._valid = None
self.instance = instance
self.mode = INITIALS_FROM_GET
collect_members(self, name='actions', items=actions, cls=self.get_meta().action_class)
collect_members(self, name='fields', items=fields, items_dict=_fields_dict, cls=self.get_meta().member_class)
def on_bind(self) -> None:
assert self.actions_template
self._valid = None
request = self.get_request()
self._request_data = request_data(request)
self.title = evaluate_strict(self.title, **self.iommi_evaluate_parameters())
if isinstance(self.h_tag, Namespace):
if self.title not in (None, MISSING):
# noinspection PyCallingNonCallable
self.h_tag = self.h_tag(
_name='h_tag',
children__text=capitalize(self.title),
).bind(parent=self)
else:
self.h_tag = ''
else:
self.h_tag = self.h_tag.bind(parent=self)
# Actions have to be bound first because is_target() needs it
bind_members(self, name='actions', cls=Actions)
if self._request_data is not None and self.is_target():
self.mode = FULL_FORM_FROM_REQUEST
self.all_fields = Namespace()
bind_members(self, name='fields', lazy=False)
bind_members(self, name='endpoints')
self.parts = self.fields
self.fields = self.all_fields
del self.all_fields
self.errors = Errors(parent=self, **self.errors)
self.validate()
def own_evaluate_parameters(self):
return dict(form=self)
# property for jinja2 compatibility
@property
def render_actions(self):
assert self._is_bound, 'The form has not been bound. You need to call bind() before you can render it.'
non_grouped_actions, grouped_actions = group_actions(self.actions)
return render_template(
self.get_request(),
self.actions_template,
dict(
actions=self.iommi_bound_members().actions,
non_grouped_actions=non_grouped_actions,
grouped_actions=grouped_actions,
form=self,
))
@classmethod
@dispatch(
fields=EMPTY,
)
def fields_from_model(cls, fields, **kwargs):
return create_members_from_model(
member_class=cls.get_meta().member_class,
member_params_by_member_name=fields,
**kwargs
)
@classmethod
@dispatch(
fields=EMPTY,
)
def _from_model(cls, model, *, fields, include=None, exclude=None):
fields = cls.fields_from_model(model=model, include=include, exclude=exclude, fields=fields)
return model, fields
def is_target(self):
return any(action.is_target() for action in values(self.actions))
def is_valid(self):
"""Is the form valid? Can be called inside forms post_validation hook to determine if the
individual fields were all valid."""
assert self._is_bound, "Is valid can only be called on bound forms"
assert self._valid is not None, "Internal error: Once a form is bound we should know if it is valid or not"
return self._valid
def validate(self):
# When validate is called at the end of bind, self._valid will be either
# False, because a field's add_error was called while binding the fields,
# Or it will still be None. In that latter case set it to True here,
# so that we can call is_valid inside post_validation hook to check if
# everything up until this point was valid.
if self._valid is None:
self._valid = True
for field in values(self.fields):
with validation_errors_reported_on(field):
field.post_validation(**field.iommi_evaluate_parameters())
if self.mode is FULL_FORM_FROM_REQUEST:
with validation_errors_reported_on(self):
self.post_validation(**self.iommi_evaluate_parameters())
return self
@staticmethod
@refinable
def post_validation(form, **_):
pass
def add_error(self, msg):
assert msg
self._errors.add(msg)
self._valid = False
# property for jinja2 compatibility
@property
def render_fields(self):
assert self._is_bound, "the form must be bound, otherwise self.parts will not be defined"
r = []
for part in values(self.parts):
r.append(part.__html__())
# We need to preserve all other GET parameters, so we can e.g. filter in two forms on the same page, and keep sorting after filtering
own_field_paths = {f.iommi_path for f in values(self.fields)}
for k, v in items(self.get_request().GET):
if k not in own_field_paths and not k.startswith('-'):
r.append(format_html('<input type="hidden" name="{}" value="{}" />', k, v))
return format_html('{}\n' * len(r), *r)
@dispatch(
render__call_target=render_template,
)
def __html__(self, *, render=None):
setdefaults_path(
render,
template=self.template,
context=self.iommi_evaluate_parameters().copy(),
)
request = self.get_request()
render.context.update(csrf(request))
return render(request=request)
def apply(self, instance):
"""
Write the new values specified in the form into the instance specified.
"""
assert self.is_valid(), f'errors: {self.get_errors()}'
for field in values(self.fields):
self.apply_field(instance=instance, field=field)
return instance
@staticmethod
def apply_field(instance, field):
if not field.editable:
field.value = field.initial
if field.attr is not None:
field.write_to_instance(field, instance, field.value)
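# Illustrative sketch (not part of the library): apply() is typically called from a
# post handler once the form has validated; `Album` here is a hypothetical model.
#
#   def save_album(form, **_):
#       if form.is_valid():
#           album = form.apply(Album())
#           album.save()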
def get_errors(self):
assert self._is_bound
r = {}
if self._errors:
r['global'] = self._errors
field_errors = {x._name: x.get_errors() for x in values(self.fields) if x.get_errors()}
if field_errors:
r['fields'] = field_errors
return r
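# For example, a form with one global error and one field error would return
# something like this (both values are sets):
#
#   {'global': {'The dates overlap'}, 'fields': {'name': {'This field is required'}}}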
@classmethod
@class_shortcut(
extra__pre_save_all_but_related_fields=lambda **kwargs: None, # pragma: no mutate
extra__on_save_all_but_related_fields=lambda **kwargs: None, # pragma: no mutate
extra__pre_save=lambda **kwargs: None, # pragma: no mutate
extra__on_save=lambda **kwargs: None, # pragma: no mutate
extra__on_delete=lambda **kwargs: None, # pragma: no mutate
extra__redirect=lambda redirect_to, **_: HttpResponseRedirect(redirect_to),
extra__redirect_to=None,
auto=EMPTY,
)
def crud(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
extra__is_create=True,
extra__new_instance=lambda form, **_: form.model(),
actions__submit__post_handler=create_object__post_handler,
auto__type='create',
)
def create(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
extra__is_create=False,
actions__submit__post_handler=edit_object__post_handler,
auto__type='edit',
)
def edit(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
actions__submit__call_target__attribute='delete',
actions__submit__post_handler=delete_object__post_handler,
auto__type='delete',
editable=False,
)
def delete(cls, call_target, **kwargs):
return call_target(**kwargs)
def as_view(self):
return build_as_view_wrapper(self)
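# Illustrative sketch (not part of the library), assuming a hypothetical Album model:
# the crud shortcuts above are typically mounted in urls.py via as_view(), e.g.
#
#   urlpatterns = [
#       path('albums/create/', Form.create(auto__model=Album).as_view()),
#   ]
#
# Form.edit and Form.delete work the same way, given an auto__instance to operate on.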
| import re
import warnings
from contextlib import contextmanager
from datetime import datetime
from decimal import (
Decimal,
InvalidOperation,
)
from functools import reduce
from operator import or_
from typing import (
Any,
Callable,
Dict,
List,
Set,
Tuple,
Type,
Union,
Optional
)
from django.db.models import (
Case,
IntegerField,
Model,
Q,
QuerySet,
When,
)
from django.utils.translation import gettext
from iommi.datetime_parsing import (
parse_relative_date,
parse_relative_datetime,
)
from iommi.debug import iommi_debug_on
from tri_declarative import (
class_shortcut,
declarative,
dispatch,
EMPTY,
flatten,
getattr_path,
Namespace,
Refinable,
refinable,
setattr_path,
setdefaults_path,
Shortcut,
with_meta,
)
from tri_struct import Struct
from iommi._db_compat import field_defaults_factory
from iommi._web_compat import (
csrf,
format_html,
HttpResponseRedirect,
render_template,
Template,
URLValidator,
validate_email,
ValidationError,
)
from iommi.action import (
Action,
Actions,
group_actions,
)
from iommi.attrs import Attrs
from iommi.base import (
build_as_view_wrapper,
get_display_name,
items,
MISSING,
capitalize,
values,
)
from iommi.error import Errors
from iommi.evaluate import (
evaluate,
evaluate_strict,
)
from iommi.from_model import (
AutoConfig,
create_members_from_model,
get_search_fields,
member_from_model,
NoRegisteredSearchFieldException,
)
from iommi.member import (
bind_members,
collect_members,
)
from iommi.page import (
Page,
)
from iommi.fragment import (
Fragment,
Header,
)
from iommi.part import (
Part,
request_data,
)
from iommi.traversable import (
EvaluatedRefinable,
evaluated_refinable,
)
from iommi.reinvokable import reinvokable
# Prevent django templates from calling That Which Must Not Be Called
Namespace.do_not_call_in_templates = True
FULL_FORM_FROM_REQUEST = 'full_form_from_request' # pragma: no mutate The string is just to make debugging nice
INITIALS_FROM_GET = 'initials_from_get' # pragma: no mutate The string is just to make debugging nice
@contextmanager
def validation_errors_reported_on(obj):
try:
yield
except ValidationError as e:
for msg in e.messages:
obj.add_error(msg)
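# Illustrative sketch (not part of the library): any ValidationError raised inside the
# block is turned into error messages on the given form/field instead of propagating.
#
#   with validation_errors_reported_on(field):
#       raise ValidationError('must be positive')
#   # field.get_errors() now contains 'must be positive'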
def bool_parse(string_value, **_):
s = string_value.lower()
if s in ('1', 'true', 't', 'yes', 'y', 'on'):
return True
elif s in ('0', 'false', 'f', 'no', 'n', 'off'):
return False
else:
raise ValueError('%s is not a valid boolean value' % string_value)
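# Examples (illustrative, matching the spellings accepted above):
#
#   bool_parse('YES')   -> True    (matching is case insensitive)
#   bool_parse('off')   -> False
#   bool_parse('maybe')            raises ValueError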
def many_to_many_factory_read_from_instance(field, instance):
return getattr_path(instance, field.attr).all()
def many_to_many_factory_write_to_instance(field, instance, value):
getattr_path(instance, field.attr).set(value)
_field_factory_by_field_type = {}
def register_field_factory(django_field_class, *, shortcut_name=MISSING, factory=MISSING):
assert shortcut_name is not MISSING or factory is not MISSING
if factory is MISSING:
factory = Shortcut(call_target__attribute=shortcut_name)
_field_factory_by_field_type[django_field_class] = factory
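# Illustrative sketch (not part of the library), assuming a hypothetical custom Django
# field class ColorField: map it onto an existing Field shortcut by name so that the
# from_model/auto machinery knows what to build for it.
#
#   register_field_factory(ColorField, shortcut_name='text')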
def create_object__post_handler(*, form, **kwargs):
return create_or_edit_object__post_handler(form=form, is_create=True, **kwargs)
def edit_object__post_handler(*, form, **kwargs):
return create_or_edit_object__post_handler(form=form, is_create=False, **kwargs)
def find_unique_prefixes(attributes):
result = set()
for attribute in attributes:
prefix, _, _ = attribute.rpartition('__')
parts = prefix.split('__')
for i in range(len(parts)):
result.add(tuple(parts[:i + 1]))
return ['__'.join(p) for p in sorted(sorted(result), key=len)]
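# Worked example (illustrative): the attribute paths are reduced to every prefix,
# shortest first, which create_or_edit_object__post_handler below uses to save each
# touched object exactly once.
#
#   find_unique_prefixes(['name', 'address__street', 'address__city'])  -> ['', 'address']
#   find_unique_prefixes(['a__b__c'])                                   -> ['a', 'a__b']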
def create_or_edit_object__post_handler(*, form, is_create, **_):
if is_create:
assert form.instance is None
form.instance = evaluate(form.extra.new_instance, **form.iommi_evaluate_parameters())
for field in values(form.fields): # two phase save for creation in django, have to save main object before related stuff
if not field.extra.get('django_related_field', False):
form.apply_field(field=field, instance=form.instance)
with validation_errors_reported_on(form):
form.instance.validate_unique()
if not form.is_valid():
return
if is_create: # two phase save for creation in django...
form.extra.pre_save_all_but_related_fields(instance=form.instance, **form.iommi_evaluate_parameters())
form.instance.save()
form.extra.on_save_all_but_related_fields(instance=form.instance, **form.iommi_evaluate_parameters())
form.apply(form.instance)
if not is_create:
with validation_errors_reported_on(form):
form.instance.validate_unique()
if form.is_valid():
attributes = filter(None, [f.attr for f in form.fields.values()])
form.extra.pre_save(instance=form.instance, **form.iommi_evaluate_parameters())
for prefix in find_unique_prefixes(attributes):
model_object = form.instance
if prefix: # Might be ''
model_object = getattr_path(model_object, prefix)
model_object.save()
form.extra.on_save(instance=form.instance, **form.iommi_evaluate_parameters())
return create_or_edit_object_redirect(is_create, form.extra.redirect_to, form.get_request(), form.extra.redirect, form)
def default_endpoints__config(field: 'Field', **_) -> dict:
return dict(
name=field._name,
)
def default_endpoints__validate(field: 'Field', **_) -> dict:
return dict(
valid=not bool(field._errors),
errors=list(field._errors),
)
def float_parse(string_value: str, **_):
try:
return float(string_value)
except ValueError:
# Acrobatics so we get equal formatting in python 2/3
raise ValueError("could not convert string to float: %s" % string_value)
def int_parse(string_value, **_):
return int(string_value)
def choice_is_valid(field, parsed_data, **_):
return parsed_data in field.choices, f'{parsed_data} not in available choices'
def choice_parse(form, field, string_value):
for c in field.choices:
option = field._choice_to_option_shim(form=form, field=field, choice=c)
if option[1] == string_value:
return option[0]
if string_value in [None, '']:
return None
return string_value
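# Examples (illustrative), assuming the default choice_id_formatter (str) and
# field.choices == ['red', 'green']:
#
#   choice_parse(form, field, 'green')  -> 'green'
#   choice_parse(form, field, '')       -> None
#   choice_parse(form, field, 'blue')   -> 'blue'   (returned as-is; choice_is_valid rejects it later)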
def choice_queryset__is_valid(field, parsed_data, **_):
return field.choices.filter(pk=parsed_data.pk).exists(), f'{", ".join(field.raw_data) if field.is_list else field.raw_data} not in available choices'
def choice_queryset__endpoint_handler(*, form, field, value, page_size=40, **_):
from django.core.paginator import (
EmptyPage,
Paginator,
)
page = int(form.get_request().GET.get('page', 1))
choices = field.extra.filter_and_sort(form=form, field=field, value=value)
try:
paginator = Paginator(choices, page_size)
result = paginator.page(page)
has_more = result.has_next()
except EmptyPage:
result = []
has_more = False
return dict(
results=field.extra.model_from_choices(form, field, result),
page=page,
pagination=dict(
more=has_more,
),
)
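# The endpoint answers ajax choice lookups (e.g. for a select2-style widget) with a
# structure roughly like this (illustrative):
#
#   {
#       'results': [{'id': 42, 'text': 'Dire Straits'}, ...],
#       'page': 1,
#       'pagination': {'more': True},
#   }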
def choice_queryset__extra__model_from_choices(form, field, choices):
def traverse():
for choice in choices:
option = field._choice_to_option_shim(form=form, field=field, choice=choice)
yield Struct(
id=option[1],
text=option[2],
)
return list(traverse())
def choice_queryset__extra__filter_and_sort(field, value, **_):
if not value:
return field.choices.order_by(*field.search_fields)
q_objects = []
def create_q_objects(suffix):
q_objects.extend([
Q(**{search_field + suffix: value})
for search_field in field.search_fields]
)
create_q_objects(suffix='')
create_q_objects(suffix='__istartswith')
create_q_objects(suffix='__icontains')
when_clauses = [When(q, then=rank) for rank, q in enumerate(q_objects)]
choices = field.choices.annotate(iommi_ranking=Case(*when_clauses, default=len(q_objects) + 1, output_field=IntegerField()))
return choices.filter(reduce(or_, q_objects)).order_by('iommi_ranking', *field.search_fields)
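# Illustrative example of the ranking above, assuming field.search_fields == ['name']
# and value == 'bo': exact matches (name='bo') are ranked first, then prefix matches
# (name__istartswith='bo'), then substring matches (name__icontains='bo'); rows that
# match none of the Q objects are filtered out entirely.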
def choice_queryset__parse(field, string_value, **_):
try:
return field.choices.get(pk=string_value) if string_value else None
except field.model.DoesNotExist as e:
raise ValidationError(str(e))
datetime_iso_formats = [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H',
]
def datetime_parse(string_value, **_):
for iso_format in datetime_iso_formats:
try:
return datetime.strptime(string_value, iso_format)
except ValueError:
pass
result = parse_relative_datetime(string_value)
if result is None:
formats = ', '.join('"%s"' % x for x in datetime_iso_formats)
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now", {formats}, and is not a relative date like "2d" or "2 weeks ago"')
return result
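# Examples (illustrative), using the ISO formats listed above; relative inputs such as
# "2d" or "2 weeks ago" are delegated to parse_relative_datetime:
#
#   datetime_parse('2021-05-01 12:30')  -> datetime(2021, 5, 1, 12, 30)
#   datetime_parse('2021-05-01 12')     -> datetime(2021, 5, 1, 12, 0)
#   datetime_parse('not a date')           raises ValidationError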
def datetime_render_value(value, **_):
return value.strftime(datetime_iso_formats[0]) if value else ''
date_iso_format = '%Y-%m-%d'
def date_parse(string_value, **_):
extra_information = ''
try:
return datetime.strptime(string_value, date_iso_format).date()
except ValueError as e:
if 'out of range' in str(e) or 'unconverted data remains' in str(e):
extra_information = ' (out of range)'
result = parse_relative_date(string_value)
if result is None:
formats = '"%s"' % date_iso_format
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now", {formats}, and is not a relative date like "2d" or "2 weeks ago"{extra_information}')
return result
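# Examples (illustrative):
#
#   date_parse('2021-05-01')   -> date(2021, 5, 1)
#   date_parse('2 weeks ago')  -> handled by parse_relative_date
#   date_parse('2021-02-30')      raises ValidationError (flagged as out of range)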
def date_render_value(value, **_):
return value.strftime(date_iso_format) if value else ''
time_iso_formats = [
'%H:%M:%S',
'%H:%M',
'%H',
]
def time_parse(string_value, **_):
if string_value.lower() == 'now':
return datetime.now().time()
for time_iso_format in time_iso_formats:
try:
return datetime.strptime(string_value, time_iso_format).time()
except ValueError:
pass
formats = ', '.join('"%s"' % x for x in time_iso_formats)
raise ValidationError(f'Time data "{string_value}" does not match any of the formats "now" or {formats}')
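# Examples (illustrative):
#
#   time_parse('now')       -> the current time
#   time_parse('13:37')     -> time(13, 37)
#   time_parse('13:37:05')  -> time(13, 37, 5)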
def time_render_value(value, **_):
return value.strftime(time_iso_formats[0]) if value else ''
def decimal_parse(string_value, **_):
try:
return Decimal(string_value)
except InvalidOperation:
raise ValidationError(f"Invalid literal for Decimal: '{string_value}'")
def url_parse(string_value, **_):
return URLValidator()(string_value) or string_value
def file_write_to_instance(field, instance, value):
if value:
Field.write_to_instance(field=field, instance=instance, value=value)
def email_parse(string_value, **_):
return validate_email(string_value) or string_value
def phone_number_is_valid(parsed_data, **_):
return re.match(r'^\+\d{1,3}(([ \-])?\(\d+\))?(([ \-])?\d+)+$', parsed_data, re.IGNORECASE), 'Please use format +<country code> (XX) XX XX. Example of US number: +1 (212) 123 4567 or +1 212 123 4567'
def default_input_id(field, **_):
return f'id_{field.iommi_path.replace("/", "__")}'
def file__raw_data(form, field, **_):
request = form.get_request()
if field.iommi_path not in request.FILES:
return None
return request.FILES[field.iommi_path]
def boolean_tristate__parse(string_value, **_):
if not string_value:
return None
return bool_parse(string_value)
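# Examples (illustrative): the tristate parser maps the empty value to None ("unknown")
# and otherwise falls back to bool_parse:
#
#   boolean_tristate__parse('')      -> None
#   boolean_tristate__parse('true')  -> True
#   boolean_tristate__parse('false') -> False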
def render_fragment(fragment):
if fragment is None:
return ''
return str(fragment)
@with_meta
class Field(Part):
"""
Class that describes a field, i.e. what input controls to render, the label, etc.
See :doc:`Form` for more complete examples.
The life cycle of the data is:
1. `raw_data`: will be set if the corresponding key is present in the HTTP request
2. `parsed_data`: set if parsing is successful, which only happens if the previous step succeeded
3. `value`: set if validation is successful, which only happens if the previous step succeeded
"""
tag: str = EvaluatedRefinable()
attr: str = EvaluatedRefinable()
display_name: str = EvaluatedRefinable()
# raw_data contains the strings grabbed directly from the request data.
# It is useful that it is evaluated, for example when doing file upload: in that case the data is in request.FILES, not request.POST, so we can use this to grab it from there
raw_data: str = Refinable() # raw_data is evaluated, but in a special way
parse_empty_string_as_none: bool = EvaluatedRefinable()
# parsed_data contains data that has been interpreted, but not checked for validity or access control
parsed_data: Any = Refinable() # parsed_data is evaluated, but in a special way so gets no EvaluatedRefinable type
initial: Any = Refinable() # initial is evaluated, but in a special way so gets no EvaluatedRefinable type
template: Union[str, Template] = EvaluatedRefinable()
attrs: Attrs = Refinable() # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type
required: bool = EvaluatedRefinable()
input: Fragment = Refinable()
label: Fragment = Refinable()
non_editable_input: Fragment = Refinable()
help: Fragment = Refinable()
is_list: bool = EvaluatedRefinable()
is_boolean: bool = EvaluatedRefinable()
model: Type[Model] = Refinable() # model is evaluated, but in a special way so gets no EvaluatedRefinable type
model_field = Refinable()
model_field_name = Refinable()
editable: bool = EvaluatedRefinable()
strip_input: bool = EvaluatedRefinable()
# choices is evaluated, but in a special way so gets no EvaluatedRefinable type
choices: Callable[..., List[Any]] = Refinable()
choice_to_option: Callable[..., Tuple[Any, str, str, bool]] = Refinable() # deprecated, replaced by the two below:
choice_id_formatter: Callable[..., str] = Refinable()
choice_display_name_formatter: Callable[..., str] = Refinable()
choice_to_optgroup: Optional[Callable[..., Optional[str]]] = Refinable()
# deprecated: the formatters should be able to handle None
empty_choice_tuple: Tuple[Any, str, str, bool] = EvaluatedRefinable()
search_fields = Refinable()
errors: Errors = Refinable()
empty_label: str = EvaluatedRefinable()
@reinvokable
@dispatch(
tag=None,
attr=MISSING,
display_name=MISSING,
attrs__class=EMPTY,
attrs__style=EMPTY,
parse_empty_string_as_none=True,
required=True,
is_list=False,
is_boolean=False,
editable=True,
strip_input=True,
endpoints__config__func=default_endpoints__config,
endpoints__validate__func=default_endpoints__validate,
errors=EMPTY,
label__call_target=Fragment,
label__attrs__for=default_input_id,
help__call_target=Fragment,
input__call_target=Fragment,
input__attrs__id=default_input_id,
input__attrs__name=lambda field, **_: field.iommi_path,
input__extra__placeholder='',
non_editable_input__call_target=Fragment,
non_editable_input__attrs__type=None,
initial=MISSING,
choice_to_optgroup=None,
choice_id_formatter=lambda choice, **_: '%s' % choice,
choice_display_name_formatter=lambda choice, **_: '%s' % choice,
)
def __init__(self, **kwargs):
"""
Note that, in addition to the parameters with the defined behavior below, you can pass in any keyword argument you need yourself, including callables that conform to the protocol, and they will be added and evaluated as members.
All these parameters can be callables, and if they are, will be evaluated with the keyword arguments form and field. The only exceptions are `is_valid` (which gets `form`, `field` and `parsed_data`), `render_value` (which takes `form`, `field` and `value`) and `parse` (which gets `form`, `field`, `string_value`). Example of using a lambda to specify a value:
.. code:: python
Field(attrs__id=lambda form, field: 'my_id_%s' % field._name)
:param after: Set the order of columns, see the `howto <https://docs.iommi.rocks/en/latest/howto.html#how-do-i-change-the-order-of-the-fields>`_ for an example.
:param is_valid: validation function. Should return a tuple of `(bool, reason_for_failure_if_bool_is_false)` or raise ValidationError. Default: `lambda form, field, parsed_data: (True, '')`
:param parse: Parse function. Default just returns the string input unchanged: `lambda form, field, string_value: string_value`. This function can raise `ValueError` or `ValidationError` to produce a field error message.
:param initial: Initial value of the field
:param attr: The attribute path to apply or get the data from. For example, using `foo__bar__baz` will result in `your_instance.foo.bar.baz` being set by the `apply()` function. Defaults to the same as `name`
:param attrs: A dict containing any custom html attributes to be sent to the `input__template`.
:param display_name: The text in the HTML label tag. Default: `capitalize(name).replace('_', ' ')`
:param template: Django template filename for the entire row. Normally you shouldn't need to override at this level; prefer overriding `input__template`, `label__template` or `error__template` as needed.
:param template_string: You can inline a template string here if it's more convenient than creating a file. Default: `None`
:param input__template: Django template filename for the template for just the input control.
:param label__template: Django template filename for the template for just the label tag.
:param required: If the field is a required field. Default: `True`
:param help_text: The help text will be grabbed from the django model if specified and available.
:param editable: Default: `True`
:param strip_input: Runs the input data through standard python .strip() before passing it to the parse function (can NOT be callable). Default: `True`
:param render_value: Render the parsed and validated value into a string. Default just converts to str: `lambda form, field, value: str(value)`
:param is_list: Interpret request data as a list (can NOT be a callable). Default: `False`
:param read_from_instance: Callback to retrieve value from edited instance. Invoked with parameters field and instance.
:param write_to_instance: Callback to write value to instance. Invoked with parameters field, instance and value.
:param choice_to_option: DEPRECATED: Callback to generate the choice data given a choice value. It will get the keyword arguments `form`, `field` and `choice`. It should return a 4-tuple: `(choice, internal_value, display_name, is_selected)`
:param choice_to_optgroup: Callback to generate the optgroup for the given choice. It will get the keyword argument `choice`. It should return None if the choice should not be grouped.
"""
model_field = kwargs.get('model_field')
if model_field and model_field.remote_field:
kwargs['model'] = model_field.remote_field.model
super(Field, self).__init__(**kwargs)
# value is the final step that contains parsed and valid data
self.value = None
self.non_editable_input = Namespace({
**flatten(self.input),
**self.non_editable_input,
'_name': 'non_editable_input',
})()
self.input = self.input(_name='input')
self.label = self.label(_name='label')
self.help = self.help(_name='help')
self._errors: Set[str] = set()
@property
def form(self):
return self.iommi_evaluate_parameters()['form']
# noinspection PyUnusedLocal
@staticmethod
@refinable
def is_valid(form: 'Form', field: 'Field', parsed_data: Any, **_) -> Tuple[bool, str]:
return True, ''
# noinspection PyUnusedLocal
@staticmethod
@refinable
def parse(form: 'Form', field: 'Field', string_value: str, **_) -> Any:
del form, field
return string_value
@staticmethod
@refinable
def post_validation(form: 'Form', field: 'Field', **_) -> None:
pass
@staticmethod
@refinable
def render_value(form: 'Form', field: 'Field', value: Any) -> str:
if isinstance(value, (list, QuerySet)):
return ', '.join(field.render_value(form=form, field=field, value=v) for v in value)
else:
return f'{value}' if value is not None else ''
# grab help_text from model if applicable
# noinspection PyProtectedMember
@staticmethod
@evaluated_refinable
def help_text(field, **_):
if field.model_field is None:
return ''
return field.model_field.help_text or ''
@staticmethod
@refinable
def read_from_instance(field: 'Field', instance: Any) -> Any:
return getattr_path(instance, field.attr)
@staticmethod
@refinable
def write_to_instance(field: 'Field', instance: Any, value: Any) -> None:
setattr_path(instance, field.attr, value)
def add_error(self, msg):
assert msg
self._errors.add(msg)
self.form._valid = False
def on_bind(self) -> None:
form = self.form
assert form is not None, "Each field needs a form."
form.all_fields[self._name] = self
if self.attr is MISSING:
self.attr = self._name
if self.display_name is MISSING:
self.display_name = get_display_name(self)
self.errors = Errors(parent=self, **self.errors)
if form.editable is False:
self.editable = False
# Not strict evaluate on purpose
self.model = evaluate(self.model, **self.iommi_evaluate_parameters())
self.choices = evaluate_strict(self.choices, **self.iommi_evaluate_parameters())
self.initial = evaluate_strict(self.initial, **self.iommi_evaluate_parameters())
self._read_initial()
if not self.editable:
self.value = self.initial
else:
self._read_raw_data()
self.parsed_data = evaluate_strict(self.parsed_data, **self.iommi_evaluate_parameters())
self._parse()
self._validate()
self.input = self.input.bind(parent=self)
self.label = self.label.bind(parent=self)
if self.label is not None:
assert not self.label.children
self.label.children = dict(text=evaluate_strict(self.display_name, **self.iommi_evaluate_parameters()))
if self.display_name is None:
self.label = None
self.non_editable_input = self.non_editable_input.bind(parent=self)
self.help = self.help.bind(parent=self)
if self.help is not None:
help_text = evaluate_strict(self.help_text, **self.iommi_evaluate_parameters())
self.help.children = dict(text=help_text)
else:
help_text = ''
if not help_text:
# To render cleanly in templates:
self.help = ''
if self.model and self.include:
try:
self.search_fields = get_search_fields(model=self.model)
except NoRegisteredSearchFieldException:
self.search_fields = ['pk']
if iommi_debug_on():
print(f'Warning: falling back to primary key as lookup and sorting on {self._name}. \nTo get rid of this warning and get nicer lookup and sorting, use register_search_fields for model {self.model}.')
def _parse(self):
if self.parsed_data is not None:
return
if self.form.mode is INITIALS_FROM_GET and self.raw_data is None:
return
if self.is_list:
if self.raw_data is not None:
self.parsed_data = [self._parse_raw_value(x) for x in self.raw_data]
else:
self.parsed_data = None
elif self.is_boolean:
self.parsed_data = self._parse_raw_value('0' if self.raw_data is None else self.raw_data)
else:
if self.raw_data == '' and self.parse_empty_string_as_none:
self.parsed_data = None
elif self.raw_data is not None:
self.parsed_data = self._parse_raw_value(self.raw_data)
else:
self.parsed_data = None
def _parse_raw_value(self, raw_data):
with validation_errors_reported_on(self):
try:
return self.parse(form=self.form, field=self, string_value=raw_data)
except ValueError as e:
msg = str(e)
assert msg != ''
self.add_error(msg)
def _validate(self):
form = self.form
if form.mode is INITIALS_FROM_GET and (self.raw_data is None or (self.raw_data == [] and self.is_list)):
self.value = self.initial
return
value = None
if self.is_list:
if self.parsed_data is not None:
value = [self._validate_parsed_data(x) for x in self.parsed_data if x is not None]
else:
if self.parsed_data is not None:
value = self._validate_parsed_data(self.parsed_data)
if not self.errors:
if form.mode is FULL_FORM_FROM_REQUEST and self.required and value in [None, '']:
self.add_error('This field is required')
else:
self.value = value
def _validate_parsed_data(self, value):
is_valid, error = self.is_valid(
form=self.form,
field=self,
parsed_data=value)
if is_valid and not self.errors and self.parsed_data is not None and not self.is_list:
value = self.parsed_data
elif not is_valid and self.form.mode:
if not isinstance(error, set):
error = {error}
for e in error:
self.add_error(e)
return value
def _read_initial(self):
form = self.form
if self.initial is MISSING and self.include and form.instance is not None:
if self.attr:
initial = self.read_from_instance(self, form.instance)
self.initial = initial
if self.initial is MISSING:
self.initial = None
def _read_raw_data(self):
# The client might have refined raw_data. If so evaluate it.
if self.raw_data is not None:
self.raw_data = evaluate_strict(self.raw_data, **self.iommi_evaluate_parameters())
return
# Otherwise get it from the request
form = self.form
if self.is_list:
try:
# django and similar
# noinspection PyUnresolvedReferences
raw_data = form._request_data.getlist(self.iommi_path)
except AttributeError: # pragma: no cover
# werkzeug and similar
raw_data = form._request_data.get(self.iommi_path)
if raw_data and self.strip_input:
raw_data = [x.strip() for x in raw_data]
if raw_data is not None:
self.raw_data = raw_data
else:
self.raw_data = form._request_data.get(self.iommi_path)
if self.raw_data and self.strip_input:
self.raw_data = self.raw_data.strip()
def own_evaluate_parameters(self):
return dict(field=self)
def get_errors(self):
return self._errors
@property
def rendered_value(self):
if self.errors:
return self.raw_data
return self.render_value(form=self.form, field=self, value=self.value)
def _choice_to_option_shim(self, form, field, choice):
if self.choice_to_option is not None:
warnings.warn('Field.choice_to_option is deprecated. It was too complicated and did too much, and has been replaced with choice_id_formatter, choice_display_name_formatter, and choice_is_selected. You normally just want to override choice_display_name_formatter and leave the others as their default.', category=DeprecationWarning)
return self.choice_to_option(form=form, field=field, choice=choice)
if not field.is_list:
is_selected = choice == field.value
else:
is_selected = field.value is not None and choice in field.value
# The legacy structure is `(choice, id, display_name, is_selected)`
return (
choice,
self.choice_id_formatter(choice=choice, **self.iommi_evaluate_parameters()),
self.choice_display_name_formatter(choice=choice, **self.iommi_evaluate_parameters()),
is_selected,
)
@property
def choice_to_options_selected(self):
if self.value is None:
return []
if self.is_list:
return [
self._choice_to_option_shim(form=self.form, field=self, choice=v)
for v in self.value
]
else:
return [self._choice_to_option_shim(form=self.form, field=self, choice=self.value)]
@property
def choice_tuples(self):
result = []
if not self.required and not self.is_list:
result.append(self.empty_choice_tuple + (0,))
for i, choice in enumerate(self.choices):
result.append(self._choice_to_option_shim(form=self.form, field=self, choice=choice) + (i + 1,))
return result
@property
def grouped_choice_tuples(self):
if self.choice_to_optgroup is None:
return [(None, self.choice_tuples)]
else:
groups = []
current_group_name = None
current_group = []
groups.append((current_group_name, current_group))
for choice_tuple in self.choice_tuples:
choice = choice_tuple[0]
group_name = self.choice_to_optgroup(choice=choice, **self.iommi_evaluate_parameters())
if current_group_name != group_name:
current_group_name = group_name
current_group = []
groups.append((current_group_name, current_group))
current_group.append(choice_tuple)
return groups
@classmethod
def from_model(cls, model, model_field_name=None, model_field=None, **kwargs):
return member_from_model(
cls=cls,
model=model,
factory_lookup=_field_factory_by_field_type,
factory_lookup_register_function=register_field_factory,
defaults_factory=field_defaults_factory,
model_field_name=model_field_name,
model_field=model_field,
**kwargs)
@dispatch(
render=EMPTY,
)
def __html__(self, *, render=None):
assert not render
if self.is_boolean:
if 'checked' not in self.input.attrs and self.value:
self.input.attrs.checked = ''
else:
if 'value' not in self.input.attrs:
self.input.attrs.value = self.rendered_value
if not self.editable:
self.non_editable_input.children['text'] = self.rendered_value
self.input = self.non_editable_input
if self.template:
return render_template(self.get_request(), self.template, self.iommi_evaluate_parameters())
return Fragment(
_name=self._name,
tag=self.tag,
attrs=self.attrs,
children=dict(
label=render_fragment(self.label),
input=render_fragment(self.input),
help=render_fragment(self.help),
errors=render_fragment(self.errors),
),
).bind(parent=self._parent).__html__()
@classmethod
@class_shortcut(
input__attrs__type='hidden',
attrs__style__display='none',
)
def hidden(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='text',
)
def text(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__tag='textarea',
input__attrs__type=None,
input__attrs__value=None,
input__children__text=lambda field, **_: field.rendered_value,
input__attrs__readonly=lambda field, **_: True if field.editable is False else None,
)
def textarea(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=int_parse,
)
def integer(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=float_parse,
)
def float(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='password',
)
def password(cls, call_target=None, **kwargs):
return call_target(**kwargs)
# Boolean field. Tries hard to parse a boolean value from its input.
@classmethod
@class_shortcut(
parse=bool_parse,
required=False,
is_boolean=True,
)
def boolean(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
required=True,
is_list=False,
empty_label='---',
is_valid=choice_is_valid,
input__attrs__multiple=lambda field, **_: True if field.is_list else None,
parse=choice_parse,
)
def choice(cls, call_target=None, **kwargs):
"""
Shortcut for a single choice field. If required is False, it will automatically add a first option with the value '' and the title '---'. To override that text, pass in the parameter empty_label.
:param choice_to_option: DEPRECATED. Callable with three arguments: form, field, choice. Converts a choice object to a tuple of (choice, value, label, selected), the last three for the <option> element
"""
assert 'choices' in kwargs, 'To use Field.choice, you must pass the choices list'
setdefaults_path(
kwargs,
empty_choice_tuple=(None, '', kwargs['empty_label'], True),
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute="choice",
choices=[True, False],
choice_id_formatter=lambda choice, **_: 'true' if choice else 'false',
choice_display_name_formatter=lambda choice, **_: 'Yes' if choice else 'No',
parse=boolean_tristate__parse,
required=False,
)
def boolean_tristate(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute="choice",
parse=choice_queryset__parse,
choice_id_formatter=lambda choice, **_: choice.pk,
endpoints__choices__func=choice_queryset__endpoint_handler,
is_valid=choice_queryset__is_valid,
extra__filter_and_sort=choice_queryset__extra__filter_and_sort,
extra__model_from_choices=choice_queryset__extra__model_from_choices,
)
def choice_queryset(cls, choices, call_target=None, **kwargs):
if 'model' not in kwargs:
if isinstance(choices, QuerySet):
kwargs['model'] = choices.model
elif 'model_field' in kwargs:
kwargs['model'] = kwargs['model_field'].remote_field.model
else:
assert False, 'The convenience feature to automatically set the model parameter only works for QuerySet instances, or if you specify model_field'
setdefaults_path(
kwargs,
choices=(lambda form, **_: choices.all()) if isinstance(choices, QuerySet) else choices,  # clone the QuerySet if needed
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice',
is_list=True,
)
def multi_choice(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice_queryset',
is_list=True,
)
def multi_choice_queryset(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice',
input__attrs__id=None,
extra_evaluated__id=default_input_id,
)
def radio(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=datetime_parse,
render_value=datetime_render_value,
)
def datetime(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=date_parse,
render_value=date_render_value,
)
def date(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=time_parse,
render_value=time_render_value,
)
def time(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
parse=decimal_parse,
)
def decimal(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='url',
parse=url_parse,
)
def url(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='file',
raw_data=file__raw_data,
write_to_instance=file_write_to_instance,
)
def file(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='file',
template='iommi/form/image_row.html',
)
def image(cls, call_target=None, **kwargs):
return call_target(**kwargs)
# Shortcut to create a fake input that performs no parsing but is useful to separate sections of a form.
@classmethod
@class_shortcut(
editable=False,
attr=None,
)
def heading(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
editable=False,
attr=None,
)
def info(cls, value, call_target=None, **kwargs):
"""
Shortcut to create an info entry.
"""
setdefaults_path(
kwargs,
initial=value,
)
return call_target(**kwargs)
@classmethod
@class_shortcut(
input__attrs__type='email',
parse=email_parse,
)
def email(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
is_valid=phone_number_is_valid,
)
def phone_number(cls, call_target=None, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='choice_queryset',
)
def foreign_key(cls, model_field, model, call_target, **kwargs):
del model
setdefaults_path(
kwargs,
choices=model_field.foreign_related_fields[0].model.objects.all(),
)
return call_target(model_field=model_field, **kwargs)
@classmethod
@class_shortcut(
call_target__attribute='multi_choice_queryset',
)
def many_to_many(cls, call_target, model_field, **kwargs):
setdefaults_path(
kwargs,
choices=model_field.remote_field.model.objects.all(),
read_from_instance=many_to_many_factory_read_from_instance,
write_to_instance=many_to_many_factory_write_to_instance,
extra__django_related_field=True,
)
return call_target(model_field=model_field, **kwargs)
def create_or_edit_object_redirect(is_create, redirect_to, request, redirect, form):
if redirect_to is None:
if is_create:
redirect_to = "../"
else:
redirect_to = "../../" # We guess here that the path ends with '<pk>/edit/' so this should end up at a good place
return redirect(request=request, redirect_to=redirect_to, form=form)
def delete_object__post_handler(form, **_):
instance = form.instance
form.extra.on_delete(instance=form.instance, **form.iommi_evaluate_parameters())
if instance.pk is not None: # Check if already deleted by the callback
instance.delete()
return HttpResponseRedirect('../..')
# noinspection PyUnreachableCode
if False:
# These are needed to make makemessages collect these strings
gettext('create')
gettext('edit')
gettext('delete')
class FormAutoConfig(AutoConfig):
instance = Refinable()
type = Refinable() # one of 'create', 'edit', 'delete'
@declarative(Part, '_fields_dict')
@with_meta
class Form(Part):
"""
Describe a Form. Example:
.. code:: python
class MyForm(Form):
a = Field()
b = Field.email()
form = MyForm().bind(request=request)
You can also create an instance of a form with this syntax if it's more convenient:
.. code:: python
form = Form(
fields=dict(
a=Field(),
b=Field.email(),
),
).bind(request=request)
See tri.declarative docs for more on this dual style of declaration.
In the common case the fields namespace will contain only instances of `Field`, but
iommi actually supports arbitrary `Part`s (except other `Form`s). For example:
.. code:: python
form = Form(
fields = dict(
# Display a and b inside a box
box = html.div(
attrs__class__box=True,
children__a = Field(),
children__b = Field.email()
),
# And c regularly
c = Field()
)
)
To keep the application logic (e.g. validation and post handlers) independent
of minor changes to the layout, after bind the `fields` namespace of the form will contain
only instances of `Field`, keyed by their `_name`, regardless of how deep they are in the
hierarchy. Given the above, an appropriate post_handler would be:
.. code:: python
def post_handler(form, **_):
if not form.is_valid():
return
print(form.fields.a.value, form.fields.b.value, form.fields.c.value)
# And not:
# print(form.fields.box.a.value, form.fields.box.b.value, form.fields.c.value)
"""
actions: Namespace = Refinable()
actions_template: Union[str, Template] = Refinable()
attrs: Attrs = Refinable() # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type
editable: bool = Refinable()
h_tag: Union[Fragment, str] = Refinable() # h_tag is evaluated, but in a special way so gets no EvaluatedRefinable type
title: Fragment = Refinable() # title is evaluated, but in a special way so gets no EvaluatedRefinable type
template: Union[str, Template] = EvaluatedRefinable()
errors: Errors = Refinable()
model: Type[Model] = Refinable() # model is evaluated, but in a special way so gets no EvaluatedRefinable type
member_class: Type[Field] = Refinable()
action_class: Type[Action] = Refinable()
page_class: Type[Page] = Refinable()
class Meta:
member_class = Field
action_class = Action
page_class = Page
@reinvokable
@dispatch(
model=None,
editable=True,
fields=EMPTY,
attrs__action='',
attrs__method='post',
attrs__enctype='multipart/form-data',
actions=EMPTY,
auto=EMPTY,
errors=EMPTY,
h_tag__call_target=Header,
)
def __init__(self, *, instance=None, fields: Dict[str, Field] = None, _fields_dict: Dict[str, Field] = None, actions: Dict[str, Any] = None, model=None, auto=None, title=MISSING, **kwargs):
if auto:
auto = FormAutoConfig(**auto)
assert not _fields_dict, "You can't have an auto generated Form AND a declarative Form at the same time"
assert not model, "You can't use the auto feature and explicitly pass model. Either pass auto__model, or we will set the model for you from auto__instance"
assert not instance, "You can't use the auto feature and explicitly pass instance. Pass auto__instance (None in the create case)"
if auto.model is None:
auto.model = auto.instance.__class__
model, fields = self._from_model(
model=auto.model,
fields=fields,
include=auto.include,
exclude=auto.exclude,
)
instance = auto.instance
if title is MISSING and auto.type is not None:
title = capitalize(gettext('%(crud_type)s %(model_name)s') % dict(
crud_type=gettext(auto.type), model_name=model._meta.verbose_name))
setdefaults_path(
actions,
submit__display_name=gettext('Save') if auto.type == 'edit' else capitalize(gettext(auto.type)),
)
# Submit is special.
# We used to have an automatic action submit button. Now we instead if something is inj
# the actions submit space assume you want to define it as a primary button (unless you
# explicitely specify differently). That way we get no button if you don't explicitely opt
# into it, by either directly defining something inside the submit namespace or using
# Form.edit/delete/...
if 'submit' in actions:
setdefaults_path(
actions,
submit__call_target__attribute='primary'
)
super(Form, self).__init__(model=model, title=title, **kwargs)
assert isinstance(fields, dict)
self.fields = None
self._errors: Set[str] = set()
self._valid = None
self.instance = instance
self.mode = INITIALS_FROM_GET
collect_members(self, name='actions', items=actions, cls=self.get_meta().action_class)
collect_members(self, name='fields', items=fields, items_dict=_fields_dict, cls=self.get_meta().member_class)
def on_bind(self) -> None:
assert self.actions_template
self._valid = None
request = self.get_request()
self._request_data = request_data(request)
self.title = evaluate_strict(self.title, **self.iommi_evaluate_parameters())
if isinstance(self.h_tag, Namespace):
if self.title not in (None, MISSING):
# noinspection PyCallingNonCallable
self.h_tag = self.h_tag(
_name='h_tag',
children__text=capitalize(self.title),
).bind(parent=self)
else:
self.h_tag = ''
else:
self.h_tag = self.h_tag.bind(parent=self)
# Actions have to be bound first because is_target() needs it
bind_members(self, name='actions', cls=Actions)
if self._request_data is not None and self.is_target():
self.mode = FULL_FORM_FROM_REQUEST
self.all_fields = Namespace()
bind_members(self, name='fields', lazy=False)
bind_members(self, name='endpoints')
self.parts = self.fields
self.fields = self.all_fields
del self.all_fields
self.errors = Errors(parent=self, **self.errors)
self.validate()
def own_evaluate_parameters(self):
return dict(form=self)
# property for jinja2 compatibility
@property
def render_actions(self):
assert self._is_bound, 'The form has not been bound. You need to call bind() before you can render it.'
non_grouped_actions, grouped_actions = group_actions(self.actions)
return render_template(
self.get_request(),
self.actions_template,
dict(
actions=self.iommi_bound_members().actions,
non_grouped_actions=non_grouped_actions,
grouped_actions=grouped_actions,
form=self,
))
@classmethod
@dispatch(
fields=EMPTY,
)
def fields_from_model(cls, fields, **kwargs):
return create_members_from_model(
member_class=cls.get_meta().member_class,
member_params_by_member_name=fields,
**kwargs
)
@classmethod
@dispatch(
fields=EMPTY,
)
def _from_model(cls, model, *, fields, include=None, exclude=None):
fields = cls.fields_from_model(model=model, include=include, exclude=exclude, fields=fields)
return model, fields
def is_target(self):
return any(action.is_target() for action in values(self.actions))
def is_valid(self):
"""Is the form valid? Can be called inside forms post_validation hook to determine if the
individual fields were all valid."""
assert self._is_bound, "Is valid can only be called on bound forms"
assert self._valid is not None, "Internal error: Once a form is bound we should know if it is valid or not"
return self._valid
def validate(self):
# When validate is called at the end of bind, self._valid will be either
# False becaues a field's add_error was called during the fields bind.
# Or it will still be None. In that latter case set it to True here,
# so that we can call is_valid inside post_validation hook to check if
# everything up until this point was valid.
if self._valid is None:
self._valid = True
for field in values(self.fields):
with validation_errors_reported_on(field):
field.post_validation(**field.iommi_evaluate_parameters())
if self.mode is FULL_FORM_FROM_REQUEST:
with validation_errors_reported_on(self):
self.post_validation(**self.iommi_evaluate_parameters())
return self
@staticmethod
@refinable
def post_validation(form, **_):
pass
def add_error(self, msg):
assert msg
self._errors.add(msg)
self._valid = False
# property for jinja2 compatibility
@property
def render_fields(self):
assert self._is_bound, "the form must be bound, otherwise self.parts will not be defined"
r = []
for part in values(self.parts):
r.append(part.__html__())
# We need to preserve all other GET parameters, so we can e.g. filter in two forms on the same page, and keep sorting after filtering
own_field_paths = {f.iommi_path for f in values(self.fields)}
for k, v in items(self.get_request().GET):
if k not in own_field_paths and not k.startswith('-'):
r.append(format_html('<input type="hidden" name="{}" value="{}" />', k, v))
return format_html('{}\n' * len(r), *r)
@dispatch(
render__call_target=render_template,
)
def __html__(self, *, render=None):
setdefaults_path(
render,
template=self.template,
context=self.iommi_evaluate_parameters().copy(),
)
request = self.get_request()
render.context.update(csrf(request))
return render(request=request)
def apply(self, instance):
"""
Write the new values specified in the form into the instance specified.
"""
assert self.is_valid(), f'errors: {self.get_errors()}'
for field in values(self.fields):
self.apply_field(instance=instance, field=field)
return instance
@staticmethod
def apply_field(instance, field):
if not field.editable:
field.value = field.initial
if field.attr is not None:
field.write_to_instance(field, instance, field.value)
def get_errors(self):
assert self._is_bound
r = {}
if self._errors:
r['global'] = self._errors
field_errors = {x._name: x.get_errors() for x in values(self.fields) if x.get_errors()}
if field_errors:
r['fields'] = field_errors
return r
@classmethod
@class_shortcut(
extra__pre_save_all_but_related_fields=lambda **kwargs: None, # pragma: no mutate
extra__on_save_all_but_related_fields=lambda **kwargs: None, # pragma: no mutate
extra__pre_save=lambda **kwargs: None, # pragma: no mutate
extra__on_save=lambda **kwargs: None, # pragma: no mutate
extra__on_delete=lambda **kwargs: None, # pragma: no mutate
extra__redirect=lambda redirect_to, **_: HttpResponseRedirect(redirect_to),
extra__redirect_to=None,
auto=EMPTY,
)
def crud(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
extra__is_create=True,
extra__new_instance=lambda form, **_: form.model(),
actions__submit__post_handler=create_object__post_handler,
auto__type='create',
)
def create(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
extra__is_create=False,
actions__submit__post_handler=edit_object__post_handler,
auto__type='edit',
)
def edit(cls, call_target, **kwargs):
return call_target(**kwargs)
@classmethod
@class_shortcut(
call_target__attribute='crud',
actions__submit__call_target__attribute='delete',
actions__submit__post_handler=delete_object__post_handler,
auto__type='delete',
editable=False,
)
def delete(cls, call_target, **kwargs):
return call_target(**kwargs)
def as_view(self):
return build_as_view_wrapper(self)
| en | 0.722794 | # Prevent django templates from calling That Which Must Not Be Called # pragma: no mutate The string is just to make debugging nice # pragma: no mutate The string is just to make debugging nice # two phase save for creation in django, have to save main object before related stuff # two phase save for creation in django... # Might be '' # Acrobatics so we get equal formatting in python 2/3 Class that describes a field, i.e. what input controls to render, the label, etc. See :doc:`Form` for more complete examples. The life cycle of the data is: 1. `raw_data`: will be set if the corresponding key is present in the HTTP request 2. `parsed_data`: set if parsing is successful, which only happens if the previous step succeeded 3. `value`: set if validation is successful, which only happens if the previous step succeeded # raw_data/raw_data contains the strings grabbed directly from the request data # It is useful that they are evaluated for example when doing file upload. In that case the data is on request.FILES, not request.POST so we can use this to grab it from there # raw_data is evaluated, but in a special way # parsed_data/parsed_data contains data that has been interpreted, but not checked for validity or access control # parsed_data is evaluated, but in a special way so gets no EvaluatedRefinable type # initial is evaluated, but in a special way so gets no EvaluatedRefinable type # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type # model is evaluated, but in a special way so gets no EvaluatedRefinable type # choices is evaluated, but in a special way so gets no EvaluatedRefinable type # deprecated, replaced by the two below: # deprecated: the formatters should be able to handle None Note that, in addition to the parameters with the defined behavior below, you can pass in any keyword argument you need yourself, including callables that conform to the protocol, and they will be added and evaluated as members. All these parameters can be callables, and if they are, will be evaluated with the keyword arguments form and field. The only exceptions are `is_valid` (which gets `form`, `field` and `parsed_data`), `render_value` (which takes `form`, `field` and `value`) and `parse` (which gets `form`, `field`, `string_value`). Example of using a lambda to specify a value: .. code:: python Field(attrs__id=lambda form, field: 'my_id_%s' % field._name) :param after: Set the order of columns, see the `howto <https://docs.iommi.rocks/en/latest/howto.html#how-do-i-change-the-order-of-the-fields>`_ for an example. :param is_valid: validation function. Should return a tuple of `(bool, reason_for_failure_if_bool_is_false)` or raise ValidationError. Default: `lambda form, field, parsed_data: (True, '')` :param parse: Parse function. Default just returns the string input unchanged: `lambda form, field, string_value: string_value`. This function can raise `ValueError` or `ValidationError` to produce a field error message. :param initial: Initial value of the field :param attr: The attribute path to apply or get the data from. For example using `foo__bar__baz` will result in `your_instance.foo.bar.baz` will be set by the `apply()` function. Defaults to same as name :param attrs: A dict containing any custom html attributes to be sent to the `input__template`. :param display_name: The text in the HTML label tag. Default: `capitalize(name).replace('_', ' ')` :param template: django template filename for the entire row. 
Normally you shouldn't need to override on this level. Prefer overriding `input__template`, `label__template` or `error__template` as needed. :param template_string: You can inline a template string here if it's more convenient than creating a file. Default: `None` :param input__template: Django template filename for the template for just the input control. :param label__template: Django template filename for the template for just the label tab. :param required: If the field is a required field. Default: `True` :param help_text: The help text will be grabbed from the django model if specified and available. :param editable: Default: `True` :param strip_input: Runs the input data through standard python .strip() before passing it to the parse function (can NOT be callable). Default: `True` :param render_value: Render the parsed and validated value into a string. Default just converts to unicode: `lambda form, field, value: unicode(value)` :param is_list: Interpret request data as a list (can NOT be a callable). Default: `False`` :param read_from_instance: Callback to retrieve value from edited instance. Invoked with parameters field and instance. :param write_to_instance: Callback to write value to instance. Invoked with parameters field, instance and value. :param choice_to_option: DEPRECATED: Callback to generate the choice data given a choice value. It will get the keyword arguments `form`, `field` and `choice`. It should return a 4-tuple: `(choice, internal_value, display_name, is_selected)` :param choice_to_optgroup Callback to generate the optgroup for the given choice. It will get the keywoard argument `choice`. It should return None if the choice should not be grouped. # value/value_data_list is the final step that contains parsed and valid data # noinspection PyUnusedLocal # noinspection PyUnusedLocal # grab help_text from model if applicable # noinspection PyProtectedMember # Not strict evaluate on purpose # To render cleanly in templates: # The client might have refined raw_data. If so evaluate it. # Otherwise get it from the request # django and similar # noinspection PyUnresolvedReferences # pragma: no cover # werkzeug and similar # The legacy structure is `(choice, id, display_name, is_selected)` # Boolean field. Tries hard to parse a boolean value from its input. Shortcut for single choice field. If required is false it will automatically add an option first with the value '' and the title '---'. To override that text pass in the parameter empty_label. :param choice_to_option: callable with three arguments: form, field, choice. Convert from a choice object to a tuple of (choice, value, label, selected), the last three for the <option> element # clone the QuerySet if needed # Shortcut to create a fake input that performs no parsing but is useful to separate sections of a form. Shortcut to create an info entry. # We guess here that the path ends with '<pk>/edit/' so this should end up at a good place # Check if already deleted by the callback # noinspection PyUnreachableCode # These are needed to make makemessages collect these strings # one of 'create', 'edit', 'delete' Describe a Form. Example: .. code:: python class MyForm(Form): a = Field() b = Field.email() form = MyForm().bind(request=request) You can also create an instance of a form with this syntax if it's more convenient: .. code:: python form = Form( fields=dict( a=Field(), b=Field.email(), ), ).bind(request=request) See tri.declarative docs for more on this dual style of declaration. 
In the common case the fields namespace will contain only instances of `Field`, but iommi actually supports arbitrary `Part`s (except other `Form`s). For example: .. code:: python form = Form( fields = dict( # Display a and b inside a box box = html.div( attrs__class__box=True, children__a = Field(), children__b = Field.email() ), # And c regularly c = Field() ) ) So that writing the application logic (e.g. validation and post handlers) is independent of minor changes to the layout, after bind the `fields` namespace of the form will contain only instances of `Field` keyed by their `_name` independently of how deep they are in the hierarchy. Given the above, an appropriate post_handler would be: .. code:: python def post_handler(form, **_): if not form.is_valid(): return print(form.fields.a.value, form.fields.b.value, form.fields.c.value) # And not: # print(form.fields.box.a.value, form.fields.box.b.value, form.fields.c.value) # attrs is evaluated, but in a special way so gets no EvaluatedRefinable type # h_tag is evaluated, but in a special way so gets no EvaluatedRefinable type # title is evaluated, but in a special way so gets no EvaluatedRefinable type # model is evaluated, but in a special way so gets no EvaluatedRefinable type # Submit is special. # We used to have an automatic action submit button. Now we instead if something is inj # the actions submit space assume you want to define it as a primary button (unless you # explicitely specify differently). That way we get no button if you don't explicitely opt # into it, by either directly defining something inside the submit namespace or using # Form.edit/delete/... # noinspection PyCallingNonCallable # Actions have to be bound first because is_target() needs it # property for jinja2 compatibility Is the form valid? Can be called inside forms post_validation hook to determine if the individual fields were all valid. # When validate is called at the end of bind, self._valid will be either # False becaues a field's add_error was called during the fields bind. # Or it will still be None. In that latter case set it to True here, # so that we can call is_valid inside post_validation hook to check if # everything up until this point was valid. # property for jinja2 compatibility # We need to preserve all other GET parameters, so we can e.g. filter in two forms on the same page, and keep sorting after filtering Write the new values specified in the form into the instance specified. # pragma: no mutate # pragma: no mutate # pragma: no mutate # pragma: no mutate # pragma: no mutate | 1.590903 | 2 |
service_stats/stats/cpu.py | Justintime50/service | 1 | 6630587 | import psutil
class Cpu():
@staticmethod
def serve_data():
"""Serve CPU info
"""
# Title
cpu_title = '='*15 + ' CPU Information ' + '='*15
# Cores
physical_cores = 'Physical cores:' + \
str(psutil.cpu_count(logical=False))
total_cores = 'Total cores:' + str(psutil.cpu_count(logical=True))
# CPU Frequencies
cpufreq = psutil.cpu_freq()
max_freq = f'Max Frequency: {cpufreq.max:.2f}Mhz'
min_freq = f'Min Frequency: {cpufreq.min:.2f}Mhz'
current_freq = f'Current Frequency: {cpufreq.current:.2f}Mhz'
# CPU Usage
usage_message = 'CPU Usage Per Core:'
all_core_percentage = ''
for i, percentage in enumerate(
psutil.cpu_percent(
percpu=True,
interval=1
)
):
core_percentage = f'Core {i}: {percentage}%\n'
# Combine each core into a variable
all_core_percentage += core_percentage
total_usage = f'Total CPU Usage: {psutil.cpu_percent()}%'
final_message = (
'\n' + cpu_title +
'\n' + physical_cores +
'\n' + total_cores +
'\n' + max_freq +
'\n' + min_freq +
'\n' + current_freq +
'\n' + usage_message +
'\n' + all_core_percentage + total_usage
)
return final_message
| import psutil
class Cpu():
@staticmethod
def serve_data():
"""Serve CPU info
"""
# Title
cpu_title = '='*15 + ' CPU Information ' + '='*15
# Cores
physical_cores = 'Physical cores:' + \
str(psutil.cpu_count(logical=False))
total_cores = 'Total cores:' + str(psutil.cpu_count(logical=True))
# CPU Frequencies
cpufreq = psutil.cpu_freq()
max_freq = f'Max Frequency: {cpufreq.max:.2f}Mhz'
min_freq = f'Min Frequency: {cpufreq.min:.2f}Mhz'
current_freq = f'Current Frequency: {cpufreq.current:.2f}Mhz'
# CPU Usage
usage_message = 'CPU Usage Per Core:'
all_core_percentage = ''
for i, percentage in enumerate(
psutil.cpu_percent(
percpu=True,
interval=1
)
):
core_percentage = f'Core {i}: {percentage}%\n'
# Combine each core into a variable
all_core_percentage += core_percentage
total_usage = f'Total CPU Usage: {psutil.cpu_percent()}%'
final_message = (
'\n' + cpu_title +
'\n' + physical_cores +
'\n' + total_cores +
'\n' + max_freq +
'\n' + min_freq +
'\n' + current_freq +
'\n' + usage_message +
'\n' + all_core_percentage + total_usage
)
return final_message
| en | 0.695951 | Serve CPU info # Title # Cores # CPU Frequencies # CPU Usage # Combine each core into a variable | 3.064498 | 3 |
mailchimp_marketing_asyncio/models/subscriber_in_automation_queue2.py | john-parton/mailchimp-asyncio | 0 | 6630588 | <filename>mailchimp_marketing_asyncio/models/subscriber_in_automation_queue2.py
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SubscriberInAutomationQueue2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'workflow_id': 'str',
'email_id': 'str',
'list_id': 'str',
'list_is_active': 'bool',
'email_address': 'str',
'next_send': 'datetime',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'workflow_id': 'workflow_id',
'email_id': 'email_id',
'list_id': 'list_id',
'list_is_active': 'list_is_active',
'email_address': 'email_address',
'next_send': 'next_send',
'links': '_links'
}
def __init__(self, id=None, workflow_id=None, email_id=None, list_id=None, list_is_active=None, email_address=None, next_send=None, links=None): # noqa: E501
"""SubscriberInAutomationQueue2 - a model defined in Swagger""" # noqa: E501
self._id = None
self._workflow_id = None
self._email_id = None
self._list_id = None
self._list_is_active = None
self._email_address = None
self._next_send = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if workflow_id is not None:
self.workflow_id = workflow_id
if email_id is not None:
self.email_id = email_id
if list_id is not None:
self.list_id = list_id
if list_is_active is not None:
self.list_is_active = list_is_active
if email_address is not None:
self.email_address = email_address
if next_send is not None:
self.next_send = next_send
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this SubscriberInAutomationQueue2. # noqa: E501
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:return: The id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubscriberInAutomationQueue2.
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:param id: The id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._id = id
@property
def workflow_id(self):
"""Gets the workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an Automation workflow. # noqa: E501
:return: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._workflow_id
@workflow_id.setter
def workflow_id(self, workflow_id):
"""Sets the workflow_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an Automation workflow. # noqa: E501
:param workflow_id: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._workflow_id = workflow_id
@property
def email_id(self):
"""Gets the email_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:return: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""Sets the email_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:param email_id: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_id = email_id
@property
def list_id(self):
"""Gets the list_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies a list. # noqa: E501
:return: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies a list. # noqa: E501
:param list_id: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def list_is_active(self):
"""Gets the list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:return: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this SubscriberInAutomationQueue2.
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def email_address(self):
"""Gets the email_address of this SubscriberInAutomationQueue2. # noqa: E501
The list member's email address. # noqa: E501
:return: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this SubscriberInAutomationQueue2.
The list member's email address. # noqa: E501
:param email_address: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def next_send(self):
"""Gets the next_send of this SubscriberInAutomationQueue2. # noqa: E501
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:return: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: datetime
"""
return self._next_send
@next_send.setter
def next_send(self, next_send):
"""Sets the next_send of this SubscriberInAutomationQueue2.
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:param next_send: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:type: datetime
"""
self._next_send = next_send
@property
def links(self):
"""Gets the links of this SubscriberInAutomationQueue2. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this SubscriberInAutomationQueue2.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this SubscriberInAutomationQueue2. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SubscriberInAutomationQueue2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubscriberInAutomationQueue2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| <filename>mailchimp_marketing_asyncio/models/subscriber_in_automation_queue2.py
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SubscriberInAutomationQueue2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'workflow_id': 'str',
'email_id': 'str',
'list_id': 'str',
'list_is_active': 'bool',
'email_address': 'str',
'next_send': 'datetime',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'workflow_id': 'workflow_id',
'email_id': 'email_id',
'list_id': 'list_id',
'list_is_active': 'list_is_active',
'email_address': 'email_address',
'next_send': 'next_send',
'links': '_links'
}
def __init__(self, id=None, workflow_id=None, email_id=None, list_id=None, list_is_active=None, email_address=None, next_send=None, links=None): # noqa: E501
"""SubscriberInAutomationQueue2 - a model defined in Swagger""" # noqa: E501
self._id = None
self._workflow_id = None
self._email_id = None
self._list_id = None
self._list_is_active = None
self._email_address = None
self._next_send = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if workflow_id is not None:
self.workflow_id = workflow_id
if email_id is not None:
self.email_id = email_id
if list_id is not None:
self.list_id = list_id
if list_is_active is not None:
self.list_is_active = list_is_active
if email_address is not None:
self.email_address = email_address
if next_send is not None:
self.next_send = next_send
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this SubscriberInAutomationQueue2. # noqa: E501
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:return: The id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubscriberInAutomationQueue2.
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:param id: The id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._id = id
@property
def workflow_id(self):
"""Gets the workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an Automation workflow. # noqa: E501
:return: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._workflow_id
@workflow_id.setter
def workflow_id(self, workflow_id):
"""Sets the workflow_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an Automation workflow. # noqa: E501
:param workflow_id: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._workflow_id = workflow_id
@property
def email_id(self):
"""Gets the email_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:return: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""Sets the email_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:param email_id: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_id = email_id
@property
def list_id(self):
"""Gets the list_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies a list. # noqa: E501
:return: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies a list. # noqa: E501
:param list_id: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def list_is_active(self):
"""Gets the list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:return: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this SubscriberInAutomationQueue2.
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def email_address(self):
"""Gets the email_address of this SubscriberInAutomationQueue2. # noqa: E501
The list member's email address. # noqa: E501
:return: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this SubscriberInAutomationQueue2.
The list member's email address. # noqa: E501
:param email_address: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def next_send(self):
"""Gets the next_send of this SubscriberInAutomationQueue2. # noqa: E501
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:return: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: datetime
"""
return self._next_send
@next_send.setter
def next_send(self, next_send):
"""Sets the next_send of this SubscriberInAutomationQueue2.
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:param next_send: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:type: datetime
"""
self._next_send = next_send
@property
def links(self):
"""Gets the links of this SubscriberInAutomationQueue2. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this SubscriberInAutomationQueue2.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this SubscriberInAutomationQueue2. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SubscriberInAutomationQueue2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubscriberInAutomationQueue2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.614132 | # coding: utf-8 Mailchimp Marketing API No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: 3.0.74 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 SubscriberInAutomationQueue2 - a model defined in Swagger # noqa: E501 Gets the id of this SubscriberInAutomationQueue2. # noqa: E501 The MD5 hash of the lowercase version of the list member's email address. # noqa: E501 :return: The id of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: str Sets the id of this SubscriberInAutomationQueue2. The MD5 hash of the lowercase version of the list member's email address. # noqa: E501 :param id: The id of this SubscriberInAutomationQueue2. # noqa: E501 :type: str Gets the workflow_id of this SubscriberInAutomationQueue2. # noqa: E501 A string that uniquely identifies an Automation workflow. # noqa: E501 :return: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: str Sets the workflow_id of this SubscriberInAutomationQueue2. A string that uniquely identifies an Automation workflow. # noqa: E501 :param workflow_id: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501 :type: str Gets the email_id of this SubscriberInAutomationQueue2. # noqa: E501 A string that uniquely identifies an email in an Automation workflow. # noqa: E501 :return: The email_id of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: str Sets the email_id of this SubscriberInAutomationQueue2. A string that uniquely identifies an email in an Automation workflow. # noqa: E501 :param email_id: The email_id of this SubscriberInAutomationQueue2. # noqa: E501 :type: str Gets the list_id of this SubscriberInAutomationQueue2. # noqa: E501 A string that uniquely identifies a list. # noqa: E501 :return: The list_id of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: str Sets the list_id of this SubscriberInAutomationQueue2. A string that uniquely identifies a list. # noqa: E501 :param list_id: The list_id of this SubscriberInAutomationQueue2. # noqa: E501 :type: str Gets the list_is_active of this SubscriberInAutomationQueue2. # noqa: E501 The status of the list used, namely if it's deleted or disabled. # noqa: E501 :return: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: bool Sets the list_is_active of this SubscriberInAutomationQueue2. The status of the list used, namely if it's deleted or disabled. # noqa: E501 :param list_is_active: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501 :type: bool Gets the email_address of this SubscriberInAutomationQueue2. # noqa: E501 The list member's email address. # noqa: E501 :return: The email_address of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: str Sets the email_address of this SubscriberInAutomationQueue2. The list member's email address. # noqa: E501 :param email_address: The email_address of this SubscriberInAutomationQueue2. # noqa: E501 :type: str Gets the next_send of this SubscriberInAutomationQueue2. # noqa: E501 The date and time of the next send for the workflow email in ISO 8601 format. 
# noqa: E501 :return: The next_send of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: datetime Sets the next_send of this SubscriberInAutomationQueue2. The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501 :param next_send: The next_send of this SubscriberInAutomationQueue2. # noqa: E501 :type: datetime Gets the links of this SubscriberInAutomationQueue2. # noqa: E501 A list of link types and descriptions for the API schema documents. # noqa: E501 :return: The links of this SubscriberInAutomationQueue2. # noqa: E501 :rtype: list[ResourceLink] Sets the links of this SubscriberInAutomationQueue2. A list of link types and descriptions for the API schema documents. # noqa: E501 :param links: The links of this SubscriberInAutomationQueue2. # noqa: E501 :type: list[ResourceLink] Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.89453 | 2 |
utils.py | dirtycomputer/Medical-Transformer | 0 | 6630589 | import os
import numpy as np
import torch
from skimage import io,color
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms import functional as F
from typing import Callable
import os
import cv2
import pandas as pd
from numbers import Number
from typing import Container
from collections import defaultdict
def to_long_tensor(pic):
# handle numpy array
img = torch.from_numpy(np.array(pic, np.uint8))
# backward compatibility
return img.long()
def correct_dims(*images):
corr_images = []
# print(images)
for img in images:
if len(img.shape) == 2:
corr_images.append(np.expand_dims(img, axis=2))
else:
corr_images.append(img)
if len(corr_images) == 1:
return corr_images[0]
else:
return corr_images
class JointTransform2D:
"""
Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms,
it is not enough to simply apply the same Transform from torchvision on the image and mask separetely.
Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can
be used, which will take care of the problems above.
Args:
crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will
be taken.
p_flip: float, the probability of performing a random horizontal flip.
color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter.
If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used.
p_random_affine: float, the probability of performing a random affine transform using
torchvision.transforms.RandomAffine.
long_mask: bool, if True, returns the mask as LongTensor in label-encoded format.
"""
def __init__(self, crop=(32, 32), p_flip=0.5, color_jitter_params=(0.1, 0.1, 0.1, 0.1),
p_random_affine=0, long_mask=False):
self.crop = crop
self.p_flip = p_flip
self.color_jitter_params = color_jitter_params
if color_jitter_params:
self.color_tf = T.ColorJitter(*color_jitter_params)
self.p_random_affine = p_random_affine
self.long_mask = long_mask
def __call__(self, image, mask):
# transforming to PIL image
image, mask = F.to_pil_image(image), F.to_pil_image(mask)
# random crop
if self.crop:
i, j, h, w = T.RandomCrop.get_params(image, self.crop)
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
if np.random.rand() < self.p_flip:
image, mask = F.hflip(image), F.hflip(mask)
# color transforms || ONLY ON IMAGE
if self.color_jitter_params:
image = self.color_tf(image)
# random affine transform
if np.random.rand() < self.p_random_affine:
affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)
# transforming to tensor
image = F.to_tensor(image)
if not self.long_mask:
mask = F.to_tensor(mask)
else:
mask = to_long_tensor(mask)
return image, mask
class ImageToImage2D(Dataset):
"""
Reads the images and applies the augmentation transform on them.
Usage:
1. If used without the unet.model.Model wrapper, an instance of this object should be passed to
torch.utils.data.DataLoader. Iterating through this returns the tuple of image, mask and image
filename.
2. With unet.model.Model wrapper, an instance of this object should be passed as train or validation
datasets.
Args:
dataset_path: path to the dataset. Structure of the dataset should be:
dataset_path
|-- images
|-- img001.png
|-- img002.png
|-- ...
|-- masks
|-- img001.png
|-- img002.png
|-- ...
joint_transform: augmentation transform, an instance of JointTransform2D. If bool(joint_transform)
evaluates to False, torchvision.transforms.ToTensor will be used on both image and mask.
one_hot_mask: bool, if True, returns the mask in one-hot encoded form.
"""
def __init__(self, dataset_path: str, joint_transform: Callable = None, one_hot_mask: int = False) -> None:
self.dataset_path = dataset_path
self.input_path = os.path.join(dataset_path, 'img')
self.output_path = os.path.join(dataset_path, 'labelcol')
self.images_list = os.listdir(self.input_path)
self.one_hot_mask = one_hot_mask
if joint_transform:
self.joint_transform = joint_transform
else:
to_tensor = T.ToTensor()
self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y))
def __len__(self):
return len(os.listdir(self.input_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
#print(image_filename[: -3])
# read image
# print(os.path.join(self.input_path, image_filename))
# print(os.path.join(self.output_path, image_filename[: -3] + "png"))
# print(os.path.join(self.input_path, image_filename))
image = cv2.imread(os.path.join(self.input_path, image_filename))
#print(image.shape)
# read mask image
mask = cv2.imread(os.path.join(self.output_path, image_filename),0)
mask[mask<=127] = 0
mask[mask>127] = 1
# correct dimensions if needed
image, mask = correct_dims(image, mask)
# print(image.shape)
if self.joint_transform:
image, mask = self.joint_transform(image, mask)
if self.one_hot_mask:
assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'
mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)
# mask = np.swapaxes(mask,2,0)
# print(image.shape)
# print(mask.shape)
# mask = np.transpose(mask,(2,0,1))
# image = np.transpose(image,(2,0,1))
# print(image.shape)
# print(mask.shape)
return image, mask, image_filename
class Image2D(Dataset):
"""
Reads the images and applies the augmentation transform on them. As opposed to ImageToImage2D, this
reads a single image and requires a simple augmentation transform.
Usage:
1. If used without the unet.model.Model wrapper, an instance of this object should be passed to
torch.utils.data.DataLoader. Iterating through this returns the tuple of image and image
filename.
2. With unet.model.Model wrapper, an instance of this object should be passed as a prediction
dataset.
Args:
dataset_path: path to the dataset. Structure of the dataset should be:
dataset_path
|-- images
|-- img001.png
|-- img002.png
|-- ...
transform: augmentation transform. If bool(joint_transform) evaluates to False,
torchvision.transforms.ToTensor will be used.
"""
def __init__(self, dataset_path: str, transform: Callable = None):
self.dataset_path = dataset_path
self.input_path = os.path.join(dataset_path, 'img')
self.images_list = os.listdir(self.input_path)
if transform:
self.transform = transform
else:
self.transform = T.ToTensor()
def __len__(self):
return len(os.listdir(self.input_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
image = cv2.imread(os.path.join(self.input_path, image_filename))
# image = np.transpose(image,(2,0,1))
image = correct_dims(image)
image = self.transform(image)
# image = np.swapaxes(image,2,0)
return image, image_filename
def chk_mkdir(*paths: Container) -> None:
"""
Creates folders if they do not exist.
Args:
paths: Container of paths to be created.
"""
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
class Logger:
def __init__(self, verbose=False):
self.logs = defaultdict(list)
self.verbose = verbose
def log(self, logs):
for key, value in logs.items():
self.logs[key].append(value)
if self.verbose:
print(logs)
def get_logs(self):
return self.logs
def to_csv(self, path):
pd.DataFrame(self.logs).to_csv(path, index=None)
class MetricList:
def __init__(self, metrics):
assert isinstance(metrics, dict), '\'metrics\' must be a dictionary of callables'
self.metrics = metrics
self.results = {key: 0.0 for key in self.metrics.keys()}
def __call__(self, y_out, y_batch):
for key, value in self.metrics.items():
self.results[key] += value(y_out, y_batch)
def reset(self):
self.results = {key: 0.0 for key in self.metrics.keys()}
def get_results(self, normalize=False):
assert isinstance(normalize, bool) or isinstance(normalize, Number), '\'normalize\' must be boolean or a number'
if not normalize:
return self.results
else:
return {key: value/normalize for key, value in self.results.items()}
| import os
import numpy as np
import torch
from skimage import io,color
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms import functional as F
from typing import Callable
import os
import cv2
import pandas as pd
from numbers import Number
from typing import Container
from collections import defaultdict
def to_long_tensor(pic):
# handle numpy array
img = torch.from_numpy(np.array(pic, np.uint8))
# backward compatibility
return img.long()
def correct_dims(*images):
corr_images = []
# print(images)
for img in images:
if len(img.shape) == 2:
corr_images.append(np.expand_dims(img, axis=2))
else:
corr_images.append(img)
if len(corr_images) == 1:
return corr_images[0]
else:
return corr_images
class JointTransform2D:
"""
Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms,
it is not enough to simply apply the same Transform from torchvision on the image and mask separetely.
Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can
be used, which will take care of the problems above.
Args:
crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will
be taken.
p_flip: float, the probability of performing a random horizontal flip.
color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter.
If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used.
p_random_affine: float, the probability of performing a random affine transform using
torchvision.transforms.RandomAffine.
long_mask: bool, if True, returns the mask as LongTensor in label-encoded format.
"""
def __init__(self, crop=(32, 32), p_flip=0.5, color_jitter_params=(0.1, 0.1, 0.1, 0.1),
p_random_affine=0, long_mask=False):
self.crop = crop
self.p_flip = p_flip
self.color_jitter_params = color_jitter_params
if color_jitter_params:
self.color_tf = T.ColorJitter(*color_jitter_params)
self.p_random_affine = p_random_affine
self.long_mask = long_mask
def __call__(self, image, mask):
# transforming to PIL image
image, mask = F.to_pil_image(image), F.to_pil_image(mask)
# random crop
if self.crop:
i, j, h, w = T.RandomCrop.get_params(image, self.crop)
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
if np.random.rand() < self.p_flip:
image, mask = F.hflip(image), F.hflip(mask)
# color transforms || ONLY ON IMAGE
if self.color_jitter_params:
image = self.color_tf(image)
# random affine transform
if np.random.rand() < self.p_random_affine:
affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)
# transforming to tensor
image = F.to_tensor(image)
if not self.long_mask:
mask = F.to_tensor(mask)
else:
mask = to_long_tensor(mask)
return image, mask
class ImageToImage2D(Dataset):
"""
Reads the images and applies the augmentation transform on them.
Usage:
1. If used without the unet.model.Model wrapper, an instance of this object should be passed to
torch.utils.data.DataLoader. Iterating through this returns the tuple of image, mask and image
filename.
2. With unet.model.Model wrapper, an instance of this object should be passed as train or validation
datasets.
Args:
dataset_path: path to the dataset. Structure of the dataset should be:
dataset_path
|-- images
|-- img001.png
|-- img002.png
|-- ...
|-- masks
|-- img001.png
|-- img002.png
|-- ...
joint_transform: augmentation transform, an instance of JointTransform2D. If bool(joint_transform)
evaluates to False, torchvision.transforms.ToTensor will be used on both image and mask.
one_hot_mask: bool, if True, returns the mask in one-hot encoded form.
"""
def __init__(self, dataset_path: str, joint_transform: Callable = None, one_hot_mask: int = False) -> None:
self.dataset_path = dataset_path
self.input_path = os.path.join(dataset_path, 'img')
self.output_path = os.path.join(dataset_path, 'labelcol')
self.images_list = os.listdir(self.input_path)
self.one_hot_mask = one_hot_mask
if joint_transform:
self.joint_transform = joint_transform
else:
to_tensor = T.ToTensor()
self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y))
def __len__(self):
return len(os.listdir(self.input_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
#print(image_filename[: -3])
# read image
# print(os.path.join(self.input_path, image_filename))
# print(os.path.join(self.output_path, image_filename[: -3] + "png"))
# print(os.path.join(self.input_path, image_filename))
image = cv2.imread(os.path.join(self.input_path, image_filename))
#print(image.shape)
# read mask image
mask = cv2.imread(os.path.join(self.output_path, image_filename),0)
mask[mask<=127] = 0
mask[mask>127] = 1
# correct dimensions if needed
image, mask = correct_dims(image, mask)
# print(image.shape)
if self.joint_transform:
image, mask = self.joint_transform(image, mask)
if self.one_hot_mask:
assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'
mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)
# mask = np.swapaxes(mask,2,0)
# print(image.shape)
# print(mask.shape)
# mask = np.transpose(mask,(2,0,1))
# image = np.transpose(image,(2,0,1))
# print(image.shape)
# print(mask.shape)
return image, mask, image_filename
class Image2D(Dataset):
"""
Reads the images and applies the augmentation transform on them. As opposed to ImageToImage2D, this
reads a single image and requires a simple augmentation transform.
Usage:
1. If used without the unet.model.Model wrapper, an instance of this object should be passed to
torch.utils.data.DataLoader. Iterating through this returns the tuple of image and image
filename.
2. With unet.model.Model wrapper, an instance of this object should be passed as a prediction
dataset.
Args:
dataset_path: path to the dataset. Structure of the dataset should be:
dataset_path
|-- images
|-- img001.png
|-- img002.png
|-- ...
transform: augmentation transform. If bool(joint_transform) evaluates to False,
torchvision.transforms.ToTensor will be used.
"""
def __init__(self, dataset_path: str, transform: Callable = None):
self.dataset_path = dataset_path
self.input_path = os.path.join(dataset_path, 'img')
self.images_list = os.listdir(self.input_path)
if transform:
self.transform = transform
else:
self.transform = T.ToTensor()
def __len__(self):
return len(os.listdir(self.input_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
image = cv2.imread(os.path.join(self.input_path, image_filename))
# image = np.transpose(image,(2,0,1))
image = correct_dims(image)
image = self.transform(image)
# image = np.swapaxes(image,2,0)
return image, image_filename
def chk_mkdir(*paths: Container) -> None:
"""
Creates folders if they do not exist.
Args:
paths: Container of paths to be created.
"""
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
class Logger:
def __init__(self, verbose=False):
self.logs = defaultdict(list)
self.verbose = verbose
def log(self, logs):
for key, value in logs.items():
self.logs[key].append(value)
if self.verbose:
print(logs)
def get_logs(self):
return self.logs
def to_csv(self, path):
pd.DataFrame(self.logs).to_csv(path, index=None)
class MetricList:
def __init__(self, metrics):
assert isinstance(metrics, dict), '\'metrics\' must be a dictionary of callables'
self.metrics = metrics
self.results = {key: 0.0 for key in self.metrics.keys()}
def __call__(self, y_out, y_batch):
for key, value in self.metrics.items():
self.results[key] += value(y_out, y_batch)
def reset(self):
self.results = {key: 0.0 for key in self.metrics.keys()}
def get_results(self, normalize=False):
assert isinstance(normalize, bool) or isinstance(normalize, Number), '\'normalize\' must be boolean or a number'
if not normalize:
return self.results
else:
return {key: value/normalize for key, value in self.results.items()}
| en | 0.628414 | # handle numpy array # backward compatibility # print(images) Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms, it is not enough to simply apply the same Transform from torchvision on the image and mask separetely. Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can be used, which will take care of the problems above. Args: crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will be taken. p_flip: float, the probability of performing a random horizontal flip. color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter. If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used. p_random_affine: float, the probability of performing a random affine transform using torchvision.transforms.RandomAffine. long_mask: bool, if True, returns the mask as LongTensor in label-encoded format. # transforming to PIL image # random crop # color transforms || ONLY ON IMAGE # random affine transform # transforming to tensor Reads the images and applies the augmentation transform on them. Usage: 1. If used without the unet.model.Model wrapper, an instance of this object should be passed to torch.utils.data.DataLoader. Iterating through this returns the tuple of image, mask and image filename. 2. With unet.model.Model wrapper, an instance of this object should be passed as train or validation datasets. Args: dataset_path: path to the dataset. Structure of the dataset should be: dataset_path |-- images |-- img001.png |-- img002.png |-- ... |-- masks |-- img001.png |-- img002.png |-- ... joint_transform: augmentation transform, an instance of JointTransform2D. If bool(joint_transform) evaluates to False, torchvision.transforms.ToTensor will be used on both image and mask. one_hot_mask: bool, if True, returns the mask in one-hot encoded form. #print(image_filename[: -3]) # read image # print(os.path.join(self.input_path, image_filename)) # print(os.path.join(self.output_path, image_filename[: -3] + "png")) # print(os.path.join(self.input_path, image_filename)) #print(image.shape) # read mask image # correct dimensions if needed # print(image.shape) # mask = np.swapaxes(mask,2,0) # print(image.shape) # print(mask.shape) # mask = np.transpose(mask,(2,0,1)) # image = np.transpose(image,(2,0,1)) # print(image.shape) # print(mask.shape) Reads the images and applies the augmentation transform on them. As opposed to ImageToImage2D, this reads a single image and requires a simple augmentation transform. Usage: 1. If used without the unet.model.Model wrapper, an instance of this object should be passed to torch.utils.data.DataLoader. Iterating through this returns the tuple of image and image filename. 2. With unet.model.Model wrapper, an instance of this object should be passed as a prediction dataset. Args: dataset_path: path to the dataset. Structure of the dataset should be: dataset_path |-- images |-- img001.png |-- img002.png |-- ... transform: augmentation transform. If bool(joint_transform) evaluates to False, torchvision.transforms.ToTensor will be used. # image = np.transpose(image,(2,0,1)) # image = np.swapaxes(image,2,0) Creates folders if they do not exist. Args: paths: Container of paths to be created. | 2.661772 | 3 |
venv/lib/python3.9/site-packages/OpenSSL/_util.py | almmello/frozen | 7 | 6630590 | <gh_stars>1-10
import sys
import warnings
from six import PY2, text_type
from cryptography.hazmat.bindings.openssl.binding import Binding
binding = Binding()
binding.init_static_locks()
ffi = binding.ffi
lib = binding.lib
# This is a special CFFI allocator that does not bother to zero its memory
# after allocation. This has vastly better performance on large allocations and
# so should be used whenever we don't need the memory zeroed out.
no_zero_allocator = ffi.new_allocator(should_clear_after_alloc=False)
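# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# Shows how the non-zeroing allocator above might be used for a large scratch
# buffer that will be fully overwritten before it is read; the size is an
# arbitrary example value.
def _example_scratch_buffer(size=4096):
    # Memory returned by no_zero_allocator is NOT zero-initialized, so every
    # byte must be written before it is read back.
    return no_zero_allocator("char[]", size)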
def text(charp):
"""
    Get a native string type representation of the given CFFI ``char*`` object.
:param charp: A C-style string represented using CFFI.
:return: :class:`str`
"""
if not charp:
return ""
return native(ffi.string(charp))
def exception_from_error_queue(exception_type):
"""
Convert an OpenSSL library failure into a Python exception.
When a call to the native OpenSSL library fails, this is usually signalled
by the return value, and an error code is stored in an error queue
associated with the current thread. The err library provides functions to
obtain these error codes and textual error messages.
"""
errors = []
while True:
error = lib.ERR_get_error()
if error == 0:
break
errors.append(
(
text(lib.ERR_lib_error_string(error)),
text(lib.ERR_func_error_string(error)),
text(lib.ERR_reason_error_string(error)),
)
)
raise exception_type(errors)
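# --- Illustrative sketch (editor's addition) ---
# Typical pairing for exception_from_error_queue(): a module defines its own
# exception type and drains the thread-local OpenSSL error queue into it.
# "_ExampleError" is a hypothetical class used only for this sketch.
class _ExampleError(Exception):
    pass
def _example_raise_pending_errors():
    # Raises _ExampleError carrying the collected (library, function, reason)
    # tuples from the OpenSSL error queue.
    exception_from_error_queue(_ExampleError)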
def make_assert(error):
"""
Create an assert function that uses :func:`exception_from_error_queue` to
raise an exception wrapped by *error*.
"""
def openssl_assert(ok):
"""
If *ok* is not True, retrieve the error from OpenSSL and raise it.
"""
if ok is not True:
exception_from_error_queue(error)
return openssl_assert
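# --- Illustrative sketch (editor's addition) ---
# make_assert() is normally called once per module to build a checker that is
# invoked after each low-level OpenSSL call; the exception class here is
# hypothetical and exists only for this example.
class _ExampleAssertError(Exception):
    pass
_example_openssl_assert = make_assert(_ExampleAssertError)
def _example_check_return_code(returned_code):
    # Many OpenSSL functions signal success with 1; anything else surfaces
    # whatever is sitting in the error queue as an _ExampleAssertError.
    _example_openssl_assert(returned_code == 1)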
def native(s):
"""
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using UTF-8 encoding if conversion is necessary.
:raise UnicodeError: The input string is not UTF-8 decodeable.
:raise TypeError: The input is neither :py:class:`bytes` nor
:py:class:`unicode`.
"""
if not isinstance(s, (bytes, text_type)):
raise TypeError("%r is neither bytes nor unicode" % s)
if PY2:
if isinstance(s, text_type):
return s.encode("utf-8")
else:
if isinstance(s, bytes):
return s.decode("utf-8")
return s
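# --- Illustrative sketch (editor's addition) ---
# native() round-trips both input types to the interpreter's own str type:
# on Python 3, native(b"abc") == "abc" and native(u"abc") == "abc".
def _example_native_roundtrip():
    return native(b"abc"), native(u"abc")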
def path_string(s):
"""
Convert a Python string to a :py:class:`bytes` string identifying the same
path and which can be passed into an OpenSSL API accepting a filename.
:param s: An instance of :py:class:`bytes` or :py:class:`unicode`.
:return: An instance of :py:class:`bytes`.
"""
if isinstance(s, bytes):
return s
elif isinstance(s, text_type):
return s.encode(sys.getfilesystemencoding())
else:
raise TypeError("Path must be represented as bytes or unicode string")
if PY2:
def byte_string(s):
return s
else:
def byte_string(s):
return s.encode("charmap")
# A marker object to observe whether some optional arguments are passed any
# value or not.
UNSPECIFIED = object()
_TEXT_WARNING = (
text_type.__name__ + " for {0} is no longer accepted, use bytes"
)
def text_to_bytes_and_warn(label, obj):
"""
If ``obj`` is text, emit a warning that it should be bytes instead and try
to convert it to bytes automatically.
:param str label: The name of the parameter from which ``obj`` was taken
(so a developer can easily find the source of the problem and correct
it).
:return: If ``obj`` is the text string type, a ``bytes`` object giving the
UTF-8 encoding of that text is returned. Otherwise, ``obj`` itself is
returned.
"""
if isinstance(obj, text_type):
warnings.warn(
_TEXT_WARNING.format(label),
category=DeprecationWarning,
stacklevel=3,
)
return obj.encode("utf-8")
return obj
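# --- Illustrative sketch (editor's addition) ---
# text_to_bytes_and_warn() lets an API keep accepting text while nudging
# callers toward bytes: text input triggers a DeprecationWarning and its UTF-8
# encoding is returned instead. "buffer" is just an example parameter label.
def _example_accept_legacy_text(data):
    data = text_to_bytes_and_warn("buffer", data)
    return data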
| import sys
import warnings
from six import PY2, text_type
from cryptography.hazmat.bindings.openssl.binding import Binding
binding = Binding()
binding.init_static_locks()
ffi = binding.ffi
lib = binding.lib
# This is a special CFFI allocator that does not bother to zero its memory
# after allocation. This has vastly better performance on large allocations and
# so should be used whenever we don't need the memory zeroed out.
no_zero_allocator = ffi.new_allocator(should_clear_after_alloc=False)
def text(charp):
"""
    Get a native string type representation of the given CFFI ``char*`` object.
:param charp: A C-style string represented using CFFI.
:return: :class:`str`
"""
if not charp:
return ""
return native(ffi.string(charp))
def exception_from_error_queue(exception_type):
"""
Convert an OpenSSL library failure into a Python exception.
When a call to the native OpenSSL library fails, this is usually signalled
by the return value, and an error code is stored in an error queue
associated with the current thread. The err library provides functions to
obtain these error codes and textual error messages.
"""
errors = []
while True:
error = lib.ERR_get_error()
if error == 0:
break
errors.append(
(
text(lib.ERR_lib_error_string(error)),
text(lib.ERR_func_error_string(error)),
text(lib.ERR_reason_error_string(error)),
)
)
raise exception_type(errors)
def make_assert(error):
"""
Create an assert function that uses :func:`exception_from_error_queue` to
raise an exception wrapped by *error*.
"""
def openssl_assert(ok):
"""
If *ok* is not True, retrieve the error from OpenSSL and raise it.
"""
if ok is not True:
exception_from_error_queue(error)
return openssl_assert
def native(s):
"""
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using UTF-8 encoding if conversion is necessary.
:raise UnicodeError: The input string is not UTF-8 decodeable.
:raise TypeError: The input is neither :py:class:`bytes` nor
:py:class:`unicode`.
"""
if not isinstance(s, (bytes, text_type)):
raise TypeError("%r is neither bytes nor unicode" % s)
if PY2:
if isinstance(s, text_type):
return s.encode("utf-8")
else:
if isinstance(s, bytes):
return s.decode("utf-8")
return s
def path_string(s):
"""
Convert a Python string to a :py:class:`bytes` string identifying the same
path and which can be passed into an OpenSSL API accepting a filename.
:param s: An instance of :py:class:`bytes` or :py:class:`unicode`.
:return: An instance of :py:class:`bytes`.
"""
if isinstance(s, bytes):
return s
elif isinstance(s, text_type):
return s.encode(sys.getfilesystemencoding())
else:
raise TypeError("Path must be represented as bytes or unicode string")
if PY2:
def byte_string(s):
return s
else:
def byte_string(s):
return s.encode("charmap")
# A marker object to observe whether some optional arguments are passed any
# value or not.
UNSPECIFIED = object()
_TEXT_WARNING = (
text_type.__name__ + " for {0} is no longer accepted, use bytes"
)
def text_to_bytes_and_warn(label, obj):
"""
If ``obj`` is text, emit a warning that it should be bytes instead and try
to convert it to bytes automatically.
:param str label: The name of the parameter from which ``obj`` was taken
(so a developer can easily find the source of the problem and correct
it).
:return: If ``obj`` is the text string type, a ``bytes`` object giving the
UTF-8 encoding of that text is returned. Otherwise, ``obj`` itself is
returned.
"""
if isinstance(obj, text_type):
warnings.warn(
_TEXT_WARNING.format(label),
category=DeprecationWarning,
stacklevel=3,
)
return obj.encode("utf-8")
return obj | en | 0.745919 | # This is a special CFFI allocator that does not bother to zero its memory # after allocation. This has vastly better performance on large allocations and # so should be used whenever we don't need the memory zeroed out. Get a native string type representing of the given CFFI ``char*`` object. :param charp: A C-style string represented using CFFI. :return: :class:`str` Convert an OpenSSL library failure into a Python exception. When a call to the native OpenSSL library fails, this is usually signalled by the return value, and an error code is stored in an error queue associated with the current thread. The err library provides functions to obtain these error codes and textual error messages. Create an assert function that uses :func:`exception_from_error_queue` to raise an exception wrapped by *error*. If *ok* is not True, retrieve the error from OpenSSL and raise it. Convert :py:class:`bytes` or :py:class:`unicode` to the native :py:class:`str` type, using UTF-8 encoding if conversion is necessary. :raise UnicodeError: The input string is not UTF-8 decodeable. :raise TypeError: The input is neither :py:class:`bytes` nor :py:class:`unicode`. Convert a Python string to a :py:class:`bytes` string identifying the same path and which can be passed into an OpenSSL API accepting a filename. :param s: An instance of :py:class:`bytes` or :py:class:`unicode`. :return: An instance of :py:class:`bytes`. # A marker object to observe whether some optional arguments are passed any # value or not. If ``obj`` is text, emit a warning that it should be bytes instead and try to convert it to bytes automatically. :param str label: The name of the parameter from which ``obj`` was taken (so a developer can easily find the source of the problem and correct it). :return: If ``obj`` is the text string type, a ``bytes`` object giving the UTF-8 encoding of that text is returned. Otherwise, ``obj`` itself is returned. | 2.417528 | 2 |
random_tool/random_genetate.py | stwrd/ImageProcessTool | 0 | 6630591 | <filename>random_tool/random_genetate.py<gh_stars>0
import numpy as np
p = 0.8
sample_num = 10000
sample = np.random.random_sample(sample_num)
sample[sample>p] = 1
sample[sample<=p] = 0
print('sample ',sample.sum())
select = np.random.randint(0,2,sample_num)
print('select ',select.sum())
mask = (sample==select)
print(mask.astype('int32').sum())
a = [[1,2,3],[4,5,6],[7,8,9]] | <filename>random_tool/random_genetate.py<gh_stars>0
import numpy as np
p = 0.8
sample_num = 10000
sample = np.random.random_sample(sample_num)
sample[sample>p] = 1
sample[sample<=p] = 0
print('sample ',sample.sum())
select = np.random.randint(0,2,sample_num)
print('select ',select.sum())
mask = (sample==select)
print(mask.astype('int32').sum())
a = [[1,2,3],[4,5,6],[7,8,9]] | none | 1 | 2.851379 | 3 |
|
app/documents/migrations/0012_auto_20201022_0110.py | roxtrom13/real-back | 0 | 6630592 | # Generated by Django 3.1.2 on 2020-10-22 01:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0011_auto_20201022_0029'),
]
operations = [
migrations.AlterField(
model_name='document',
name='author',
field=models.ManyToManyField(default='Unknown', to='documents.Author'),
),
]
| # Generated by Django 3.1.2 on 2020-10-22 01:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0011_auto_20201022_0029'),
]
operations = [
migrations.AlterField(
model_name='document',
name='author',
field=models.ManyToManyField(default='Unknown', to='documents.Author'),
),
]
| en | 0.845332 | # Generated by Django 3.1.2 on 2020-10-22 01:10 | 1.588606 | 2 |
source/campo/op_experimental/network.py | Shellydun/campo | 2 | 6630593 | import copy
import numpy
import networkx as nx
from ..values import Values
def neighbour_network(nodes, neighbours, probability, seed=None):
G = nx.watts_strogatz_graph(n=nodes, k=neighbours, p=probability, seed=seed)
a = nx.to_numpy_array(G, dtype=numpy.int8)
    # To avoid location 0 always corresponding to node 0 the network could be shuffled,
    # but this may lead to locations pointing at themselves?!
#numpy.random.shuffle(a)
return a
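# --- Illustrative usage sketch (editor's addition, not part of the module) ---
# Builds a small Watts-Strogatz adjacency matrix and counts each node's
# neighbours; the node/neighbour/probability values are arbitrary examples.
def _example_neighbour_counts():
    adjacency = neighbour_network(nodes=10, neighbours=4, probability=0.1, seed=42)
    return adjacency.sum(axis=1)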
def network_average_def(source_prop, value_prop, default):
tmp_prop = copy.deepcopy(source_prop)
shapes = [()] * tmp_prop.nr_objects
tmp_prop._values = Values(tmp_prop.nr_objects, shapes, numpy.nan)
for idx,i in enumerate(tmp_prop.values()):
neighbour_ids = numpy.nonzero(source_prop.values()[idx]>0)
val = 0.0
if len(neighbour_ids[0]) == 0:
tmp_prop.values()[idx] = default.values()[0]
else:
for n in neighbour_ids[0]:
nval = value_prop.values()[n]
val += nval
tmp_prop.values()[idx] = val / len(neighbour_ids[0])
return tmp_prop
def network_average(source_prop, value_prop, fname):
tmp_prop = copy.deepcopy(value_prop)
for idx,i in enumerate(tmp_prop.values()):
neighbour_ids = numpy.nonzero(source_prop.values()[idx]>0)
val = 0.0
for n in neighbour_ids[0]:
#nval = value_prop.values.values[n]
nval = value_prop.values()[n]
val += nval
tmp_prop.values()[idx] = val / len(neighbour_ids[0])
return tmp_prop
| import copy
import numpy
import networkx as nx
from ..values import Values
def neighbour_network(nodes, neighbours, probability, seed=None):
G = nx.watts_strogatz_graph(n=nodes, k=neighbours, p=probability, seed=seed)
a = nx.to_numpy_array(G, dtype=numpy.int8)
    # To avoid location 0 always corresponding to node 0 the network could be shuffled,
    # but this may lead to locations pointing at themselves?!
#numpy.random.shuffle(a)
return a
def network_average_def(source_prop, value_prop, default):
tmp_prop = copy.deepcopy(source_prop)
shapes = [()] * tmp_prop.nr_objects
tmp_prop._values = Values(tmp_prop.nr_objects, shapes, numpy.nan)
for idx,i in enumerate(tmp_prop.values()):
neighbour_ids = numpy.nonzero(source_prop.values()[idx]>0)
val = 0.0
if len(neighbour_ids[0]) == 0:
tmp_prop.values()[idx] = default.values()[0]
else:
for n in neighbour_ids[0]:
nval = value_prop.values()[n]
val += nval
tmp_prop.values()[idx] = val / len(neighbour_ids[0])
return tmp_prop
def network_average(source_prop, value_prop, fname):
tmp_prop = copy.deepcopy(value_prop)
for idx,i in enumerate(tmp_prop.values()):
neighbour_ids = numpy.nonzero(source_prop.values()[idx]>0)
val = 0.0
for n in neighbour_ids[0]:
#nval = value_prop.values.values[n]
nval = value_prop.values()[n]
val += nval
tmp_prop.values()[idx] = val / len(neighbour_ids[0])
return tmp_prop
| en | 0.766993 | # To avoid that location 0 always corresponds to node 0 we shuffle the nework # this may lead to locations pointing at themselves?! #numpy.random.shuffle(a) #nval = value_prop.values.values[n] | 2.447237 | 2 |
app.py | adulting-dev/adulting.dev | 5 | 6630594 | from flask import Flask, render_template
from flaskext.markdown import Markdown
import contentful
from rich_text_renderer import RichTextRenderer
from rich_text_renderer.base_node_renderer import BaseNodeRenderer
from rich_text_renderer.null_renderer import NullRenderer
import os
from dotenv import load_dotenv
load_dotenv()
from custom_renders import (
locationBlockEntryRenderer,
buttonEntryRenderer,
BaseInlineRenderer,
BaseBlockEntryRenderer,
)
SPACE_ID = os.environ.get("SPACE_ID")
DELIVERY_API_KEY = os.environ.get("DELIVERY_API_KEY")
API_URL = os.environ.get("API_URL")
MAP_KEY = os.environ.get("MAP_KEY")
DEBUG_STATUS = os.environ.get("DEBUG_STATUS")
ENV = os.environ.get("ENV")
client = contentful.Client(SPACE_ID, DELIVERY_API_KEY, API_URL, environment=ENV)
BaseBlockEntryRenderer.__RENDERERS__ += [
locationBlockEntryRenderer,
buttonEntryRenderer,
]
renderer = RichTextRenderer(
{
"embedded-entry-block": BaseBlockEntryRenderer,
"embedded-entry-inline": BaseInlineRenderer,
}
)
app = Flask(__name__)
Markdown(app)
@app.route("/")
def home_page():
entry = client.entry("1l3EHYzPbgf9UUV0oEyTDs")
return render_template(
"home.html",
renderer=renderer,
title=entry.page_title,
page_components=entry.page_component,
client=client,
MAP_KEY=MAP_KEY,
)
if __name__ == "__main__":
app.debug = DEBUG_STATUS
app.run()
| from flask import Flask, render_template
from flaskext.markdown import Markdown
import contentful
from rich_text_renderer import RichTextRenderer
from rich_text_renderer.base_node_renderer import BaseNodeRenderer
from rich_text_renderer.null_renderer import NullRenderer
import os
from dotenv import load_dotenv
load_dotenv()
from custom_renders import (
locationBlockEntryRenderer,
buttonEntryRenderer,
BaseInlineRenderer,
BaseBlockEntryRenderer,
)
SPACE_ID = os.environ.get("SPACE_ID")
DELIVERY_API_KEY = os.environ.get("DELIVERY_API_KEY")
API_URL = os.environ.get("API_URL")
MAP_KEY = os.environ.get("MAP_KEY")
DEBUG_STATUS = os.environ.get("DEBUG_STATUS")
ENV = os.environ.get("ENV")
client = contentful.Client(SPACE_ID, DELIVERY_API_KEY, API_URL, environment=ENV)
BaseBlockEntryRenderer.__RENDERERS__ += [
locationBlockEntryRenderer,
buttonEntryRenderer,
]
renderer = RichTextRenderer(
{
"embedded-entry-block": BaseBlockEntryRenderer,
"embedded-entry-inline": BaseInlineRenderer,
}
)
app = Flask(__name__)
Markdown(app)
@app.route("/")
def home_page():
entry = client.entry("1l3EHYzPbgf9UUV0oEyTDs")
return render_template(
"home.html",
renderer=renderer,
title=entry.page_title,
page_components=entry.page_component,
client=client,
MAP_KEY=MAP_KEY,
)
if __name__ == "__main__":
app.debug = DEBUG_STATUS
app.run()
| none | 1 | 2.348178 | 2 |
|
Models/Model 1/Additional Hyperparameter Tuning Scripts/pretrained_point001_point4.py | ChaojieZhang-cz/DL-Project-Brain-MRI-and-Personality-Type | 1 | 6630595 | import os
import yaml
import torch
import nibabel as nib
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import time
import pickle
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import LambdaLR,MultiStepLR
import matplotlib.pyplot as plt
import sys
# build model from Liu et al.'s github code
sys.path.insert(1, "./CNN_design_for_AD-master/models/")
import build_model_extrablock
# for untrained use:
#config_name = './CNN_design_for_AD-master/config.yaml'
# pretrained model
config_name = './CNN_design_for_AD-master/config2.yaml'
with open(os.path.join('./'+config_name), 'r') as f:
    cfg = yaml.safe_load(f)  # safe_load avoids the deprecated Loader-less yaml.load()
device = torch.device('cuda')
model = build_model_extrablock.build_model(cfg).to(device)
class hcp_dataset(Dataset):
def __init__(self, df_path, train = False):
self.df = pd.read_csv(df_path)
self.train = train
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
subject_name = self.df.iloc[idx]['Subject']
image_path ='./data/hcp2/'+str(subject_name)+'/T1w/T1w_acpc_dc_restore_brain.nii.gz'
image = nib.load(image_path)
image_array = image.get_fdata()
#Normalization
image_array = (image_array - image_array.mean()) / image_array.std()
#label = self.df.loc[idx][['N','E','O','A','C']].values.astype(int)
        label = self.df.loc[idx][['N']].values[0].astype(int) # predict N (Neuroticism)
sample = {'x': image_array[None,:], 'y': label}
return sample
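# --- Illustrative sketch (editor's addition) ---
# Reading a single item from hcp_dataset yields a dict with a (1, H, W, D)
# float array under 'x' and an integer label under 'y'. Defined as a function
# so nothing runs at import time; calling it requires the HCP data and the
# CSV split below to exist.
def _example_peek_first_sample(csv_path='./train.csv'):
    ds = hcp_dataset(csv_path, train=True)
    first = ds[0]
    return first['x'].shape, first['y']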
bs = 1
# full dataset
train_df_path = './train.csv'
val_df_path = './test.csv'
test_df_path = './val.csv'
transformed_dataset = {'train': hcp_dataset(train_df_path, train = True),
'validate':hcp_dataset(val_df_path),
'test':hcp_dataset(test_df_path),}
# for debugging and to see if model can learn training set on tiny sample
#sample_df_path = './sample.csv'
#sample_transformed_dataset = {'train': hcp_dataset(sample_df_path, train = True),
# 'validate':hcp_dataset(sample_df_path),
# 'test':hcp_dataset(sample_df_path),}
#
#dataloader_sample = {x: DataLoader(sample_transformed_dataset[x], batch_size=bs,
# shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
# get data_loader
dataloader = {x: DataLoader(transformed_dataset[x], batch_size=bs,
shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
data_sizes ={x: len(transformed_dataset[x]) for x in ['train', 'validate','test']}
def train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 10, verbose = True, scheduler=None, output_name="test.txt"):
acc_dict = {'train':[],'validate':[]}
loss_dict = {'train':[],'validate':[]}
best_acc = 0
phases = ['train','validate']
since = time.time()
number = 0
for i in range(num_epochs):
print('Epoch: {}/{}'.format(i, num_epochs-1))
print('-'*10)
for p in phases:
running_correct = 0
running_loss = 0
running_total = 0
if p == 'train':
model.train()
else:
model.eval()
for data in dataloader[p]:
optimizer.zero_grad()
image = F.interpolate(data['x'], mode="trilinear", scale_factor=interpolation_scale)
image = image.to(device,dtype=torch.float)
label = data['y'].to(device,dtype=torch.long)
output = model(image)
loss = loss_fn(output, label)
print(number)
number += 1
_, preds = torch.max(output, dim = 1)
num_imgs = image.size()[0]
running_correct += torch.sum(preds ==label).item()
running_loss += loss.item()*num_imgs
running_total += num_imgs
if p== 'train':
loss.backward()
optimizer.step()
epoch_acc = float(running_correct/running_total)
epoch_loss = float(running_loss/running_total)
if verbose or (i%10 == 0):
print('Phase:{}, epoch loss: {:.4f} Acc: {:.4f}'.format(p, epoch_loss, epoch_acc))
acc_dict[p].append(epoch_acc)
loss_dict[p].append(epoch_loss)
if p == 'validate':
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
save_model(best_model_wts, model, acc_dict, loss_dict)
else:
if scheduler:
scheduler.step()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val acc: {:4f}'.format(best_acc))
model.load_state_dict(best_model_wts)
return model, acc_dict, loss_dict
def save_model(best_model_wts, model, acc_dict, loss_dict):
model_saved = {'best_model_wts':best_model_wts, 'model':model, 'acc_dict':acc_dict, 'loss_dict':loss_dict}
f=open(output_name,'wb')
pickle.dump(model_saved,f)
f.close()
return None
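# --- Illustrative sketch (editor's addition) ---
# How the checkpoint pickled by save_model could be reloaded later; the file
# name mirrors output_name defined below and is an assumption of this example.
def _example_load_checkpoint(path='pretrained_point001_point4.txt'):
    with open(path, 'rb') as fh:
        saved = pickle.load(fh)
    restored = saved['model']
    restored.load_state_dict(saved['best_model_wts'])
    return restored, saved['acc_dict'], saved['loss_dict']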
# from Liu et al.
lr_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr_rate)
interpolation_scale = 0.4
output_name = "pretrained_point001_point4.txt"
model, acc_dict, loss_dict = train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 50, verbose = True, scheduler= MultiStepLR(optimizer, milestones=[20,40], gamma=0.1), output_name = output_name)
| import os
import yaml
import torch
import nibabel as nib
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import time
import pickle
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import LambdaLR,MultiStepLR
import matplotlib.pyplot as plt
import sys
# build model from Liu et al.'s github code
sys.path.insert(1, "./CNN_design_for_AD-master/models/")
import build_model_extrablock
# for untrained use:
#config_name = './CNN_design_for_AD-master/config.yaml'
# pretrained model
config_name = './CNN_design_for_AD-master/config2.yaml'
with open(os.path.join('./'+config_name), 'r') as f:
    cfg = yaml.safe_load(f)  # safe_load avoids the deprecated Loader-less yaml.load()
device = torch.device('cuda')
model = build_model_extrablock.build_model(cfg).to(device)
class hcp_dataset(Dataset):
def __init__(self, df_path, train = False):
self.df = pd.read_csv(df_path)
self.train = train
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
subject_name = self.df.iloc[idx]['Subject']
image_path ='./data/hcp2/'+str(subject_name)+'/T1w/T1w_acpc_dc_restore_brain.nii.gz'
image = nib.load(image_path)
image_array = image.get_fdata()
#Normalization
image_array = (image_array - image_array.mean()) / image_array.std()
#label = self.df.loc[idx][['N','E','O','A','C']].values.astype(int)
        label = self.df.loc[idx][['N']].values[0].astype(int) # predict N (Neuroticism)
sample = {'x': image_array[None,:], 'y': label}
return sample
bs = 1
# full dataset
train_df_path = './train.csv'
val_df_path = './test.csv'
test_df_path = './val.csv'
transformed_dataset = {'train': hcp_dataset(train_df_path, train = True),
'validate':hcp_dataset(val_df_path),
'test':hcp_dataset(test_df_path),}
# for debugging and to see if model can learn training set on tiny sample
#sample_df_path = './sample.csv'
#sample_transformed_dataset = {'train': hcp_dataset(sample_df_path, train = True),
# 'validate':hcp_dataset(sample_df_path),
# 'test':hcp_dataset(sample_df_path),}
#
#dataloader_sample = {x: DataLoader(sample_transformed_dataset[x], batch_size=bs,
# shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
# get data_loader
dataloader = {x: DataLoader(transformed_dataset[x], batch_size=bs,
shuffle=True, num_workers=0) for x in ['train', 'validate','test']}
data_sizes ={x: len(transformed_dataset[x]) for x in ['train', 'validate','test']}
def train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 10, verbose = True, scheduler=None, output_name="test.txt"):
acc_dict = {'train':[],'validate':[]}
loss_dict = {'train':[],'validate':[]}
best_acc = 0
phases = ['train','validate']
since = time.time()
number = 0
for i in range(num_epochs):
print('Epoch: {}/{}'.format(i, num_epochs-1))
print('-'*10)
for p in phases:
running_correct = 0
running_loss = 0
running_total = 0
if p == 'train':
model.train()
else:
model.eval()
for data in dataloader[p]:
optimizer.zero_grad()
image = F.interpolate(data['x'], mode="trilinear", scale_factor=interpolation_scale)
image = image.to(device,dtype=torch.float)
label = data['y'].to(device,dtype=torch.long)
output = model(image)
loss = loss_fn(output, label)
print(number)
number += 1
_, preds = torch.max(output, dim = 1)
num_imgs = image.size()[0]
running_correct += torch.sum(preds ==label).item()
running_loss += loss.item()*num_imgs
running_total += num_imgs
if p== 'train':
loss.backward()
optimizer.step()
epoch_acc = float(running_correct/running_total)
epoch_loss = float(running_loss/running_total)
if verbose or (i%10 == 0):
print('Phase:{}, epoch loss: {:.4f} Acc: {:.4f}'.format(p, epoch_loss, epoch_acc))
acc_dict[p].append(epoch_acc)
loss_dict[p].append(epoch_loss)
if p == 'validate':
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
save_model(best_model_wts, model, acc_dict, loss_dict)
else:
if scheduler:
scheduler.step()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val acc: {:4f}'.format(best_acc))
model.load_state_dict(best_model_wts)
return model, acc_dict, loss_dict
def save_model(best_model_wts, model, acc_dict, loss_dict):
model_saved = {'best_model_wts':best_model_wts, 'model':model, 'acc_dict':acc_dict, 'loss_dict':loss_dict}
f=open(output_name,'wb')
pickle.dump(model_saved,f)
f.close()
return None
# from Liu et al.
lr_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr_rate)
interpolation_scale = 0.4
output_name = "pretrained_point001_point4.txt"
model, acc_dict, loss_dict = train_model(model, dataloader, optimizer, loss_fn, interpolation_scale, num_epochs = 50, verbose = True, scheduler= MultiStepLR(optimizer, milestones=[20,40], gamma=0.1), output_name = output_name)
| en | 0.566351 | # build model from Liu et al.'s github code # for untrained use: #config_name = './CNN_design_for_AD-master/config.yaml' # pretrained model #Normalization #label = self.df.loc[idx][['N','E','O','A','C']].values.astype(int) # predict C # full dataset # for debugging and to see if model can learn training set on tiny sample #sample_df_path = './sample.csv' #sample_transformed_dataset = {'train': hcp_dataset(sample_df_path, train = True), # 'validate':hcp_dataset(sample_df_path), # 'test':hcp_dataset(sample_df_path),} # #dataloader_sample = {x: DataLoader(sample_transformed_dataset[x], batch_size=bs, # shuffle=True, num_workers=0) for x in ['train', 'validate','test']} # get data_loader # from Liu et al. | 2.081497 | 2 |
srt.py | KonstT-math/response-time | 0 | 6630596 |
# How to use:
#
# download PsychoPy, as it is required
#
# In the "initialization section":
# 1) Input a sequence which represents the stimulus position as follows: observables=[3,1,2,0,1]
# 2) Set the file path to print output: f=open(r"path\to\file.txt", "w")
# 3) Set the file path for the image stimulus: the_image=r'path\to\image.jpg'
#
# In the "first/second experiment" sections:
# 4) Enter the number of times your experiment(s) is going to be repeated: experiment(observables, repetition_times)
# 5) run the python script (in the command line: python3 name_of_script.py)
# -----------------------------------------------------------------
# --------------------------- imports -----------------------------
# Import the PsychoPy library, etc.
from psychopy import core, visual, event
from random import random, randrange
import numpy as np
# -----------------------------------------------------------------
# -------------------------- functions ----------------------------
# make a vector containing the transitions (ex i->j == [i,j])
def pairlist(tlist):
res = [[tlist[i], tlist[i + 1]] for i in range(len(tlist) - 1)]
res.append([tlist[len(tlist) - 1], tlist[0]])
return res
# a function to create the transition probability distribution matrix
# input: observable sequence
def prob_distro(observables):
# list of transition pairs:
Pairs_observables=pairlist(observables)
    # we count each element (to be used for restriction 1 for block 5):
n0=observables.count(0)
n1=observables.count(1)
n2=observables.count(2)
n3=observables.count(3)
# we create the probability distribution (transition matrix for the Markov process):
for i in range(0,4):
if i==0:
a00=Pairs_observables.count([i,0])/n0
a01=Pairs_observables.count([i,1])/n0
a02=Pairs_observables.count([i,2])/n0
a03=Pairs_observables.count([i,3])/n0
elif i==1:
a10=Pairs_observables.count([i,0])/n1
a11=Pairs_observables.count([i,1])/n1
a12=Pairs_observables.count([i,2])/n1
a13=Pairs_observables.count([i,3])/n1
elif i==2:
a20=Pairs_observables.count([i,0])/n2
a21=Pairs_observables.count([i,1])/n2
a22=Pairs_observables.count([i,2])/n2
a23=Pairs_observables.count([i,3])/n2
else:
a30=Pairs_observables.count([i,0])/n3
a31=Pairs_observables.count([i,1])/n3
a32=Pairs_observables.count([i,2])/n3
a33=Pairs_observables.count([i,3])/n3
A=np.array([[a00,a01,a02,a03], [a10,a11,a12,a13], [a20,a21,a22,a23], [a30,a31,a32,a33]])
return A, n0, n1, n2, n3
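# --- Illustrative sketch (editor's addition, not part of the original script) ---
# For a short sequence the transition matrix is easy to check by hand: row i of
# A holds the observed probabilities of moving from position i to positions
# 0-3, and each row sums to 1. The demo sequence is an arbitrary example.
def _example_transition_matrix():
    demo_sequence = [3, 1, 2, 0, 1]
    demo_A, c0, c1, c2, c3 = prob_distro(demo_sequence)
    return demo_A, (c0, c1, c2, c3)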
# the experiment function
# input: observable sequence, number of times to repeat, file to write on, image stimulus, window
def experiment(observables, repeat):
block=1
while (block <= repeat):
print('BLOCK {}\n'.format(block),file=f)
print('Observables are: {}\n'.format(observables),file=f)
# counter is the variable counting the correct responses
counter=0
# attempt is the variable counting the number of responses for the block
attempt=1
for i in observables:
# fixation cross that appears between image stimulus
fix_cross.draw()
win.flip()
# position for ImageStim: stim.pos = (horizontal axis, vertical axis)
# wait for 3-8 seconds to read feedback and have uncertain wait time before next trial
core.wait(3+int(random()*6))
if i==0:
stim.pos = (0, 0.5) # up
stim.draw()
win.flip() # the win.flip() is required in order for the win to appear
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='up':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
elif i==1:
stim.pos = (0.5, 0) # right
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='right':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
elif i==2:
stim.pos = (0, -0.5) # down
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='down':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
else:
stim.pos = (-0.5, 0) # left
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='left':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
win.flip()
attempt=attempt+1
# prints the number of correct responses to the file:
        print('Number of correct responses for the block: {}\n-------------------------\n\n'.format(counter), file=f)
block=block+1
# a function that implements the Markov model for four(4) states
# input: length of position sequence
# output: position sequence
def process(time):
# possible sequences of events:
transitionName=[["00","01","02","03"],["10","11","12","13"],["20","21","22","23"],["30","31","32","33"]]
# pos_now=0 # starts from state 0
pos_now=randrange(4) # starts from random state
print("Start state: {}".format(pos_now))
position_list=[pos_now]
i=0
# to calculate the probability of the position_List:
prob=1
while i!=time:
if pos_now==0:
change = np.random.choice(transitionName[0],replace=True,p=A[0])
if change=="00":
prob = prob*A[0][0]
pos_now=0
position_list.append(0)
pass
elif change=="01":
prob = prob*A[0][1]
pos_now=1
position_list.append(1)
elif change=="02":
prob = prob*A[0][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[0][3]
pos_now=3
position_list.append(3)
elif pos_now==1:
change = np.random.choice(transitionName[1],replace=True,p=A[1])
if change=="11":
prob = prob*A[1][1]
pos_now=1
position_list.append(1)
pass
elif change=="10":
prob = prob*A[1][0]
pos_now=0
position_list.append(0)
elif change=="12":
prob = prob*A[1][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[1][3]
pos_now=3
position_list.append(3)
elif pos_now==2:
change = np.random.choice(transitionName[2],replace=True,p=A[2])
if change=="22":
prob = prob*A[2][2]
pos_now=2
position_list.append(2)
pass
elif change=="21":
prob = prob*A[2][1]
pos_now=1
position_list.append(1)
elif change=="20":
prob = prob*A[2][0]
pos_now=0
position_list.append(0)
else:
prob = prob*A[2][3]
pos_now=3
position_list.append(3)
else:
change = np.random.choice(transitionName[3],replace=True,p=A[3])
if change=="33":
prob = prob*A[3][3]
pos_now=3
position_list.append(3)
pass
elif change=="31":
prob = prob*A[3][1]
pos_now=1
position_list.append(1)
elif change=="32":
prob = prob*A[3][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[3][0]
pos_now=0
position_list.append(0)
i=i+1
print('Possible states: {}'.format(position_list))
print('Probability of the possible sequence of states: {}'.format(prob))
return position_list
# -----------------------------------------------------------------
# ------------------------ initialization -------------------------
# open a file to print output
# when results are printed to text file, one may format the text so that tab characters (\t)
# are typed before and after "key", "response time", or "evaluation" values.
# This will enable one to easily pass the raw values to spreadsheets or stats software.
f=open(r"path\to\file.txt", "w")
# load image. Set the image file path for your machine..
the_image=r'path\to\image_file'
# create a window to draw in
# use specifications required in your application (see the phychopy documentation)
# use pyglet if possible, it's faster at event handling
wintype='pyglet'
win = visual.Window((800.0,700.0),winType=wintype)
# creates the ImageStim
stim = visual.ImageStim(win, image=the_image)
# fixation cross stimulus to appear at the centre
fix_cross = visual.TextStim(win=win, ori=0, name='text',
text='+', font='Arial',
pos=(1,0), wrapWidth=None,
color='black', colorSpace='rgb', opacity=1,
depth=-4.0)
# make a clock for capturing RT (reaction time)
# Note: psychopy selects a timer implementation as follows:
# 1) On Windows, the Windows Query Performance Counter
# (Retrieves the current value of the performance counter,
# which is a high resolution (<1us) time stamp that can be used for time-interval measurements.)
# API is used using ctypes access.
# 2) On other OS's, if the Python version being used is 2.6 or lower,
# time.time is used. For Python 2.7 and above, the timeit.default_timer
# function is used.
RT = core.Clock()
# set the position states
states=[0,1,2,3]
# here input the sequence of positions for the visual stimulus for the test
# for example:
observables=[3,1,2,0,2,1,3,2,1,0]
# calling prob_distro function on observables sequence
A, n0, n1, n2, n3 = prob_distro(observables)
print(A)
# -----------------------------------------------------------------
# ----------------------- first experiment ------------------------
# the first experiment uses the sequence that was input above.
# one may wish to repeat this experiment several times; each will be called a "block".
print('STANDARD BLOCKS\n',file=f)
experiment(observables, 2)
# -----------------------------------------------------------------
# ----------------------- second experiment -----------------------
# the stochastic block:
# in this block, we again have a sequence of positions of the same length as in the previous ones;
# but this time, the sequences is produced via a Markov stochastic process
# with probability transition matrix (distribution) given by A (see above).
#
# it obeys the following restrictions:
# (1) each state in the new sequence appears the same number of times as in the previous experiment
# (2) the transition from state i to state j in the new sequence, is given by transition distribution A
# in the following, we create the new sequence until it satisfies restriction (1)
# restriction (2) is valid because of the process function (Markov chain)
while True:
observables = process(len(observables)-1)
c0=observables.count(0)
c1=observables.count(1)
c2=observables.count(2)
c3=observables.count(3)
if c0==n0 and c1==n1 and c2==n2 and c3==n3:
break
# -------------------
print('-------------------------\n-------------------------\n',file=f)
print('PROBABILISTIC BLOCKS\n',file=f)
# and the same code as in first experiment:
experiment(observables, 1)
# -----------------------------------------------------------------
# ----------------------- prepare to end --------------------------
# close file
f.close()
# wait for 3 secs
core.wait(3)
# Close the window
win.close()
# Close PsychoPy
core.quit()
|
# How to use:
#
# download PsychoPy, as it is required
#
# In the "initialization section":
# 1) Input a sequence which represents the stimulus position as follows: observables=[3,1,2,0,1]
# 2) Set the file path to print output: f=open(r"path\to\file.txt", "w")
# 3) Set the file path for the image stimulus: the_image=r'path\to\image.jpg'
#
# In the "first/second experiment" sections:
# 4) Enter the number of times your experiment(s) is going to be repeated: experiment(observables, repetition_times)
# 5) run the python script (in the command line: python3 name_of_script.py)
# -----------------------------------------------------------------
# --------------------------- imports -----------------------------
# Import the PsychoPy library, etc.
from psychopy import core, visual, event
from random import random, randrange
import numpy as np
# -----------------------------------------------------------------
# -------------------------- functions ----------------------------
# make a vector containing the transitions (ex i->j == [i,j])
def pairlist(tlist):
res = [[tlist[i], tlist[i + 1]] for i in range(len(tlist) - 1)]
res.append([tlist[len(tlist) - 1], tlist[0]])
return res
# a function to create the transition probability distribution matrix
# input: observable sequence
def prob_distro(observables):
# list of transition pairs:
Pairs_observables=pairlist(observables)
    # we count each element (to be used for restriction 1 for block 5):
n0=observables.count(0)
n1=observables.count(1)
n2=observables.count(2)
n3=observables.count(3)
# we create the probability distribution (transition matrix for the Markov process):
for i in range(0,4):
if i==0:
a00=Pairs_observables.count([i,0])/n0
a01=Pairs_observables.count([i,1])/n0
a02=Pairs_observables.count([i,2])/n0
a03=Pairs_observables.count([i,3])/n0
elif i==1:
a10=Pairs_observables.count([i,0])/n1
a11=Pairs_observables.count([i,1])/n1
a12=Pairs_observables.count([i,2])/n1
a13=Pairs_observables.count([i,3])/n1
elif i==2:
a20=Pairs_observables.count([i,0])/n2
a21=Pairs_observables.count([i,1])/n2
a22=Pairs_observables.count([i,2])/n2
a23=Pairs_observables.count([i,3])/n2
else:
a30=Pairs_observables.count([i,0])/n3
a31=Pairs_observables.count([i,1])/n3
a32=Pairs_observables.count([i,2])/n3
a33=Pairs_observables.count([i,3])/n3
A=np.array([[a00,a01,a02,a03], [a10,a11,a12,a13], [a20,a21,a22,a23], [a30,a31,a32,a33]])
return A, n0, n1, n2, n3
# the experiment function
# input: observable sequence, number of times to repeat, file to write on, image stimulus, window
def experiment(observables, repeat):
block=1
while (block <= repeat):
print('BLOCK {}\n'.format(block),file=f)
print('Observables are: {}\n'.format(observables),file=f)
# counter is the variable counting the correct responses
counter=0
# attempt is the variable counting the number of responses for the block
attempt=1
for i in observables:
# fixation cross that appears between image stimulus
fix_cross.draw()
win.flip()
# position for ImageStim: stim.pos = (horizontal axis, vertical axis)
# wait for 3-8 seconds to read feedback and have uncertain wait time before next trial
core.wait(3+int(random()*6))
if i==0:
stim.pos = (0, 0.5) # up
stim.draw()
win.flip() # the win.flip() is required in order for the win to appear
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='up':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
elif i==1:
stim.pos = (0.5, 0) # right
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='right':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
elif i==2:
stim.pos = (0, -0.5) # down
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='down':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
else:
stim.pos = (-0.5, 0) # left
stim.draw()
win.flip()
# -----------
# clear any keystrokes before starting
event.clearEvents()
allKeys=[]
# reaction time reset
RT.reset()
while len(allKeys)==0: # wait for a keypress
allKeys=event.getKeys(timeStamped=RT)
# note that allKeys = [(key, milliseconds)]
# if you don't have pyglet, you need to get the time explicitly via getTime()
if not wintype == 'pyglet':
allKeys[0][1] = RT.getTime()
# note that allKeys = [(key, milliseconds)]
thekey=allKeys[0][0]
theRT =allKeys[0][1]
flag=0
if thekey=='escape':
core.quit()
elif thekey=='left':
counter=counter+1
flag=1
else:
pass
# appends result to text file:
print('attempt {}: key={} \t reaction time={} \t evaluation={} \n'.format(attempt,thekey,theRT,flag), file=f)
# -----------
win.flip()
attempt=attempt+1
# prints the number of correct responses to the file:
        print('Number of correct responses for the block: {}\n-------------------------\n\n'.format(counter), file=f)
block=block+1
# a function that implements the Markov model for four(4) states
# input: length of position sequence
# output: position sequence
def process(time):
# possible sequences of events:
transitionName=[["00","01","02","03"],["10","11","12","13"],["20","21","22","23"],["30","31","32","33"]]
# pos_now=0 # starts from state 0
pos_now=randrange(4) # starts from random state
print("Start state: {}".format(pos_now))
position_list=[pos_now]
i=0
# to calculate the probability of the position_List:
prob=1
while i!=time:
if pos_now==0:
change = np.random.choice(transitionName[0],replace=True,p=A[0])
if change=="00":
prob = prob*A[0][0]
pos_now=0
position_list.append(0)
pass
elif change=="01":
prob = prob*A[0][1]
pos_now=1
position_list.append(1)
elif change=="02":
prob = prob*A[0][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[0][3]
pos_now=3
position_list.append(3)
elif pos_now==1:
change = np.random.choice(transitionName[1],replace=True,p=A[1])
if change=="11":
prob = prob*A[1][1]
pos_now=1
position_list.append(1)
pass
elif change=="10":
prob = prob*A[1][0]
pos_now=0
position_list.append(0)
elif change=="12":
prob = prob*A[1][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[1][3]
pos_now=3
position_list.append(3)
elif pos_now==2:
change = np.random.choice(transitionName[2],replace=True,p=A[2])
if change=="22":
prob = prob*A[2][2]
pos_now=2
position_list.append(2)
pass
elif change=="21":
prob = prob*A[2][1]
pos_now=1
position_list.append(1)
elif change=="20":
prob = prob*A[2][0]
pos_now=0
position_list.append(0)
else:
prob = prob*A[2][3]
pos_now=3
position_list.append(3)
else:
change = np.random.choice(transitionName[3],replace=True,p=A[3])
if change=="33":
prob = prob*A[3][3]
pos_now=3
position_list.append(3)
pass
elif change=="31":
prob = prob*A[3][1]
pos_now=1
position_list.append(1)
elif change=="32":
prob = prob*A[3][2]
pos_now=2
position_list.append(2)
else:
prob = prob*A[3][0]
pos_now=0
position_list.append(0)
i=i+1
print('Possible states: {}'.format(position_list))
print('Probability of the possible sequence of states: {}'.format(prob))
return position_list
# -----------------------------------------------------------------
# ------------------------ initialization -------------------------
# open a file to print output
# when results are printed to text file, one may format the text so that tab characters (\t)
# are typed before and after "key", "response time", or "evaluation" values.
# This will enable one to easily pass the raw values to spreadsheets or stats software.
f=open(r"path\to\file.txt", "w")
# load image. Set the image file path for your machine..
the_image=r'path\to\image_file'
# create a window to draw in
# use specifications required in your application (see the phychopy documentation)
# use pyglet if possible, it's faster at event handling
wintype='pyglet'
win = visual.Window((800.0,700.0),winType=wintype)
# creates the ImageStim
stim = visual.ImageStim(win, image=the_image)
# fixation cross stimulus to appear at the centre
fix_cross = visual.TextStim(win=win, ori=0, name='text',
text='+', font='Arial',
pos=(1,0), wrapWidth=None,
color='black', colorSpace='rgb', opacity=1,
depth=-4.0)
# make a clock for capturing RT (reaction time)
# Note: psychopy selects a timer implementation as follows:
# 1) On Windows, the Windows Query Performance Counter
# (Retrieves the current value of the performance counter,
# which is a high resolution (<1us) time stamp that can be used for time-interval measurements.)
# API is used using ctypes access.
# 2) On other OS's, if the Python version being used is 2.6 or lower,
# time.time is used. For Python 2.7 and above, the timeit.default_timer
# function is used.
RT = core.Clock()
# set the position states
states=[0,1,2,3]
# here input the sequence of positions for the visual stimulus for the test
# for example:
observables=[3,1,2,0,2,1,3,2,1,0]
# calling prob_distro function on observables sequence
A, n0, n1, n2, n3 = prob_distro(observables)
print(A)
# -----------------------------------------------------------------
# ----------------------- first experiment ------------------------
# the first experiment uses the sequence that was input above.
# one may wish to repeat this experiment several times; each will be called a "block".
print('STANDARD BLOCKS\n',file=f)
experiment(observables, 2)
# -----------------------------------------------------------------
# ----------------------- second experiment -----------------------
# the stochastic block:
# in this block, we again have a sequence of positions of the same length as in the previous ones;
# but this time, the sequences is produced via a Markov stochastic process
# with probability transition matrix (distribution) given by A (see above).
#
# it obeys the following restrictions:
# (1) each state in the new sequence appears the same number of times as in the previous experiment
# (2) the transition from state i to state j in the new sequence, is given by transition distribution A
# in the following, we create the new sequence until it satisfies restriction (1)
# restriction (2) is valid because of the process function (Markov chain)
while True:
observables = process(len(observables)-1)
c0=observables.count(0)
c1=observables.count(1)
c2=observables.count(2)
c3=observables.count(3)
if c0==n0 and c1==n1 and c2==n2 and c3==n3:
break
# -------------------
print('-------------------------\n-------------------------\n',file=f)
print('PROBABILISTIC BLOCKS\n',file=f)
# and the same code as in first experiment:
experiment(observables, 1)
# -----------------------------------------------------------------
# ----------------------- prepare to end --------------------------
# close file
f.close()
# wait for 3 secs
core.wait(3)
# Close the window
win.close()
# Close PsychoPy
core.quit()
| en | 0.772417 | # How to use: # # download psychopy for it is required # # In the "initialization section": # 1) Input a sequence which represents the stimulus position as follows: observables=[3,1,2,0,1] # 2) Set the file path to print output: f=open(r"path\to\file.txt", "w") # 3) Set the file path for the image stimulus: the_image=r'path\to\image.jpg' # # In the "first/second experiment" sections: # 4) Enter the number of times your experiment(s) is going to be repeated: experiment(observables, repetition_times) # 5) run the python script (in the command line: python3 name_of_script.py) # ----------------------------------------------------------------- # --------------------------- imports ----------------------------- # Import the PsychoPy library, et c. # ----------------------------------------------------------------- # -------------------------- functions ---------------------------- # make a vector containing the transitions (ex i->j == [i,j]) # a function to create the transition probability distribution matrix # input: observable sequence # list of transition pairs: # we count each elements (to be used for restriction 1 for block 5): # we create the probability distribution (transition matrix for the Markov process): # the experiment function # input: observable sequence, number of times to repeat, file to write on, image stimulus, window # counter is the variable counting the correct responses # attempt is the variable counting the number of responses for the block # fixation cross that appears between image stimulus # position for ImageStim: stim.pos = (horizontal axis, vertical axis) # wait for 3-8 seconds to read feedback and have uncertain wait time before next trial # up # the win.flip() is required in order for the win to appear # ----------- # clear any keystrokes before starting # reaction time reset # wait for a keypress # note that allKeys = [(key, milliseconds)] # if you don't have pyglet, you need to get the time explicitly via getTime() # note that allKeys = [(key, milliseconds)] # appends result to text file: # ----------- # right # ----------- # clear any keystrokes before starting # reaction time reset # wait for a keypress # note that allKeys = [(key, milliseconds)] # if you don't have pyglet, you need to get the time explicitly via getTime() # note that allKeys = [(key, milliseconds)] # appends result to text file: # ----------- # down # ----------- # clear any keystrokes before starting # reaction time reset # wait for a keypress # note that allKeys = [(key, milliseconds)] # if you don't have pyglet, you need to get the time explicitly via getTime() # note that allKeys = [(key, milliseconds)] # appends result to text file: # ----------- # left # ----------- # clear any keystrokes before starting # reaction time reset # wait for a keypress # note that allKeys = [(key, milliseconds)] # if you don't have pyglet, you need to get the time explicitly via getTime() # note that allKeys = [(key, milliseconds)] # appends result to text file: # ----------- # prints the number of correct responses to the file: # a function that implements the Markov model for four(4) states # input: length of position sequence # output: position sequence # possible sequences of events: # pos_now=0 # starts from state 0 # starts from random state # to calculate the probability of the position_List: # ----------------------------------------------------------------- # ------------------------ initialization ------------------------- # open a file to print output # when results are 
printed to text file, one may format the text so that tab characters (\t) # are typed before and after "key", "responce time", or "evalutation" values. # This will enable one to easily pass the raw values to spreadsheets or stats software. # load image. Set the image file path for your machine.. # create a window to draw in # use specifications required in your application (see the phychopy documentation) # use pyglet if possible, it's faster at event handling # creates the ImageStim # fixation cross stimulus to appear at the centre # make a clock for capturing RT (reaction time) # Note: psychopy selects a timer implementation as follows: # 1) On Windows, the Windows Query Performance Counter # (Retrieves the current value of the performance counter, # which is a high resolution (<1us) time stamp that can be used for time-interval measurements.) # API is used using ctypes access. # 2) On other OS's, if the Python version being used is 2.6 or lower, # time.time is used. For Python 2.7 and above, the timeit.default_timer # function is used. # set the position states # here input the sequence of positions for the visual stimulus for the test # for example: # calling prob_distro function on observables sequence # ----------------------------------------------------------------- # ----------------------- first experiment ------------------------ # the first experiment uses the sequence that was input above. # one may wish to repeat this experiment several times; each will be called a "block". # ----------------------------------------------------------------- # ----------------------- second experiment ----------------------- # the stochastic block: # in this block, we again have a sequence of positions of the same length as in the previous ones; # but this time, the sequences is produced via a Markov stochastic process # with probability transition matrix (distribution) given by A (see above). # # it obeys the following restrictions: # (1) each state in the new sequence appears the same number of times as in the previous experiment # (2) the transition from state i to state j in the new sequence, is given by transition distribution A # in the following, we create the new sequence until it validates restriction (1) # restriction (2) is vadid because of process function (markov chain) # ------------------- # and the same code as in first experiment: # ----------------------------------------------------------------- # ----------------------- prepare to end -------------------------- # close file # wait for 3 secs # Close the window # Close PsychoPy | 3.784015 | 4 |
physics.py | greendragon1985/Minecraft | 4 | 6630597 | <filename>physics.py
# Imports, sorted alphabetically.
# Python packages
# Nothing for now...
# Third-party packages
# Nothing for now...
# Modules from this project
# Nothing for now...
from timer import Timer
__all__ = (
'PhysicsTask', 'PhysicsManager', 'physics_manager',
)
PHYSICS_TIMER_INTERVAL = PHYSICS_TICK = 0.1
class PhysicsTask(object):
def __init__(self, position, accel, obj):
self.accel = accel
self.velocity = [0, 0, 0]
self.falling_time = 0
self.falling_height = 0
self.obj = obj
self.position = list(position)
class PhysicsManager(object):
def __init__(self):
self.timer = Timer(PHYSICS_TIMER_INTERVAL, "physics_timer")
self.started = False
self.tasks = []
def __del__(self):
self.timer.stop()
def update(self):
if len(self.tasks) == 0:
self.started = False
self.timer.stop()
return
for task in self.tasks:
vy = task.velocity[1]
for i in [0, 1, -1]:
v0 = task.velocity[i]
task.velocity[i] += task.accel[i] * PHYSICS_TIMER_INTERVAL
task.position[i] += (v0 + task.velocity[i]) / 2.0 * PHYSICS_TIMER_INTERVAL
task.falling_time += PHYSICS_TIMER_INTERVAL
task.falling_height += abs(task.velocity[1] + vy) / 2.0 * PHYSICS_TIMER_INTERVAL
task.obj.update_position(task.position)
self.timer.add_task(PHYSICS_TICK, self.update)
# do physics to an object which has:
# * method update_position(position) to update its position
def do_physics(self, position, accel, obj):
self.tasks.append(PhysicsTask(position, accel, obj))
if not self.started:
self.started = True
self.timer.add_task(PHYSICS_TICK, self.update)
self.timer.start()
physics_manager = PhysicsManager()
| <filename>physics.py
# Imports, sorted alphabetically.
# Python packages
# Nothing for now...
# Third-party packages
# Nothing for now...
# Modules from this project
# Nothing for now...
from timer import Timer
__all__ = (
'PhysicsTask', 'PhysicsManager', 'physics_manager',
)
PHYSICS_TIMER_INTERVAL = PHYSICS_TICK = 0.1
class PhysicsTask(object):
def __init__(self, position, accel, obj):
self.accel = accel
self.velocity = [0, 0, 0]
self.falling_time = 0
self.falling_height = 0
self.obj = obj
self.position = list(position)
class PhysicsManager(object):
def __init__(self):
self.timer = Timer(PHYSICS_TIMER_INTERVAL, "physics_timer")
self.started = False
self.tasks = []
def __del__(self):
self.timer.stop()
def update(self):
if len(self.tasks) == 0:
self.started = False
self.timer.stop()
return
for task in self.tasks:
vy = task.velocity[1]
for i in [0, 1, -1]:
v0 = task.velocity[i]
task.velocity[i] += task.accel[i] * PHYSICS_TIMER_INTERVAL
task.position[i] += (v0 + task.velocity[i]) / 2.0 * PHYSICS_TIMER_INTERVAL
task.falling_time += PHYSICS_TIMER_INTERVAL
task.falling_height += abs(task.velocity[1] + vy) / 2.0 * PHYSICS_TIMER_INTERVAL
task.obj.update_position(task.position)
self.timer.add_task(PHYSICS_TICK, self.update)
# do physics to an object which has:
# * method update_position(position) to update its position
def do_physics(self, position, accel, obj):
self.tasks.append(PhysicsTask(position, accel, obj))
if not self.started:
self.started = True
self.timer.add_task(PHYSICS_TICK, self.update)
self.timer.start()
physics_manager = PhysicsManager()
| en | 0.791093 | # Imports, sorted alphabetically. # Python packages # Nothing for now... # Third-party packages # Nothing for now... # Modules from this project # Nothing for now... # do physics to an object which has: # * method update_position(position) to update its position | 3.025779 | 3 |
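As the comments in the module note, do_physics only requires the tracked object to expose an update_position(position) method. A small usage sketch, assuming physics.py and its timer module are importable; the FallingBlock class and the gravity vector are hypothetical stand-ins for a real game object:

from physics import physics_manager

class FallingBlock(object):
    def __init__(self, position):
        self.position = list(position)

    def update_position(self, position):
        # Called by PhysicsManager on every physics tick with the new position.
        self.position = list(position)

block = FallingBlock((0.0, 64.0, 0.0))
# Accelerate along the y axis only, i.e. simple gravity; x and z stay untouched.
physics_manager.do_physics(block.position, (0.0, -9.8, 0.0), block)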
cmdb_v0.1/manage.py | codemaker-man/projects | 1 | 6630598 | <gh_stars>1-10
#!/usr/bin/env python
#To distinguish between different configuration files (production, test, etc.), modify manage.py
# Specify the configuration file:
# python manage.py runserver --settings=admin.settings.local_cj
import os
import sys
if __name__ == "__main__":
print(111111, sys.argv)
if len(sys.argv) > 3:
print(222222)
run_arg = sys.argv[2]
if not run_arg.startswith('--settings'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
else:
print(3333)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
from django.core.management import execute_from_command_line
print(4444, os.environ['DJANGO_SETTINGS_MODULE'])
print('-'*20)
execute_from_command_line(sys.argv)
print(5555, os.environ['DJANGO_SETTINGS_MODULE'])
# print(11111, sys.argv)
# if len(sys.argv) > 2 and sys.argv[2].startswith('--settings'):
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.local_cj")
# else:
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
# try:
# from django.core.management import execute_from_command_line
# except ImportError:
# # The above import may fail for some other reason. Ensure that the
# # issue is really that Django is missing to avoid masking other
# # exceptions on Python 2.
# try:
# import django
# except ImportError:
# raise ImportError(
# "Couldn't import Django. Are you sure it's installed and "
# "available on your PYTHONPATH environment variable? Did you "
# "forget to activate a virtual environment?"
# )
# raise
# execute_from_command_line(sys.argv) | #!/usr/bin/env python
#To distinguish between different configuration files (production, test, etc.), modify manage.py
# Specify the configuration file:
# python manage.py runserver --settings=admin.settings.local_cj
import os
import sys
if __name__ == "__main__":
print(111111, sys.argv)
if len(sys.argv) > 3:
print(222222)
run_arg = sys.argv[2]
if not run_arg.startswith('--settings'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
else:
print(3333)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
from django.core.management import execute_from_command_line
print(4444, os.environ['DJANGO_SETTINGS_MODULE'])
print('-'*20)
execute_from_command_line(sys.argv)
print(5555, os.environ['DJANGO_SETTINGS_MODULE'])
# print(11111, sys.argv)
# if len(sys.argv) > 2 and sys.argv[2].startswith('--settings'):
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.local_cj")
# else:
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings")
# try:
# from django.core.management import execute_from_command_line
# except ImportError:
# # The above import may fail for some other reason. Ensure that the
# # issue is really that Django is missing to avoid masking other
# # exceptions on Python 2.
# try:
# import django
# except ImportError:
# raise ImportError(
# "Couldn't import Django. Are you sure it's installed and "
# "available on your PYTHONPATH environment variable? Did you "
# "forget to activate a virtual environment?"
# )
# raise
# execute_from_command_line(sys.argv) | en | 0.485571 | #!/usr/bin/env python # To distinguish between different configuration files (production, test, etc.), modify manage.py # Specify the configuration file: # python manage.py runserver --settings=admin.settings.local_cj # print(11111, sys.argv) # if len(sys.argv) > 2 and sys.argv[2].startswith('--settings'): # os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.local_cj") # else: # os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings.settings") # try: # from django.core.management import execute_from_command_line # except ImportError: # # The above import may fail for some other reason. Ensure that the # # issue is really that Django is missing to avoid masking other # # exceptions on Python 2. # try: # import django # except ImportError: # raise ImportError( # "Couldn't import Django. Are you sure it's installed and " # "available on your PYTHONPATH environment variable? Did you " # "forget to activate a virtual environment?" # ) # raise # execute_from_command_line(sys.argv) | 2.507325 | 3 |
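A compact sketch of the settings-selection logic the comments above describe, shown only as an illustration (the module paths are the ones used above; this is not the project's actual code):

import os
import sys

def default_settings_module(argv, fallback="admin.settings.settings"):
    # Only force a settings module when the command line does not already name one.
    if any(arg.startswith("--settings") for arg in argv):
        return None
    return fallback

module = default_settings_module(sys.argv)
if module:
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", module)

With this shape, python manage.py runserver --settings=admin.settings.local_cj leaves the environment untouched and lets Django honour the flag itself.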
c2t.py | ameent/c2t | 3 | 6630599 | <gh_stars>1-10
__author__ = '<NAME>'
from translator import *
# Ask for user input
output_file_name = raw_input("Enter name of output file (e.g. ammo.d.ts): ")
header_folder = raw_input("Enter top level folder location of header files: ")
module_name = raw_input("Enter a name for top level module for generated classes: ")
excluded_folders = Translator.grab_folder_exclusions()
if module_name == '' or header_folder == '' or output_file_name == '':
print 'You need to specify a module name, output file name and location of header files.'
else:
# Create a translator class to perform the actual parsing
translator = Translator(output_file_name, header_folder, module_name, excluded_folders)
translator.parse()
translator.preprocess()
translator.dump()
| __author__ = '<NAME>'
from translator import *
# Ask for user input
output_file_name = raw_input("Enter name of output file (e.g. ammo.d.ts): ")
header_folder = raw_input("Enter top level folder location of header files: ")
module_name = raw_input("Enter a name for top level module for generated classes: ")
excluded_folders = Translator.grab_folder_exclusions()
if module_name == '' or header_folder == '' or output_file_name == '':
print 'You need to specify a module name, output file name and location of header files.'
else:
# Create a translator class to perform the actual parsing
translator = Translator(output_file_name, header_folder, module_name, excluded_folders)
translator.parse()
translator.preprocess()
translator.dump() | en | 0.662696 | # Ask for user input # Create a translator class to perform the actual parsing | 2.990953 | 3 |
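A non-interactive variant of the same driver, assuming the Translator class imported above keeps the constructor and parse/preprocess/dump methods shown; the file names, folder, and module name are placeholders:

from translator import Translator

def generate_bindings(output_file, header_folder, module_name, excluded_folders=()):
    # Mirror the interactive flow without prompting on stdin.
    translator = Translator(output_file, header_folder, module_name, list(excluded_folders))
    translator.parse()
    translator.preprocess()
    translator.dump()

generate_bindings("ammo.d.ts", "include/", "Ammo", excluded_folders=["tests"])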
lib/membase/api/esrest_client.py | ramalingam-cb/testrunner | 0 | 6630600 | <filename>lib/membase/api/esrest_client.py
from membase.api.rest_client import RestConnection, Bucket, BucketStats, OtpNode, Node
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputSingleton
from pyes import ES, managers, query
import logger
import time
import requests
log = logger.Logger.get_logger()
# EsRestConnection: subclasses RestConnection for use against elastic-search nodes.
# Instance created by membase.api.rest_client.RestConnection
# when elastic-search endpoint is detected so it is not necessary to
# directly import this module into tests
class EsRestConnection(RestConnection):
def __init__(self, serverInfo, proto = "http"):
#serverInfo can be a json object
#only connect pyes to master es node
#in the case that other nodes are taken down
#because http requests will fail
# TODO: dynamic master node detection
if isinstance(serverInfo, dict):
self.ip = serverInfo["ip"]
self.rest_username = serverInfo["username"]
self.rest_password = serverInfo["password"]
self.username = serverInfo["es_username"]
self.password = serverInfo["es_password"]
self.port = 9091 #serverInfo["port"]
else:
self.ip = serverInfo.ip
self.rest_username = serverInfo.rest_username
self.rest_password = serverInfo.rest_password
self.username = serverInfo.es_username
self.password = serverInfo.es_password
self.port = 9091 # serverInfo.port
self.baseUrl = "http://{0}:{1}/".format(self.ip, self.port)
self.capiBaseUrl = self.baseUrl
self.esHttpUrl = "http://{0}:9200".format(self.ip)
self.http_port = str(int(self.port) + 109)
self.proto = proto
self.conn = ES(server=self.esHttpUrl)
self.manager = managers.Cluster(self.conn)
self.test_params = TestInputSingleton.input
self.docs = None
def get_index_stats(self):
return ES.index_stats()
def get_indices(self):
schema = self.conn.indices.get_mapping()
indices_full_list = schema.get_all_indices()
just_indices = [index for index in indices_full_list if not index.startswith(".")]
return just_indices
def get_indices_as_buckets(self, doc_type='couchbaseDocument'):
buckets = []
indices = self.get_indices()
for index in indices:
bucket = Bucket()
q = query.MatchAllQuery()
docs = self.conn.search(q,index,doc_type)
bucket.name = index
bucket.type = "es"
bucket.port = self.port
bucket.authType = None
bucket.saslPassword = <PASSWORD>
bucket.nodes = list()
#vBucketServerMap
bucketStats = BucketStats()
bucketStats.itemCount = docs.count()
bucket.stats = bucketStats
buckets.append(bucket)
bucket.master_id = "es@"+self.ip
return buckets
def get_bucket(self, bucket_name, doc_type='couchbaseDocument'):
for bucket in self.get_indices_as_buckets(doc_type):
if bucket.name == bucket_name:
return bucket
return
def get_buckets(self):
return self.get_indices_as_buckets()
def delete_index(self, name):
self.conn.indices.delete_index(name)
return self.conn.indices.exists_index(name)
def create_index(self, name):
if self.conn.indices.exists_index(name):
self.delete_index(name)
self.conn.indices.create_index(name)
return self.conn.indices.exists_index(name)
def delete_bucket(self, name):
return self.delete_index(name)
def create_bucket(self, *args, **kwargs):
name = 'default'
if len(args) > 0:
name = args[0]
else:
name = kwargs['bucket']
return self.create_index(name)
def is_ns_server_running(self, timeout_in_seconds=360):
return True
def node_statuses(self, timeout=120):
otp_nodes = []
for node in self.get_nodes():
#get otp,get status
otp_node = OtpNode(id=node.id,
status=node.status)
otp_node.ip = node.ip
otp_node.port = node.port
otp_node.replication = None
otp_nodes.append(node)
return otp_nodes
def get_nodes_self(self, timeout=120):
for node in self.get_nodes():
# force to return master node
if node.port == 9091:
return node
return
def get_nodes(self):
es_nodes = []
nodes = self.manager.state()['nodes']
status = self.manager.health()['status']
if status == "green":
status = "healthy"
for node_key in nodes:
nodeInfo = nodes[node_key]
ex_params = self.get_node_params(nodeInfo)
nodeInfo.update({'ssh_password' : ex_params.ssh_password,
'ssh_username' : ex_params.ssh_username})
nodeInfo['key'] = node_key
node = ESNode(nodeInfo)
node.status = status
es_nodes.append(node)
return es_nodes
def get_node_params(self, info):
ip, port = parse_addr(info["transport_address"])
clusters = self.test_params.clusters
master_node = None
for _id in clusters:
for node in clusters[_id]:
if node.ip == ip and int(node.port) == port:
return node
if int(node.port) == 9091:
master_node = node
# use params from master node
return master_node
def search_term(self, key, indices=["default"]):
result = None
params = {"term":{"_id":key}}
query = ES.Search(params)
row = self.conn.search(query, indices = indices)
if row.total > 0:
result = row[0]
return result
def term_exists(self, key, indices=["default"]):
return self.search_term(key, indices = indices) is not None
def all_docs(self, keys_only = False, indices=["default"],size=10000):
q = query.MatchAllQuery()
docs = self.conn.search(q,indices=indices,doc_types='couchbaseDocument')
res_docs = []
for row in docs:
if keys_only:
row = row['meta']['id']
res_docs.append(row)
return res_docs
# check if a key exists by checking all known nodes
# See - CBES-17
# for use when it seems nodes are out of sync
def search_all_nodes(self, key, indices=["default"]):
doc = None
for index in indices:
for _node in self.get_nodes():
ip, port = (_node.ip, _node.ht_port)
r = requests.get('http://%s:%s/%s/couchbaseDocument/%s?preference=_only_node:%s' %\
(ip, port, index, key, _node.key))
if r.status_code == 200 :
if r.json()['_id'] == key:
doc = r.json()
break
return doc
def fetch_bucket_stats(self, bucket_name='default'):
bucket = self.get_bucket(bucket_name=bucket_name)
return bucket.stats
def start_replication(self, *args, **kwargs):
return "es",self.ip
def _rebalance_progress(self, *args, **kwargs):
return 100
def _rebalance_progress_status(self, *args, **kwargs):
return 'not running'
def get_vbuckets(self, *args, **kwargs):
return ()
def replace_template(self, node, file):
f = open(file, 'r')
template = f.read().replace('\n', ' ')
api = "http://{0}:9200/_template/couchbase".format(node.ip)
status, content, header = self._http_request(api, 'PUT', template)
if status:
log.info('uploaded couchbase template: '+file)
else:
log.error('template upload failed: {0}'.format(content))
def add_node(self, user='', password='', remoteIp='', port='8091',zone_name='', services=None):
pass
def update_configuration(self, node, commands):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
for command in commands:
log.info('Adding elastic search config {0} on node {1}'.format(command, self.ip))
shell.send('echo "{0}" >> ~/elasticsearch/config/elasticsearch.yml \n'.format(command))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def reset_configuration(self, node, count=1):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
log.info('Removing last {0} lines from elastic search config on node {1}'.format(count, self.ip))
shell.send('head -n -{0} ~/elasticsearch/config/elasticsearch.yml > temp ; mv temp ~/elasticsearch/config/elasticsearch.yml \n'.format(count))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def start_es_node(self, node):
rmc = RemoteMachineShellConnection(node)
shell=rmc._ssh_client.invoke_shell()
es_kill = "pkill -f elasticsearch;"
shell.send(es_kill+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 30 seconds")
time.sleep(30)
# define es exec path if not in $PATH environment
es_bin = "~/elasticsearch/bin/elasticsearch -Dtransport.couchbase=TRACE -Dcom.couchbase=TRACE > /var/log/es.log 2>&1 &"
if 'es_bin' in TestInputSingleton.input.test_params:
es_bin = TestInputSingleton.input.test_params['es_bin']
# connect to remote node
log.info('Starting node: %s:%s' % (node.ip, node.port))
# start es service
shell.send(es_bin+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 5 seconds before the node can appear")
time.sleep(5)
# wait for new node
tries = 0
while tries < 10:
for cluster_node in self.get_nodes():
if cluster_node.ip == node.ip and cluster_node.port == int(node.port):
return
else:
log.info('Waiting for new node to appear')
time.sleep(5)
tries = tries + 1
raise Exception("failed to add node to cluster: %s:%s" % (node.ip,node.port))
def log_client_error(self, post):
# cannot post req errors to 9091
pass
def vbucket_map_ready(self, *args, **kwargs):
return True
def init_cluster(self, *args, **kwargs):
pass
def init_cluster_memoryQuota(self, *args, **kwargs):
pass
def set_reb_cons_view(self, *args, **kwargs):
pass
def set_reb_index_waiting(self, *args, **kwargs):
pass
def set_rebalance_index_pausing(self, *args, **kwargs):
pass
def set_max_parallel_indexers(self, *args, **kwargs):
pass
def set_max_parallel_replica_indexers(self, *args, **kwargs):
pass
def log_client_error(self, post):
# cannot post req errors to 9091
pass
def vbucket_map_ready(self, *args, **kwargs):
return True
def init_cluster(self, *args, **kwargs):
pass
def init_cluster_memoryQuota(self, *args, **kwargs):
pass
def set_reb_cons_view(self, *args, **kwargs):
pass
def set_reb_index_waiting(self, *args, **kwargs):
pass
def set_rebalance_index_pausing(self, *args, **kwargs):
pass
def set_max_parallel_indexers(self, *args, **kwargs):
pass
def set_max_parallel_replica_indexers(self, *args, **kwargs):
pass
def rebalance(self, otpNodes, ejectedNodes):
# shutdown ejected nodes
# wait for shards to be rebalanced
nodesToShutdown = \
[node for node in self.get_nodes() if node.id in ejectedNodes]
for node in nodesToShutdown:
self.eject_node(node)
def eject_node(self, node):
api = "http://%s:9200/_cluster/nodes/local/_shutdown?delay=0s" % (node.ip)
status, content, header = self._http_request(api, 'POST', '')
if status:
log.info('ejected node: '+node.ip)
else:
log.error('rebalance operation failed: {0}'.format(content))
def monitorRebalance(self, stop_if_loop=False):
# since removed nodes are shutdown use master node for monitoring
return self.get_nodes_self()
def get_pools_info(self):
return {'pools' : []}
def add_remote_cluster(self, *args, **kwargs):
# detect 2:1 mapping and do special cluster add
# otherwise run super method
pass
def remove_all_remote_clusters(self):
pass
def remove_all_replications(self):
pass
def is_cluster_mixed(self):
return False
def set_internalSetting(self, param, value):
return {'ok' : True}
def parse_addr(addr):
ip = addr[addr.rfind('/')+1:addr.rfind(':')]
port = addr[addr.rfind(':')+1:-1]
return str(ip), int(port)
class ESNode(Node):
def __init__(self, info):
super(ESNode, self).__init__()
self.key = str(info['key'])
self.ip, self.port = parse_addr(info["transport_address"])
self.tr_ip, self.tr_port = parse_addr(info["transport_address"])
self.port = 9091
if 'http_address' in info:
self.ht_ip, self.ht_port = parse_addr(info["http_address"])
# truncate after space, or comma
name = str(info['name'][:info['name'].find(' ')])
name = name[:name.find(',')]
self.id = "es_%s@%s" % (name, self.ip)
self.ssh_username = info['ssh_username']
self.ssh_password = info['<PASSWORD>']
self.ssh_key = ''
| <filename>lib/membase/api/esrest_client.py
from membase.api.rest_client import RestConnection, Bucket, BucketStats, OtpNode, Node
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputSingleton
from pyes import ES, managers, query
import logger
import time
import requests
log = logger.Logger.get_logger()
# EsRestConnection: subclasses RestConnection for use against elastic-search nodes.
# Instance created by membase.api.rest_client.RestConnection
# when elastic-search endpoint is detected so it is not necessary to
# directly import this module into tests
class EsRestConnection(RestConnection):
def __init__(self, serverInfo, proto = "http"):
#serverInfo can be a json object
#only connect pyes to master es node
#in the case that other nodes are taken down
#because http requests will fail
# TODO: dynamic master node detection
if isinstance(serverInfo, dict):
self.ip = serverInfo["ip"]
self.rest_username = serverInfo["username"]
self.rest_password = serverInfo["password"]
self.username = serverInfo["es_username"]
self.password = serverInfo["es_password"]
self.port = 9091 #serverInfo["port"]
else:
self.ip = serverInfo.ip
self.rest_username = serverInfo.rest_username
self.rest_password = serverInfo.rest_password
self.username = serverInfo.es_username
self.password = serverInfo.es_password
self.port = 9091 # serverInfo.port
self.baseUrl = "http://{0}:{1}/".format(self.ip, self.port)
self.capiBaseUrl = self.baseUrl
self.esHttpUrl = "http://{0}:9200".format(self.ip)
self.http_port = str(int(self.port) + 109)
self.proto = proto
self.conn = ES(server=self.esHttpUrl)
self.manager = managers.Cluster(self.conn)
self.test_params = TestInputSingleton.input
self.docs = None
def get_index_stats(self):
return ES.index_stats()
def get_indices(self):
schema = self.conn.indices.get_mapping()
indices_full_list = schema.get_all_indices()
just_indices = [index for index in indices_full_list if not index.startswith(".")]
return just_indices
def get_indices_as_buckets(self, doc_type='couchbaseDocument'):
buckets = []
indices = self.get_indices()
for index in indices:
bucket = Bucket()
q = query.MatchAllQuery()
docs = self.conn.search(q,index,doc_type)
bucket.name = index
bucket.type = "es"
bucket.port = self.port
bucket.authType = None
bucket.saslPassword = <PASSWORD>
bucket.nodes = list()
#vBucketServerMap
bucketStats = BucketStats()
bucketStats.itemCount = docs.count()
bucket.stats = bucketStats
buckets.append(bucket)
bucket.master_id = "es@"+self.ip
return buckets
def get_bucket(self, bucket_name, doc_type='couchbaseDocument'):
for bucket in self.get_indices_as_buckets(doc_type):
if bucket.name == bucket_name:
return bucket
return
def get_buckets(self):
return self.get_indices_as_buckets()
def delete_index(self, name):
self.conn.indices.delete_index(name)
return self.conn.indices.exists_index(name)
def create_index(self, name):
if self.conn.indices.exists_index(name):
self.delete_index(name)
self.conn.indices.create_index(name)
return self.conn.indices.exists_index(name)
def delete_bucket(self, name):
return self.delete_index(name)
def create_bucket(self, *args, **kwargs):
name = 'default'
if len(args) > 0:
name = args[0]
else:
name = kwargs['bucket']
return self.create_index(name)
def is_ns_server_running(self, timeout_in_seconds=360):
return True
def node_statuses(self, timeout=120):
otp_nodes = []
for node in self.get_nodes():
#get otp,get status
otp_node = OtpNode(id=node.id,
status=node.status)
otp_node.ip = node.ip
otp_node.port = node.port
otp_node.replication = None
otp_nodes.append(node)
return otp_nodes
def get_nodes_self(self, timeout=120):
for node in self.get_nodes():
# force to return master node
if node.port == 9091:
return node
return
def get_nodes(self):
es_nodes = []
nodes = self.manager.state()['nodes']
status = self.manager.health()['status']
if status == "green":
status = "healthy"
for node_key in nodes:
nodeInfo = nodes[node_key]
ex_params = self.get_node_params(nodeInfo)
nodeInfo.update({'ssh_password' : ex_params.ssh_password,
'ssh_username' : ex_params.ssh_username})
nodeInfo['key'] = node_key
node = ESNode(nodeInfo)
node.status = status
es_nodes.append(node)
return es_nodes
def get_node_params(self, info):
ip, port = parse_addr(info["transport_address"])
clusters = self.test_params.clusters
master_node = None
for _id in clusters:
for node in clusters[_id]:
if node.ip == ip and int(node.port) == port:
return node
if int(node.port) == 9091:
master_node = node
# use params from master node
return master_node
def search_term(self, key, indices=["default"]):
result = None
params = {"term":{"_id":key}}
query = ES.Search(params)
row = self.conn.search(query, indices = indices)
if row.total > 0:
result = row[0]
return result
def term_exists(self, key, indices=["default"]):
return self.search_term(key, indices = indices) is not None
def all_docs(self, keys_only = False, indices=["default"],size=10000):
q = query.MatchAllQuery()
docs = self.conn.search(q,indices=indices,doc_types='couchbaseDocument')
res_docs = []
for row in docs:
if keys_only:
row = row['meta']['id']
res_docs.append(row)
return res_docs
# check if a key exists by checking all known nodes
# See - CBES-17
# for use when it seems nodes are out of sync
def search_all_nodes(self, key, indices=["default"]):
doc = None
for index in indices:
for _node in self.get_nodes():
ip, port = (_node.ip, _node.ht_port)
r = requests.get('http://%s:%s/%s/couchbaseDocument/%s?preference=_only_node:%s' %\
(ip, port, index, key, _node.key))
if r.status_code == 200 :
if r.json()['_id'] == key:
doc = r.json()
break
return doc
def fetch_bucket_stats(self, bucket_name='default'):
bucket = self.get_bucket(bucket_name=bucket_name)
return bucket.stats
def start_replication(self, *args, **kwargs):
return "es",self.ip
def _rebalance_progress(self, *args, **kwargs):
return 100
def _rebalance_progress_status(self, *args, **kwargs):
return 'not running'
def get_vbuckets(self, *args, **kwargs):
return ()
def replace_template(self, node, file):
f = open(file, 'r')
template = f.read().replace('\n', ' ')
api = "http://{0}:9200/_template/couchbase".format(node.ip)
status, content, header = self._http_request(api, 'PUT', template)
if status:
log.info('uploaded couchbase template: '+file)
else:
log.error('template upload failed: {0}'.format(content))
def add_node(self, user='', password='', remoteIp='', port='8091',zone_name='', services=None):
pass
def update_configuration(self, node, commands):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
for command in commands:
log.info('Adding elastic search config {0} on node {1}'.format(command, self.ip))
shell.send('echo "{0}" >> ~/elasticsearch/config/elasticsearch.yml \n'.format(command))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def reset_configuration(self, node, count=1):
rmc = RemoteMachineShellConnection(node)
shell = rmc._ssh_client.invoke_shell()
log.info('Removing last {0} lines from elastic search config on node {1}'.format(count, self.ip))
shell.send('head -n -{0} ~/elasticsearch/config/elasticsearch.yml > temp ; mv temp ~/elasticsearch/config/elasticsearch.yml \n'.format(count))
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
def start_es_node(self, node):
rmc = RemoteMachineShellConnection(node)
shell=rmc._ssh_client.invoke_shell()
es_kill = "pkill -f elasticsearch;"
shell.send(es_kill+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 30 seconds")
time.sleep(30)
# define es exec path if not in $PATH environment
es_bin = "~/elasticsearch/bin/elasticsearch -Dtransport.couchbase=TRACE -Dcom.couchbase=TRACE > /var/log/es.log 2>&1 &"
if 'es_bin' in TestInputSingleton.input.test_params:
es_bin = TestInputSingleton.input.test_params['es_bin']
# connect to remote node
log.info('Starting node: %s:%s' % (node.ip, node.port))
# start es service
shell.send(es_bin+' \n')
while not shell.recv_ready():
time.sleep(2)
rc = shell.recv(1024)
log.info(rc)
log.info("Sleep for 5 seconds before the node can appear")
time.sleep(5)
# wait for new node
tries = 0
while tries < 10:
for cluster_node in self.get_nodes():
if cluster_node.ip == node.ip and cluster_node.port == int(node.port):
return
else:
log.info('Waiting for new node to appear')
time.sleep(5)
tries = tries + 1
raise Exception("failed to add node to cluster: %s:%s" % (node.ip,node.port))
def log_client_error(self, post):
# cannot post req errors to 9091
pass
def vbucket_map_ready(self, *args, **kwargs):
return True
def init_cluster(self, *args, **kwargs):
pass
def init_cluster_memoryQuota(self, *args, **kwargs):
pass
def set_reb_cons_view(self, *args, **kwargs):
pass
def set_reb_index_waiting(self, *args, **kwargs):
pass
def set_rebalance_index_pausing(self, *args, **kwargs):
pass
def set_max_parallel_indexers(self, *args, **kwargs):
pass
def set_max_parallel_replica_indexers(self, *args, **kwargs):
pass
def log_client_error(self, post):
# cannot post req errors to 9091
pass
def vbucket_map_ready(self, *args, **kwargs):
return True
def init_cluster(self, *args, **kwargs):
pass
def init_cluster_memoryQuota(self, *args, **kwargs):
pass
def set_reb_cons_view(self, *args, **kwargs):
pass
def set_reb_index_waiting(self, *args, **kwargs):
pass
def set_rebalance_index_pausing(self, *args, **kwargs):
pass
def set_max_parallel_indexers(self, *args, **kwargs):
pass
def set_max_parallel_replica_indexers(self, *args, **kwargs):
pass
def rebalance(self, otpNodes, ejectedNodes):
# shutdown ejected nodes
# wait for shards to be rebalanced
nodesToShutdown = \
[node for node in self.get_nodes() if node.id in ejectedNodes]
for node in nodesToShutdown:
self.eject_node(node)
def eject_node(self, node):
api = "http://%s:9200/_cluster/nodes/local/_shutdown?delay=0s" % (node.ip)
status, content, header = self._http_request(api, 'POST', '')
if status:
log.info('ejected node: '+node.ip)
else:
log.error('rebalance operation failed: {0}'.format(content))
def monitorRebalance(self, stop_if_loop=False):
# since removed nodes are shutdown use master node for monitoring
return self.get_nodes_self()
def get_pools_info(self):
return {'pools' : []}
def add_remote_cluster(self, *args, **kwargs):
# detect 2:1 mapping and do special cluster add
# otherwise run super method
pass
def remove_all_remote_clusters(self):
pass
def remove_all_replications(self):
pass
def is_cluster_mixed(self):
return False
def set_internalSetting(self, param, value):
return {'ok' : True}
def parse_addr(addr):
ip = addr[addr.rfind('/')+1:addr.rfind(':')]
port = addr[addr.rfind(':')+1:-1]
return str(ip), int(port)
class ESNode(Node):
def __init__(self, info):
super(ESNode, self).__init__()
self.key = str(info['key'])
self.ip, self.port = parse_addr(info["transport_address"])
self.tr_ip, self.tr_port = parse_addr(info["transport_address"])
self.port = 9091
if 'http_address' in info:
self.ht_ip, self.ht_port = parse_addr(info["http_address"])
# truncate after space, or comma
name = str(info['name'][:info['name'].find(' ')])
name = name[:name.find(',')]
self.id = "es_%s@%s" % (name, self.ip)
self.ssh_username = info['ssh_username']
self.ssh_password = info['<PASSWORD>']
self.ssh_key = ''
| en | 0.767089 | # EsRestConnection: subclasses RestConnection for use against elastic-search nodes. # Instance created by membase.api.rest_client.RestConnection # when elastic-search endpoint is detected so it is not necessary to # directly import this module into tests #serverInfo can be a json object #only connect pyes to master es node #in the case that other nodes are taken down #because http requests will fail # TODO: dynamic master node detection #serverInfo["port"] # serverInfo.port #vBucketServerMap #get otp,get status # force to return master node # use params from master node # check if a key exists by checking all known nodes # See - CBES-17 # for use when it seems nodes are out of sync # define es exec path if not in $PATH environment # connect to remote node # start es service # wait for new node # cannot post req errors to 9091 # cannot post req errors to 9091 # shutdown ejected nodes # wait for shards to be rebalanced # since removed nodes are shutdown use master node for monitoring # detect 2:1 mapping and do spectial cluster add # otherwise run super method # truncate after space, or comma | 2.301711 | 2 |
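A usage sketch for the connection class above; the address and credentials are placeholders, and the dictionary keys are the ones read by __init__ when a plain dict is passed in. It assumes a reachable Elasticsearch node with a "default" index:

from membase.api.esrest_client import EsRestConnection

server_info = {
    "ip": "10.1.2.3",             # placeholder Elasticsearch host
    "username": "Administrator",  # REST credentials
    "password": "password",
    "es_username": "admin",       # Elasticsearch credentials
    "es_password": "admin",
}

conn = EsRestConnection(server_info)
print(conn.get_indices())                             # user-visible index names
print(conn.fetch_bucket_stats("default").itemCount)   # document count for one index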
appengine/findit/libs/gitiles/test/commit_util_test.py | mcgreevy/chromium-infra | 1 | 6630601 | <filename>appengine/findit/libs/gitiles/test/commit_util_test.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import StringIO
from testing_utils import testing
import textwrap
import urllib2
from libs.gitiles import commit_util
class CodeReviewUtilTest(testing.AppengineTestCase):
def testExtractChangeInfo(self):
testcases = [
{
'message':
'balabala...\n'
'\n'
'BUG=604502\n'
'\n'
'Review-Url: https://codereview.chromium.org/1927593004\n'
'Cr-Commit-Position: refs/heads/master@{#390254}\n',
'commit_position': 390254,
'code_review_url': 'https://codereview.chromium.org/1927593004',
'change_id': '1927593004',
'host': 'codereview.chromium.org',
},
{
'message':
'balabala...\n'
'\n'
'BUG=409934\n'
'\n'
'Review URL: https://codereview.chromium.org/547753003\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#293661}',
'commit_position': 293661,
'code_review_url': 'https://codereview.chromium.org/547753003',
'change_id': '547753003',
'host': 'codereview.chromium.org',
},
{
'message':
'Review URL: https://codereview.chromium.org/469523002\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#289120}',
'commit_position': 289120,
'code_review_url': 'https://codereview.chromium.org/469523002',
'change_id': '469523002',
'host': 'codereview.chromium.org',
},
{
'message':
'balabala...\n'
'\n'
'balabala...\n'
'\n'
'R=<EMAIL>\n'
'\n'
'Review URL: https://codereview.chromium.org/469523002\n',
'commit_position': None,
'code_review_url': 'https://codereview.chromium.org/469523002',
'change_id': '469523002',
'host': 'codereview.chromium.org',
},
{
'message': None,
'commit_position': None,
'code_review_url': None,
'change_id': None,
'host': None,
},
{
'message': 'abc',
'commit_position': None,
'code_review_url': None,
'change_id': None,
'host': None,
},
{
'message':
'balabala...\n'
'\n'
'balabala...\n'
'\n'
'R=<EMAIL>\n'
'\n'
'Change-Id: Iaa54f242b5b2fa10870503ef88291b9422cb47ca\n'
'Reviewed-on: https://chromium-review.googlesource.com/45425\n'
'Cr-Commit-Position: refs/heads/master@{#456563}',
'commit_position': 456563,
'code_review_url': 'https://chromium-review.googlesource.com/q/'
'Iaa54f242b5b2fa10870503ef88291b9422cb47ca',
'change_id': 'Iaa54f242b5b2fa10870503ef88291b9422cb47ca',
'host': 'chromium-review.googlesource.com',
}
]
for testcase in testcases:
change_info = commit_util.ExtractChangeInfo(testcase['message'])
self.assertEqual(
change_info.get('commit_position'), testcase['commit_position'])
self.assertEqual(
change_info.get('code_review_url'), testcase['code_review_url'])
self.assertEqual(change_info.get('host'), testcase['host'])
self.assertEqual(
change_info.get('change_id'), testcase['change_id'])
def testNormalizeEmail(self):
self.assertEqual(commit_util.NormalizeEmail(
'<EMAIL>@bbb929c8-8fbe-4397-9dbb-9b2b20218538'),
'<EMAIL>')
def testGetRevertedRevision(self):
message = (
'Revert of test1\n\nReason for revert:\nrevert test1\n\n'
'Original issue\'s description:\n> test 1\n>\n'
'> description of test 1.\n>\n> BUG=none\n> TEST=none\n'
'> R=<EMAIL>\n> TBR=<EMAIL>\n>\n'
'> Committed: https://chromium.googlesource.com/chromium/src/+/'
'c9cc182781484f9010f062859cda048afefefefe\n'
'> Cr-Commit-Position: refs/heads/master@{#341992}\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\nNOTREECHECKS=true\n'
'NOTRY=true\nBUG=none\n\n'
'Review URL: https://codereview.chromium.org/1278653002\n\n'
'Cr-Commit-Position: refs/heads/master@{#342013}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertEqual('c9cc182781484f9010f062859cda048afefefefe',
reverted_revision)
def testGetRevertedRevisionRevertOfRevert(self):
message = (
'Revert of Revert\n\nReason for revert:\nRevert of revert\n\n'
'Original issue\'s description:\n> test case of revert of revert\n>\n'
'> Reason for revert:\n> reason\n>\n> Original issue\'s description:\n'
'> > base cl\n> >\n> > R=kalman\n> > BUG=424661\n> >\n'
'> > Committed: https://crrev.com/34ea66b8ac1d56dadd670431063857ffdd\n'
'> > Cr-Commit-Position: refs/heads/master@{#326953}\n>\n'
'> TBR=<EMAIL>\n> NOPRESUBMIT=true\n'
'> NOTREECHECKS=true\n> NOTRY=true\n> BUG=424661\n>\n'
'> Committed: https://crrev.com/76a7e3446188256ca240dc31f78de29511a'
'2c322\n'
'> Cr-Commit-Position: refs/heads/master@{#327021}\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\n'
'NOTREECHECKS=true\nNOTRY=true\nBUG=424661\n\n'
'Review URL: https://codereview.chromium.org/1161773008\n\n'
'Cr-Commit-Position: refs/heads/master@{#332062}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertEqual('76a7e3446188256ca240dc31f78de29511a2c322',
reverted_revision)
def testGetRevertedRevisionNoRevertedCL(self):
message = (
'Test for not revert cl\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\n'
'NOTREECHECKS=true\nNOTRY=true\nBUG=424661\n\n'
'Review URL: https://codereview.chromium.org/1161773008\n\n'
'Cr-Commit-Position: refs/heads/master@{#332062}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertIsNone(reverted_revision)
def testDistanceBetweenLineRangesErrors(self):
self.assertRaises(ValueError, lambda:
commit_util.DistanceBetweenLineRanges((1,0), (2,3)))
self.assertRaises(ValueError, lambda:
commit_util.DistanceBetweenLineRanges((2,3), (1,0)))
def testDistanceBetweenLineRangesSuccesses(self):
tests = [
(2, (1,2), (4,5)),
(0, (1,3), (2,4)),
(0, (1,4), (2,3)),
]
for expected_distance, range1, range2 in tests:
distance12 = commit_util.DistanceBetweenLineRanges(range1, range2)
distance21 = commit_util.DistanceBetweenLineRanges(range2, range1)
self.assertEqual(distance12, distance21)
self.assertEqual(expected_distance, distance12)
| <filename>appengine/findit/libs/gitiles/test/commit_util_test.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import StringIO
from testing_utils import testing
import textwrap
import urllib2
from libs.gitiles import commit_util
class CodeReviewUtilTest(testing.AppengineTestCase):
def testExtractChangeInfo(self):
testcases = [
{
'message':
'balabala...\n'
'\n'
'BUG=604502\n'
'\n'
'Review-Url: https://codereview.chromium.org/1927593004\n'
'Cr-Commit-Position: refs/heads/master@{#390254}\n',
'commit_position': 390254,
'code_review_url': 'https://codereview.chromium.org/1927593004',
'change_id': '1927593004',
'host': 'codereview.chromium.org',
},
{
'message':
'balabala...\n'
'\n'
'BUG=409934\n'
'\n'
'Review URL: https://codereview.chromium.org/547753003\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#293661}',
'commit_position': 293661,
'code_review_url': 'https://codereview.chromium.org/547753003',
'change_id': '547753003',
'host': 'codereview.chromium.org',
},
{
'message':
'Review URL: https://codereview.chromium.org/469523002\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#289120}',
'commit_position': 289120,
'code_review_url': 'https://codereview.chromium.org/469523002',
'change_id': '469523002',
'host': 'codereview.chromium.org',
},
{
'message':
'balabala...\n'
'\n'
'balabala...\n'
'\n'
'R=<EMAIL>\n'
'\n'
'Review URL: https://codereview.chromium.org/469523002\n',
'commit_position': None,
'code_review_url': 'https://codereview.chromium.org/469523002',
'change_id': '469523002',
'host': 'codereview.chromium.org',
},
{
'message': None,
'commit_position': None,
'code_review_url': None,
'change_id': None,
'host': None,
},
{
'message': 'abc',
'commit_position': None,
'code_review_url': None,
'change_id': None,
'host': None,
},
{
'message':
'balabala...\n'
'\n'
'balabala...\n'
'\n'
'R=<EMAIL>\n'
'\n'
'Change-Id: Iaa54f242b5b2fa10870503ef88291b9422cb47ca\n'
'Reviewed-on: https://chromium-review.googlesource.com/45425\n'
'Cr-Commit-Position: refs/heads/master@{#456563}',
'commit_position': 456563,
'code_review_url': 'https://chromium-review.googlesource.com/q/'
'Iaa54f242b5b2fa10870503ef88291b9422cb47ca',
'change_id': 'Iaa54f242b5b2fa10870503ef88291b9422cb47ca',
'host': 'chromium-review.googlesource.com',
}
]
for testcase in testcases:
change_info = commit_util.ExtractChangeInfo(testcase['message'])
self.assertEqual(
change_info.get('commit_position'), testcase['commit_position'])
self.assertEqual(
change_info.get('code_review_url'), testcase['code_review_url'])
self.assertEqual(change_info.get('host'), testcase['host'])
self.assertEqual(
change_info.get('change_id'), testcase['change_id'])
def testNormalizeEmail(self):
self.assertEqual(commit_util.NormalizeEmail(
'<EMAIL>@bbb929c8-8fbe-4397-9dbb-9b2b20218538'),
'<EMAIL>')
def testGetRevertedRevision(self):
message = (
'Revert of test1\n\nReason for revert:\nrevert test1\n\n'
'Original issue\'s description:\n> test 1\n>\n'
'> description of test 1.\n>\n> BUG=none\n> TEST=none\n'
'> R=<EMAIL>\n> TBR=<EMAIL>\n>\n'
'> Committed: https://chromium.googlesource.com/chromium/src/+/'
'c9cc182781484f9010f062859cda048afefefefe\n'
'> Cr-Commit-Position: refs/heads/master@{#341992}\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\nNOTREECHECKS=true\n'
'NOTRY=true\nBUG=none\n\n'
'Review URL: https://codereview.chromium.org/1278653002\n\n'
'Cr-Commit-Position: refs/heads/master@{#342013}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertEqual('c9cc182781484f9010f062859cda048afefefefe',
reverted_revision)
def testGetRevertedRevisionRevertOfRevert(self):
message = (
'Revert of Revert\n\nReason for revert:\nRevert of revert\n\n'
'Original issue\'s description:\n> test case of revert of revert\n>\n'
'> Reason for revert:\n> reason\n>\n> Original issue\'s description:\n'
'> > base cl\n> >\n> > R=kalman\n> > BUG=424661\n> >\n'
'> > Committed: https://crrev.com/34ea66b8ac1d56dadd670431063857ffdd\n'
'> > Cr-Commit-Position: refs/heads/master@{#326953}\n>\n'
'> TBR=<EMAIL>\n> NOPRESUBMIT=true\n'
'> NOTREECHECKS=true\n> NOTRY=true\n> BUG=424661\n>\n'
'> Committed: https://crrev.com/76a7e3446188256ca240dc31f78de29511a'
'2c322\n'
'> Cr-Commit-Position: refs/heads/master@{#327021}\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\n'
'NOTREECHECKS=true\nNOTRY=true\nBUG=424661\n\n'
'Review URL: https://codereview.chromium.org/1161773008\n\n'
'Cr-Commit-Position: refs/heads/master@{#332062}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertEqual('76a7e3446188256ca240dc31f78de29511a2c322',
reverted_revision)
def testGetRevertedRevisionNoRevertedCL(self):
message = (
'Test for not revert cl\n\n'
'TBR=<EMAIL>\nNOPRESUBMIT=true\n'
'NOTREECHECKS=true\nNOTRY=true\nBUG=424661\n\n'
'Review URL: https://codereview.chromium.org/1161773008\n\n'
'Cr-Commit-Position: refs/heads/master@{#332062}\n')
reverted_revision = commit_util.GetRevertedRevision(message)
self.assertIsNone(reverted_revision)
def testDistanceBetweenLineRangesErrors(self):
self.assertRaises(ValueError, lambda:
commit_util.DistanceBetweenLineRanges((1,0), (2,3)))
self.assertRaises(ValueError, lambda:
commit_util.DistanceBetweenLineRanges((2,3), (1,0)))
def testDistanceBetweenLineRangesSuccesses(self):
tests = [
(2, (1,2), (4,5)),
(0, (1,3), (2,4)),
(0, (1,4), (2,3)),
]
for expected_distance, range1, range2 in tests:
distance12 = commit_util.DistanceBetweenLineRanges(range1, range2)
distance21 = commit_util.DistanceBetweenLineRanges(range2, range1)
self.assertEqual(distance12, distance21)
self.assertEqual(expected_distance, distance12)
| en | 0.690358 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. #390254}\n', #293661}', #289120}', #456563}', #341992}\n\n' #342013}\n') #326953}\n>\n' #327021}\n\n' #332062}\n') #332062}\n') | 2.045183 | 2 |
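The cases above pin down the footer conventions being parsed (Review URL / Review-Url lines and Cr-Commit-Position markers). A minimal regex sketch of that extraction, written independently of the real commit_util implementation, might look like:

import re

def extract_commit_position(message):
    # Matches footers like "Cr-Commit-Position: refs/heads/master@{#390254}".
    match = re.search(r'Cr-Commit-Position: refs/heads/master@\{#(\d+)\}', message or '')
    return int(match.group(1)) if match else None

def extract_review_url(message):
    # Accepts both the older "Review URL:" and the newer "Review-Url:" footers.
    match = re.search(r'Review[- ]U[Rr][Ll]: (\S+)', message or '')
    return match.group(1) if match else None

assert extract_commit_position('Cr-Commit-Position: refs/heads/master@{#390254}\n') == 390254
assert extract_review_url('Review URL: https://codereview.chromium.org/547753003\n') == \
    'https://codereview.chromium.org/547753003'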
python/SquareEveryDigit/setup.py | dheraclio/codewars | 0 | 6630602 | <filename>python/SquareEveryDigit/setup.py
from setuptools import setup, find_packages
setup (
name='SquareEveryDigit',
version='0.1',
packages=find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires=['foo>=3'],
# Fill in these to make your Egg ready for upload to
# PyPI
author='daniel',
author_email='',
#summary = 'Just another Python package for the cheese shop',
url='',
license='',
long_description='Long description of the package',
# could also include long_description, download_url, classifiers, etc.
) | <filename>python/SquareEveryDigit/setup.py
from setuptools import setup, find_packages
setup (
name='SquareEveryDigit',
version='0.1',
packages=find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires=['foo>=3'],
# Fill in these to make your Egg ready for upload to
# PyPI
author='daniel',
author_email='',
#summary = 'Just another Python package for the cheese shop',
url='',
license='',
long_description='Long description of the package',
# could also include long_description, download_url, classifiers, etc.
) | en | 0.893719 | # Declare your packages' dependencies here, for eg: # Fill in these to make your Egg ready for upload to # PyPI #summary = 'Just another Python package for the cheese shop', # could also include long_description, download_url, classifiers, etc. | 1.996122 | 2 |
boa/code/line.py | chisleu/neo-boa | 2 | 6630603 | from byteplay3 import *
from boa.code import pyop
class Line():
"""
"""
items = None
def __init__(self, item_list):
self.items = item_list
@property
def is_import(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op in [pyop.IMPORT_NAME, pyop.IMPORT_FROM, pyop.IMPORT_STAR]:
return True
return False
@property
def is_constant(self):
"""
:return:
"""
return (len(self.items) == 3 or len(self.items) == 5) and self.items[1][0] == pyop.LOAD_CONST and self.items[2][0] == pyop.STORE_NAME
# return False
@property
def is_module_method_call(self):
if not self.is_class:
return self.items[-2][0] == pyop.CALL_FUNCTION and self.items[-1][0] == pyop.STORE_NAME
return False
@property
def is_docstring(self):
"""
returns whether a line is a docstring
:return: whether a line is a documentation string
:rtype: bool
"""
for item in self.items:
if item[0] == pyop.STORE_NAME and item[1] == '__doc__':
return True
return False
@property
def is_method(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op == pyop.MAKE_FUNCTION:
return True
return False
@property
def is_class(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op == pyop.LOAD_BUILD_CLASS:
return True
return False
@property
def code_object(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if type(arg) is Code:
return arg
return None
@property
def is_action_registration(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if arg == 'RegisterAction':
return True
@property
def is_smart_contract_appcall_registration(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if arg == 'RegisterAppCall':
return True
| from byteplay3 import *
from boa.code import pyop
class Line():
"""
"""
items = None
def __init__(self, item_list):
self.items = item_list
@property
def is_import(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op in [pyop.IMPORT_NAME, pyop.IMPORT_FROM, pyop.IMPORT_STAR]:
return True
return False
@property
def is_constant(self):
"""
:return:
"""
return (len(self.items) == 3 or len(self.items) == 5) and self.items[1][0] == pyop.LOAD_CONST and self.items[2][0] == pyop.STORE_NAME
# return False
@property
def is_module_method_call(self):
if not self.is_class:
return self.items[-2][0] == pyop.CALL_FUNCTION and self.items[-1][0] == pyop.STORE_NAME
return False
@property
def is_docstring(self):
"""
returns whether a line is a docstring
:return: whether a line is a documentation string
:rtype: bool
"""
for item in self.items:
if item[0] == pyop.STORE_NAME and item[1] == '__doc__':
return True
return False
@property
def is_method(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op == pyop.MAKE_FUNCTION:
return True
return False
@property
def is_class(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if op == pyop.LOAD_BUILD_CLASS:
return True
return False
@property
def code_object(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if type(arg) is Code:
return arg
return None
@property
def is_action_registration(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if arg == 'RegisterAction':
return True
@property
def is_smart_contract_appcall_registration(self):
"""
:return:
"""
for i, (op, arg) in enumerate(self.items):
if arg == 'RegisterAppCall':
return True
| en | 0.302012 | :return: :return: # return False returns whether a line is a docstring :return: whether a line is a documentation string :rtype: bool :return: :return: :return: :return: :return: | 2.63847 | 3 |
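A short sketch of how those predicates are exercised, using hand-built byteplay3-style (opcode, argument) tuples in place of what the boa compiler would normally supply; the constants and names below are illustrative assumptions, not taken from the project:

from byteplay3 import SetLineno
from boa.code import pyop
from boa.code.line import Line

import_line = Line([(SetLineno, 1), (pyop.IMPORT_NAME, 'boa.blockchain.vm'), (pyop.STORE_NAME, 'vm')])
print(import_line.is_import)      # True: an IMPORT_NAME opcode appears among the items

constant_line = Line([(SetLineno, 3), (pyop.LOAD_CONST, 7), (pyop.STORE_NAME, 'OWNER')])
print(constant_line.is_constant)  # True: three items ending in LOAD_CONST, STORE_NAME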
tests/test_rnmrtk.py | tjragan/nmrglue | 1 | 6630604 | <filename>tests/test_rnmrtk.py
""" Tests for the fileio.rnmrtk submodule """
import tempfile
import os
from numpy.testing import assert_array_equal
import nmrglue as ng
from nose.plugins.attrib import attr
from setup import DATA_DIR
# subroutines
def write_readback(dic, data):
""" write out and readback a RNMRTK file. """
tf = tempfile.mktemp(suffix='.sec', dir='.')
ng.rnmrtk.write(tf, dic, data)
rdic, rdata = ng.rnmrtk.read(tf)
os.remove(tf)
os.remove(tf.replace('.sec', '.par'))
assert_array_equal(data, rdata)
assert dic == rdic
def lowmem_write_readback(dic, data):
""" lowmemory write out and readback a RNMTRK file. """
tf = tempfile.mktemp(suffix='.sec', dir='.')
ng.rnmrtk.write_lowmem(tf, dic, data)
rdic, rdata = ng.rnmrtk.read_lowmem(tf)
# check value [0,1,...]
s = tuple(range(data.ndim))
assert data[s] == rdata[s]
assert dic == rdic
print(tf)
os.remove(tf)
os.remove(tf.replace('.sec', '.par'))
# tests
@attr(speed='fast')
def test_1d_time():
""" reading/writing of 1D RNMRTK time domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, 'rnmrtk_1d', 'time_1d.sec'))
assert data.shape == (1500, )
assert round(data[0].real, 2) == 91899.24
assert round(data[0].imag, 2) == -1964.70
assert round(data[1].real, 2) == 168844.25
assert round(data[1].imag, 2) == -49503.41
assert dic['sw'][0] == 50000.0
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 99.0
write_readback(dic, data)
@attr(speed='fast')
def test_1d_freq():
""" reading/writing of 1D RNMRTK frequency domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, 'rnmrtk_1d', 'freq_1d.sec'))
assert data.shape == (4096, )
assert round(data[0], 2) == -1726.76
assert round(data[1], 2) == -1702.76
assert dic['sw'][0] == 50000.0
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 99.0
write_readback(dic, data)
@attr(speed='fast')
def test_2d_time():
""" reading/writing of 2D RNMRTK time domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, "rnmrtk_2d", "time_2d.sec"))
assert data.shape == (332, 1500)
assert round(data[0, 1].real, 2) == 360.07
assert round(data[0, 1].imag, 2) == -223.20
assert round(data[10, 18].real, 2) == 17.93
assert round(data[10, 18].imag, 2) == -67.20
assert dic['sw'][1] == 50000.0
assert dic['sf'][1] == 125.69
assert dic['ppm'][1] == 55.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 50.65
assert dic['ppm'][0] == 120.0
write_readback(dic, data)
@attr(speed='fast')
def test_2d_freq():
""" reading/writing of 2D RNMRTK frequency domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, "rnmrtk_2d", "freq_2d.sec"))
assert data.shape == (2048, 4096)
assert round(data[0, 1], 2) == -.19
assert round(data[10, 18], 2) == 0.88
assert dic['sw'][1] == 50000.0
assert dic['sf'][1] == 125.69
assert dic['ppm'][1] == 55.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 50.65
assert dic['ppm'][0] == 120.0
write_readback(dic, data)
@attr(speed='fast')
def test_2d_time_lowmem():
""" low memory reading/writing of 2D RNMRTK time domain file """
dic, data = ng.rnmrtk.read_lowmem(
os.path.join(DATA_DIR, "rnmrtk_2d", "time_2d.sec"))
assert data.shape == (332, 1500)
assert round(data[0, 1].real, 2) == 360.07
assert round(data[0, 1].imag, 2) == -223.20
assert round(data[10, 18].real, 2) == 17.93
assert round(data[10, 18].imag, 2) == -67.20
assert dic['sw'][1] == 50000.0
assert dic['sf'][1] == 125.69
assert dic['ppm'][1] == 55.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 50.65
assert dic['ppm'][0] == 120.0
lowmem_write_readback(dic, data)
@attr(speed='fast')
def test_2d_freq_lowmem():
""" low memory reading/writing of 2D RNMRTK frequency domain file """
dic, data = ng.rnmrtk.read_lowmem(
os.path.join(DATA_DIR, "rnmrtk_2d", "freq_2d.sec"))
assert data.shape == (2048, 4096)
assert round(data[0, 1], 2) == -.19
assert round(data[10, 18], 2) == 0.88
assert dic['sw'][1] == 50000.0
assert dic['sf'][1] == 125.69
assert dic['ppm'][1] == 55.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 50.65
assert dic['ppm'][0] == 120.0
lowmem_write_readback(dic, data)
@attr(speed='slow')
def test_3d_time():
""" reading/writing of 3D RNMRTK time domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, "rnmrtk_3d", "time_3d.sec"))
assert data.shape == (128, 88, 1250)
assert round(data[0, 1, 2].real, 2) == 7.98
assert round(data[0, 1, 2].imag, 2) == 33.82
assert round(data[10, 11, 18].real, 2) == -9.36
assert round(data[10, 11, 18].imag, 2) == -7.75
assert dic['sw'][2] == 50000.0
assert dic['sf'][2] == 125.68
assert dic['ppm'][2] == 56.0
assert dic['sw'][1] == 2777.778
assert dic['sf'][1] == 50.65
assert dic['ppm'][1] == 120.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 56.0
write_readback(dic, data)
@attr(speed='slow')
def test_3d_freq():
""" reading/writing of 3D RNMRTK frequency domain file """
dic, data = ng.rnmrtk.read(
os.path.join(DATA_DIR, "rnmrtk_3d", "freq_3d.sec"))
assert data.shape == (128, 128, 4096)
assert round(data[0, 1, 2], 2) == 3.23
assert round(data[10, 11, 18], 2) == 1.16
assert dic['sw'][2] == 50000.0
assert dic['sf'][2] == 125.68
assert dic['ppm'][2] == 56.0
assert dic['sw'][1] == 2777.778
assert dic['sf'][1] == 50.65
assert dic['ppm'][1] == 120.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 56.0
write_readback(dic, data)
@attr(speed='slow')
def test_3d_time_lowmem():
""" low memory reading/writing of 3D RNMRTK time domain file """
dic, data = ng.rnmrtk.read_lowmem(
os.path.join(DATA_DIR, "rnmrtk_3d", "time_3d.sec"))
assert data.shape == (128, 88, 1250)
assert round(data[0, 1, 2].real, 2) == 7.98
assert round(data[0, 1, 2].imag, 2) == 33.82
assert round(data[10, 11, 18].real, 2) == -9.36
assert round(data[10, 11, 18].imag, 2) == -7.75
assert dic['sw'][2] == 50000.0
assert dic['sf'][2] == 125.68
assert dic['ppm'][2] == 56.0
assert dic['sw'][1] == 2777.778
assert dic['sf'][1] == 50.65
assert dic['ppm'][1] == 120.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 56.0
lowmem_write_readback(dic, data)
@attr(speed='slow')
def test_3d_freq_lowmem():
""" low memory reading/writing of 3D RNMRTK frequency domain file """
dic, data = ng.rnmrtk.read_lowmem(
os.path.join(DATA_DIR, "rnmrtk_3d", "freq_3d.sec"))
assert data.shape == (128, 128, 4096)
assert round(data[0, 1, 2], 2) == 3.23
assert round(data[10, 11, 18], 2) == 1.16
assert dic['sw'][2] == 50000.0
assert dic['sf'][2] == 125.68
assert dic['ppm'][2] == 56.0
assert dic['sw'][1] == 2777.778
assert dic['sf'][1] == 50.65
assert dic['ppm'][1] == 120.0
assert dic['sw'][0] == 5555.556
assert dic['sf'][0] == 125.68
assert dic['ppm'][0] == 56.0
lowmem_write_readback(dic, data)
@attr(speed='fast')
def test_3d_transpose():
""" reading/writing of transposed 3D RNMRTK time domain file """
dir_3d = os.path.join(DATA_DIR, 'rnmrtk_3d')
# T1 T2 T3 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t1_t2_t3.sec"))
assert data.shape == (128, 88, 36)
assert round(data[2, 6, 4].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
# T1 T3 T2 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t1_t3_t2.sec"))
assert data.shape == (128, 72, 44)
assert round(data[2, 8, 3].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
# T2 T1 T3 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t2_t1_t3.sec"))
assert data.shape == (88, 128, 36)
assert round(data[6, 2, 4].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
# T2 T3 T1 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t2_t3_t1.sec"))
assert data.shape == (88, 72, 64)
assert round(data[6, 8, 1].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
# T3 T1 T2 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t3_t1_t2.sec"))
assert data.shape == (72, 128, 44)
assert round(data[8, 2, 3].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
# T3 T2 T1 ordering
dic, data = ng.rnmrtk.read(os.path.join(dir_3d, "time_3d_t3_t2_t1.sec"))
assert data.shape == (72, 88, 64)
assert round(data[8, 6, 1].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
write_readback(dic, data)
@attr(speed='slow')
def test_3d_transpose_lowmem():
""" low mem. reading/writing of transposed 3D RNMRTK time domain file """
dir_3d = os.path.join(DATA_DIR, 'rnmrtk_3d')
# T1 T2 T3 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t1_t2_t3.sec"))
assert data.shape == (128, 88, 36)
assert round(data[2, 6, 4].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
# T1 T3 T2 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t1_t3_t2.sec"))
assert data.shape == (128, 72, 44)
assert round(data[2, 8, 3].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
# T2 T1 T3 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t2_t1_t3.sec"))
assert data.shape == (88, 128, 36)
assert round(data[6, 2, 4].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
# T2 T3 T1 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t2_t3_t1.sec"))
assert data.shape == (88, 72, 64)
assert round(data[6, 8, 1].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
# T3 T1 T2 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t3_t1_t2.sec"))
assert data.shape == (72, 128, 44)
assert round(data[8, 2, 3].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
# T3 T2 T1 ordering
dic, data = ng.rnmrtk.read_lowmem(os.path.join(
dir_3d, "time_3d_t3_t2_t1.sec"))
assert data.shape == (72, 88, 64)
assert round(data[8, 6, 1].real, 2) == -1.82
assert dic['npts'] == [64, 44, 36]
lowmem_write_readback(dic, data)
setup.py | IBM/pyds8k | 7 | 6630605 | <gh_stars>1-10
##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from setuptools import find_packages, setup
import pyds8k
install_requires = ['requests', 'httpretty', 'configparser', 'six']
setup(
name='pyds8k',
version=pyds8k.version,
description="DS8000 Python Client",
long_description="DS8000 RESTful API Python Client.",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
keywords=["IBM", "DS8000 Storage"],
requires=install_requires,
install_requires=install_requires,
tests_require=['nose', 'mock'],
license="Apache License, Version 2.0",
include_package_data=True,
packages=find_packages(),
provides=['pyds8k'],
url="https://github.com/IBM/pyds8k",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules',
])
bin/arguments.py | christine-liu/somaticCNVpipeline | 0 | 6630606 | <filename>bin/arguments.py
#!/usr/bin/python
import argparse
###basic parser for parent help statement###
def parentArgs():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
Suzanne's pipeline to identify somatic CNVs from single-cell whole-genome sequencing data
=========================================================================================
You must specify a function to perform:
*preprocess (trim fastq reads to the appropriate length)
*map (map fastq files to the hg38 or mm10 genome)
*count (count number of reads in 25,000 genomic bins)
*segment (run CBS -- requires Matlab!)
*interpret (perform QC assessment and removal of low-quality CNV calls)
# [More functions coming soon...]
''')
parser.print_help()
raise SystemExit
###interpret arguments needed to perform preprocessing of fastq files###
def preprocessArgs():
parser = argparse.ArgumentParser(description='Trim fastq reads to the appropriate length')
#required arguments#
parser.add_argument('FastqDirectory',
help = 'The path to the folder that contains fastq files to be processed')
#optional arguments#
parser.add_argument('-5', '--trim5', metavar='X', type=int, default=0,
help = "Number of 5' bases to trim from fastq reads")
parser.add_argument('-l', '--length', metavar='X', type=int, default=36,
help = 'The desired read length')
parser.add_argument('-r', '--remove', action='store_true',
help = 'Set this flag if you want to delete the full length fastq files (UNTESTED)')
parser.add_argument('-s', '--samples', metavar='/path/to/sample_list.txt', default=False,
help='Path to a file containing a list of fastq files to be processed\n\tsample names only, no path or file extension needed (UNTESTED)')
return parser
###interpret arguments needed to perform mapping of fastq files###
def mapArgs():
parser = argparse.ArgumentParser(description='Map fastq files to the appropriate reference genome')
#required arguments#
parser.add_argument('FastqDirectory',
help = 'The path to the folder that contains fastq files to be processed')
parser.add_argument('MapIndex',
help='The path to the bowtie (v1) mapping references, as you would input if running bowtie directly -- MUST BE HG38 OR MM10')
# parser.add_argument('species', choices=['hg38', 'mm10'],
# help = 'The genome build of the species being assessed')
#optional arguments#
parser.add_argument('-t', '--trim', metavar='X', nargs=2, type=int, default=[0, 0],
help = "Number of 5' and 3' bases to trim from fastq reads during mapping")
parser.add_argument('-o', '--output', metavar='/path/to/output_directory/', default=False,
help = 'A filepath to the desired directory where you would like sam files saved, if not in the same parent directory as the fastq files (UNTESTED)')
parser.add_argument('-x', '--statdir', metavar='/path/to/statistics_directory/', default=False,
help = 'A filepath to the desired directory where you would like mapping statistics saved, if not in the same parent directory as the fastq files (UNTESTED)')
parser.add_argument('-s', '--samples', metavar='/path/to/sample_list.txt', default=False,
help='Path to a file containing a list of fastq files to be processed\n\tsample names only, no path or file extension needed (UNTESTED)')
parser.add_argument('-b', '--bowtie', metavar='/path/to/bowtie1', default='bowtie',
help='Path to the bowtie binary, if not in your PATH variable (UNTESTED)')
parser.add_argument('-m', '--samtools', metavar='/path/to/samtools0.1.19', default='samtools',
help='Path to the samtools (v0.1.19) binary, if not in your PATH variable (UNTESTED)')
return parser
###interpret arguments needed to perform counting of unique.sam files###
def countArgs():
parser = argparse.ArgumentParser(description='Count the reads per genomic bin from unique sam files')
#required arguments#
parser.add_argument('AnalysisDirectory',
help = 'The path to the analysis directory, which contains the Sam/ directory with unique.sam files to be processed')
parser.add_argument('species', choices=['hg38', 'mm10'],
help = 'The genome build of the species being assessed')
#optional arguments#
parser.add_argument('-m', '--mapdir', metavar='/path/to/output_directory/', default=False,
help = 'A filepath to the directory containing the sam files, if not AnalysisDirectory/Sam/ (UNTESTED)')
parser.add_argument('-x', '--statdir', metavar='/path/to/statistics_directory/', default=False,
help = 'A filepath to the desired directory where you would like mapping statistics saved, if not in the same parent directory as the sam files (UNTESTED)')
parser.add_argument('-s', '--samples', metavar='/path/to/sample_list.txt', default=False,
help='Path to a file containing a list of unique.sam files to be processed\n\tsample names only, no path or file extension needed (UNTESTED)')
return parser
###interpret arguments needed to perform normalization and segmentation of bincounts.txt files###
def segmentArgs():
parser = argparse.ArgumentParser(description='Normalize and segment bincounts files to begin CNV identification process')
#required arguments#
parser.add_argument('AnalysisDirectory',
help = 'The path to the analysis directory, which contains the BinCounts/ directory with bincounts.txt files to be processed')
parser.add_argument('species', choices=['hg38', 'mm10'],
help = 'The genome build of the species being assessed')
#optional arguments#
parser.add_argument('-b', '--bincountdir', metavar='/path/to/output_directory/', default=False,
help = 'A filepath to the folder containing the bincount files, if not AnalysisDirectory/BinCounts (UNTESTED)')
parser.add_argument('-i', '--infofile', metavar='/path/to/sample.info.txt', default=False,
help='Path to a .txt file containing information about the samples to be processed (unique name, amplification method, number of cells)\n\tIf not all are identical. This file should not have a header row (UNTESTED)')
parser.add_argument('-c', '--columns', metavar='X X X', default=[0, 1, 2], type=int, nargs=3,
help='The zero-indexed locations of the columns to import from the infofile in the order: name, method, cell number (if not the first 3 columns) (UNTESTED)')
parser.add_argument('-g', '--gconly', action='store_true',
help = 'Set this flag if you only want GC-correction to be performed during normalization (UNTESTED)')
parser.add_argument('-n', '--normalizeonly', action='store_true',
help = 'Set this flag if you do not want CBS to be performed (UNTESTED)')
parser.add_argument('-s', '--samples', metavar='/path/to/sample_list.txt', default=False,
help='Path to a file containing a list of bincounts.txt files to be processed\n\tsample names only, no path or file extension needed (UNTESTED)')
return parser
###interpret arguments needed to perform QC and CNV analysis of each single cell sample###
def interpretArgs():
parser = argparse.ArgumentParser(description='Assess sample quality, filter unreliable CNVs, and generate user-friendly output files')
#required arguments#
parser.add_argument('AnalysisDirectory',
help = 'The path to the folder to save output files')
parser.add_argument('species', choices=['hg38', 'mm10'],
help = 'The genome build of the species being assessed')
#optional arguments#
parser.add_argument('-f', '--nofilter', action='store_true',
help = 'Set this flag if you do not want to perform FUnC filtering of low-quality CNV calls (UNTESTED)')
# parser.add_argument('-i', '--infofile', metavar='/path/to/sample.info.txt', default=False,
# help='Path to a .txt file containing information about the samples to be processed (unique name, number of cells, group)\n\tIf not all are identical. This file should not have a header row (UNTESTED)')
# parser.add_argument('-c', '--columns', metavar='X X X', default=[0, 1, 2], type=int, nargs=3,
# help='The zero-indexed locations of the columns to import from the infofile in the order: name, cell number, group (if not the first 3 columns) (UNTESTED)')
parser.add_argument('-l', '--lowess', metavar='/path/to/lowess.txt/files/', default=False,
help = 'A filepath to the desired directory where all lowess.txt files are saved, if not AnalysisDirectory/Lowess/ (UNTESTED)')
parser.add_argument('-g', '--segments', metavar='/path/to/segments.txt/files/', default=False,
help = 'A filepath to the desired directory where all segments.txt files are saved, if not AnalysisDirectory/Segments/ (UNTESTED)')
parser.add_argument('-r', '--countstats', metavar='/path/to/bincounts.stats.txt/files/', default=False,
help = 'A filepath to the desired directory where all bincounts.stats.txt files are saved, if not AnalysisDirectory/PipelineStats/ (UNTESTED)')
parser.add_argument('-s', '--samples', metavar='/path/to/sample_list.txt', default=False,
help='Path to a file containing a list of sample names to be processed\n\tno path or file extension needed (UNTESTED)')
return parser
def fullParser(input):
functionDict = {
'-h': parentArgs,
'--help': parentArgs,
'preprocess': preprocessArgs,
'map': mapArgs,
'count': countArgs,
'segment': segmentArgs,
'interpret': interpretArgs,
}
if input == []:
parentArgs()
if input[0] not in functionDict.keys():
return input[0], False
parser = functionDict[input[0]]()
args = parser.parse_args(input[1:])
return input[0], args
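# Illustrative dispatch sketch only; the pipeline's real entry-point script is
# not part of this module and may differ:
#
#   import sys
#   function, args = fullParser(sys.argv[1:])
#   if args is False:
#       print('Unknown function: ' + function)
#   else:
#       pass  # hand `args` to the code implementing `function`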
tools/constants_generator.py | kylekrol/psim | 5 | 6630607 | import csv
CSV_FILE='tools/constants.csv'
HPP_FILE='include/gnc/constants.hpp'
HPP_HEADER='''\
/** @file gnc/constants.hpp
* Autocoded constants header file. See tools/constants_generator.py for more
* information. */
#ifndef GNC_CONSTANTS_HPP_
#define GNC_CONSTANTS_HPP_
#include "config.hpp"
#include <lin/core.hpp>
#include <cstdint>
#include <limits>
namespace gnc {
namespace constant {
'''
HPP_FOOTER='''\
} // namespace constant
} // namespace gnc
#endif
'''
CPP_FILE='src/gnc/constants.cpp'
CPP_HEADER='''\
/** @file gnc_constants.cpp
* Autocoded constants source file. See tools/constants_generator.py for more
* information. */
#include <gnc/config.hpp>
#include <gnc/constants.hpp>
namespace gnc {
namespace constant {
'''
CPP_FOOTER='''\
} // namespace constant
} // namespace gnc
'''
def generate(csv_file, hpp_file, cpp_file):
'''This function generates a constants header and source file for the PSim
CXX library given a constants CSV file. This script is automatically run
prior to each PSim PlatformIO build.
The format of the CSV file is as follows:
<editable>,<type>,<name>,<value 0>,<value 1>,...
- <editable> specifies whether or not the constant will be treated as an
extern (i.e. can be edited by flight software) and is either "true" or
"false".
- <type> specifies the type of the constant.
- <name> is the name of the constant in PSim CXX source code.
- <value 0>,... is a variadic static initializer list which sets the
constant's initial value.
'''
hpp_file.write(HPP_HEADER)
cpp_file.write(CPP_HEADER)
# Loop over each constant entry
for constant in csv.reader(csv_file, delimiter=','):
# Ensure at least four entries
if len(constant) < 4:
print('ERROR: CSV line with less than four elements detected')
continue
# External value
if constant[0].lower() == 'true':
hpp_file.write('extern {0} {1};\n\n'.format(constant[1], constant[2]))
cpp_file.write('GNC_TRACKED_CONSTANT({0}, {1}, {2}'.format(constant[1], constant[2], constant[3]))
for arg in constant[4:]:
cpp_file.write(', {}'.format(arg))
cpp_file.write(');\n\n')
# Constexpr value
elif constant[0].lower() == 'false':
hpp_file.write('GNC_TRACKED_CONSTANT(constexpr static {0}, {1}, {2}'.format(constant[1], constant[2], constant[3]))
for arg in constant[4:]:
hpp_file.write(', {}'.format(arg))
hpp_file.write(');\n\n')
hpp_file.write(HPP_FOOTER)
cpp_file.write(CPP_FOOTER)
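# Worked example (hypothetical rows, for illustration only). A non-editable row
#
#   false,float,deg_to_rad,0.017453292519943295f
#
# is emitted by the loop above into constants.hpp as
#
#   GNC_TRACKED_CONSTANT(constexpr static float, deg_to_rad, 0.017453292519943295f);
#
# while an editable row such as
#
#   true,unsigned int,control_cycle_ms,170
#
# produces `extern unsigned int control_cycle_ms;` in constants.hpp and
# `GNC_TRACKED_CONSTANT(unsigned int, control_cycle_ms, 170);` in constants.cpp.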
with open(CSV_FILE, 'r') as csv_file:
with open(HPP_FILE, 'w') as hpp_file:
with open(CPP_FILE, 'w') as cpp_file:
generate(csv_file, hpp_file, cpp_file)
data_collect_process/history_weather_collect.py | BigBigRadish/JX_MATH_Model_b | 2 | 6630608 | <reponame>BigBigRadish/JX_MATH_Model_b
#!/home/admin/anaconda3/envs/TF/bin/python3.5
# -*- coding: utf-8 -*-
'''
Created on June 9, 2018
@author: <NAME>
Jiangxi University of Finance and Economics
'''
from bs4 import BeautifulSoup
import re
import requests
from pymongo import MongoClient
citylist=['anqing','baotou','changsha','changzhou','chaoyang','dalian','fuzhou','ganzhou','guangzhou','hefei','hengyang','huhehaote','huangshan','jingzhou','mianyang','nanchang','nanjing','nanning','qingdao','shangqiu','shanghai','shenzhen','shijiazhuang','shouguang','suzhou','taiyuan','taizhou','tianjin','tianshui','wulumuqi','wuhan','xian','xuancheng','yinchuan','yuxi','chongqing']
monthlist=['201707','201708','201709','201710','201711','201712','201801','201802','201803','201804','201805']
con=MongoClient('localhost', 27017)
db=con.modelData
collection=db.weatherData
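# The nested loops below fetch one month page per city from tianqihoubao.com,
# e.g. http://www.tianqihoubao.com/lishi/nanchang/month/201707.html, take the
# table row at index 8, and build a record with the date, weather condition and
# parsed temperature; the collection.insert() call itself is currently commented
# out, as is the variant further down that walks every row of the table.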
for j in monthlist:
for i in range(len(citylist)):
city=citylist[i-1]
url='http://www.tianqihoubao.com/lishi/'+citylist[i-1]+'/month/'+str(j)+'.html'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4882.400 QQ'
headers={'User-Agent':user_agent}
r = requests.get(url,headers=headers)
soup = BeautifulSoup(r.text,'html5lib')#html.parser
#print (soup)
tr=soup.find_all('tr')[8]
td=tr.find_all('td')
print(td)
priceDate=td[0].text.strip()#日期
weather_condition=td[1].text.strip()#天气状况
parttern=re.compile('(.*)℃')
envir =parttern.findall(td[2].text.strip())[1]#
print(envir)
weatherDetails={"priceDate":priceDate,"weather_condition":weather_condition,"envir":envir,'city':city}
#collection.insert(weatherDetails)
# for idx, tr in enumerate(soup.find_all('tr')):
# if idx != 0:
# td=tr.find_all('td')
# print(td)
        # priceDate=td[0].text.strip()  # date
        # weather_condition=td[1].text.strip()  # weather condition
# parttern=re.compile('(.*)℃')
# try:
# envir =parttern.findall(td[2].text.strip())[1]#
# print(envir)
# except IndexError as e:
# continue
#
# weatherDetails={"priceDate":priceDate,"weather_condition":weather_condition,"envir":envir,'city':city}
        # collection.insert(weatherDetails)
9term/fipt/P2PLending/users/forms.py | nik-sergeson/bsuir-informatics-labs | 0 | 6630609 | <reponame>nik-sergeson/bsuir-informatics-labs<gh_stars>0
from django import forms
from django.contrib.auth import get_user_model
from P2PLending.users.models import User, UserMoney
from registration.forms import RegistrationForm
class UserRegistrationForm(RegistrationForm):
first_name = forms.CharField()
last_name = forms.CharField()
patronymic = forms.CharField()
phone = forms.CharField()
birth_date = forms.DateField()
class Meta:
model = User
fields = ["email", "first_name", "last_name", "patronymic", "phone", "birth_date"]
class ProfileForm(forms.Form):
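    # Note: this is a plain forms.Form, so Django does not read the Meta block
    # declared below; the form fields are the two attributes defined here.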
home_ownership = forms.ChoiceField(widget=forms.widgets.RadioSelect, choices=User.HOME_OWNERSHIP)
annual_income = forms.IntegerField()
class Meta:
model = User
fields = ["home_ownership", "income"]
legacy/flexibility_model/model_building/building_process_hvac_efficiencies.py | AlexJew/CityEnergyAnalyst | 0 | 6630610 | <reponame>AlexJew/CityEnergyAnalyst
"""
MIT License
Copyright (c) 2019 TUMCREATE <https://tum-create.edu.sg/>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
import datetime
import math
import numpy as np
import pandas as pd
from cea.utilities.dbf import dbf_to_dataframe
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def main(locator,
buildings_names,
footprint,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
supply_temperature_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
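    # High-level flow (as laid out below): derive per-building geometry factors
    # (bounding-box length/width, pipe transmittance Y, form factor), read the
    # HVAC generation/emission/controller properties, compute the mean
    # generation, storage, emission and distribution efficiencies, write the
    # ahu/aru/generic system definition files, and return the peak cooling load
    # densities together with the mean emission efficiencies.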
length_and_width_df = get_building_length_and_width(locator,
buildings_names
)
Y_dic = calculate_pipe_transmittance(locator,
buildings_names
)
fforma_dic = calculate_form_factor(
length_and_width_df,
footprint,
buildings_names
)
(
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
Qcsmax_Wm2_dic,
Tc_sup_air_ahu_C_dic,
Tc_sup_air_aru_C_dic,
dT_Qcs_dic,
temperature_difference_df
) = get_hvac_data(
buildings_names,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic
)
(
gen_efficiency_mean_dic,
sto_efficiency,
em_efficiency_mean_dic,
dis_efficiency_mean_dic,
comparison_gen_mean_dic,
comparison_em_mean_dic,
comparison_dis_mean_dic_dic
) = calculate_hvac_efficiencies(
buildings_names,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
length_and_width_df,
generation_cooling_code_dic,
supply_temperature_df,
emissions_cooling_type_dic,
Y_dic,
fforma_dic,
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
dT_Qcs_dic,
temperature_difference_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
)
write_building_system_ahu_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_ahu_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
write_building_system_aru_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_aru_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
write_building_hvac_generic_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
return (
Qcsmax_Wm2_dic,
em_efficiency_mean_dic,
)
def get_building_length_and_width(locator,
buildings_names
):
# Function taken from calc_bounding_box_geom in the CEA file building_properties.py
# Get data
geometry_shapefile_path = locator.get_zone_geometry()
# Calculate
import shapefile
sf = shapefile.Reader(geometry_shapefile_path)
shapes = sf.shapes()
len_shapes = len(shapes)
length_and_width = []
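    # Each building footprint is reduced to its axis-aligned bounding box; the
    # longer side is stored as Ll and the shorter side as Lw.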
for shape in range(len_shapes):
bbox = shapes[shape].bbox
coords_bbox = [coord for coord in bbox]
delta1 = abs(coords_bbox[0] - coords_bbox[2])
delta2 = abs(coords_bbox[1] - coords_bbox[3])
if delta1 >= delta2:
Lw = delta2
Ll = delta1
length_and_width.append([Ll, Lw])
else:
Lw = delta1
Ll = delta2
length_and_width.append([Ll, Lw])
for i in range(len(buildings_names)):
length_and_width[i].insert(0, buildings_names[i])
length_and_width_df = pd.DataFrame(
length_and_width,
columns=[
'Name',
'Ll',
'Lw'
]
)
length_and_width_df.set_index('Name', inplace=True)
return length_and_width_df
def calculate_pipe_transmittance(locator,
buildings_names
):
# Get data
age_df = dbf_to_dataframe(locator.get_building_age())
age_df.set_index('Name', inplace=True)
Y_dic = {}
for building in buildings_names:
age_built = age_df.loc[building, 'built']
age_HVAC = age_df.loc[building, 'HVAC']
# Calculate
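        # Y is the pipe heat-transmittance figure passed on to the efficiency
        # calculations; newer construction or a renovated HVAC system is assumed
        # to give the lower (better insulated) value.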
if age_built >= 1995 or age_HVAC > 1995:
Y_dic[building] = 0.2
elif 1985 <= age_built < 1995:
Y_dic[building] = 0.3
if age_HVAC == age_built:
                raise ValueError(
                    'Incorrect HVAC renovation year for '
                    + building
                    + ': if HVAC has not been renovated, the year should be set to 0'
                )
else:
Y_dic[building] = 0.4
return Y_dic
def calculate_form_factor(
length_and_width_df,
footprint,
buildings_names
):
fforma_dic = {}
for building in buildings_names:
fforma_dic[building] = (
footprint[building]
/ (length_and_width_df.loc[building]['Lw'] * length_and_width_df.loc[building]['Ll'])
)
return fforma_dic
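# Worked example for calculate_form_factor above (illustrative numbers only, not taken from
# any scenario): a building with a 400 m2 footprint inside a 25 m x 20 m bounding box gets
# fforma = 400 / (25 * 20) = 0.8, i.e. the footprint fills 80% of its bounding box.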
def get_hvac_data(
buildings_names,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic
):
eff_cs_dic = {}
source_cs_dic = {}
scale_cs_dic = {}
dTcs_C_dic = {}
Qcsmax_Wm2_dic = {}
Tc_sup_air_ahu_C_dic = {}
Tc_sup_air_aru_C_dic = {}
dT_Qcs_dic = {}
temperature_difference_df = pd.DataFrame(
np.zeros((buildings_cardinal, 3)),
buildings_names,
['ahu', 'aru', 'scu']
)
for building in buildings_names:
# Supply system
gen_cooling_code = generation_cooling_code_dic[building]
eff_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'eff_cs']
source_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'source_cs']
scale_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'scale_cs']
# Emissions system
emissions_cooling_type = emissions_cooling_type_dic[building]
dTcs_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'dTcs_C']
Qcsmax_Wm2_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Qcsmax_Wm2']
Tc_sup_air_ahu_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Tc_sup_air_ahu_C']
Tc_sup_air_aru_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Tc_sup_air_aru_C']
for sys in ['ahu', 'aru', 'scu']:
temperature_difference_df.loc[building][sys] = (
emission_systems_cooling_df.loc[emissions_cooling_type, 'dTcs0_' + sys + '_C']
)
dT_Qcs_dic[building] = emission_systems_controller_df.loc[emissions_controller_type_dic[building]]['dT_Qcs']
return (
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
Qcsmax_Wm2_dic,
Tc_sup_air_ahu_C_dic,
Tc_sup_air_aru_C_dic,
dT_Qcs_dic,
temperature_difference_df
)
def calculate_hvac_efficiencies(
buildings_names,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
length_and_width_df,
generation_cooling_code_dic,
supply_temperature_df,
emissions_cooling_type_dic,
Y_dic,
fforma_dic,
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
dT_Qcs_dic,
temperature_difference_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
gen_efficiency_mean_dic = {}
em_efficiency_mean_dic = {}
dis_efficiency_mean_dic = {}
comparison_gen_mean_dic = {}
comparison_em_mean_dic = {}
comparison_dis_mean_dic_dic = {}
for building in buildings_names:
# Calculate each efficiency type
gen_efficiency_df = get_generation_efficiency(
date_and_time_prediction,
prediction_horizon,
building,
generation_cooling_code_dic[building],
eff_cs_dic[building],
source_cs_dic[building],
scale_cs_dic[building],
supply_temperature_df,
T_ext_cea_df,
wet_bulb_temperature_df,
HP_ETA_EX_COOL,
HP_AUXRATIO
)
sto_efficiency = get_storage_efficiency()
em_efficiency_df = get_emission_efficiency(
dTcs_C_dic[building],
dT_Qcs_dic[building],
building,
set_temperatures_dic,
T_ext_cea_df,
emissions_cooling_type_dic[building],
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
)
dis_efficiency_dic = get_distribution_efficiency(
em_efficiency_df,
phi_5_max,
supply_temperature_df,
temperature_difference_df.loc[building],
set_temperatures_dic,
T_ext_cea_df,
length_and_width_df,
fforma_dic[building],
Y_dic[building],
Fb,
building,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
)
# Calculate the mean efficiencies, when needed
(
gen_efficiency_mean_dic[building],
em_efficiency_mean_dic[building],
dis_efficiency_mean_dic[building]
) = calculate_mean_efficiencies(
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic
)
# Compare the mean difference between the efficiency values and the mean efficiency
(
comparison_gen_df,
comparison_em_df,
comparison_dis_dic,
comparison_gen_mean_dic[building],
comparison_em_mean_dic[building],
comparison_dis_mean_dic_dic[building]
) = calculate_comparisons_mean(
gen_efficiency_mean_dic[building],
em_efficiency_mean_dic[building],
dis_efficiency_mean_dic[building],
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic,
date_and_time_prediction
)
return (
gen_efficiency_mean_dic,
sto_efficiency,
em_efficiency_mean_dic,
dis_efficiency_mean_dic,
comparison_gen_mean_dic,
comparison_em_mean_dic,
comparison_dis_mean_dic_dic
)
def get_generation_efficiency(
date_and_time_prediction,
prediction_horizon,
building,
gen_cooling_code,
eff_cs,
source_cs,
scale_cs,
supply_temperature_df,
T_ext_cea_df,
wet_bulb_temperature_df,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
gen_efficiency_df = pd.DataFrame(
np.zeros((3, prediction_horizon)),
['ahu', 'aru', 'scu'],
date_and_time_prediction
)
if scale_cs == 'DISTRICT':
for sys in ['ahu', 'aru', 'scu']:
for time in date_and_time_prediction:
gen_efficiency_df.loc[sys][time] = eff_cs
elif scale_cs == 'NONE':
raise ValueError('No supply air cooling supply system')
elif scale_cs == 'BUILDING':
if source_cs == 'GRID':
supply_temperature_kelvin_dic = {}
for sys in ['ahu', 'aru', 'scu']:
supply_temperature_kelvin_dic[sys] = supply_temperature_df.loc[building][sys] + 273.15
if gen_cooling_code in {'T2', 'T3'}:
for time in date_and_time_prediction:
if gen_cooling_code == 'T2':
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
t_source_t = T_ext_cea_df[string_object_time] + 273
if gen_cooling_code == 'T3':
t_source_t = wet_bulb_temperature_df.loc[time]['wet_bulb_temperature'] + 273
for sys in ['ahu', 'aru', 'scu']:
if not math.isnan(supply_temperature_kelvin_dic[sys]):
gen_efficiency_df.loc[sys][time] = (
HP_ETA_EX_COOL
* HP_AUXRATIO
* supply_temperature_kelvin_dic[sys]
/ (t_source_t - supply_temperature_kelvin_dic[sys])
)
else:
gen_efficiency_df.loc[sys][time] = np.nan
else:
raise NotImplementedError('Unknown cooling supply system')
return gen_efficiency_df
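# Illustrative check of the BUILDING/GRID branch above (assumed values, not project data):
# with HP_ETA_EX_COOL = 0.3, HP_AUXRATIO = 0.95, a supply temperature of 283.15 K (10 C) and
# a source temperature 20 K higher, the expression gives
# 0.3 * 0.95 * 283.15 / 20 ~= 4.0, i.e. the chiller is represented by a COP of about 4.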
def get_storage_efficiency():
sto_efficiency = 1
return sto_efficiency
def get_emission_efficiency(
dTcs_C,
dT_Qcs,
building,
set_temperatures_dic,
T_ext_cea_df,
emissions_cooling_type,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
):
# TODO: Use the correct delta theta sol (c.f. HVAC efficiencies documentation)
em_efficiency_df = pd.DataFrame(
np.zeros((occupancy_per_building_cardinal[building], prediction_horizon)),
occupancy_per_building_list[building],
date_and_time_prediction
)
for occupancy in occupancy_per_building_list[building]:
if emissions_cooling_type == 'T0':
for time in date_and_time_prediction:
em_efficiency_df.loc[occupancy][time] = 1
else:
for time in date_and_time_prediction:
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
T_int_t = set_temperatures_dic[building].loc[occupancy][time]
frac_t = (dTcs_C + dT_Qcs) / (T_int_t - T_ext_cea_df[string_object_time] + dTcs_C + dT_Qcs - 10)
if frac_t < 0:
em_efficiency_df.loc[occupancy][time] = 1
elif abs(T_int_t - T_ext_cea_df[string_object_time] + dTcs_C + dT_Qcs - 10) < 10 ** (-6):
em_efficiency_df.loc[occupancy][time] = 1
else:
if 1 / (1 + frac_t) > 1: # Check efficiency value
raise ValueError('Emission efficiency is greater than 1')
else:
em_efficiency_df.loc[occupancy][time] = 1 / (1 + frac_t)
return em_efficiency_df
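# Numerical illustration of get_emission_efficiency above (all values assumed): with
# dTcs_C = 0.5 K, dT_Qcs = 1.2 K, a set temperature of 24 C and an outdoor temperature of 5 C,
# frac_t = (0.5 + 1.2) / (24 - 5 + 0.5 + 1.2 - 10) = 1.7 / 10.7 ~= 0.16, so the emission
# efficiency becomes 1 / (1 + 0.16) ~= 0.86. When frac_t is negative, or its denominator is
# numerically zero, the efficiency is simply set to 1, as coded above.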
def get_distribution_efficiency(
em_efficiency_df,
phi_5_max,
supply_temperature_df,
temperature_difference_dic,
set_temperatures_dic,
T_ext_cea_df,
length_and_width_df,
fforma,
Y,
Fb,
building,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
):
# Non time-dependent parts
Ll = length_and_width_df.loc[building]['Ll']
Lw = length_and_width_df.loc[building]['Lw']
Lv = (2 * Ll + 0.0325 * Ll * Lw + 6) * fforma
sys_temperatures = {}
for sys in ['ahu', 'aru', 'scu']:
sys_temperatures[sys] = (2 * supply_temperature_df.loc[building][sys] + temperature_difference_dic[sys]) / 2
# Time-dependent parts
dis_efficiency_dic = {}
for sys in ['ahu', 'aru', 'scu']:
dis_efficiency_sys_df = pd.DataFrame(
np.zeros((occupancy_per_building_cardinal[building], prediction_horizon)),
occupancy_per_building_list[building],
date_and_time_prediction
)
for occupancy in occupancy_per_building_list[building]:
if math.isnan(sys_temperatures[sys]): # Check whether AHU, ARU and SCU exist
for time in date_and_time_prediction:
dis_efficiency_sys_df.loc[occupancy][time] = np.nan
else:
for time in date_and_time_prediction:
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
common_coeff = em_efficiency_df.loc[occupancy][time] * Lv * Y / phi_5_max
Tb = (
set_temperatures_dic[building].loc[occupancy][time]
- Fb
* (
set_temperatures_dic[building].loc[occupancy][time]
- T_ext_cea_df[string_object_time]
)
)
if (1 + common_coeff * (sys_temperatures[sys] - Tb)) > 1: # Check efficiency value
raise ValueError('Distribution efficiency is greater than 1')
else:
dis_efficiency_sys_df.loc[occupancy][time] = 1 + common_coeff * (sys_temperatures[sys] - Tb)
dis_efficiency_dic[sys] = dis_efficiency_sys_df
return dis_efficiency_dic
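# Note on get_distribution_efficiency above: for chilled-water distribution the pipe
# temperature sys_temperatures[sys] normally lies below the building temperature Tb, so
# common_coeff * (sys_temperatures[sys] - Tb) is negative and the resulting efficiency stays
# at or below 1; the explicit check raises if bad inputs ever push it above 1.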
def calculate_mean_efficiencies(gen_efficiency_df, em_efficiency_df, dis_efficiency_dic):
# Calculates the mean generation/conversion efficiencies for the ahu, the aru and the scu
gen_efficiency_mean = gen_efficiency_df.mean(axis=1, skipna=False)
# Calculates the emission efficiency
em_efficiency_mean_row = em_efficiency_df.mean(axis=1)
em_efficiency_mean = em_efficiency_mean_row.mean(axis=0)
# Calculates the mean distribution efficiencies for the ahu, the aru and the scu
dis_efficiency_mean = {}
for sys in ['ahu', 'aru', 'scu']:
dis_efficiency_dic_sys = dis_efficiency_dic[sys]
dis_efficiency_mean_sys_row = dis_efficiency_dic_sys.mean(axis=1, skipna=False)
dis_efficiency_mean[sys] = dis_efficiency_mean_sys_row.mean(axis=0, skipna=False)
return (
gen_efficiency_mean,
em_efficiency_mean,
dis_efficiency_mean
)
def calculate_comparisons_mean(
gen_efficiency_mean,
em_efficiency_mean,
dis_efficiency_mean,
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic,
date_and_time_prediction
):
# Create the data frames
comparison_gen_df = pd.DataFrame(
np.zeros(gen_efficiency_df.shape),
gen_efficiency_df.index,
gen_efficiency_df.columns
)
comparison_em_df = pd.DataFrame(
np.zeros(em_efficiency_df.shape),
em_efficiency_df.index,
em_efficiency_df.columns
)
comparison_dis_dic = {}
for sys in ['ahu', 'aru', 'scu']:
comparison_dis_dic[sys] = pd.DataFrame(
np.zeros(dis_efficiency_dic[sys].shape),
dis_efficiency_dic[sys].index,
dis_efficiency_dic[sys].columns
)
# Fill in the data frames of the relative differences to the means
for time in date_and_time_prediction:
for index, row in gen_efficiency_df.iterrows():
comparison_gen_df.loc[index][time] = (
abs(row[time] - gen_efficiency_mean[index])
/ gen_efficiency_mean[index]
)
for index, row in em_efficiency_df.iterrows():
comparison_em_df.loc[index][time] = (
abs(row[time] - em_efficiency_mean)
/ em_efficiency_mean
)
for sys in ['ahu', 'aru', 'scu']:
for index, row in dis_efficiency_dic[sys].iterrows():
comparison_dis_dic[sys].loc[index][time] = (
abs(dis_efficiency_dic[sys].loc[index][time] - dis_efficiency_mean[sys])
/ dis_efficiency_mean[sys]
)
# Calculate the means
# Calculates the mean generation/conversion efficiencies relative differences to the means
# for the ahu, the aru and the scu
comparison_gen_mean = comparison_gen_df.mean(axis=1, skipna=False)
# Calculates the emission efficiency relative difference to the mean
comparison_em_mean_row = comparison_em_df.mean(axis=1)
comparison_em_mean = comparison_em_mean_row.mean(axis=0)
# Calculates the mean distribution efficiencies relative differences to the means for the ahu, the aru and the scu
comparison_dis_mean_dic = {}
for sys in ['ahu', 'aru', 'scu']:
comparison_dis_dic_sys = comparison_dis_dic[sys]
comparison_dis_mean_sys_row = comparison_dis_dic_sys.mean(axis=1, skipna=False)
comparison_dis_mean_dic[sys] = comparison_dis_mean_sys_row.mean(axis=0, skipna=False)
return (
comparison_gen_df,
comparison_em_df,
comparison_dis_dic,
comparison_gen_mean,
comparison_em_mean,
comparison_dis_mean_dic
)
def write_building_system_ahu_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_ahu_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
ahu_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['ahu']):
ahu_types.append([
building + '_' + emissions_cooling_type_dic[building],
'default',
'default',
'default',
'default',
Tc_sup_air_ahu_C_dic[building],
11.5,
1,
(
gen_efficiency_mean_dic[building].loc['ahu']
* sto_efficiency
* dis_efficiency_mean_dic[building]['ahu']
),
1,
1
])
ahu_types_df = pd.DataFrame.from_records(
ahu_types,
columns=[
'hvac_ahu_type',
'ahu_cooling_type',
'ahu_heating_type',
'ahu_dehumidification_type',
'ahu_return_air_heat_recovery_type',
'ahu_supply_air_temperature_setpoint',
'ahu_supply_air_relative_humidty_setpoint',
'ahu_fan_efficiency',
'ahu_cooling_efficiency',
'ahu_heating_efficiency',
'ahu_return_air_recovery_efficiency'
])
ahu_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_ahu_types'
),
index=False
)
def write_building_system_aru_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_aru_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
aru_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['aru']):
aru_types.append([
building + '_' + emissions_cooling_type_dic[building],
'default',
'default',
'zone',
Tc_sup_air_aru_C_dic[building],
1,
(
gen_efficiency_mean_dic[building].loc['aru']
* sto_efficiency
* dis_efficiency_mean_dic[building]['aru']
),
1
])
aru_types_df = pd.DataFrame.from_records(
aru_types,
columns=[
'hvac_tu_type',
'tu_cooling_type',
'tu_heating_type',
'tu_air_intake_type',
'tu_supply_air_temperature_setpoint',
'tu_fan_efficiency',
'tu_cooling_efficiency',
'tu_heating_efficiency'
])
aru_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_tu_types'
),
index=False
)
def write_building_hvac_generic_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
scu_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['scu']):
scu_types.append([
building + '_' + emissions_cooling_type_dic[building],
1,
(
gen_efficiency_mean_dic[building].loc['scu']
* sto_efficiency
* dis_efficiency_mean_dic[building]['scu']
)
])
scu_types_df = pd.DataFrame.from_records(
scu_types,
columns=[
'hvac_generic_type',
'generic_heating_efficiency',
'generic_cooling_efficiency'
])
scu_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_generic_types'
),
index=False
)
| """
MIT License
Copyright (c) 2019 TUMCREATE <https://tum-create.edu.sg/>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
import datetime
import math
import numpy as np
import pandas as pd
from cea.utilities.dbf import dbf_to_dataframe
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def main(locator,
buildings_names,
footprint,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
supply_temperature_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
length_and_width_df = get_building_length_and_width(locator,
buildings_names
)
Y_dic = calculate_pipe_transmittance(locator,
buildings_names
)
fforma_dic = calculate_form_factor(
length_and_width_df,
footprint,
buildings_names
)
(
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
Qcsmax_Wm2_dic,
Tc_sup_air_ahu_C_dic,
Tc_sup_air_aru_C_dic,
dT_Qcs_dic,
temperature_difference_df
) = get_hvac_data(
buildings_names,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic
)
(
gen_efficiency_mean_dic,
sto_efficiency,
em_efficiency_mean_dic,
dis_efficiency_mean_dic,
comparison_gen_mean_dic,
comparison_em_mean_dic,
comparison_dis_mean_dic_dic
) = calculate_hvac_efficiencies(
buildings_names,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
length_and_width_df,
generation_cooling_code_dic,
supply_temperature_df,
emissions_cooling_type_dic,
Y_dic,
fforma_dic,
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
dT_Qcs_dic,
temperature_difference_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
)
write_building_system_ahu_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_ahu_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
write_building_system_aru_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_aru_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
write_building_hvac_generic_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
)
return (
Qcsmax_Wm2_dic,
em_efficiency_mean_dic,
)
def get_building_length_and_width(locator,
buildings_names
):
# Function taken from calc_bounding_box_geom in the CEA file building_properties.py
# Get data
geometry_shapefile_path = locator.get_zone_geometry()
# Calculate
import shapefile
sf = shapefile.Reader(geometry_shapefile_path)
shapes = sf.shapes()
len_shapes = len(shapes)
length_and_width = []
for shape in range(len_shapes):
bbox = shapes[shape].bbox
coords_bbox = [coord for coord in bbox]
delta1 = abs(coords_bbox[0] - coords_bbox[2])
delta2 = abs(coords_bbox[1] - coords_bbox[3])
if delta1 >= delta2:
Lw = delta2
Ll = delta1
length_and_width.append([Ll, Lw])
else:
Lw = delta1
Ll = delta2
length_and_width.append([Ll, Lw])
for i in range(len(buildings_names)):
length_and_width[i].insert(0, buildings_names[i])
length_and_width_df = pd.DataFrame(
length_and_width,
columns=[
'Name',
'Ll',
'Lw'
]
)
length_and_width_df.set_index('Name', inplace=True)
return length_and_width_df
def calculate_pipe_transmittance(locator,
buildings_names
):
# Get data
age_df = dbf_to_dataframe(locator.get_building_age())
age_df.set_index('Name', inplace=True)
Y_dic = {}
for building in buildings_names:
age_built = age_df.loc[building, 'built']
age_HVAC = age_df.loc[building, 'HVAC']
# Calculate
if age_built >= 1995 or age_HVAC > 1995:
Y_dic[building] = 0.2
elif 1985 <= age_built < 1995:
Y_dic[building] = 0.3
if age_HVAC == age_built:
print(
'Incorrect HVAC renovation year for '
+ building
+ ': if HVAC has not been renovated, the year should be set to 0'
)
quit()
else:
Y_dic[building] = 0.4
return Y_dic
def calculate_form_factor(
length_and_width_df,
footprint,
buildings_names
):
fforma_dic = {}
for building in buildings_names:
fforma_dic[building] = (
footprint[building]
/ (length_and_width_df.loc[building]['Lw'] * length_and_width_df.loc[building]['Ll'])
)
return fforma_dic
def get_hvac_data(
buildings_names,
buildings_cardinal,
cooling_generation_df,
emission_systems_cooling_df,
emission_systems_controller_df,
generation_cooling_code_dic,
emissions_cooling_type_dic,
emissions_controller_type_dic
):
eff_cs_dic = {}
source_cs_dic = {}
scale_cs_dic = {}
dTcs_C_dic = {}
Qcsmax_Wm2_dic = {}
Tc_sup_air_ahu_C_dic = {}
Tc_sup_air_aru_C_dic = {}
dT_Qcs_dic = {}
temperature_difference_df = pd.DataFrame(
np.zeros((buildings_cardinal, 3)),
buildings_names,
['ahu', 'aru', 'scu']
)
for building in buildings_names:
# Supply system
gen_cooling_code = generation_cooling_code_dic[building]
eff_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'eff_cs']
source_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'source_cs']
scale_cs_dic[building] = cooling_generation_df.loc[gen_cooling_code, 'scale_cs']
# Emissions system
emissions_cooling_type = emissions_cooling_type_dic[building]
dTcs_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'dTcs_C']
Qcsmax_Wm2_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Qcsmax_Wm2']
Tc_sup_air_ahu_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Tc_sup_air_ahu_C']
Tc_sup_air_aru_C_dic[building] = emission_systems_cooling_df.loc[emissions_cooling_type, 'Tc_sup_air_aru_C']
for sys in ['ahu', 'aru', 'scu']:
temperature_difference_df.loc[building][sys] = (
emission_systems_cooling_df.loc[emissions_cooling_type, 'dTcs0_' + sys + '_C']
)
dT_Qcs_dic[building] = emission_systems_controller_df.loc[emissions_controller_type_dic[building]]['dT_Qcs']
return (
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
Qcsmax_Wm2_dic,
Tc_sup_air_ahu_C_dic,
Tc_sup_air_aru_C_dic,
dT_Qcs_dic,
temperature_difference_df
)
def calculate_hvac_efficiencies(
buildings_names,
set_temperatures_dic,
T_ext_cea_df,
wet_bulb_temperature_df,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list,
length_and_width_df,
generation_cooling_code_dic,
supply_temperature_df,
emissions_cooling_type_dic,
Y_dic,
fforma_dic,
eff_cs_dic,
source_cs_dic,
scale_cs_dic,
dTcs_C_dic,
dT_Qcs_dic,
temperature_difference_df,
phi_5_max,
Fb,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
gen_efficiency_mean_dic = {}
em_efficiency_mean_dic = {}
dis_efficiency_mean_dic = {}
comparison_gen_mean_dic = {}
comparison_em_mean_dic = {}
comparison_dis_mean_dic_dic = {}
for building in buildings_names:
# Calculate each efficiency type
gen_efficiency_df = get_generation_efficiency(
date_and_time_prediction,
prediction_horizon,
building,
generation_cooling_code_dic[building],
eff_cs_dic[building],
source_cs_dic[building],
scale_cs_dic[building],
supply_temperature_df,
T_ext_cea_df,
wet_bulb_temperature_df,
HP_ETA_EX_COOL,
HP_AUXRATIO
)
sto_efficiency = get_storage_efficiency()
em_efficiency_df = get_emission_efficiency(
dTcs_C_dic[building],
dT_Qcs_dic[building],
building,
set_temperatures_dic,
T_ext_cea_df,
emissions_cooling_type_dic[building],
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
)
dis_efficiency_dic = get_distribution_efficiency(
em_efficiency_df,
phi_5_max,
supply_temperature_df,
temperature_difference_df.loc[building],
set_temperatures_dic,
T_ext_cea_df,
length_and_width_df,
fforma_dic[building],
Y_dic[building],
Fb,
building,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
)
# Calculate the mean efficiencies, when needed
(
gen_efficiency_mean_dic[building],
em_efficiency_mean_dic[building],
dis_efficiency_mean_dic[building]
) = calculate_mean_efficiencies(
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic
)
# Compare the mean difference between the efficiency values and the mean efficiency
(
comparison_gen_df,
comparison_em_df,
comparison_dis_dic,
comparison_gen_mean_dic[building],
comparison_em_mean_dic[building],
comparison_dis_mean_dic_dic[building]
) = calculate_comparisons_mean(
gen_efficiency_mean_dic[building],
em_efficiency_mean_dic[building],
dis_efficiency_mean_dic[building],
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic,
date_and_time_prediction
)
return (
gen_efficiency_mean_dic,
sto_efficiency,
em_efficiency_mean_dic,
dis_efficiency_mean_dic,
comparison_gen_mean_dic,
comparison_em_mean_dic,
comparison_dis_mean_dic_dic
)
def get_generation_efficiency(
date_and_time_prediction,
prediction_horizon,
building,
gen_cooling_code,
eff_cs,
source_cs,
scale_cs,
supply_temperature_df,
T_ext_cea_df,
wet_bulb_temperature_df,
HP_ETA_EX_COOL,
HP_AUXRATIO
):
gen_efficiency_df = pd.DataFrame(
np.zeros((3, prediction_horizon)),
['ahu', 'aru', 'scu'],
date_and_time_prediction
)
if scale_cs == 'DISTRICT':
for sys in ['ahu', 'aru', 'scu']:
for time in date_and_time_prediction:
gen_efficiency_df.loc[sys][time] = eff_cs
elif scale_cs == 'NONE':
raise ValueError('No supply air cooling supply system')
elif scale_cs == 'BUILDING':
if source_cs == 'GRID':
supply_temperature_kelvin_dic = {}
for sys in ['ahu', 'aru', 'scu']:
supply_temperature_kelvin_dic[sys] = supply_temperature_df.loc[building][sys] + 273.15
if gen_cooling_code in {'T2', 'T3'}:
for time in date_and_time_prediction:
if gen_cooling_code == 'T2':
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
t_source_t = T_ext_cea_df[string_object_time] + 273
if gen_cooling_code == 'T3':
t_source_t = wet_bulb_temperature_df.loc[time]['wet_bulb_temperature'] + 273
for sys in ['ahu', 'aru', 'scu']:
if not math.isnan(supply_temperature_kelvin_dic[sys]):
gen_efficiency_df.loc[sys][time] = (
HP_ETA_EX_COOL
* HP_AUXRATIO
* supply_temperature_kelvin_dic[sys]
/ (t_source_t - supply_temperature_kelvin_dic[sys])
)
else:
gen_efficiency_df.loc[sys][time] = np.nan
else:
raise NotImplementedError('Unknown cooling supply system')
return gen_efficiency_df
def get_storage_efficiency():
sto_efficiency = 1
return sto_efficiency
def get_emission_efficiency(
dTcs_C,
dT_Qcs,
building,
set_temperatures_dic,
T_ext_cea_df,
emissions_cooling_type,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
):
# TODO: Use the correct delta theta sol (c.f. HVAC efficiencies documentation)
em_efficiency_df = pd.DataFrame(
np.zeros((occupancy_per_building_cardinal[building], prediction_horizon)),
occupancy_per_building_list[building],
date_and_time_prediction
)
for occupancy in occupancy_per_building_list[building]:
if emissions_cooling_type == 'T0':
for time in date_and_time_prediction:
em_efficiency_df.loc[occupancy][time] = 1
else:
for time in date_and_time_prediction:
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
T_int_t = set_temperatures_dic[building].loc[occupancy][time]
frac_t = (dTcs_C + dT_Qcs) / (T_int_t - T_ext_cea_df[string_object_time] + dTcs_C + dT_Qcs - 10)
if frac_t < 0:
em_efficiency_df.loc[occupancy][time] = 1
elif abs(T_int_t - T_ext_cea_df[string_object_time] + dTcs_C + dT_Qcs - 10) < 10 ** (-6):
em_efficiency_df.loc[occupancy][time] = 1
else:
if 1 / (1 + frac_t) > 1: # Check efficiency value
raise ValueError('Emission efficiency is greater than 1')
else:
em_efficiency_df.loc[occupancy][time] = 1 / (1 + frac_t)
return em_efficiency_df
def get_distribution_efficiency(
em_efficiency_df,
phi_5_max,
supply_temperature_df,
temperature_difference_dic,
set_temperatures_dic,
T_ext_cea_df,
length_and_width_df,
fforma,
Y,
Fb,
building,
prediction_horizon,
date_and_time_prediction,
occupancy_per_building_cardinal,
occupancy_per_building_list
):
# Non time-dependent parts
Ll = length_and_width_df.loc[building]['Ll']
Lw = length_and_width_df.loc[building]['Lw']
Lv = (2 * Ll + 0.0325 * Ll * Lw + 6) * fforma
sys_temperatures = {}
for sys in ['ahu', 'aru', 'scu']:
sys_temperatures[sys] = (2 * supply_temperature_df.loc[building][sys] + temperature_difference_dic[sys]) / 2
# Time-dependent parts
dis_efficiency_dic = {}
for sys in ['ahu', 'aru', 'scu']:
dis_efficiency_sys_df = pd.DataFrame(
np.zeros((occupancy_per_building_cardinal[building], prediction_horizon)),
occupancy_per_building_list[building],
date_and_time_prediction
)
for occupancy in occupancy_per_building_list[building]:
if math.isnan(sys_temperatures[sys]): # Check whether AHU, ARU and SCU exist
for time in date_and_time_prediction:
dis_efficiency_sys_df.loc[occupancy][time] = np.nan
else:
for time in date_and_time_prediction:
string_object_time = datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S')
common_coeff = em_efficiency_df.loc[occupancy][time] * Lv * Y / phi_5_max
Tb = (
set_temperatures_dic[building].loc[occupancy][time]
- Fb
* (
set_temperatures_dic[building].loc[occupancy][time]
- T_ext_cea_df[string_object_time]
)
)
if (1 + common_coeff * (sys_temperatures[sys] - Tb)) > 1: # Check efficiency value
raise ValueError('Distribution efficiency is greater than 1')
else:
dis_efficiency_sys_df.loc[occupancy][time] = 1 + common_coeff * (sys_temperatures[sys] - Tb)
dis_efficiency_dic[sys] = dis_efficiency_sys_df
return dis_efficiency_dic
def calculate_mean_efficiencies(gen_efficiency_df, em_efficiency_df, dis_efficiency_dic):
# Calculates the mean generation/conversion efficiencies for the ahu, the aru and the scu
gen_efficiency_mean = gen_efficiency_df.mean(axis=1, skipna=False)
# Calculates the emission efficiency
em_efficiency_mean_row = em_efficiency_df.mean(axis=1)
em_efficiency_mean = em_efficiency_mean_row.mean(axis=0)
# Calculates the mean distribution efficiencies for the ahu, the aru and the scu
dis_efficiency_mean = {}
for sys in ['ahu', 'aru', 'scu']:
dis_efficiency_dic_sys = dis_efficiency_dic[sys]
dis_efficiency_mean_sys_row = dis_efficiency_dic_sys.mean(axis=1, skipna=False)
dis_efficiency_mean[sys] = dis_efficiency_mean_sys_row.mean(axis=0, skipna=False)
return (
gen_efficiency_mean,
em_efficiency_mean,
dis_efficiency_mean
)
def calculate_comparisons_mean(
gen_efficiency_mean,
em_efficiency_mean,
dis_efficiency_mean,
gen_efficiency_df,
em_efficiency_df,
dis_efficiency_dic,
date_and_time_prediction
):
# Create the data frames
comparison_gen_df = pd.DataFrame(
np.zeros(gen_efficiency_df.shape),
gen_efficiency_df.index,
gen_efficiency_df.columns
)
comparison_em_df = pd.DataFrame(
np.zeros(em_efficiency_df.shape),
em_efficiency_df.index,
em_efficiency_df.columns
)
comparison_dis_dic = {}
for sys in ['ahu', 'aru', 'scu']:
comparison_dis_dic[sys] = pd.DataFrame(
np.zeros(dis_efficiency_dic[sys].shape),
dis_efficiency_dic[sys].index,
dis_efficiency_dic[sys].columns
)
# Fill in the data frames of the relative differences to the means
for time in date_and_time_prediction:
for index, row in gen_efficiency_df.iterrows():
comparison_gen_df.loc[index][time] = (
abs(row[time] - gen_efficiency_mean[index])
/ gen_efficiency_mean[index]
)
for index, row in em_efficiency_df.iterrows():
comparison_em_df.loc[index][time] = (
abs(row[time] - em_efficiency_mean)
/ em_efficiency_mean
)
for sys in ['ahu', 'aru', 'scu']:
for index, row in dis_efficiency_dic[sys].iterrows():
comparison_dis_dic[sys].loc[index][time] = (
abs(dis_efficiency_dic[sys].loc[index][time] - dis_efficiency_mean[sys])
/ dis_efficiency_mean[sys]
)
# Calculate the means
# Calculates the mean generation/conversion efficiencies relative differences to the means
# for the ahu, the aru and the scu
comparison_gen_mean = comparison_gen_df.mean(axis=1, skipna=False)
# Calculates the emission efficiency relative difference to the mean
comparison_em_mean_row = comparison_em_df.mean(axis=1)
comparison_em_mean = comparison_em_mean_row.mean(axis=0)
# Calculates the mean distribution efficiencies relative differences to the means for the ahu, the aru and the scu
comparison_dis_mean_dic = {}
for sys in ['ahu', 'aru', 'scu']:
comparison_dis_dic_sys = comparison_dis_dic[sys]
comparison_dis_mean_sys_row = comparison_dis_dic_sys.mean(axis=1, skipna=False)
comparison_dis_mean_dic[sys] = comparison_dis_mean_sys_row.mean(axis=0, skipna=False)
return (
comparison_gen_df,
comparison_em_df,
comparison_dis_dic,
comparison_gen_mean,
comparison_em_mean,
comparison_dis_mean_dic
)
def write_building_system_ahu_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_ahu_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
ahu_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['ahu']):
ahu_types.append([
building + '_' + emissions_cooling_type_dic[building],
'default',
'default',
'default',
'default',
Tc_sup_air_ahu_C_dic[building],
11.5,
1,
(
gen_efficiency_mean_dic[building].loc['ahu']
* sto_efficiency
* dis_efficiency_mean_dic[building]['ahu']
),
1,
1
])
ahu_types_df = pd.DataFrame.from_records(
ahu_types,
columns=[
'hvac_ahu_type',
'ahu_cooling_type',
'ahu_heating_type',
'ahu_dehumidification_type',
'ahu_return_air_heat_recovery_type',
'ahu_supply_air_temperature_setpoint',
'ahu_supply_air_relative_humidty_setpoint',
'ahu_fan_efficiency',
'ahu_cooling_efficiency',
'ahu_heating_efficiency',
'ahu_return_air_recovery_efficiency'
])
ahu_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_ahu_types'
),
index=False
)
def write_building_system_aru_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
Tc_sup_air_aru_C_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
aru_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['aru']):
aru_types.append([
building + '_' + emissions_cooling_type_dic[building],
'default',
'default',
'zone',
Tc_sup_air_aru_C_dic[building],
1,
(
gen_efficiency_mean_dic[building].loc['aru']
* sto_efficiency
* dis_efficiency_mean_dic[building]['aru']
),
1
])
aru_types_df = pd.DataFrame.from_records(
aru_types,
columns=[
'hvac_tu_type',
'tu_cooling_type',
'tu_heating_type',
'tu_air_intake_type',
'tu_supply_air_temperature_setpoint',
'tu_fan_efficiency',
'tu_cooling_efficiency',
'tu_heating_efficiency'
])
aru_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_tu_types'
),
index=False
)
def write_building_hvac_generic_types(locator,
buildings_names,
supply_temperature_df,
emissions_cooling_type_dic,
gen_efficiency_mean_dic,
sto_efficiency,
dis_efficiency_mean_dic
):
scu_types = []
for building in buildings_names:
if not math.isnan(supply_temperature_df.loc[building]['scu']):
scu_types.append([
building + '_' + emissions_cooling_type_dic[building],
1,
(
gen_efficiency_mean_dic[building].loc['scu']
* sto_efficiency
* dis_efficiency_mean_dic[building]['scu']
)
])
scu_types_df = pd.DataFrame.from_records(
scu_types,
columns=[
'hvac_generic_type',
'generic_heating_efficiency',
'generic_cooling_efficiency'
])
scu_types_df.to_csv(
path_or_buf=locator.get_mpc_results_building_definitions_file('building_hvac_generic_types'
),
index=False
) | en | 0.771344 | MIT License Copyright (c) 2019 TUMCREATE <https://tum-create.edu.sg/> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Function taken from calc_bounding_box_geom in the CEA file building_properties.py # Get data # Calculate # Get data # Calculate # Supply system # Emissions system # Calculate each efficiency type # Calculate the mean efficiencies, when needed # Compare the mean difference between the efficiency values and the mean efficiency # TODO: Use the correct delta theta sol (c.f. HVAC efficiencies documentation) # Check efficiency value # Non time-dependent parts # Time-dependent parts # Check whether AHU, ARU and SCU exist # Check efficiency value # Calculates the mean generation/conversion efficiencies for the ahu, the aru and the scu # Calculates the emission efficiency # Calculates the mean distribution efficiencies for the ahu, the aru and the scu # Create the data frames # Fill in the data frames of the relative differences to the means # Calculate the means # Calculates the mean generation/conversion efficiencies relative differences to the means # for the ahu, the aru and the scu # Calculates the emission efficiency relative difference to the mean # Calculates the mean distribution efficiencies relative differences to the means for the ahu, the aru and the scu | 1.844395 | 2 |
python/test/extra/noref_feature_extractor_extratest.py | elam03/vmaf | 1 | 6630611 | <filename>python/test/extra/noref_feature_extractor_extratest.py
import unittest
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.asset import NorefAsset
from vmaf.core.noref_feature_extractor import MomentNorefFeatureExtractor
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
# @unittest.skipIf(not VmafExternalConfig.ffmpeg_path(), "ffmpeg not installed")
class NorefFeatureExtractorTest(unittest.TestCase):
def tearDown(self):
if hasattr(self, 'fextractor'):
self.fextractor.remove_results()
pass
def test_noref_moment_fextractor_with_noref_asset_notyuv(self):
        print('test on running Moment noref feature extractor on NorefAssets '
              '(non-YUV)...')
dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
asset = NorefAsset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
dis_path=dis_path,
asset_dict={'yuv_type': 'notyuv',
'quality_width': 720, 'quality_height': 480,
})
self.fextractor = MomentNorefFeatureExtractor(
[asset],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run()
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_noref_feature_1st_score'], 63.776442013888882, places=4)
self.assertAlmostEqual(results[0]['Moment_noref_feature_2nd_score'], 5194.9118422453694, places=4)
self.assertAlmostEqual(results[0]['Moment_noref_feature_var_score'], 1118.4952858425261, places=4)
def test_noref_moment_fextractor_frames(self):
        print('test on running Moment noref feature extractor on Assets with frames...')
dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
asset = NorefAsset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
dis_path=dis_path,
asset_dict={'width':576, 'height':324,
'start_frame':2, 'end_frame':2,
})
self.fextractor = MomentNorefFeatureExtractor(
[asset],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run()
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_noref_feature_1st_score'], 62.315495327503427, places=4)
self.assertAlmostEqual(results[0]['Moment_noref_feature_2nd_score'], 4888.7623296039092, places=4)
self.assertAlmostEqual(results[0]['Moment_noref_feature_var_score'], 1005.5413716918079, places=4)
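# These extra tests follow the standard unittest pattern, so (assuming the usual project
# layout and an importable test tree) they can be run with unittest discovery, for example:
#
#   python -m unittest python.test.extra.noref_feature_extractor_extratest
#
# The dotted module path above is an assumption based on the file location; adjust it to
# match how the checkout is laid out.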
aiomessaging/consumers/message.py | aiomessaging/aiomessaging | 4 | 6630612 | """Message consumer.
"""
from typing import Callable
from ..message import Message
from ..router import Router
from ..queues import AbstractQueue
from ..actions import SendOutputAction, CheckOutputAction
from .base import BaseMessageConsumer
class OutputNotAvailable(Exception):
"""Output not available exception.
Raised if output returned from pipeline is not available (no consumers for
such routing key, message will not be delivered).
"""
pass
class MessageConsumer(BaseMessageConsumer):
"""Message consumer.
Consume messages from `messages.<event_type>` queue and route it to the
next output(s).
Output queue used to distribute message delivery between all subscribed
workers.
"""
event_type: str
router: Router
output_queue: AbstractQueue
output_observed_handler: Callable
def __init__(self, event_type, router: Router, output_queue,
**kwargs) -> None:
super().__init__(**kwargs)
self.event_type = event_type
self.router = router
self.output_queue = output_queue
def on_output_observed(self, handler):
"""Set output observed handler.
"""
self.output_observed_handler = handler
async def handle_message(self, message: Message):
"""Message handler.
Select next output for message and send it to related queue.
"""
try:
while True:
effect = self.router.next_effect(message)
prev_state = message.get_route_state(effect)
action = effect.next_action(prev_state)
if isinstance(action, (SendOutputAction, CheckOutputAction)):
# send message to output queue
output = action.get_output()
# manager will create output consumer for us if possible
if hasattr(self, 'output_observed_handler'):
await self.output_observed_handler(self.event_type, output)
await self.output_queue.publish(
message.to_dict(), routing_key=output.name
)
message.log.debug("published to output %s, routing_key=%s",
self.output_queue.name, output.name)
# TODO: publish not confirmed
return True
message.log.error("Unhandled action type %s", type(action)) # pragma: no cover
# pylint: disable=broad-except
except Exception: # pragma: no cover
message.log.exception("Unhandled exception in MessageConsumer")
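# Minimal usage sketch (hypothetical wiring; only the API visible in this module is used,
# the router, queue and handler objects are assumed to come from the surrounding app):
#
#   consumer = MessageConsumer("example_event", router=router, output_queue=output_queue)
#   consumer.on_output_observed(ensure_output_consumer)  # optional hook used by handle_message
#   delivered = await consumer.handle_message(message)   # True once the message is published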
| """Message consumer.
"""
from typing import Callable
from ..message import Message
from ..router import Router
from ..queues import AbstractQueue
from ..actions import SendOutputAction, CheckOutputAction
from .base import BaseMessageConsumer
class OutputNotAvailable(Exception):
"""Output not available exception.
Raised if output returned from pipeline is not available (no consumers for
such routing key, message will not be delivered).
"""
pass
class MessageConsumer(BaseMessageConsumer):
"""Message consumer.
Consume messages from `messages.<event_type>` queue and route it to the
next output(s).
Output queue used to distribute message delivery between all subscribed
workers.
"""
event_type: str
router: Router
output_queue: AbstractQueue
output_observed_handler: Callable
def __init__(self, event_type, router: Router, output_queue,
**kwargs) -> None:
super().__init__(**kwargs)
self.event_type = event_type
self.router = router
self.output_queue = output_queue
def on_output_observed(self, handler):
"""Set output observed handler.
"""
self.output_observed_handler = handler
async def handle_message(self, message: Message):
"""Message handler.
Select next output for message and send it to related queue.
"""
try:
while True:
effect = self.router.next_effect(message)
prev_state = message.get_route_state(effect)
action = effect.next_action(prev_state)
if isinstance(action, (SendOutputAction, CheckOutputAction)):
# send message to output queue
output = action.get_output()
# manager will create output consumer for us if possible
if hasattr(self, 'output_observed_handler'):
await self.output_observed_handler(self.event_type, output)
await self.output_queue.publish(
message.to_dict(), routing_key=output.name
)
message.log.debug("published to output %s, routing_key=%s",
self.output_queue.name, output.name)
# TODO: publish not confirmed
return True
message.log.error("Unhandled action type %s", type(action)) # pragma: no cover
# pylint: disable=broad-except
except Exception: # pragma: no cover
message.log.exception("Unhandled exception in MessageConsumer")
| en | 0.678278 | Message consumer. Output not available exception. Raised if output returned from pipeline is not available (no consumers for such routing key, message will not be delivered). Message consumer. Consume messages from `messages.<event_type>` queue and route it to the next output(s). Output queue used to distribute message delivery between all subscribed workers. Set output observed handler. Message handler. Select next output for message and send it to related queue. # send message to output queue # manager will create output consumer for us if possible # TODO: publish not confirmed # pragma: no cover # pylint: disable=broad-except # pragma: no cover | 2.389974 | 2 |
splparser/rules/mvexpandrules.py | lowell80/splparser | 31 | 6630613 | <reponame>lowell80/splparser
#!/usr/bin/env python
from splparser.parsetree import *
from splparser.rules.common.fieldrules import *
from splparser.rules.common.fieldlistrules import *
from splparser.rules.common.valuerules import *
from splparser.lexers.mvexpandlexer import tokens
from splparser.exceptions import SPLSyntaxError
start = 'cmdexpr'
def p_cmdexpr_mvexpand(p):
"""cmdexpr : mvexpandcmd"""
p[0] = p[1]
def p_mvexpandcmd_field(p):
"""mvexpandcmd : MVEXPAND field"""
p[0] = ParseTreeNode('COMMAND', raw='mvexpand')
p[0].add_child(p[2])
def p_mvexpandcmd_field_limit(p):
"""mvexpandcmd : MVEXPAND field m_limit"""
p[0] = ParseTreeNode('COMMAND', raw='mvexpand')
p[0].add_children([p[2], p[3]])
def p_mvexpandcmd_limit_field(p):
"""mvexpandcmd : MVEXPAND m_limit field"""
p[0] = ParseTreeNode('COMMAND', raw='mvexpand')
p[0].add_children([p[2], p[3]])
def p_value(p):
"""m_limit : LIMIT EQ value"""
p[0] = ParseTreeNode('EQ', raw='assign')
p[1] = ParseTreeNode('OPTION', raw=p[1])
p[1].values.append(p[3])
p[0].add_children([p[1], p[3]])
def p_error(p):
raise SPLSyntaxError("Syntax error in mvexpand parser input!")
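# Illustrative SPL fragments accepted by the grammar above (assumed inputs):
#
#   mvexpand foo            -> COMMAND('mvexpand') with a single field child
#   mvexpand foo limit=100  -> COMMAND('mvexpand') with a field child plus an EQ('assign')
#                              node carrying the LIMIT option and its value
#
# Any other shape reaches p_error and raises SPLSyntaxError.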
OOP Ex.py | MoomenEltelbany/PythonDesafios | 0 | 6630614 | from random import randint
tries = 0
comp = randint(0, 100)
print('You have 7 Tries....')
while tries < 7:
num = int(input('Choose a number: '))
if num > comp:
print(f'You need to choose a smaller number.')
tries += 1
elif num < comp:
print('You need to choose a bigger number.')
tries += 1
else:
print('You won!!!')
break
print(f'You have tried {tries} times.')
print(f'The computer chose {comp}')
fomo_social_harvester/scraper/utils.py | dgnsrekt/fomo-social-harvester | 0 | 6630615 | <gh_stars>0
from datetime import datetime, timedelta
import functools
from time import time
from lxml.etree import ParserError, XMLSyntaxError
from requests.exceptions import (SSLError, ReadTimeout, ConnectTimeout,
ConnectionError, ChunkedEncodingError, TooManyRedirects)
def scraper_exception_handler():
"""
A decorator that wraps the passed in function and logs
exceptions should one occur
@param logger: The logging object
"""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except(SSLError,
ReadTimeout,
ConnectTimeout,
ConnectionError,
ChunkedEncodingError,
UnicodeDecodeError,
ValueError,
TooManyRedirects,
ParserError,
XMLSyntaxError) as e:
                # TODO: add different output for RTO, CTO, CE, CHE, UNI, VAL
print('E', end='', flush=True)
except Exception as e:
print('X', end='', flush=True)
                print(str(e), str(e.args))
return wrapper
return decorator
def is_valid_telegram_link(link):
'''Checks link to see if its a real telegram link.
:param str link:
:return True if valid telegram link:
:rtype: bool'''
if 'https://t.me/' in link:
return True
elif 'https://telegram.me/' in link:
return True
elif 'http://www.telegram.me/' in link:
return True
elif 'http://t.me/' in link:
return True
else:
return False
def is_valid_twitter_link(link):
'''Checks link to see if its a real twitter link.
:param str link:
:return True if valid twitter link:
:rtype: bool'''
if 'https://twitter.com/CoinMarketCap' in link:
return False
elif 'https://twitter.com' in link:
return True
elif 'http://twitter.com' in link:
return True
else:
return False
def timeit(method):
def timed(*args, **kw):
tstart = time()
result = method(*args, **kw)
tend = time()
time_result = ((tend - tstart) * 1000) / 60
print(f'{method.__name__.upper()} Completed in: {time_result: 2.2f} s')
return time_result, result
return timed
def get_current_hour():
return datetime.now().replace(microsecond=0, second=0, minute=0)
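# Example usage of the helpers above (illustrative only):
#
#   @scraper_exception_handler()
#   def scrape_page(url):
#       ...                                              # handled errors print 'E' or 'X'
#
#   is_valid_telegram_link('https://t.me/somechannel')            # True
#   is_valid_twitter_link('https://twitter.com/CoinMarketCap')    # False (explicitly excluded)
#
#   elapsed, links = timeit(scrape_page)(url)            # timed() returns (time_result, result)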
model.py | fangchenplus/CarND-Behavioral-Cloning-P3 | 0 | 6630616 | <filename>model.py
import csv
import cv2
import numpy as np
from scipy import ndimage
# import matplotlib.pyplot as plt
lines = []
with open('./data/driving_log.csv') as csvfile:
# with open('D:/data/data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
# print(type(lines))
# print(lines[0])
# print(lines[1])
images = []
measurements = []
correction = 0.2
for line in lines[1:]:
for i in range(3):
source_path = line[i]
filename = source_path.split('/')[-1]
current_path = './data/IMG/' + filename
# current_path = 'D:/data/data/IMG/' + filename
# image = cv2.imread(current_path) # cv2.imread will get images in BGR format, while drive.py uses RGB
image = ndimage.imread(current_path)
images.append(image)
measurement = float(line[3])
if i == 0:
measurements.append(measurement)
elif i == 1:
measurements.append(measurement + correction)
elif i == 2:
measurements.append(measurement - correction)
else:
print('error')
# data augmentation by flipping images and steering angles
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Activation, Dropout, Conv2D, MaxPooling2D, Cropping2D
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
### LeNet
# model.add(Conv2D(6, 5, 5))
# # model.add(MaxPooling2D())
# # model.add(Dropout(0.5))
# model.add(Activation('relu'))
# model.add(Conv2D(6, 5, 5))
# # model.add(MaxPooling2D())
# # model.add(Dropout(0.5))
# model.add(Activation('relu'))
# model.add(Flatten())
# model.add(Dense(120))
# model.add(Dense(84))
# # model.add(Activation('relu'))
# model.add(Dense(1))
### Nvidia
# model.add(Conv2D(24,5,5, subsample=(2,2), activation='relu'))
# model.add(Conv2D(36,5,5, subsample=(2,2), activation='relu'))
# model.add(Conv2D(48,5,5, subsample=(2,2), activation='relu'))
# model.add(Conv2D(64,3,3, activation='relu'))
# model.add(Conv2D(64,3,3, activation='relu'))
model.add(Conv2D(24, (5,5), strides=(2,2), activation='relu'))
model.add(Conv2D(36, (5,5), strides=(2,2), activation='relu'))
model.add(Conv2D(48, (5,5), strides=(2,2), activation='relu'))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
###
model.compile(loss = 'mse', optimizer = 'adam')
history_object = model.fit(X_train, y_train, validation_split = 0.2, shuffle = True, epochs = 3, verbose = 1)
model.save('model.h5')
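# Hedged sanity check (not part of the original project script): reload the
# file that was just written and run a single prediction; X_train is reused
# only because it is already in memory. drive.py performs the real-time
# predictions in the actual project.
from keras.models import load_model
reloaded_model = load_model('model.h5')
print('sanity-check steering prediction:', reloaded_model.predict(X_train[:1], batch_size=1))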
### print the keys contained in the history object
# print(history_object.history.keys())
### plot the training and validation loss for each epoch
# plt.plot(history_object.history['loss'])
# plt.plot(history_object.history['val_loss'])
# plt.title('model mean squared error loss')
# plt.ylabel('mean squared error loss')
# plt.xlabel('epoch')
# plt.legend(['training set', 'validation set'], loc='upper right')
# plt.show()
test/utils/test_negative_sampling.py | snubeaver/pytorch_geometric | 2 | 6630617 | import torch
from torch_geometric.utils import (negative_sampling,
structured_negative_sampling,
batched_negative_sampling)
def test_negative_sampling():
edge_index = torch.as_tensor([[0, 0, 1, 2], [0, 1, 2, 3]])
neg_edge_index = negative_sampling(edge_index)
assert neg_edge_index.size(1) == edge_index.size(1)
adj = torch.zeros(4, 4, dtype=torch.uint8)
adj[edge_index[0], edge_index[1]] = 1
neg_adj = torch.zeros(4, 4, dtype=torch.uint8)
neg_adj[neg_edge_index[0], neg_edge_index[1]] = 1
assert (adj & neg_adj).sum() == 0
neg_edge_index = negative_sampling(edge_index, num_neg_samples=2)
assert neg_edge_index.size(1) == 2
def test_structured_negative_sampling():
edge_index = torch.as_tensor([[0, 0, 1, 2], [0, 1, 2, 3]])
i, j, k = structured_negative_sampling(edge_index)
assert i.size(0) == edge_index.size(1)
assert j.size(0) == edge_index.size(1)
assert k.size(0) == edge_index.size(1)
adj = torch.zeros(4, 4, dtype=torch.uint8)
adj[i, j] = 1
neg_adj = torch.zeros(4, 4, dtype=torch.uint8)
neg_adj[i, k] = 1
assert (adj & neg_adj).sum() == 0
def test_batched_negative_sampling():
edge_index = torch.as_tensor([[0, 0, 1, 2], [0, 1, 2, 3]])
edge_index = torch.cat([edge_index, edge_index + 4], dim=1)
batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
neg_edge_index = batched_negative_sampling(edge_index, batch)
assert neg_edge_index.size(1) == edge_index.size(1)
adj = torch.zeros(8, 8, dtype=torch.uint8)
adj[edge_index[0], edge_index[1]] = 1
neg_adj = torch.zeros(8, 8, dtype=torch.uint8)
neg_adj[neg_edge_index[0], neg_edge_index[1]] = 1
assert (adj & neg_adj).sum() == 0
assert neg_adj[:4, 4:].sum() == 0
assert neg_adj[4:, :4].sum() == 0
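# Hedged usage sketch (not part of the test suite): negative_sampling is
# typically paired with the positive edges when training a link predictor.
# `z` stands in for learned node embeddings produced by some encoder.
def example_link_prediction_loss(z, edge_index):
    neg_edge_index = negative_sampling(edge_index)
    pos_score = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=-1)
    neg_score = (z[neg_edge_index[0]] * z[neg_edge_index[1]]).sum(dim=-1)
    scores = torch.cat([pos_score, neg_score])
    labels = torch.cat([torch.ones_like(pos_score), torch.zeros_like(neg_score)])
    return torch.nn.functional.binary_cross_entropy_with_logits(scores, labels)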
mxfusion/components/distributions/gp/kernels/linear.py | JeremiasKnoblauch/MXFusion | 2 | 6630618 | <reponame>JeremiasKnoblauch/MXFusion
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
class Linear(NativeKernel):
"""
Linear kernel
.. math::
k(x,y) = \\sum_{i=1}^{\\text{input_dim}} \\sigma^2_i x_iy_i
    :param input_dim: the number of dimensions of the kernel (the total number of active dimensions).
:type input_dim: int
:param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided
by a lengthscale for individual dimensions.
:type ARD: boolean
:param variances: the initial value for the variances parameter, which scales the input dimensions.
:type variances: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
    :param dtype: the data type for floating point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, ARD=False, variances=1., name='linear',
active_dims=None, dtype=None, ctx=None):
super(Linear, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
self.ARD = ARD
if not isinstance(variances, Variable):
variances = Variable(shape=(input_dim if ARD else 1,),
transformation=PositiveTransformation(),
initial_value=variances)
self.variances = variances
def _compute_K(self, F, X, variances, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variances: the variances parameter, which scales the input dimensions.
:type variances: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if self.ARD:
var_sqrt = F.expand_dims(F.sqrt(variances), axis=-2)
if X2 is None:
xsc = X * var_sqrt
return F.linalg.syrk(xsc)
else:
xsc = X * var_sqrt
x2sc = X2 * var_sqrt
return F.linalg.gemm2(xsc, x2sc, False, True)
else:
if X2 is None:
A = F.linalg.syrk(X)
else:
A = F.linalg.gemm2(X, X2, False, True)
return A * F.expand_dims(variances, axis=-1)
def _compute_Kdiag(self, F, X, variances):
"""
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variances: the variances parameter, which scales the input dimensions.
:type variances: MXNet NDArray or MXNet Symbol
        :return: The diagonal of the covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
X2 = F.square(X)
return F.sum(X2 * F.expand_dims(variances, axis=-2), axis=-1)
def replicate_self(self, attribute_map=None):
"""
The copy constructor for a kernel.
"""
replicant = super(Linear, self).replicate_self(attribute_map)
replicant.ARD = self.ARD
        return replicant
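def _example_linear_kernel_usage():
    # Hedged usage sketch (not part of the original module): exercising the
    # kernel with the MXNet NDArray front end. The constructor arguments
    # follow the signature documented above; calling the private _compute_K /
    # _compute_Kdiag directly is purely illustrative.
    import mxnet as mx
    kern = Linear(input_dim=2, ARD=True)
    X = mx.nd.random.uniform(shape=(1, 5, 2))
    variances = mx.nd.ones((2,))
    K = kern._compute_K(mx.nd, X, variances)            # shape (1, 5, 5)
    K_diag = kern._compute_Kdiag(mx.nd, X, variances)   # shape (1, 5)
    return K, K_diag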
tartiflette/types/object.py | mazzi/tartiflette | 530 | 6630619 | from functools import partial
from typing import Any, Callable, Dict, List, Optional
from tartiflette.coercers.outputs.directives_coercer import (
output_directives_coercer,
)
from tartiflette.coercers.outputs.object_coercer import object_coercer
from tartiflette.types.helpers.get_directive_instances import (
compute_directive_nodes,
)
from tartiflette.types.type import (
GraphQLCompositeType,
GraphQLExtension,
GraphQLType,
)
from tartiflette.utils.directives import wraps_with_directives
__all__ = ("GraphQLObjectType",)
class GraphQLObjectType(GraphQLCompositeType, GraphQLType):
"""
Definition of a GraphQL object.
"""
# Introspection attributes
kind = "OBJECT"
def __init__(
self,
name: str,
fields: Dict[str, "GraphQLField"],
interfaces: Optional[List[str]] = None,
description: Optional[str] = None,
directives: Optional[List["DirectiveNode"]] = None,
) -> None:
"""
:param name: name of the object
:param fields: map of fields linked to the object
:param interfaces: list of interface names implemented by the object
:param description: description of the object
:param directives: list of directives linked to the object
:type name: str
:type fields: Dict[str, GraphQLField]
:type interfaces: Optional[List[str]]
:type description: Optional[str]
:type directives: Optional[List[DirectiveNode]]
"""
self.name = name
self.implemented_fields = fields or {}
self.interfaces_names = interfaces or []
self.description = description
# Directives
self.directives = directives
self.introspection_directives: Optional[Callable] = None
self.pre_output_coercion_directives: Optional[Callable] = None
# Coercers
self.output_coercer: Optional[Callable] = None
# Introspection attributes
self.interfaces: List["GraphQLInterfaceType"] = []
self.fields: List["GraphQLField"] = []
self._possible_types_set = set()
self._possible_types_set.add(self.name)
def __eq__(self, other: Any) -> bool:
"""
Returns True if `other` instance is identical to `self`.
:param other: object instance to compare to `self`
:type other: Any
:return: whether or not `other` is identical to `self`
:rtype: bool
"""
return self is other or (
isinstance(other, GraphQLObjectType)
and self.name == other.name
and self.implemented_fields == other.implemented_fields
and self.interfaces_names == other.interfaces_names
and self.description == other.description
and self.directives == other.directives
)
def __repr__(self) -> str:
"""
Returns the representation of a GraphQLObjectType instance.
:return: the representation of a GraphQLObjectType instance
:rtype: str
"""
return (
"GraphQLObjectType(name={!r}, fields={!r}, "
"interfaces={!r}, description={!r}, directives={!r})".format(
self.name,
self.implemented_fields,
self.interfaces_names,
self.description,
self.directives,
)
)
def __str__(self) -> str:
"""
Returns a human-readable representation of the object.
:return: a human-readable representation of the object
:rtype: str
"""
return self.name
def add_field(self, field: "GraphQLField") -> None:
"""
Adds the filled in field to the list of implemented fields.
:param field: field to add to the list
:type field: GraphQLField
"""
self.implemented_fields[field.name] = field
def find_field(self, name: str) -> "GraphQLField":
"""
Returns the field corresponding to the filled in name.
:param name: name of the field to return
:type name: str
:return: the field corresponding to the filled in name
:rtype: GraphQLField
"""
return self.implemented_fields[name]
def bake(self, schema: "GraphQLSchema") -> None:
"""
Bakes the GraphQLObjectType and computes all the necessary stuff for
execution.
:param schema: the GraphQLSchema instance linked to the engine
:type schema: GraphQLSchema
"""
if self.interfaces_names:
for interface_name in self.interfaces_names:
interface = schema.find_type(interface_name)
self.interfaces.append(interface)
interface.add_possible_type(self)
# Directives
directives_definition = compute_directive_nodes(
schema, self.directives
)
self.introspection_directives = wraps_with_directives(
directives_definition=directives_definition,
directive_hook="on_introspection",
)
self.pre_output_coercion_directives = wraps_with_directives(
directives_definition=directives_definition,
directive_hook="on_pre_output_coercion",
with_default=True,
)
# Coercers
self.output_coercer = partial(
output_directives_coercer,
coercer=partial(object_coercer, object_type=self),
directives=self.pre_output_coercion_directives,
)
async def bake_fields(
self,
schema: "GraphQLSchema",
custom_default_resolver: Optional[Callable],
) -> None:
"""
Bakes object's fields.
:param schema: the GraphQLSchema instance linked to the engine
:param custom_default_resolver: callable that will replace the builtin
default_resolver
:type schema: GraphQLSchema
:type custom_default_resolver: Optional[Callable]
"""
if self.implemented_fields:
for field in self.implemented_fields.values():
field.bake(schema, custom_default_resolver)
field = await field.on_post_bake()
if not field.name.startswith("__"):
self.fields.append(field)
@property
def possible_types_set(self) -> set:
return self._possible_types_set
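def _example_build_object_type() -> "GraphQLObjectType":
    # Hedged illustration (not part of the original module): the engine builds
    # these instances while baking the SDL; constructing one directly just
    # shows the constructor and the small helpers defined above.
    book_type = GraphQLObjectType(
        name="Book",
        fields={},  # GraphQLField instances keyed by field name
        interfaces=["Node"],
        description="A book in the catalogue.",
    )
    assert str(book_type) == "Book" and book_type.kind == "OBJECT"
    return book_type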
class GraphQLObjectTypeExtension(GraphQLType, GraphQLExtension):
def __init__(self, name, fields, directives, interfaces):
self.name = name
self.fields = fields or {}
self.directives = directives
self.interfaces = interfaces or []
def bake(self, schema):
extended = schema.find_type(self.name)
extended.directives.extend(self.directives)
extended.implemented_fields.update(self.fields)
extended.interfaces_names.extend(self.interfaces)
def __eq__(self, other: Any) -> bool:
"""
Returns True if `other` instance is identical to `self`.
:param other: object instance to compare to `self`
:type other: Any
:return: whether or not `other` is identical to `self`
:rtype: bool
"""
return self is other or (
isinstance(other, GraphQLObjectTypeExtension)
and other.directives == self.directives
and other.fields == self.fields
and other.name == self.name
and other.interfaces == self.interfaces
)
def __repr__(self) -> str:
"""
        Returns the representation of a GraphQLObjectTypeExtension instance.
        :return: the representation of a GraphQLObjectTypeExtension instance
:rtype: str
"""
return (
f"GraphQLObjectTypeExtension("
f"name={repr(self.name)}, "
f"directives={repr(self.directives)}, "
f"fields={repr(self.fields)}, "
f"interfaces={repr(self.interfaces)})"
        )
nova/objects/instance_group.py | WeifanFu-bsn/nova | 0 | 6630620 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from oslo_utils import versionutils
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from nova.compute import utils as compute_utils
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models as main_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LAZY_LOAD_FIELDS = ['hosts']
def _instance_group_get_query(context, id_field=None, id=None):
query = context.session.query(api_models.InstanceGroup).\
options(joinedload('_policies')).\
options(joinedload('_members'))
if not context.is_admin:
query = query.filter_by(project_id=context.project_id)
if id and id_field:
query = query.filter(id_field == id)
return query
def _instance_group_model_get_query(context, model_class, group_id):
return context.session.query(model_class).filter_by(group_id=group_id)
def _instance_group_model_add(context, model_class, items, item_models, field,
group_id, append_to_models=None):
models = []
already_existing = set()
for db_item in item_models:
already_existing.add(getattr(db_item, field))
models.append(db_item)
for item in items:
if item in already_existing:
continue
model = model_class()
values = {'group_id': group_id}
values[field] = item
model.update(values)
context.session.add(model)
if append_to_models:
append_to_models.append(model)
models.append(model)
return models
def _instance_group_policies_add(context, group, policies):
query = _instance_group_model_get_query(context,
api_models.InstanceGroupPolicy,
group.id)
query = query.filter(
api_models.InstanceGroupPolicy.policy.in_(set(policies)))
return _instance_group_model_add(context, api_models.InstanceGroupPolicy,
policies, query.all(), 'policy', group.id,
append_to_models=group._policies)
def _instance_group_members_add(context, group, members):
query = _instance_group_model_get_query(context,
api_models.InstanceGroupMember,
group.id)
query = query.filter(
api_models.InstanceGroupMember.instance_uuid.in_(set(members)))
return _instance_group_model_add(context, api_models.InstanceGroupMember,
members, query.all(), 'instance_uuid',
group.id, append_to_models=group._members)
def _instance_group_members_add_by_uuid(context, group_uuid, members):
# NOTE(melwitt): The condition on the join limits the number of members
# returned to only those we wish to check as already existing.
group = context.session.query(api_models.InstanceGroup).\
outerjoin(api_models.InstanceGroupMember,
api_models.InstanceGroupMember.instance_uuid.in_(set(members))).\
filter(api_models.InstanceGroup.uuid == group_uuid).\
options(contains_eager('_members')).first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return _instance_group_model_add(context, api_models.InstanceGroupMember,
members, group._members, 'instance_uuid',
group.id)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Use list/dict helpers for policies, metadetails, members
# Version 1.3: Make uuid a non-None real string
# Version 1.4: Add add_members()
# Version 1.5: Add get_hosts()
# Version 1.6: Add get_by_name()
# Version 1.7: Deprecate metadetails
# Version 1.8: Add count_members_by_user()
# Version 1.9: Add get_by_instance_uuid()
# Version 1.10: Add hosts field
VERSION = '1.10'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'name': fields.StringField(nullable=True),
'policies': fields.ListOfStringsField(nullable=True),
'members': fields.ListOfStringsField(nullable=True),
'hosts': fields.ListOfStringsField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we had an always-empty
# metadetails property
primitive['metadetails'] = {}
@staticmethod
def _from_db_object(context, instance_group, db_inst):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
# Most of the field names match right now, so be quick
for field in instance_group.fields:
if field in LAZY_LOAD_FIELDS:
continue
# This is needed to handle db models from both the api
# database and the main database. In the migration to
# the api database, we have removed soft-delete, so
# the object fields for delete must be filled in with
# default values for db models from the api database.
ignore = {'deleted': False,
'deleted_at': None}
if field in ignore and not hasattr(db_inst, field):
instance_group[field] = ignore[field]
else:
instance_group[field] = db_inst[field]
instance_group._context = context
instance_group.obj_reset_changes()
return instance_group
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_uuid(context, uuid):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=uuid).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=uuid)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_id(context, id):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.id,
id=id).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=id)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_name(context, name):
grp = _instance_group_get_query(context).filter_by(name=name).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=name)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_instance(context, instance_uuid):
grp_member = context.session.query(api_models.InstanceGroupMember).\
filter_by(instance_uuid=instance_uuid).first()
if not grp_member:
raise exception.InstanceGroupNotFound(group_uuid='')
grp = InstanceGroup._get_from_db_by_id(context, grp_member.group_id)
return grp
@staticmethod
@db_api.api_context_manager.writer
def _save_in_db(context, group_uuid, values):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=group_uuid).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
values_copy = copy.copy(values)
policies = values_copy.pop('policies', None)
members = values_copy.pop('members', None)
grp.update(values_copy)
if policies is not None:
_instance_group_policies_add(context, grp, policies)
if members is not None:
_instance_group_members_add(context, grp, members)
return grp
@staticmethod
@db_api.api_context_manager.writer
def _create_in_db(context, values, policies=None, members=None):
try:
group = api_models.InstanceGroup()
group.update(values)
group.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=values['uuid'])
if policies:
group._policies = _instance_group_policies_add(context, group,
policies)
else:
group._policies = []
if members:
group._members = _instance_group_members_add(context, group,
members)
else:
group._members = []
return group
@staticmethod
@db_api.api_context_manager.writer
def _destroy_in_db(context, group_uuid):
qry = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=group_uuid)
if qry.count() == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies and members
group_id = qry.first().id
instance_models = [api_models.InstanceGroupPolicy,
api_models.InstanceGroupMember]
for model in instance_models:
context.session.query(model).filter_by(group_id=group_id).delete()
qry.delete()
@staticmethod
@db_api.api_context_manager.writer
def _add_members_in_db(context, group_uuid, members):
return _instance_group_members_add_by_uuid(context, group_uuid,
members)
@staticmethod
@db_api.api_context_manager.writer
def _remove_members_in_db(context, group_id, instance_uuids):
# There is no public method provided for removing members because the
# user-facing API doesn't allow removal of instance group members. We
# need to be able to remove members to address quota races.
context.session.query(api_models.InstanceGroupMember).\
filter_by(group_id=group_id).\
filter(api_models.InstanceGroupMember.instance_uuid.
in_(set(instance_uuids))).\
delete(synchronize_session=False)
def obj_load_attr(self, attrname):
# NOTE(sbauza): Only hosts could be lazy-loaded right now
if attrname != 'hosts':
raise exception.ObjectActionError(
action='obj_load_attr', reason='unable to load %s' % attrname)
self.hosts = self.get_hosts()
self.obj_reset_changes(['hosts'])
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_group = None
try:
db_group = cls._get_from_db_by_uuid(context, uuid)
except exception.InstanceGroupNotFound:
pass
if db_group is None:
db_group = db.instance_group_get(context, uuid)
return cls._from_db_object(context, cls(), db_group)
@base.remotable_classmethod
def get_by_name(cls, context, name):
try:
db_group = cls._get_from_db_by_name(context, name)
except exception.InstanceGroupNotFound:
igs = InstanceGroupList._get_main_by_project_id(context,
context.project_id)
for ig in igs:
if ig.name == name:
return ig
raise exception.InstanceGroupNotFound(group_uuid=name)
return cls._from_db_object(context, cls(), db_group)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_group = None
try:
db_group = cls._get_from_db_by_instance(context, instance_uuid)
except exception.InstanceGroupNotFound:
pass
if db_group is None:
db_group = db.instance_group_get_by_instance(context,
instance_uuid)
return cls._from_db_object(context, cls(), db_group)
@classmethod
def get_by_hint(cls, context, hint):
if uuidutils.is_uuid_like(hint):
return cls.get_by_uuid(context, hint)
else:
return cls.get_by_name(context, hint)
@base.remotable
def save(self):
"""Save updates to this instance group."""
updates = self.obj_get_changes()
# NOTE(sbauza): We do NOT save the set of compute nodes that an
# instance group is connected to in this method. Instance groups are
# implicitly connected to compute nodes when the
# InstanceGroup.add_members() method is called, which adds the mapping
# table entries.
# So, since the only way to have hosts in the updates is to set that
# field explicitly, we prefer to raise an Exception so the developer
        # knows to call obj_reset_changes(['hosts']) right after setting
# the field.
if 'hosts' in updates:
raise exception.InstanceGroupSaveException(field='hosts')
if not updates:
return
payload = dict(updates)
payload['server_group_id'] = self.uuid
try:
db_group = self._save_in_db(self._context, self.uuid, updates)
except exception.InstanceGroupNotFound:
db.instance_group_update(self._context, self.uuid, updates)
db_group = db.instance_group_get(self._context, self.uuid)
self._from_db_object(self._context, self, db_group)
compute_utils.notify_about_server_group_update(self._context,
"update", payload)
@base.remotable
def refresh(self):
"""Refreshes the instance group."""
current = self.__class__.get_by_uuid(self._context, self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _create(self, skipcheck=False):
# NOTE(danms): This is just for the migration routine, and
# can be removed once we're no longer supporting the migration
# of instance groups from the main to api database.
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
payload = dict(updates)
updates.pop('id', None)
policies = updates.pop('policies', None)
members = updates.pop('members', None)
if 'uuid' not in updates:
self.uuid = uuidutils.generate_uuid()
updates['uuid'] = self.uuid
if not skipcheck:
try:
db.instance_group_get(self._context, self.uuid)
raise exception.ObjectActionError(
action='create',
reason='already created in main')
except exception.InstanceGroupNotFound:
pass
db_group = self._create_in_db(self._context, updates,
policies=policies,
members=members)
self._from_db_object(self._context, self, db_group)
payload['server_group_id'] = self.uuid
compute_utils.notify_about_server_group_update(self._context,
"create", payload)
compute_utils.notify_about_server_group_action(
context=self._context,
group=self,
action=fields.NotificationAction.CREATE)
@base.remotable
def create(self):
self._create()
@base.remotable
def destroy(self):
payload = {'server_group_id': self.uuid}
try:
self._destroy_in_db(self._context, self.uuid)
except exception.InstanceGroupNotFound:
db.instance_group_delete(self._context, self.uuid)
self.obj_reset_changes()
compute_utils.notify_about_server_group_update(self._context,
"delete", payload)
compute_utils.notify_about_server_group_action(
context=self._context,
group=self,
action=fields.NotificationAction.DELETE)
@base.remotable_classmethod
def add_members(cls, context, group_uuid, instance_uuids):
payload = {'server_group_id': group_uuid,
'instance_uuids': instance_uuids}
try:
members = cls._add_members_in_db(context, group_uuid,
instance_uuids)
members = [member['instance_uuid'] for member in members]
except exception.InstanceGroupNotFound:
members = db.instance_group_members_add(context, group_uuid,
instance_uuids)
compute_utils.notify_about_server_group_update(context,
"addmember", payload)
return list(members)
@base.remotable
def get_hosts(self, exclude=None):
"""Get a list of hosts for non-deleted instances in the group
This method allows you to get a list of the hosts where instances in
this group are currently running. There's also an option to exclude
certain instance UUIDs from this calculation.
"""
filter_uuids = self.members
if exclude:
filter_uuids = set(filter_uuids) - set(exclude)
filters = {'uuid': filter_uuids, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return list(set([instance.host for instance in instances
if instance.host]))
@base.remotable
def count_members_by_user(self, user_id):
"""Count the number of instances in a group belonging to a user."""
filter_uuids = self.members
filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return len(instances)
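def _example_group_usage(context, group_uuid, instance_uuid):
    # Hedged sketch (not part of upstream Nova): the typical calling pattern
    # for the remotable methods above -- look the group up, record a new
    # member, then ask which hosts its instances currently occupy. refresh()
    # re-reads the record so the local object sees the new member.
    group = InstanceGroup.get_by_uuid(context, group_uuid)
    InstanceGroup.add_members(context, group.uuid, [instance_uuid])
    group.refresh()
    return group.get_hosts(exclude=[instance_uuid])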
@base.NovaObjectRegistry.register
class InstanceGroupList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceGroup <= version 1.3
# Version 1.1: InstanceGroup <= version 1.4
# Version 1.2: InstanceGroup <= version 1.5
# Version 1.3: InstanceGroup <= version 1.6
# Version 1.4: InstanceGroup <= version 1.7
# Version 1.5: InstanceGroup <= version 1.8
# Version 1.6: InstanceGroup <= version 1.9
# Version 1.7: InstanceGroup <= version 1.10
# Version 1.8: Added get_counts() for quotas
VERSION = '1.8'
fields = {
'objects': fields.ListOfObjectsField('InstanceGroup'),
}
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db(context, project_id=None):
query = _instance_group_get_query(context)
if project_id is not None:
query = query.filter_by(project_id=project_id)
return query.all()
@classmethod
def _get_main_by_project_id(cls, context, project_id):
main_db_groups = db.instance_group_get_all_by_project_id(context,
project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
main_db_groups)
@staticmethod
@db_api.api_context_manager.reader
def _get_counts_from_db(context, project_id, user_id=None):
query = context.session.query(api_models.InstanceGroup.id).\
filter_by(project_id=project_id)
counts = {}
counts['project'] = {'server_groups': query.count()}
if user_id:
query = query.filter_by(user_id=user_id)
counts['user'] = {'server_groups': query.count()}
return counts
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
api_db_groups = cls._get_from_db(context, project_id=project_id)
main_db_groups = db.instance_group_get_all_by_project_id(context,
project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
api_db_groups + main_db_groups)
@base.remotable_classmethod
def get_all(cls, context):
api_db_groups = cls._get_from_db(context)
main_db_groups = db.instance_group_get_all(context)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
api_db_groups + main_db_groups)
@base.remotable_classmethod
def get_counts(cls, context, project_id, user_id=None):
"""Get the counts of InstanceGroup objects in the database.
:param context: The request context for database access
:param project_id: The project_id to count across
:param user_id: The user_id to count across
:returns: A dict containing the project-scoped counts and user-scoped
counts if user_id is specified. For example:
{'project': {'server_groups': <count across project>},
'user': {'server_groups': <count across user>}}
"""
return cls._get_counts_from_db(context, project_id, user_id=user_id)
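def _example_quota_usage(context, project_id, user_id):
    # Hedged sketch (not part of upstream Nova): reading the per-project and
    # per-user totals returned by get_counts(), using the dict shape described
    # in its docstring.
    counts = InstanceGroupList.get_counts(context, project_id, user_id=user_id)
    return (counts['project']['server_groups'],
            counts['user']['server_groups'])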
@db_api.pick_context_manager_reader
def _get_main_instance_groups(context, limit):
return context.session.query(main_models.InstanceGroup).\
options(joinedload('_policies')).\
options(joinedload('_members')).\
filter_by(deleted=0).\
limit(limit).\
all()
def migrate_instance_groups_to_api_db(context, count):
main_groups = _get_main_instance_groups(context, count)
done = 0
for db_group in main_groups:
group = objects.InstanceGroup(context=context,
user_id=db_group.user_id,
project_id=db_group.project_id,
uuid=db_group.uuid,
name=db_group.name,
policies=db_group.policies,
members=db_group.members)
try:
group._create(skipcheck=True)
except exception.InstanceGroupIdExists:
# NOTE(melwitt): This might happen if there's a failure right after
# the InstanceGroup was created and the migration is re-run.
pass
db_api.instance_group_delete(context, db_group.uuid)
done += 1
    return len(main_groups), done
instance_group.obj_reset_changes()
return instance_group
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_uuid(context, uuid):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=uuid).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=uuid)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_id(context, id):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.id,
id=id).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=id)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_name(context, name):
grp = _instance_group_get_query(context).filter_by(name=name).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=name)
return grp
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db_by_instance(context, instance_uuid):
grp_member = context.session.query(api_models.InstanceGroupMember).\
filter_by(instance_uuid=instance_uuid).first()
if not grp_member:
raise exception.InstanceGroupNotFound(group_uuid='')
grp = InstanceGroup._get_from_db_by_id(context, grp_member.group_id)
return grp
@staticmethod
@db_api.api_context_manager.writer
def _save_in_db(context, group_uuid, values):
grp = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=group_uuid).first()
if not grp:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
values_copy = copy.copy(values)
policies = values_copy.pop('policies', None)
members = values_copy.pop('members', None)
grp.update(values_copy)
if policies is not None:
_instance_group_policies_add(context, grp, policies)
if members is not None:
_instance_group_members_add(context, grp, members)
return grp
@staticmethod
@db_api.api_context_manager.writer
def _create_in_db(context, values, policies=None, members=None):
try:
group = api_models.InstanceGroup()
group.update(values)
group.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=values['uuid'])
if policies:
group._policies = _instance_group_policies_add(context, group,
policies)
else:
group._policies = []
if members:
group._members = _instance_group_members_add(context, group,
members)
else:
group._members = []
return group
@staticmethod
@db_api.api_context_manager.writer
def _destroy_in_db(context, group_uuid):
qry = _instance_group_get_query(context,
id_field=api_models.InstanceGroup.uuid,
id=group_uuid)
if qry.count() == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies and members
group_id = qry.first().id
instance_models = [api_models.InstanceGroupPolicy,
api_models.InstanceGroupMember]
for model in instance_models:
context.session.query(model).filter_by(group_id=group_id).delete()
qry.delete()
@staticmethod
@db_api.api_context_manager.writer
def _add_members_in_db(context, group_uuid, members):
return _instance_group_members_add_by_uuid(context, group_uuid,
members)
@staticmethod
@db_api.api_context_manager.writer
def _remove_members_in_db(context, group_id, instance_uuids):
# There is no public method provided for removing members because the
# user-facing API doesn't allow removal of instance group members. We
# need to be able to remove members to address quota races.
context.session.query(api_models.InstanceGroupMember).\
filter_by(group_id=group_id).\
filter(api_models.InstanceGroupMember.instance_uuid.
in_(set(instance_uuids))).\
delete(synchronize_session=False)
def obj_load_attr(self, attrname):
# NOTE(sbauza): Only hosts could be lazy-loaded right now
if attrname != 'hosts':
raise exception.ObjectActionError(
action='obj_load_attr', reason='unable to load %s' % attrname)
self.hosts = self.get_hosts()
self.obj_reset_changes(['hosts'])
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_group = None
try:
db_group = cls._get_from_db_by_uuid(context, uuid)
except exception.InstanceGroupNotFound:
pass
if db_group is None:
db_group = db.instance_group_get(context, uuid)
return cls._from_db_object(context, cls(), db_group)
@base.remotable_classmethod
def get_by_name(cls, context, name):
try:
db_group = cls._get_from_db_by_name(context, name)
except exception.InstanceGroupNotFound:
igs = InstanceGroupList._get_main_by_project_id(context,
context.project_id)
for ig in igs:
if ig.name == name:
return ig
raise exception.InstanceGroupNotFound(group_uuid=name)
return cls._from_db_object(context, cls(), db_group)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_group = None
try:
db_group = cls._get_from_db_by_instance(context, instance_uuid)
except exception.InstanceGroupNotFound:
pass
if db_group is None:
db_group = db.instance_group_get_by_instance(context,
instance_uuid)
return cls._from_db_object(context, cls(), db_group)
@classmethod
def get_by_hint(cls, context, hint):
if uuidutils.is_uuid_like(hint):
return cls.get_by_uuid(context, hint)
else:
return cls.get_by_name(context, hint)
@base.remotable
def save(self):
"""Save updates to this instance group."""
updates = self.obj_get_changes()
# NOTE(sbauza): We do NOT save the set of compute nodes that an
# instance group is connected to in this method. Instance groups are
# implicitly connected to compute nodes when the
# InstanceGroup.add_members() method is called, which adds the mapping
# table entries.
# So, since the only way to have hosts in the updates is to set that
# field explicitly, we prefer to raise an Exception so the developer
# knows he has to call obj_reset_changes(['hosts']) right after setting
# the field.
if 'hosts' in updates:
raise exception.InstanceGroupSaveException(field='hosts')
if not updates:
return
payload = dict(updates)
payload['server_group_id'] = self.uuid
try:
db_group = self._save_in_db(self._context, self.uuid, updates)
except exception.InstanceGroupNotFound:
db.instance_group_update(self._context, self.uuid, updates)
db_group = db.instance_group_get(self._context, self.uuid)
self._from_db_object(self._context, self, db_group)
compute_utils.notify_about_server_group_update(self._context,
"update", payload)
@base.remotable
def refresh(self):
"""Refreshes the instance group."""
current = self.__class__.get_by_uuid(self._context, self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _create(self, skipcheck=False):
# NOTE(danms): This is just for the migration routine, and
# can be removed once we're no longer supporting the migration
# of instance groups from the main to api database.
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
payload = dict(updates)
updates.pop('id', None)
policies = updates.pop('policies', None)
members = updates.pop('members', None)
if 'uuid' not in updates:
self.uuid = uuidutils.generate_uuid()
updates['uuid'] = self.uuid
if not skipcheck:
try:
db.instance_group_get(self._context, self.uuid)
raise exception.ObjectActionError(
action='create',
reason='already created in main')
except exception.InstanceGroupNotFound:
pass
db_group = self._create_in_db(self._context, updates,
policies=policies,
members=members)
self._from_db_object(self._context, self, db_group)
payload['server_group_id'] = self.uuid
compute_utils.notify_about_server_group_update(self._context,
"create", payload)
compute_utils.notify_about_server_group_action(
context=self._context,
group=self,
action=fields.NotificationAction.CREATE)
@base.remotable
def create(self):
self._create()
@base.remotable
def destroy(self):
payload = {'server_group_id': self.uuid}
try:
self._destroy_in_db(self._context, self.uuid)
except exception.InstanceGroupNotFound:
db.instance_group_delete(self._context, self.uuid)
self.obj_reset_changes()
compute_utils.notify_about_server_group_update(self._context,
"delete", payload)
compute_utils.notify_about_server_group_action(
context=self._context,
group=self,
action=fields.NotificationAction.DELETE)
@base.remotable_classmethod
def add_members(cls, context, group_uuid, instance_uuids):
payload = {'server_group_id': group_uuid,
'instance_uuids': instance_uuids}
try:
members = cls._add_members_in_db(context, group_uuid,
instance_uuids)
members = [member['instance_uuid'] for member in members]
except exception.InstanceGroupNotFound:
members = db.instance_group_members_add(context, group_uuid,
instance_uuids)
compute_utils.notify_about_server_group_update(context,
"addmember", payload)
return list(members)
@base.remotable
def get_hosts(self, exclude=None):
"""Get a list of hosts for non-deleted instances in the group
This method allows you to get a list of the hosts where instances in
this group are currently running. There's also an option to exclude
certain instance UUIDs from this calculation.
"""
filter_uuids = self.members
if exclude:
filter_uuids = set(filter_uuids) - set(exclude)
filters = {'uuid': filter_uuids, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return list(set([instance.host for instance in instances
if instance.host]))
@base.remotable
def count_members_by_user(self, user_id):
"""Count the number of instances in a group belonging to a user."""
filter_uuids = self.members
filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return len(instances)
@base.NovaObjectRegistry.register
class InstanceGroupList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceGroup <= version 1.3
# Version 1.1: InstanceGroup <= version 1.4
# Version 1.2: InstanceGroup <= version 1.5
# Version 1.3: InstanceGroup <= version 1.6
# Version 1.4: InstanceGroup <= version 1.7
# Version 1.5: InstanceGroup <= version 1.8
# Version 1.6: InstanceGroup <= version 1.9
# Version 1.7: InstanceGroup <= version 1.10
# Version 1.8: Added get_counts() for quotas
VERSION = '1.8'
fields = {
'objects': fields.ListOfObjectsField('InstanceGroup'),
}
@staticmethod
@db_api.api_context_manager.reader
def _get_from_db(context, project_id=None):
query = _instance_group_get_query(context)
if project_id is not None:
query = query.filter_by(project_id=project_id)
return query.all()
@classmethod
def _get_main_by_project_id(cls, context, project_id):
main_db_groups = db.instance_group_get_all_by_project_id(context,
project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
main_db_groups)
@staticmethod
@db_api.api_context_manager.reader
def _get_counts_from_db(context, project_id, user_id=None):
query = context.session.query(api_models.InstanceGroup.id).\
filter_by(project_id=project_id)
counts = {}
counts['project'] = {'server_groups': query.count()}
if user_id:
query = query.filter_by(user_id=user_id)
counts['user'] = {'server_groups': query.count()}
return counts
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
api_db_groups = cls._get_from_db(context, project_id=project_id)
main_db_groups = db.instance_group_get_all_by_project_id(context,
project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
api_db_groups + main_db_groups)
@base.remotable_classmethod
def get_all(cls, context):
api_db_groups = cls._get_from_db(context)
main_db_groups = db.instance_group_get_all(context)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
api_db_groups + main_db_groups)
@base.remotable_classmethod
def get_counts(cls, context, project_id, user_id=None):
"""Get the counts of InstanceGroup objects in the database.
:param context: The request context for database access
:param project_id: The project_id to count across
:param user_id: The user_id to count across
:returns: A dict containing the project-scoped counts and user-scoped
counts if user_id is specified. For example:
{'project': {'server_groups': <count across project>},
'user': {'server_groups': <count across user>}}
"""
return cls._get_counts_from_db(context, project_id, user_id=user_id)
@db_api.pick_context_manager_reader
def _get_main_instance_groups(context, limit):
return context.session.query(main_models.InstanceGroup).\
options(joinedload('_policies')).\
options(joinedload('_members')).\
filter_by(deleted=0).\
limit(limit).\
all()
def migrate_instance_groups_to_api_db(context, count):
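    # Online data migration helper: pulls up to `count` not-yet-migrated
    # instance groups out of the main database, recreates them in the API
    # database, then removes the originals from the main database. Returns
    # (groups found, groups migrated) so callers can tell when nothing is
    # left to move.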
main_groups = _get_main_instance_groups(context, count)
done = 0
for db_group in main_groups:
group = objects.InstanceGroup(context=context,
user_id=db_group.user_id,
project_id=db_group.project_id,
uuid=db_group.uuid,
name=db_group.name,
policies=db_group.policies,
members=db_group.members)
try:
group._create(skipcheck=True)
except exception.InstanceGroupIdExists:
# NOTE(melwitt): This might happen if there's a failure right after
# the InstanceGroup was created and the migration is re-run.
pass
db_api.instance_group_delete(context, db_group.uuid)
done += 1
return len(main_groups), done
| en | 0.837699 | # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(melwitt): The condition on the join limits the number of members # returned to only those we wish to check as already existing. # TODO(berrange): Remove NovaObjectDictCompat # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Use list/dict helpers for policies, metadetails, members # Version 1.3: Make uuid a non-None real string # Version 1.4: Add add_members() # Version 1.5: Add get_hosts() # Version 1.6: Add get_by_name() # Version 1.7: Deprecate metadetails # Version 1.8: Add count_members_by_user() # Version 1.9: Add get_by_instance_uuid() # Version 1.10: Add hosts field # NOTE(danms): Before 1.7, we had an always-empty # metadetails property Method to help with migration to objects. Converts a database entity to a formal object. # Most of the field names match right now, so be quick # This is needed to handle db models from both the api # database and the main database. In the migration to # the api database, we have removed soft-delete, so # the object fields for delete must be filled in with # default values for db models from the api database. # Delete policies and members # There is no public method provided for removing members because the # user-facing API doesn't allow removal of instance group members. We # need to be able to remove members to address quota races. # NOTE(sbauza): Only hosts could be lazy-loaded right now Save updates to this instance group. # NOTE(sbauza): We do NOT save the set of compute nodes that an # instance group is connected to in this method. Instance groups are # implicitly connected to compute nodes when the # InstanceGroup.add_members() method is called, which adds the mapping # table entries. # So, since the only way to have hosts in the updates is to set that # field explicitly, we prefer to raise an Exception so the developer # knows he has to call obj_reset_changes(['hosts']) right after setting # the field. Refreshes the instance group. # NOTE(danms): This is just for the migration routine, and # can be removed once we're no longer supporting the migration # of instance groups from the main to api database. Get a list of hosts for non-deleted instances in the group This method allows you to get a list of the hosts where instances in this group are currently running. There's also an option to exclude certain instance UUIDs from this calculation. Count the number of instances in a group belonging to a user. # Version 1.0: Initial version # InstanceGroup <= version 1.3 # Version 1.1: InstanceGroup <= version 1.4 # Version 1.2: InstanceGroup <= version 1.5 # Version 1.3: InstanceGroup <= version 1.6 # Version 1.4: InstanceGroup <= version 1.7 # Version 1.5: InstanceGroup <= version 1.8 # Version 1.6: InstanceGroup <= version 1.9 # Version 1.7: InstanceGroup <= version 1.10 # Version 1.8: Added get_counts() for quotas Get the counts of InstanceGroup objects in the database. 
:param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'server_groups': <count across project>}, 'user': {'server_groups': <count across user>}} # NOTE(melwitt): This might happen if there's a failure right after # the InstanceGroup was created and the migration is re-run. | 1.886537 | 2 |
LogisticRegression.py | mccannj9/TensorFlowML | 0 | 6630621 | #! /usr/bin/env python
import numpy as np
def tensorflow_solution_gradient_descent(Xdata, ydata, diff="automatic"):
import tensorflow as tf
# half examples for training
Xt, yt = Xdata[::2], ydata[::2]
# Xt, yt = Xdata, ydata
# half examples for validation
Xv, yv = Xdata[1::2], ydata[1::2]
Xt = np.append(np.ones((Xt.shape[0], 1)), Xt, 1)
Xv = np.append(np.ones((Xv.shape[0], 1)), Xv, 1)
# print(Xt)
niters = 1000000
X = tf.placeholder(dtype=tf.float64, name="X")
y = tf.placeholder(dtype=tf.float64, name="y")
weights = tf.Variable(tf.zeros(shape=(Xt.shape[1],1), dtype=tf.float64), name="weights")
learn_rate = tf.constant(0.00115, dtype=tf.float64, name="learn_rate")
examples = tf.constant(Xt.shape[0], dtype=tf.float64, name="examples")
predictions = tf.matmul(X, weights, name="predictions")
sigmoid = tf.divide(1, 1 + tf.exp(-1*predictions))
res_p1 = tf.matmul(tf.transpose(y), tf.log(sigmoid))
res_p2 = tf.matmul(tf.transpose(1-y), tf.log(1-sigmoid))
residuals = res_p1 + res_p2
cost = (-1/examples)*tf.reduce_sum(residuals)
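    # Binary cross-entropy (negative log-likelihood) averaged over the m
    # training examples:
    #   J(w) = -(1/m) * sum( y*log(h) + (1-y)*log(1-h) ),  h = sigmoid(X w)
    # res_p1 and res_p2 above are the two summands, formed by matmul against
    # y and (1-y) respectively.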
# cost = tf.reduce_mean(tf.log(1+tf.exp(-y*predictions)))
if diff == "automatic":
print("Using automatic differentiation for gradient descent")
cost_gradient = tf.gradients(cost, [weights])[0] # automatic differentiation
else:
print("Using closed-form gradient for gradient descent")
XT = tf.transpose(X, name="XT")
cost_gradient = 1/Xt.shape[0] * tf.matmul(XT, sigmoid-y)
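        # Analytic gradient of the cross-entropy cost,
        #   dJ/dw = (1/m) * X^T (sigmoid(X w) - y),
        # which should agree with what tf.gradients() derives in the
        # "automatic" branch above.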
update_weights = weights.assign(weights - learn_rate * cost_gradient)
init = tf.global_variables_initializer()
config = tf.ConfigProto(
device_count = {'CPU': 1}
)
saver = tf.train.Saver()
graph = tf.get_default_graph()
graph.finalize()
feeder = {X: Xt, y: yt}
with tf.Session(config=config) as sesh:
sesh.run(init)
saver.save(sesh, './logreg_test', global_step=10000)
for i in range(niters):
weights_value = sesh.run(update_weights, feed_dict=feeder)
if i % 1000 == 0:
cgrad = sesh.run(cost_gradient, feed_dict=feeder)
train_cost = sesh.run(cost, feed_dict=feeder)
                # evaluate on the held-out validation half, not on the training feed
                valid_cost = sesh.run(cost, feed_dict={X: Xv, y: yv})
print(
"Iteration %s :: Train Cost %s :: Valid Cost %s" % (
i, train_cost, valid_cost
)
)
# print(cgrad)
print("First few weights = ", weights_value[:5].T)
print("Cost on training data = ", train_cost)
print("Cost on validation data = ", valid_cost)
return weights
def main():
import os, sys
data_dir = os.path.dirname(os.path.abspath(__file__)) + "/data/"
# features_path = data_dir + "Xtrain.txt"
features_path = "/media/jamc/Sticky/MachineLearning/DeepLearning/data/Xtrain.txt"
labels_path = "/media/jamc/Sticky/MachineLearning/DeepLearning/data/Ytrain_zeros.txt"
Xdata = np.loadtxt(features_path)
ydata = np.loadtxt(labels_path)
ydata = ydata.reshape(-1,1)
tf_gradient_wvector = tensorflow_solution_gradient_descent(
Xdata, ydata, diff="automatic"
)
#
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="closed-form"
# )
# # Some additional test data from Coursera course on LogReg
# features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_X.txt"
# # features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_X.txt"
# labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_y.txt"
# # labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_y_negs.txt"
# #
# Xdata = np.loadtxt(features_path)
# ydata = np.loadtxt(labels_path)
# ydata = ydata.reshape(-1,1)
#
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="automatic"
# )
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="closed-form"
# )
if __name__ == '__main__':
main()
| #! /usr/bin/env python
import numpy as np
def tensorflow_solution_gradient_descent(Xdata, ydata, diff="automatic"):
import tensorflow as tf
# half examples for training
Xt, yt = Xdata[::2], ydata[::2]
# Xt, yt = Xdata, ydata
# half examples for validation
Xv, yv = Xdata[1::2], ydata[1::2]
Xt = np.append(np.ones((Xt.shape[0], 1)), Xt, 1)
Xv = np.append(np.ones((Xv.shape[0], 1)), Xv, 1)
# print(Xt)
niters = 1000000
X = tf.placeholder(dtype=tf.float64, name="X")
y = tf.placeholder(dtype=tf.float64, name="y")
weights = tf.Variable(tf.zeros(shape=(Xt.shape[1],1), dtype=tf.float64), name="weights")
learn_rate = tf.constant(0.00115, dtype=tf.float64, name="learn_rate")
examples = tf.constant(Xt.shape[0], dtype=tf.float64, name="examples")
predictions = tf.matmul(X, weights, name="predictions")
sigmoid = tf.divide(1, 1 + tf.exp(-1*predictions))
res_p1 = tf.matmul(tf.transpose(y), tf.log(sigmoid))
res_p2 = tf.matmul(tf.transpose(1-y), tf.log(1-sigmoid))
residuals = res_p1 + res_p2
cost = (-1/examples)*tf.reduce_sum(residuals)
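    # Binary cross-entropy (negative log-likelihood) averaged over the m
    # training examples:
    #   J(w) = -(1/m) * sum( y*log(h) + (1-y)*log(1-h) ),  h = sigmoid(X w)
    # res_p1 and res_p2 above are the two summands, formed by matmul against
    # y and (1-y) respectively.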
# cost = tf.reduce_mean(tf.log(1+tf.exp(-y*predictions)))
if diff == "automatic":
print("Using automatic differentiation for gradient descent")
cost_gradient = tf.gradients(cost, [weights])[0] # automatic differentiation
else:
print("Using closed-form gradient for gradient descent")
XT = tf.transpose(X, name="XT")
cost_gradient = 1/Xt.shape[0] * tf.matmul(XT, sigmoid-y)
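        # Analytic gradient of the cross-entropy cost,
        #   dJ/dw = (1/m) * X^T (sigmoid(X w) - y),
        # which should agree with what tf.gradients() derives in the
        # "automatic" branch above.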
update_weights = weights.assign(weights - learn_rate * cost_gradient)
init = tf.global_variables_initializer()
config = tf.ConfigProto(
device_count = {'CPU': 1}
)
saver = tf.train.Saver()
graph = tf.get_default_graph()
graph.finalize()
feeder = {X: Xt, y: yt}
with tf.Session(config=config) as sesh:
sesh.run(init)
saver.save(sesh, './logreg_test', global_step=10000)
for i in range(niters):
weights_value = sesh.run(update_weights, feed_dict=feeder)
if i % 1000 == 0:
cgrad = sesh.run(cost_gradient, feed_dict=feeder)
train_cost = sesh.run(cost, feed_dict=feeder)
                # evaluate on the held-out validation half, not on the training feed
                valid_cost = sesh.run(cost, feed_dict={X: Xv, y: yv})
print(
"Iteration %s :: Train Cost %s :: Valid Cost %s" % (
i, train_cost, valid_cost
)
)
# print(cgrad)
print("First few weights = ", weights_value[:5].T)
print("Cost on training data = ", train_cost)
print("Cost on validation data = ", valid_cost)
return weights
def main():
import os, sys
data_dir = os.path.dirname(os.path.abspath(__file__)) + "/data/"
# features_path = data_dir + "Xtrain.txt"
features_path = "/media/jamc/Sticky/MachineLearning/DeepLearning/data/Xtrain.txt"
labels_path = "/media/jamc/Sticky/MachineLearning/DeepLearning/data/Ytrain_zeros.txt"
Xdata = np.loadtxt(features_path)
ydata = np.loadtxt(labels_path)
ydata = ydata.reshape(-1,1)
tf_gradient_wvector = tensorflow_solution_gradient_descent(
Xdata, ydata, diff="automatic"
)
#
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="closed-form"
# )
# # Some additional test data from Coursera course on LogReg
# features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_X.txt"
# # features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_X.txt"
# labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_y.txt"
# # labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_y_negs.txt"
# #
# Xdata = np.loadtxt(features_path)
# ydata = np.loadtxt(labels_path)
# ydata = ydata.reshape(-1,1)
#
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="automatic"
# )
# tf_gradient_wvector = tensorflow_solution_gradient_descent(
# Xdata, ydata, diff="closed-form"
# )
if __name__ == '__main__':
main()
| en | 0.672082 | #! /usr/bin/env python # half examples for training # Xt, yt = Xdata, ydata # half examples for validation # print(Xt) # cost = tf.reduce_mean(tf.log(1+tf.exp(-y*predictions))) # automatic differentiation # print(cgrad) # features_path = data_dir + "Xtrain.txt" # # tf_gradient_wvector = tensorflow_solution_gradient_descent( # Xdata, ydata, diff="closed-form" # ) # # Some additional test data from Coursera course on LogReg # features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_X.txt" # # features_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_X.txt" # labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex2/ex2/ex2data1_y.txt" # # labels_path = "/media/jamc/Sticky/ML_Assignments/machine-learning-ex1/week3_functions/data_y_negs.txt" # # # Xdata = np.loadtxt(features_path) # ydata = np.loadtxt(labels_path) # ydata = ydata.reshape(-1,1) # # tf_gradient_wvector = tensorflow_solution_gradient_descent( # Xdata, ydata, diff="automatic" # ) # tf_gradient_wvector = tensorflow_solution_gradient_descent( # Xdata, ydata, diff="closed-form" # ) | 3.121828 | 3 |
src/tumcsbot/plugins/sql.py | jpbernius/tumcsbot | 5 | 6630622 | <gh_stars>1-10
#!/usr/bin/env python3
# See LICENSE file for copyright and license details.
# TUM CS Bot - https://github.com/ro-i/tumcsbot
from inspect import cleandoc
from typing import Any, Dict, Iterable, List, Tuple, Union
from tumcsbot.lib import DB, Response
from tumcsbot.plugin import CommandPlugin, PluginContext
class Source(CommandPlugin):
plugin_name = 'sql'
syntax = cleandoc(
"""
sql <sql_script>
or sql list
"""
)
description = cleandoc(
"""
Access the internal database of the bot read-only.
The `list` command is a shortcut to list all tables.
[administrator/moderator rights needed]
"""
)
_list_sql: str = 'select * from sqlite_master where type = "table"'
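    # Query backing the `list` shortcut: sqlite_master stores one row per
    # schema object, so filtering on type = "table" returns every table
    # definition in the bot's SQLite database.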
def __init__(self, plugin_context: PluginContext) -> None:
super().__init__(plugin_context)
# Get own read-only (!!!) database connection.
self._db = DB(read_only = True)
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
result_sql: List[Tuple[Any, ...]]
if not self.client.user_is_privileged(message['sender_id']):
return Response.admin_err(message)
try:
if message['command'] == 'list':
result_sql = self._db.execute(self._list_sql)
else:
result_sql = self._db.execute(message['command'])
except Exception as e:
return Response.build_message(message, str(e))
result: str = '```text\n' + '\n'.join(map(str, result_sql)) + '\n```'
return Response.build_message(message, result)
| #!/usr/bin/env python3
# See LICENSE file for copyright and license details.
# TUM CS Bot - https://github.com/ro-i/tumcsbot
from inspect import cleandoc
from typing import Any, Dict, Iterable, List, Tuple, Union
from tumcsbot.lib import DB, Response
from tumcsbot.plugin import CommandPlugin, PluginContext
class Source(CommandPlugin):
plugin_name = 'sql'
syntax = cleandoc(
"""
sql <sql_script>
or sql list
"""
)
description = cleandoc(
"""
Access the internal database of the bot read-only.
The `list` command is a shortcut to list all tables.
[administrator/moderator rights needed]
"""
)
_list_sql: str = 'select * from sqlite_master where type = "table"'
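    # Query backing the `list` shortcut: sqlite_master stores one row per
    # schema object, so filtering on type = "table" returns every table
    # definition in the bot's SQLite database.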
def __init__(self, plugin_context: PluginContext) -> None:
super().__init__(plugin_context)
# Get own read-only (!!!) database connection.
self._db = DB(read_only = True)
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
result_sql: List[Tuple[Any, ...]]
if not self.client.user_is_privileged(message['sender_id']):
return Response.admin_err(message)
try:
if message['command'] == 'list':
result_sql = self._db.execute(self._list_sql)
else:
result_sql = self._db.execute(message['command'])
except Exception as e:
return Response.build_message(message, str(e))
result: str = '```text\n' + '\n'.join(map(str, result_sql)) + '\n```'
return Response.build_message(message, result) | en | 0.559838 | #!/usr/bin/env python3 # See LICENSE file for copyright and license details. # TUM CS Bot - https://github.com/ro-i/tumcsbot sql <sql_script> or sql list Access the internal database of the bot read-only. The `list` command is a shortcut to list all tables. [administrator/moderator rights needed] # Get own read-only (!!!) database connection. | 2.310106 | 2 |
music_review/graphql/performer/schema.py | wmalarski/music-reviews | 0 | 6630623 | <gh_stars>0
import graphene
from graphene_django.filter import DjangoFilterConnectionField
from .filters import PerformerFilter
from .mutations import CreatePerformer, UpdatePerformer, DeletePerformer
from .types import PerformerType
class PerformerMutations(graphene.ObjectType):
create_performer = CreatePerformer.Field()
update_performer = UpdatePerformer.Field()
delete_performer = DeletePerformer.Field()
class PerformerQuery(graphene.ObjectType):
performer_set = DjangoFilterConnectionField(
PerformerType, filterset_class=PerformerFilter
)
performer = graphene.relay.Node.Field(PerformerType)
| import graphene
from graphene_django.filter import DjangoFilterConnectionField
from .filters import PerformerFilter
from .mutations import CreatePerformer, UpdatePerformer, DeletePerformer
from .types import PerformerType
class PerformerMutations(graphene.ObjectType):
create_performer = CreatePerformer.Field()
update_performer = UpdatePerformer.Field()
delete_performer = DeletePerformer.Field()
class PerformerQuery(graphene.ObjectType):
performer_set = DjangoFilterConnectionField(
PerformerType, filterset_class=PerformerFilter
)
performer = graphene.relay.Node.Field(PerformerType) | none | 1 | 1.905415 | 2 |
|
ctm_api_client/models/certificate_signing_request_data.py | tadinve/ctm_python_client | 0 | 6630624 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class CertificateSigningRequestData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"organization": "str",
"organization_unit": "str",
"city_locality": "str",
"state_province": "str",
"country": "str",
"email_address": "str",
}
attribute_map = {
"organization": "organization",
"organization_unit": "organizationUnit",
"city_locality": "cityLocality",
"state_province": "stateProvince",
"country": "country",
"email_address": "emailAddress",
}
def __init__(
self,
organization=None,
organization_unit=None,
city_locality=None,
state_province=None,
country=None,
email_address=None,
_configuration=None,
): # noqa: E501
"""CertificateSigningRequestData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._organization = None
self._organization_unit = None
self._city_locality = None
self._state_province = None
self._country = None
self._email_address = None
self.discriminator = None
if organization is not None:
self.organization = organization
if organization_unit is not None:
self.organization_unit = organization_unit
if city_locality is not None:
self.city_locality = city_locality
if state_province is not None:
self.state_province = state_province
if country is not None:
self.country = country
if email_address is not None:
self.email_address = email_address
@property
def organization(self):
"""Gets the organization of this CertificateSigningRequestData. # noqa: E501
The organization HIDDEN # noqa: E501
:return: The organization of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this CertificateSigningRequestData.
The organization HIDDEN # noqa: E501
:param organization: The organization of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._organization = organization
@property
def organization_unit(self):
"""Gets the organization_unit of this CertificateSigningRequestData. # noqa: E501
The organizationUnit HIDDEN # noqa: E501
:return: The organization_unit of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._organization_unit
@organization_unit.setter
def organization_unit(self, organization_unit):
"""Sets the organization_unit of this CertificateSigningRequestData.
The organizationUnit HIDDEN # noqa: E501
:param organization_unit: The organization_unit of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._organization_unit = organization_unit
@property
def city_locality(self):
"""Gets the city_locality of this CertificateSigningRequestData. # noqa: E501
The cityLocality HIDDEN # noqa: E501
:return: The city_locality of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._city_locality
@city_locality.setter
def city_locality(self, city_locality):
"""Sets the city_locality of this CertificateSigningRequestData.
The cityLocality HIDDEN # noqa: E501
:param city_locality: The city_locality of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._city_locality = city_locality
@property
def state_province(self):
"""Gets the state_province of this CertificateSigningRequestData. # noqa: E501
The stateProvince HIDDEN # noqa: E501
:return: The state_province of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._state_province
@state_province.setter
def state_province(self, state_province):
"""Sets the state_province of this CertificateSigningRequestData.
The stateProvince HIDDEN # noqa: E501
:param state_province: The state_province of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._state_province = state_province
@property
def country(self):
"""Gets the country of this CertificateSigningRequestData. # noqa: E501
The country HIDDEN # noqa: E501
:return: The country of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this CertificateSigningRequestData.
The country HIDDEN # noqa: E501
:param country: The country of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._country = country
@property
def email_address(self):
"""Gets the email_address of this CertificateSigningRequestData. # noqa: E501
The emailAddress HIDDEN # noqa: E501
:return: The email_address of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this CertificateSigningRequestData.
The emailAddress HIDDEN # noqa: E501
:param email_address: The email_address of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._email_address = email_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CertificateSigningRequestData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificateSigningRequestData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CertificateSigningRequestData):
return True
return self.to_dict() != other.to_dict()
| # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class CertificateSigningRequestData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"organization": "str",
"organization_unit": "str",
"city_locality": "str",
"state_province": "str",
"country": "str",
"email_address": "str",
}
attribute_map = {
"organization": "organization",
"organization_unit": "organizationUnit",
"city_locality": "cityLocality",
"state_province": "stateProvince",
"country": "country",
"email_address": "emailAddress",
}
def __init__(
self,
organization=None,
organization_unit=None,
city_locality=None,
state_province=None,
country=None,
email_address=None,
_configuration=None,
): # noqa: E501
"""CertificateSigningRequestData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._organization = None
self._organization_unit = None
self._city_locality = None
self._state_province = None
self._country = None
self._email_address = None
self.discriminator = None
if organization is not None:
self.organization = organization
if organization_unit is not None:
self.organization_unit = organization_unit
if city_locality is not None:
self.city_locality = city_locality
if state_province is not None:
self.state_province = state_province
if country is not None:
self.country = country
if email_address is not None:
self.email_address = email_address
@property
def organization(self):
"""Gets the organization of this CertificateSigningRequestData. # noqa: E501
The organization HIDDEN # noqa: E501
:return: The organization of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this CertificateSigningRequestData.
The organization HIDDEN # noqa: E501
:param organization: The organization of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._organization = organization
@property
def organization_unit(self):
"""Gets the organization_unit of this CertificateSigningRequestData. # noqa: E501
The organizationUnit HIDDEN # noqa: E501
:return: The organization_unit of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._organization_unit
@organization_unit.setter
def organization_unit(self, organization_unit):
"""Sets the organization_unit of this CertificateSigningRequestData.
The organizationUnit HIDDEN # noqa: E501
:param organization_unit: The organization_unit of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._organization_unit = organization_unit
@property
def city_locality(self):
"""Gets the city_locality of this CertificateSigningRequestData. # noqa: E501
The cityLocality HIDDEN # noqa: E501
:return: The city_locality of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._city_locality
@city_locality.setter
def city_locality(self, city_locality):
"""Sets the city_locality of this CertificateSigningRequestData.
The cityLocality HIDDEN # noqa: E501
:param city_locality: The city_locality of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._city_locality = city_locality
@property
def state_province(self):
"""Gets the state_province of this CertificateSigningRequestData. # noqa: E501
The stateProvince HIDDEN # noqa: E501
:return: The state_province of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._state_province
@state_province.setter
def state_province(self, state_province):
"""Sets the state_province of this CertificateSigningRequestData.
The stateProvince HIDDEN # noqa: E501
:param state_province: The state_province of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._state_province = state_province
@property
def country(self):
"""Gets the country of this CertificateSigningRequestData. # noqa: E501
The country HIDDEN # noqa: E501
:return: The country of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this CertificateSigningRequestData.
The country HIDDEN # noqa: E501
:param country: The country of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._country = country
@property
def email_address(self):
"""Gets the email_address of this CertificateSigningRequestData. # noqa: E501
The emailAddress HIDDEN # noqa: E501
:return: The email_address of this CertificateSigningRequestData. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this CertificateSigningRequestData.
The emailAddress HIDDEN # noqa: E501
:param email_address: The email_address of this CertificateSigningRequestData. # noqa: E501
:type: str
"""
self._email_address = email_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CertificateSigningRequestData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificateSigningRequestData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CertificateSigningRequestData):
return True
return self.to_dict() != other.to_dict()
| en | 0.50604 | # coding: utf-8 Control-M Services Provides access to BMC Control-M Services # noqa: E501 OpenAPI spec version: 9.20.215 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 CertificateSigningRequestData - a model defined in Swagger # noqa: E501 Gets the organization of this CertificateSigningRequestData. # noqa: E501 The organization HIDDEN # noqa: E501 :return: The organization of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the organization of this CertificateSigningRequestData. The organization HIDDEN # noqa: E501 :param organization: The organization of this CertificateSigningRequestData. # noqa: E501 :type: str Gets the organization_unit of this CertificateSigningRequestData. # noqa: E501 The organizationUnit HIDDEN # noqa: E501 :return: The organization_unit of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the organization_unit of this CertificateSigningRequestData. The organizationUnit HIDDEN # noqa: E501 :param organization_unit: The organization_unit of this CertificateSigningRequestData. # noqa: E501 :type: str Gets the city_locality of this CertificateSigningRequestData. # noqa: E501 The cityLocality HIDDEN # noqa: E501 :return: The city_locality of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the city_locality of this CertificateSigningRequestData. The cityLocality HIDDEN # noqa: E501 :param city_locality: The city_locality of this CertificateSigningRequestData. # noqa: E501 :type: str Gets the state_province of this CertificateSigningRequestData. # noqa: E501 The stateProvince HIDDEN # noqa: E501 :return: The state_province of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the state_province of this CertificateSigningRequestData. The stateProvince HIDDEN # noqa: E501 :param state_province: The state_province of this CertificateSigningRequestData. # noqa: E501 :type: str Gets the country of this CertificateSigningRequestData. # noqa: E501 The country HIDDEN # noqa: E501 :return: The country of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the country of this CertificateSigningRequestData. The country HIDDEN # noqa: E501 :param country: The country of this CertificateSigningRequestData. # noqa: E501 :type: str Gets the email_address of this CertificateSigningRequestData. # noqa: E501 The emailAddress HIDDEN # noqa: E501 :return: The email_address of this CertificateSigningRequestData. # noqa: E501 :rtype: str Sets the email_address of this CertificateSigningRequestData. The emailAddress HIDDEN # noqa: E501 :param email_address: The email_address of this CertificateSigningRequestData. # noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.526769 | 2 |
Taschenrechner.py | Tasm-Devil/Taschenrechner | 1 | 6630625 | <reponame>Tasm-Devil/Taschenrechner
# The MIT License (MIT)
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`Taschenrechner.py`
====================================================
Simple calculator made as an example for PyQt5. It uses the power of a
finite state machine to parse every new symbol typed in.
* Author(s): <NAME>
"""
parse_string = ""
state = 'A'
open_brackets = 0
digits = [chr(x) for x in range(ord('1'), ord('9') + 1)]
operations = ['÷', '×', '+']
alphabet = digits + operations + ['0', '(', ')', ',', '−']
# A dict of string:list of (tuples of (list,string))
fsm_transition_table = {
# state ,input , next_state
'A': [(['('], 'A'),
(digits, 'B'),
(['−'], 'C'),
(['0'], 'D'),
],
'B': [(operations, 'A'),
(digits + ['0'], 'B'),
(['−'], 'C'),
([','], 'E'),
([')'], 'G'),
],
'C': [(['('], 'A'),
(digits, 'B'),
(['0'], 'D')
],
'D': [(operations, 'A'),
(['−'], 'C'),
([','], 'E'),
([')'], 'G'),
],
'E': [(digits + ['0'], 'F')
],
'F': [(operations, 'A'),
(['−'], 'C'),
(digits + ['0'], 'F'),
([')'], 'G'),
],
'G': [(operations, 'A'),
(['−'], 'C'),
([')'], 'G'),
],
}
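# How to read the table: fsm_transition_table[state] is a list of
# (accepted input symbols, next state) pairs. Example trace for the input
# "−(3,": start in 'A', then '−' -> 'C', '(' -> 'A', '3' -> 'B', ',' -> 'E'.
# A symbol with no entry for the current state (e.g. ')' while in 'A') is
# rejected by new_symbol() below.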
def clear_text():
global parse_string, state, open_brackets
parse_string = ""
state = 'A'
open_brackets = 0
ui.pTE_display.setPlainText("Eingabe bitte")
def new_symbol(symbol):
global parse_string, state, alphabet, fsm_transition_table, open_brackets
if symbol not in alphabet:
print("Symbol ist nicht Teil des Eingabealphabets: " + symbol + " !")
return
if symbol in {')'} and open_brackets < 1:
print("Eine Klammer bitte immer erst öffnen!")
return
# Liste aller möglichen Eingabe-Symbole des aktuellen Zustands
inputs = [x[0] for x in fsm_transition_table[state]]
# Liste aller möglichen Zustandsübergänge des aktuellen Zustands
next_states = [x[1] for x in fsm_transition_table[state]]
# ist in der Liste 'inputs' einmal das jetzige Eingabe-Symbol vorhanden?
if True in [symbol in x for x in inputs]:
if symbol in {'('}:
open_brackets += 1
if symbol in {')'}:
open_brackets -= 1
# An der Position no in der Liste der Zustandsübergänge des aktuellen
# Zustands, wurde das aktuelle Eingabe-Symbol gefunden.
no = [symbol in x for x in inputs].index(True)
state = next_states[no]
print("Zustand: " + state)
parse_string += symbol
ui.pTE_display.setPlainText(parse_string)
else:
print("Das Symbol " + symbol + " ist hier nicht erlaubt!")
def evaluate():
global parse_string, state, open_brackets
if parse_string:
try:
result = eval(parse_string.replace(",", ".").replace("÷", "/").replace("×", "*").replace("−", "-"))
parse_string = str(result)
state = 'G'
open_brackets = 0
ui.pTE_display.setPlainText(str(result))
except ZeroDivisionError:
clear_text()
ui.pTE_display.setPlainText("Division durch 0 nicht erlaubt")
from PyQt5.QtWidgets import QApplication
from PyQt5.uic import loadUi
from sys import argv, exit
app = QApplication(argv)
ui = loadUi("form.ui")
ui.pB_ret.clicked.connect(evaluate)
ui.pB_del.clicked.connect(clear_text)
ui.pB_Z0.clicked.connect(lambda: new_symbol("0"))
ui.pB_Z1.clicked.connect(lambda: new_symbol("1"))
ui.pB_Z2.clicked.connect(lambda: new_symbol("2"))
ui.pB_Z3.clicked.connect(lambda: new_symbol("3"))
ui.pB_Z4.clicked.connect(lambda: new_symbol("4"))
ui.pB_Z5.clicked.connect(lambda: new_symbol("5"))
ui.pB_Z6.clicked.connect(lambda: new_symbol("6"))
ui.pB_Z7.clicked.connect(lambda: new_symbol("7"))
ui.pB_Z8.clicked.connect(lambda: new_symbol("8"))
ui.pB_Z9.clicked.connect(lambda: new_symbol("9"))
ui.pB_comma.clicked.connect(lambda: new_symbol(","))
ui.pB_div.clicked.connect(lambda: new_symbol("÷"))
ui.pB_mul.clicked.connect(lambda: new_symbol("×"))
ui.pB_sub.clicked.connect(lambda: new_symbol("−"))
ui.pB_add.clicked.connect(lambda: new_symbol("+"))
ui.pB_ob.clicked.connect(lambda: new_symbol("("))
ui.pB_cb.clicked.connect(lambda: new_symbol(")"))
ui.show()
exit(app.exec_())
| # The MIT License (MIT)
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`Taschenrechner.py`
====================================================
Simple calculator made as an example for PyQt5. It uses the power of a
finite state machine to parse every new symbol typed in.
* Author(s): <NAME>
"""
parse_string = ""
state = 'A'
open_brackets = 0
digits = [chr(x) for x in range(ord('1'), ord('9') + 1)]
operations = ['÷', '×', '+']
alphabet = digits + operations + ['0', '(', ')', ',', '−']
# A dict of string:list of (tuples of (list,string))
fsm_transition_table = {
# state ,input , next_state
'A': [(['('], 'A'),
(digits, 'B'),
(['−'], 'C'),
(['0'], 'D'),
],
'B': [(operations, 'A'),
(digits + ['0'], 'B'),
(['−'], 'C'),
([','], 'E'),
([')'], 'G'),
],
'C': [(['('], 'A'),
(digits, 'B'),
(['0'], 'D')
],
'D': [(operations, 'A'),
(['−'], 'C'),
([','], 'E'),
([')'], 'G'),
],
'E': [(digits + ['0'], 'F')
],
'F': [(operations, 'A'),
(['−'], 'C'),
(digits + ['0'], 'F'),
([')'], 'G'),
],
'G': [(operations, 'A'),
(['−'], 'C'),
([')'], 'G'),
],
}
def clear_text():
global parse_string, state, open_brackets
parse_string = ""
state = 'A'
open_brackets = 0
ui.pTE_display.setPlainText("Eingabe bitte")
def new_symbol(symbol):
global parse_string, state, alphabet, fsm_transition_table, open_brackets
if symbol not in alphabet:
print("Symbol ist nicht Teil des Eingabealphabets: " + symbol + " !")
return
if symbol in {')'} and open_brackets < 1:
print("Eine Klammer bitte immer erst öffnen!")
return
# Liste aller möglichen Eingabe-Symbole des aktuellen Zustands
inputs = [x[0] for x in fsm_transition_table[state]]
# Liste aller möglichen Zustandsübergänge des aktuellen Zustands
next_states = [x[1] for x in fsm_transition_table[state]]
# ist in der Liste 'inputs' einmal das jetzige Eingabe-Symbol vorhanden?
if True in [symbol in x for x in inputs]:
if symbol in {'('}:
open_brackets += 1
if symbol in {')'}:
open_brackets -= 1
# An der Position no in der Liste der Zustandsübergänge des aktuellen
# Zustands, wurde das aktuelle Eingabe-Symbol gefunden.
no = [symbol in x for x in inputs].index(True)
state = next_states[no]
print("Zustand: " + state)
parse_string += symbol
ui.pTE_display.setPlainText(parse_string)
else:
print("Das Symbol " + symbol + " ist hier nicht erlaubt!")
def evaluate():
global parse_string, state, open_brackets
if parse_string:
try:
result = eval(parse_string.replace(",", ".").replace("÷", "/").replace("×", "*").replace("−", "-"))
parse_string = str(result)
state = 'G'
open_brackets = 0
ui.pTE_display.setPlainText(str(result))
except ZeroDivisionError:
clear_text()
ui.pTE_display.setPlainText("Division durch 0 nicht erlaubt")
from PyQt5.QtWidgets import QApplication
from PyQt5.uic import loadUi
from sys import argv, exit
app = QApplication(argv)
ui = loadUi("form.ui")
ui.pB_ret.clicked.connect(evaluate)
ui.pB_del.clicked.connect(clear_text)
ui.pB_Z0.clicked.connect(lambda: new_symbol("0"))
ui.pB_Z1.clicked.connect(lambda: new_symbol("1"))
ui.pB_Z2.clicked.connect(lambda: new_symbol("2"))
ui.pB_Z3.clicked.connect(lambda: new_symbol("3"))
ui.pB_Z4.clicked.connect(lambda: new_symbol("4"))
ui.pB_Z5.clicked.connect(lambda: new_symbol("5"))
ui.pB_Z6.clicked.connect(lambda: new_symbol("6"))
ui.pB_Z7.clicked.connect(lambda: new_symbol("7"))
ui.pB_Z8.clicked.connect(lambda: new_symbol("8"))
ui.pB_Z9.clicked.connect(lambda: new_symbol("9"))
ui.pB_comma.clicked.connect(lambda: new_symbol(","))
ui.pB_div.clicked.connect(lambda: new_symbol("÷"))
ui.pB_mul.clicked.connect(lambda: new_symbol("×"))
ui.pB_sub.clicked.connect(lambda: new_symbol("−"))
ui.pB_add.clicked.connect(lambda: new_symbol("+"))
ui.pB_ob.clicked.connect(lambda: new_symbol("("))
ui.pB_cb.clicked.connect(lambda: new_symbol(")"))
ui.show()
exit(app.exec_()) | en | 0.468926 | # The MIT License (MIT) # # Copyright (c) 2018 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. `Taschenrechner.py` ==================================================== Simple calculator made as an example for PyQt4. It uses the power of a finite state machine to parse every new symbol typed in. * Author(s): <NAME> # A dict of string:list of (tuples of (list,string)) # state ,input , next_state # Liste aller möglichen Eingabe-Symbole des aktuellen Zustands # Liste aller möglichen Zustandsübergänge des aktuellen Zustands # ist in der Liste 'inputs' einmal das jetzige Eingabe-Symbol vorhanden? # An der Position no in der Liste der Zustandsübergänge des aktuellen # Zustands, wurde das aktuelle Eingabe-Symbol gefunden. | 1.674703 | 2 |
main.py | Raddmason99/EconomyDiscord-Bot | 22 | 6630626 |
'''
_____ ____ ___ ____ ___ _____ ____
| ____/ ___/ _ \| __ ) / _ \_ _|_ _|___ \
| _|| | | | | | _ \| | | || | \ \ / / __) |
| |__| |__| |_| | |_) | |_| || | \ V / / __/
|_____\____\___/|____/ \___/ |_| \_/ |_____|
- A new feature-packed Discord economy bot.
Language(s) : Python, SQL, JSON
Licensed : Not Licensed
Contributors : <NAME>
'''
import discord
from discord.utils import get
from discord.ext import commands
from discord import Intents
from datetime import datetime
import sqlite3
import requests
import random
import json
import time
import os
class BotData():
prefix = json.load(open('data\\config.json'))['prefix']
token = json.load(open('data\\config.json'))['token']
xpmsg = json.load(open('data\\config.json'))['xpmsg']
xplvl = json.load(open('data\\config.json'))['xplvl']
spam = json.load(open('data\\config.json'))['spamtime']
swear = json.load(open('data\\config.json'))['allowswear']
marksforgrade = json.load(open('data\\config.json'))['marksforgrade']
colour = json.load(open('data\\config.json'))['colour']
moneysymbl = json.load(open('data\\config.json'))['moneysymbl']
base = sqlite3.connect('data\\userdata.db')
item = sqlite3.connect('data\\useritems.db')
swearlist = json.load(open('data\\bannedWords.json'))
educationfetch = random.choice(json.load(open('data\\education.json'))['questions'])
def status():
members = 0
guilds = 0
for x in client.get_all_members(): members += 1
for x in client.guilds: guilds += 1
status = json.load(open('data\\config.json'))['status'].replace('$total_members$', str(members)).replace('$prefix$', json.load(open('data\\config.json'))['prefix']).replace('$total_guilds$', str(guilds))
return status
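# Hedged configuration sketch (added for illustration; not part of the original repo):
# the attributes above imply that data/config.json exposes at least these keys. The
# example values below are assumptions, and "token" must be a real Discord bot token.
# {
#     "prefix": "!", "token": "<bot-token>",
#     "status": "watching $total_members$ users | $prefix$help",
#     "xpmsg": 5, "xplvl": 100, "spamtime": 3, "allowswear": false,
#     "marksforgrade": 5, "colour": [52, 152, 219], "moneysymbl": "$",
#     "lvlmsg": "$name$ levelled up from $last_level$ to $level$!"
# }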
client = commands.Bot(command_prefix=BotData.prefix, intents=Intents.all(), help_command=None)
#-Career
@client.command()
async def learn(ctx, *, anwser=None):
data = ()
embedVar = discord.Embed(title=discord.Embed.Empty, description=f'** **', color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]))
if anwser == None:
with BotData.base as conn:
c = conn.cursor()
c.execute('''SELECT * FROM members
WHERE _id = :a''',
{'a':ctx.author.id})
data = c.fetchone()
try:
if data[12] + 86400 > time.time():
if data[13] == None:
embedVar.add_field(name=f"**You have already learned to much today**", value='** **', inline=True)
else:
embedVar.add_field(name=f"**{data[13]}**", value='True or False', inline=True)
else:
datalearn = BotData.educationfetch
with BotData.base as conn:
c.execute("""UPDATE members SET _eduCool = :a, _eduQues = :b, _eduAnw = :c
WHERE _id = :d""",
                        {'a': int(time.time()), 'b':datalearn['question'], 'c':datalearn['correct_answer'] ,'d': ctx.author.id})  # use ctx.author.id: `message` is not defined inside this command
conn.commit()
embedVar.add_field(name=f"**{datalearn['question']}**", value='True or False', inline=True)
except:
datalearn = BotData.educationfetch
with BotData.base as conn:
c.execute("""UPDATE members SET _eduCool = :a, _eduQues = :b, _eduAnw = :c
WHERE _id = :d""",
{'a': int(time.time()), 'b':datalearn['question'], 'c':datalearn['correct_answer'] ,'d': ctx.author.id})
conn.commit()
embedVar.add_field(name=f"**{datalearn['question']}**", value='True or False', inline=True)
else:
with BotData.base as conn:
c = conn.cursor()
c.execute('''SELECT * FROM members
WHERE _id = :a''',
{'a':ctx.author.id})
data = c.fetchone()
if str(data[14]) == '0':
embedVar.add_field(name=f"**You have already learned to much today**", value='** **', inline=True)
elif str(anwser).lower() == str(data[14]).lower():
with BotData.base as conn:
c.execute("""UPDATE members SET _eduPoint = _eduPoint + 1
WHERE _id = :d""",
{'d': ctx.author.id})
conn.commit()
if data[11] >= BotData.marksforgrade:
embedVar.add_field(name=f"**Correct +1 grade**", value='** **', inline=False)
with BotData.base as conn:
c.execute("""UPDATE members SET _eduPoint = 0, _eduLevel = _eduLevel + 1
WHERE _id = :d""",
{'d': ctx.author.id})
conn.commit()
else:
embedVar.add_field(name=f"**Correct +1 mark**", value='** **', inline=True)
else:
embedVar.add_field(name=f"**Incorrect, better luck next time**", value='** **', inline=True)
with BotData.base as conn:
c.execute("""UPDATE members SET _eduQues = :b, _eduAnw = :c
WHERE _id = :d""",
{'b':None, 'c':False ,'d': ctx.author.id})
conn.commit()
await ctx.send(embed=embedVar)
#-Economy
@client.command()
async def shop(ctx):
data2 = json.load(open('data\\shop.json'))
if time.time() - data2['time'] >= 86400:
items = []
raw_data = json.load(open('data\\items.json'))
for x in range(9):
temp = random.choice(raw_data)
price = random.randint(temp['price']['start'],temp['price']['end'])
rarity = temp['price']['start'] / price
items.append({"id": x,"name": temp['name'],"emoji": temp['emoji'],"price": price,"rarity": rarity})
data2['content'] = items
data2['time'] = time.time()
json.dump(data2, open('data\\shop.json', 'w'), indent=2)
data = json.load(open('data\\shop.json'))
embedVar = discord.Embed(title=discord.Embed.Empty, description=f'** **', color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]), timestamp=datetime.fromtimestamp(data['time'] + 86400))
embedVar.set_author(name=f"{ctx.author} | Shop Menu", url=discord.Embed.Empty, icon_url='https://i.imgur.com/lOShv1G.png')
embedVar.set_footer(text='Shop Refreshes :', icon_url=discord.Embed.Empty)
for x in data['content']:
rarity = ''
if 0.7 <= x['rarity']: rarity = ':first_place:'
elif 0.5 <= x['rarity'] < 0.7: rarity = ':second_place:'
else: rarity = ':third_place:'
embedVar.add_field(name=f"{x['emoji']}**{x['name']}**", value=f"**ID :**{x['id']}\n**Price :** {BotData.moneysymbl} {x['price']}\n**Rarity:** {rarity}*{round(x['rarity'], 2)}*", inline=True)
await ctx.send(embed=embedVar)
#-UserInformation
@client.command(aliases=['info', 'stat'])
async def stats(ctx):
with BotData.base as conn:
c = conn.cursor()
c.execute('''SELECT * FROM members
WHERE _id = :a''',
{'a':ctx.author.id})
data = c.fetchone()
embedVar = discord.Embed(title=discord.Embed.Empty, description='** **', color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]))
embedVar.set_author(name=f"{ctx.author} | Stats Menu", url=discord.Embed.Empty, icon_url=ctx.author.avatar_url)
embedVar.add_field(name="**Economy**", value=f":credit_card: **Bank :** {BotData.moneysymbl} {data[6]}\n:moneybag: **Wallet:** {BotData.moneysymbl} {data[5]}", inline=True)
embedVar.add_field(name="**Career**", value=f":tools: **Job :**{data[8]}\n:money_with_wings: **Wage :** {BotData.moneysymbl} {data[9]}", inline=True)
embedVar.add_field(name="** **", value='** **', inline=False)
embedVar.add_field(name="**Education**", value=f":scroll: **Degrees :**{data[10]}\n:white_check_mark: **Marks :** {BotData.moneysymbl} {data[11]}", inline=True)
embedVar.add_field(name="**Stats**", value=f":medal: **Level :**{data[3]} *({data[4]}/{BotData.xplvl})*\n:speech_balloon: **Messages :** {data[1]}", inline=True)
await ctx.author.send(embed=embedVar)
@client.command(aliases=['pocket'])
async def backpack(ctx):
with BotData.item as conn:
c = conn.cursor()
c.execute(f'SELECT * FROM _{ctx.author.id}')
data = c.fetchall()
embedVar = discord.Embed(title=discord.Embed.Empty, description='** **', color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]))
embedVar.set_author(name=f"{ctx.author} | Backpack", url=discord.Embed.Empty, icon_url=ctx.author.avatar_url)
if data == []:
embedVar.add_field(name="Your Backpack is empty!", value=discord.Embed.Empty, inline=False)
else:
for x in data:
rarity = ''
if 0.7 <= x[4]: rarity = ':first_place:'
elif 0.5 <= x[4] < 0.7: rarity = ':second_place:'
else: rarity = ':third_place:'
embedVar.add_field(name=f"{x[2]}**{x[1]}**", value=f"**Price :** {BotData.moneysymbl} {x[3]}\n**Rarity:** {rarity}*{round(x[4], 2)}*", inline=True)
await ctx.author.send(embed=embedVar)
#-Moderation
#-Utilities
@client.command()
async def help(ctx):
embedVar = discord.Embed(title=discord.Embed.Empty, description=discord.Embed.Empty, color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]))
embedVar.set_author(name=f"{str(client.user)[:-5]} | Help Menu", url=discord.Embed.Empty, icon_url='https://i.imgur.com/NxONR7a.png')
embedVar.add_field(name="Profile", value=f"`{BotData.prefix}stats`\n`{BotData.prefix}backpack`", inline=False)
embedVar.add_field(name="Economy", value=f"`{BotData.prefix}shop`\n`{BotData.prefix}buy [ID]`")
embedVar.add_field(name="Career", value=f"`{BotData.prefix}learn`\n`{BotData.prefix}learn [anwser]`", inline=False)
embedVar.add_field(name="Moderation", value=f"`{BotData.prefix}warn [@user] [reason]`\n`{BotData.prefix}purge [amount]`\n`{BotData.prefix}kick [@user] [@reason]`\n`{BotData.prefix}ban [@user] [@reason]`", inline=False)
await ctx.author.send(embed=embedVar)
@client.command()
async def ping(ctx):
ms = int(client.latency * 1000)
if ms < 150 : rate = [23, 235, 23]
elif 150 < ms < 250: rate = [235, 102, 30]
else: rate = [235, 47, 26]
await ctx.send(embed=discord.Embed(title=f"Pong! {ms} ms", description=discord.Embed.Empty, color=discord.Colour.from_rgb(rate[0], rate[1], rate[2])))
#-Events
@client.event
async def on_message(message):
swearfound = False
if not BotData.swear:
for x in BotData.swearlist:
if x in message.content:
swearfound = True
if message.content.startswith(BotData.prefix):
if isinstance(message.channel, discord.channel.DMChannel):pass
else:await message.delete()
else:
if isinstance(message.channel, discord.channel.DMChannel):pass
else:
if not swearfound:
with BotData.base as conn:
c = conn.cursor()
c.execute('''SELECT _msgCool FROM members
WHERE _id = :a''',
{'a':message.author.id})
times = str(c.fetchone()).replace(',','').replace('(','').replace(')','')
if times == "None":
c.execute("""UPDATE members SET _msgCool = :a
WHERE _id = :b""",
{'a': int(time.time()), 'b': message.author.id})
c.execute("""UPDATE members SET _xp = _xp + :a
WHERE _id = :b""",
{'a': BotData.xpmsg, 'b': message.author.id})
else:
cooldown = int(time.time()) - int(times)
if cooldown > BotData.spam:
c.execute("""UPDATE members SET _xp = _xp + :a
WHERE _id = :b""",
{'a': BotData.xpmsg, 'b': message.author.id})
c.execute("""UPDATE members SET _msgCool = :a
WHERE _id = :b""",
{'a': int(time.time()), 'b': message.author.id})
else:
if isinstance(message.channel, discord.channel.DMChannel):pass
else:await message.delete()
c.execute("""UPDATE members SET _messages = _messages + :a
WHERE _id = :b""",
{'a': 1, 'b': message.author.id})
c.execute('''SELECT _xp FROM members
WHERE _id = :a''',
{'a':message.author.id})
if int(str(c.fetchone()).replace('(','').replace(')','').replace(',','')) >= BotData.xplvl:
c.execute("""UPDATE members SET _level = _level + :a
WHERE _id = :b""",
{'a': 1, 'b': message.author.id})
c.execute("""UPDATE members SET _xp = :a
WHERE _id = :b""",
{'a': 0, 'b': message.author.id})
c.execute('''SELECT _level FROM members
WHERE _id = :a''',
{'a':message.author.id})
level = str(c.fetchone()).replace('(','').replace(')','').replace(',','')
levelup = json.load(open('data\\config.json'))['lvlmsg'].replace('$level$', str(level)).replace('$name$', message.author.display_name).replace('$last_level$', str(int(level)-1))
embedVar = discord.Embed(title=levelup, description=discord.Embed.Empty, color=discord.Colour.from_rgb(BotData.colour[0],BotData.colour[1],BotData.colour[2]))
embedVar.set_author(name=f"{str(client.user)[:-5]} | Level Up", url=discord.Embed.Empty, icon_url=message.author.avatar_url)
await message.author.send(embed=embedVar)
if swearfound:
if isinstance(message.channel, discord.channel.DMChannel):pass
else:await message.delete()
try:
conn.commit()
except:
pass
await client.process_commands(message)
@client.event # When the member joins add them to the system
async def on_member_join(member):
with BotData.base as conn:
c = conn.cursor()
        all_users=[]
        c.execute('SELECT _id FROM members')  # query existing member ids first (this SELECT was missing)
        for x in c.fetchall(): all_users.append(str(x).replace(',','').replace('(','').replace(')',''))
if str(member.id) in all_users:
pass
else:
c.execute('INSERT INTO members VALUES (:id ,:messages, :spam ,:level, :xp ,:money ,:bank, :items ,:job, :jobWage ,:eduLevel ,:eduPoint ,:eduCool ,:eduQues ,:eduAnw)',
{'id':member.id, 'messages':0, 'spam':None, 'level':0, 'xp':0, 'money':0, 'bank':0, 'items': 0, 'job':None, 'jobWage':0, 'eduLevel':0, 'eduPoint':0, 'eduCool':None, 'eduQues':None, 'eduAnw':False})
conn.commit()
conn.close()
conn2 = sqlite3.connect('data\\useritems.db')
c2 = conn2.cursor()
c2.execute(f"""
CREATE TABLE _{member.id} (
_id integer(18) NOT NULL,
_name varchar(32) NOT NULL,
_emoji varchar(32) NOT NULL,
_price integer NOT NULL,
_rarity FLOAT NOT NULL,
_amount integer NOT NULL
)
""")
conn2.commit()
conn2.close()
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=BotData.status()))
@client.event
async def on_guild_join(guild):
conn = sqlite3.connect('data\\userdata.db')
c = conn.cursor()
db_data = []
discord_users = []
entry_users = []
entry_ids=[]
c.execute('SELECT _id FROM members')
for x in c.fetchall():
db_data.append(str(x).replace(',','').replace('(','').replace(')',''))
for x in client.get_all_members():
if str(x.id) in discord_users:pass
else:discord_users.append(str(x.id))
for x in discord_users:
if x not in db_data:
entry_users.append((x, 0, None, 0, 0, 0, 0, 0, None, 0, 0, 0, None, None, False))
entry_ids.append(x)
c.executemany('INSERT INTO members VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
entry_users)
for x in entry_ids:
conn2 = sqlite3.connect('data\\useritems.db')
c2 = conn2.cursor()
c2.execute(f"""
CREATE TABLE _{x} (
_id integer(18) NOT NULL,
_name varchar(32) NOT NULL,
_emoji varchar(32) NOT NULL,
_price integer NOT NULL,
_rarity FLOAT NOT NULL,
_amount integer NOT NULL
)""")
conn2.commit()
conn2.close()
conn.commit()
conn.close()
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=BotData.status()))
@client.event
async def on_ready():
conn = sqlite3.connect('data\\userdata.db')
c = conn.cursor()
db_data = []
discord_users = []
entry_users = []
entry_ids=[]
c.execute('SELECT _id FROM members')
for x in c.fetchall():
db_data.append(str(x).replace(',','').replace('(','').replace(')',''))
for x in client.get_all_members():
if str(x.id) in discord_users:pass
else:discord_users.append(str(x.id))
for x in discord_users:
if x not in db_data:
entry_users.append((x, 0, None, 0, 0, 0, 0, 0, None, 0, 0, 0, None, None, False))
entry_ids.append(x)
c.executemany('INSERT INTO members VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
entry_users)
for x in entry_ids:
conn2 = sqlite3.connect('data\\useritems.db')
c2 = conn2.cursor()
c2.execute(f"""
CREATE TABLE _{x} (
_id integer(18) NOT NULL,
_name varchar(32) NOT NULL,
_emoji varchar(32) NOT NULL,
_price integer NOT NULL,
_rarity FLOAT NOT NULL,
_amount integer NOT NULL
)""")
conn2.commit()
conn2.close()
conn.commit()
conn.close()
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=BotData.status()))
print("ready")
if __name__ == "__main__":
client.run(BotData.token)
src/data/character.py | SvenElyes/Textanalytics | 1 | 6630627 | from src.data.relation import Relation
class Character:
"""Simple class to store and retrieve information about a character. Specifially: name, aliases for the name, relations to other characters, keywords."""
def __init__(self, name):
self.name = name
self.alias = list()
self.relations = list()
self.most_frequent_words = list()
def add_most_frequent_words(self, wordlist):
self.most_frequent_words.extend(wordlist)
def add_most_frequent_word(self, word):
self.most_frequent_words.append(word)
def set_most_frequent_words(self, wordlist):
"""erases existing wordlist and sets wordlist"""
self.most_frequent_words = wordlist
def add_relation(self, relation):
self.relations.append(relation)
def get_relations(self):
return self.relations
def get_most_frequent_words(self):
return self.most_frequent_words
def get_name(self):
return self.name
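# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal demonstration of the Character API defined above. The name and keywords
# are invented; constructing a Relation is omitted because its signature is not shown here.
if __name__ == "__main__":
    demo = Character("Alice")
    demo.add_most_frequent_words(["river", "letter"])
    demo.add_most_frequent_word("boat")
    print(demo.get_name(), demo.get_most_frequent_words())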
reporting_api/tests/test_utils.py | SmartElect/SmartElect | 23 | 6630628 | # Python imports
import datetime
# 3rd party imports
from django.test import TestCase
from django.utils.timezone import now
# Project imports
from register.tests.test_models import RegistrationCenterFactory
from reporting_api.data_pull_common import get_active_registration_locations, \
get_all_polling_locations
from reporting_api.reports import calc_yesterday, parse_iso_datetime, printable_iso_datetime
from reporting_api.utils import get_datetime_from_local_date_and_time
class TestReportUtils(TestCase):
def test_yesterday_no_dates(self):
string_and_date = calc_yesterday(())
self.assertEqual(string_and_date, (None, None))
def test_yesterday_from_single_date(self):
today = now()
today_str = today.strftime('%Y-%m-%d')
date_and_string = calc_yesterday([today_str])
# since there's just one date provided, yesterday can only
# be that date regardless of other factors
self.assertEqual(date_and_string, (today.date(), today_str))
# try again, providing the date object
date_and_string = calc_yesterday([today_str], [today.date()])
self.assertEqual(date_and_string, (today.date(), today_str))
def test_yesterday_general(self):
today = now()
first_dt = today - datetime.timedelta(7)
input_strings = [(first_dt + datetime.timedelta(delta_days)).strftime('%Y-%m-%d')
for delta_days in range(7)]
expected_str = input_strings[-1]
expected_date = datetime.datetime.strptime(expected_str, '%Y-%m-%d').date()
date_and_string = calc_yesterday(input_strings)
self.assertEqual(date_and_string, (expected_date, expected_str))
# try again, providing the date objects
input_dates = [datetime.datetime.strptime(s, '%Y-%m-%d').date()
for s in input_strings]
date_and_string = calc_yesterday(input_strings, input_dates)
self.assertEqual(date_and_string, (expected_date, expected_str))
def test_iso_parsing(self):
times = (
'2015-02-20T10:09:32.123456+02:00',
'2015-02-20T10:09:32.123456',
'2015-02-20T10:09:32+02:00',
'2015-02-20T10:09:32')
for s in times:
dt = parse_iso_datetime(s)
self.assertEqual((dt.year, dt.month, dt.day), (2015, 2, 20))
self.assertEqual((dt.hour, dt.minute, dt.second), (10, 9, 32))
self.assertEqual(dt.microsecond, 123456 if '.' in s else 0)
if '+' in s:
tz = dt.tzinfo
self.assertEqual(tz.utcoffset(dt), datetime.timedelta(seconds=7200))
else:
self.assertEqual(dt.tzinfo, None)
self.assertEqual(printable_iso_datetime(s), '20/02 10:09')
def test_datetime_from_local_date_and_time(self):
times = (
('2015-02-20', '10:09:32.123456'),
('2015-02-20', '10:09:32'),
)
for d, t in times:
dt = get_datetime_from_local_date_and_time(d, t)
self.assertEqual(dt.strftime('%Y-%m-%d'), d)
time_fmt = '%H:%M:%S.%f' if dt.microsecond else '%H:%M:%S'
self.assertEqual(dt.strftime(time_fmt), t)
def test_registration_center_queries(self):
rc1 = RegistrationCenterFactory(reg_open=True)
rc2 = RegistrationCenterFactory(reg_open=False)
for_registration = get_active_registration_locations()
all_locations = get_all_polling_locations()
self.assertEqual(sorted(for_registration.keys()), sorted([rc1.center_id]))
self.assertEqual(sorted(all_locations.keys()),
sorted([rc1.center_id, rc2.center_id]))
rental_state/models.py | lisboalien/api_rental_state_management | 0 | 6630629 | from django.db import models
class RentalStateProperty(models.Model):
RENTAL_STATE_TYPE = (
('C', 'COMERCIAL'),
('R', 'RESIDENCIAL')
)
TYPE = (
('AP', 'APARTAMENTO'),
('CA', 'CASA'),
('CC', 'CASA DE CONDOMÍNIO'),
('CO', 'COBERTURA'),
('FL', 'FLAT'),
('SO', 'SOBRADO')
)
rental_state_code = models.CharField(max_length=100)
description = models.CharField(max_length=100, blank=False, null=False)
rental_state_type = models.CharField(
max_length=1, choices=RENTAL_STATE_TYPE, blank=False, null=False, default='R')
property_type = models.CharField(
max_length=2, choices=TYPE, blank=False, null=False, default='AP')
address = models.CharField(max_length=1000)
footage = models.CharField(max_length=100)
unit = models.CharField(max_length=2, blank=False,
null=False, default='m2')
number_of_rooms = models.CharField(max_length=10)
number_of_bathrooms = models.CharField(max_length=10)
garage_for_how_many_cars = models.CharField(max_length=10)
def __str__(self):
return self.description
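# Hedged usage sketch (added for illustration; not part of the original app): with Django
# configured and migrations applied, a record could be created roughly like this. All
# field values below are invented examples.
#
#   RentalStateProperty.objects.create(
#       rental_state_code="RS-0001", description="Two-bedroom flat downtown",
#       rental_state_type="R", property_type="FL", address="Some Street, 123",
#       footage="70", unit="m2", number_of_rooms="2", number_of_bathrooms="1",
#       garage_for_how_many_cars="1",
#   )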
parakeet/exps/voice_cloning/tacotron2_ge2e/extract_mel.py | zh794390558/DeepSpeech | 0 | 6630630 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
from functools import partial
from pathlib import Path
import numpy as np
import tqdm
from parakeet.audio import AudioProcessor
from parakeet.audio.spec_normalizer import LogMagnitude
from parakeet.audio.spec_normalizer import NormalizerBase
from parakeet.exps.voice_cloning.tacotron2_ge2e.config import get_cfg_defaults
def extract_mel(fname: Path,
input_dir: Path,
output_dir: Path,
p: AudioProcessor,
n: NormalizerBase):
relative_path = fname.relative_to(input_dir)
out_path = (output_dir / relative_path).with_suffix(".npy")
out_path.parent.mkdir(parents=True, exist_ok=True)
wav = p.read_wav(fname)
mel = p.mel_spectrogram(wav)
mel = n.transform(mel)
np.save(out_path, mel)
def extract_mel_multispeaker(config, input_dir, output_dir, extension=".wav"):
input_dir = Path(input_dir).expanduser()
fnames = list(input_dir.rglob(f"*{extension}"))
output_dir = Path(output_dir).expanduser()
output_dir.mkdir(parents=True, exist_ok=True)
p = AudioProcessor(config.sample_rate, config.n_fft, config.win_length,
config.hop_length, config.d_mels, config.fmin,
config.fmax)
n = LogMagnitude(1e-5)
func = partial(
extract_mel, input_dir=input_dir, output_dir=output_dir, p=p, n=n)
with mp.Pool(16) as pool:
list(
tqdm.tqdm(
pool.imap(func, fnames), total=len(fnames), unit="utterance"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Extract mel spectrogram from processed wav in AiShell3 training dataset."
)
parser.add_argument(
"--config",
type=str,
help="yaml config file to overwrite the default config")
parser.add_argument(
"--input",
type=str,
default="~/datasets/aishell3/train/normalized_wav",
help="path of the processed wav folder")
parser.add_argument(
"--output",
type=str,
default="~/datasets/aishell3/train/mel",
help="path of the folder to save mel spectrograms")
parser.add_argument(
"--opts",
nargs=argparse.REMAINDER,
help="options to overwrite --config file and the default config, passing in KEY VALUE pairs"
)
default_config = get_cfg_defaults()
args = parser.parse_args()
if args.config:
default_config.merge_from_file(args.config)
if args.opts:
default_config.merge_from_list(args.opts)
default_config.freeze()
audio_config = default_config.data
extract_mel_multispeaker(audio_config, args.input, args.output)
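# Hedged usage note (added for illustration; not part of the original script): given the
# argparse options defined above, a typical run would look roughly like
#   python extract_mel.py --input ~/datasets/aishell3/train/normalized_wav \
#                         --output ~/datasets/aishell3/train/mel
# optionally with --config some_config.yaml or trailing KEY VALUE pairs after --opts.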
fakeGen/train.py | bigheiniu/FakeReviewAll | 0 | 6630631 | import torch
import torch.nn as nn
from fakeGen.Discriminator import Discriminator, RNNclaissfier
from fakeGen.seq2seq import Seq2seq
from fakeGen.Generator import Generator
from fakeGen.evaluate import f1_score, accuracy_score, tensor2list
from SeqModel.trainer import SupervisedTrainer
from SeqModel.models import EncoderRNN, DecoderRNN, ItemEncoder, ContextDecoderRNN
from SeqModel.loss import VAELoss, Perplexity
from SeqModel.optim import Optimizer
from SeqModel.dataset import SourceField, TargetField
from SeqModel.evaluator import Predictor
from SeqModel.util.checkpoint import Checkpoint
import torchtext
import sys
import torch.optim as optim
import argparse
import itertools
import numpy as np
def helper_fusion(pos_data, neg_data):
pos_batch = pos_data.shape[0]
neg_batch = neg_data.shape[0]
label_pos = torch.ones((pos_batch, 1), device=pos_data.device, dtype=torch.long)
    label_neg = torch.zeros((neg_batch, 1), device=pos_data.device, dtype=torch.long)  # negative (fake) samples are labelled 0; the original assigned 1 to both classes
all_data = torch.cat((pos_data, neg_data), dim=0)
label = torch.cat((label_pos, label_neg), dim=0)
perm = torch.randperm(label.size()[0], device=pos_data.device)
all_data = all_data[perm].contiguous()
label = label[perm].contiguous()
return all_data, label
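# Hedged shape note (added for clarity; not in the original source): with pos_data of shape
# (B1, D) and neg_data of shape (B2, D), helper_fusion returns a shuffled (B1 + B2, D)
# feature tensor plus a (B1 + B2, 1) long label tensor, e.g.
#   all_data, label = helper_fusion(torch.randn(4, 8), torch.randn(6, 8))
#   # all_data.shape == (10, 8), label.shape == (10, 1)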
def train_LM(opt, loss, seq2seq, fake_data, optimizer=None):
# train
train_lm_data, test_lm_data = fake_data.split(split_ratio=0.2)
t = SupervisedTrainer(loss=loss, batch_size=opt.batch_size,
checkpoint_every=opt.check_point, device=opt.device,
print_every=opt.print_every, expt_dir=opt.expt_dir, predic_rate=True)
seq2seq = t.train(seq2seq, train_lm_data, dev_data=test_lm_data,
num_epochs=opt.lm_epochs,
optimizer=optimizer,
teacher_forcing_ratio=opt.teach_force_ratio,
resume=opt.resume)
return seq2seq
def pre_train_deceptive(rnn_claissfier, classifier_opt, data, opt):
rnn_claissfier.train()
for epochs in range(opt.pre_clf_epochs):
total_loss = 0
data_iter = data.__iter__()
for batch in data_iter:
feature, input_length = getattr(batch, 'src')
label = getattr(batch, 'label')
classifier_opt.zero_grad()
loss, _ = rnn_claissfier(feature, label, input_length)
loss.backward()
classifier_opt.step()
total_loss += loss.item()
print('[INFO] ---PRE-TRAIN--- clf loss is {}'.format(total_loss))
return rnn_claissfier
def train_discriminator(discriminator, dis_opt, seq2seq, gen, fake_data, opt):
for epoch in range(opt.dis_epoch):
print('epoch %d : ' % (epoch + 1), end='')
sys.stdout.flush()
total_loss = 0
# clf the simulate text and fake text
fake_iter = fake_data.__iter__()
true_list = []
pre_list = []
for batch in fake_iter:
feature, input_lengths = getattr(batch, 'src')
_, hidden = seq2seq.encoder_seq(feature, input_lengths=input_lengths)
if len(hidden) == 2:
hidden = hidden[0]
hidden = hidden.permute(1, 0, 2).contiguous()
hidden = hidden.view(hidden.shape[0], -1)
shape = torch.Size((opt.batch_size, opt.z_size))
if next(discriminator.parameters()).is_cuda:
z = torch.cuda.FloatTensor(shape)
else:
z = torch.FloatTensor(shape)
torch.randn(shape, out=z)
# classify the hidden state
sim_data = gen(z)
all_data, label = helper_fusion(hidden, sim_data)
dis_opt.zero_grad()
loss, out = discriminator.batchBCELoss(all_data, label)
loss.backward()
dis_opt.step()
true_list.append(tensor2list(out['y_true']))
pre_list.append(tensor2list(out['y_pre']))
total_loss += loss.item()
y_true = list(itertools.chain.from_iterable(true_list))
y_pre = list(itertools.chain.from_iterable(pre_list))
f1 = f1_score(y_true, y_pre)
acc = accuracy_score(y_true, y_pre)
print("[INFO] ---TRAIN---- discriminator loss {}, acc {}, f1 {}".format(total_loss, f1, acc))
# train the classifier
# return discriminator
#TODO: print total loss
# pad `tensor` with rows of ones up to `length` time steps
def pad(tensor, length):
return torch.cat([tensor, tensor.new_ones(length - tensor.size(0), tensor.size()[1])])
def train_classifier(opt, real_data, gen, seq2seq, rnn_classifier, rnn_opt):
rnn_classifier.train()
seq2seq.train()
gen.train()
real_iter = real_data.__iter__()
total_loss = 0
true_list = []
pre_list = []
for batch in real_iter:
feature, lengths = getattr(batch, 'src')
label = getattr(batch, 'label')
# shape = torch.Size((opt.batch_size, opt.z_size))
shape = torch.Size((10, opt.z_size))
if next(rnn_classifier.parameters()).is_cuda:
z = torch.cuda.FloatTensor(shape)
else:
z = torch.FloatTensor(shape)
# sim_seq: distribution of words
torch.randn(shape, out=z)
sim_hidden = gen(z)
sim_hidden = sim_hidden.view(z.shape[0], opt.n_layers * (2 if opt.bidirectional else 1), opt.hidden_size)
sim_hidden = sim_hidden.permute(1, 0, 2).contiguous()
sim_seq_list, _, _ = seq2seq.decoder_hidden(sim_hidden, teacher_forcing_ratio=0)
sim_seq_length = [seq.shape[0] for seq in sim_seq_list]
length_sort = np.sort(sim_seq_length)
index_sort = np.argsort(sim_seq_length)
max_length = np.max(sim_seq_length)
sim_seq_reorder = [pad(sim_seq_list[index], max_length).unsqueeze(0) for index in index_sort]
sim_seq = torch.cat(sim_seq_reorder)
real_label = torch.ones_like(label)
sim_label = label.new_zeros(sim_seq.shape[0])
rnn_opt.zero_grad()
loss, out = rnn_classifier(feature, real_label, lengths, sim_seq, sim_label, length_sort)
loss.backward()
rnn_opt.step()
total_loss += loss.item()
true_list.append(tensor2list(out['y_true']))
pre_list.append(tensor2list(out['y_pre']))
y_pre = [int(i[0]) for i in list(itertools.chain.from_iterable(pre_list))]
y_true = [i for i in list(itertools.chain.from_iterable(true_list))]
f1 = f1_score(y_true, y_pre)
acc = accuracy_score(y_true, y_pre)
print("[INFO] ---TRAINING--- clf loss {}, f1 {}, acc {}".format(total_loss, f1, acc))
def clf_test(test_clf_data, rnn_classifier):
rnn_classifier.eval()
pre_list = []
true_list = []
data_iter = test_clf_data.__iter__()
for batch in data_iter:
feature, lengths = getattr(batch, 'src')
label = getattr(batch, 'label')
# feature and label will be shuffle in clf
loss, out = rnn_classifier(feature, label, lengths)
y_pre = tensor2list(out['y_pre'])
y_true = tensor2list(out['y_true'])
pre_list.append(y_pre)
true_list.append(y_true)
y_pre = [int(i[0]) for i in list(itertools.chain.from_iterable(pre_list))]
y_true = [ i - 1 for i in list(itertools.chain.from_iterable(true_list))]
f1 = f1_score(y_true, y_pre)
acc = accuracy_score(y_true, y_pre)
print("[INFO] ---TEST--- acc is {}, f1 is {}".format(acc, f1))
#
# print the loss and F1 score
def train_gen(gen, gen_opt, dis_gen, opt):
for epoch in range(opt.gen_epoch):
for _ in range(opt.batch_count):
shape = torch.Size((opt.batch_size, opt.z_size))
if next(gen.parameters()).is_cuda:
z = torch.cuda.FloatTensor(shape)
else:
z = torch.FloatTensor(shape)
torch.randn(shape, out=z)
# generate fake review
sim_data = gen(z)
gen_opt.zero_grad()
# fool the discriminator
# seqGAN will use the prediction logit as reward
loss, _ = dis_gen.batchBCELoss(sim_data, torch.ones((sim_data.shape[0], 1), device=opt.device, dtype=torch.long))
loss.backward()
gen_opt.step()
def prepare_data(opt):
# seq-label real dataset
# seq-label all dataset
# seq-label fake dataset
tgt = TargetField()
src = SourceField()
label = torchtext.data.Field(sequential=False)
# fake_data_lm = torchtext.data.TabularDataset(
# path=opt.fake_data_path, format='csv',
# fields=[('src', src), ('label', label), ('tgt', tgt)]
# )
# real_data_clf = torchtext.data.TabularDataset(
# path=opt.real_data_path, format='csv',
# fields=[('src', src), ('label', label)]
# )
#
# train_clf = torchtext.data.TabularDataset(
# path=opt.train_data_path, format='csv',
# fields=[('src', src), ('label', label)]
# )
#
#
# test_clf = torchtext.data.TabularDataset(
# path=opt.test_data_path, format='csv',
# fields=[('src', src), ('label', label)]
# )
all_data = torchtext.data.TabularDataset(
        path=opt.fake_data_path, format='csv',  # build_parser defines -fake_data_path; no -text_path argument exists
fields=[('src', src), ('tgt', tgt)]
)
src.build_vocab(all_data.src, max_size=opt.max_word)
tgt.build_vocab(all_data.src, max_size=opt.max_word)
# label.build_vocab(train_clf)
input_vocab = src.vocab
output_vocab = tgt.vocab
return all_data, tgt
# # the data is so large?
# test_clf = torchtext.data.BucketIterator(
# dataset=test_clf, batch_size=opt.batch_size,
# sort=False, sort_within_batch=True,
# sort_key=lambda x: len(x.src),
# device=opt.device, repeat=False)
#
# train_clf = torchtext.data.BucketIterator(
# dataset=train_clf, batch_size=opt.batch_size,
# sort=False, sort_within_batch=True,
# sort_key=lambda x: len(x.src),
# device=opt.device, repeat=False)
#
# real_data_clf = torchtext.data.BucketIterator(
# dataset=real_data_clf, batch_size=opt.batch_size,
# sort=False, sort_within_batch=True,
# sort_key=lambda x: len(x.src),
# device=opt.device, repeat=False)
#
# fake_data_dis = torchtext.data.BucketIterator(
# dataset=fake_data_lm, batch_size=opt.batch_size,
# sort=False, sort_within_batch=True,
# sort_key=lambda x: len(x.src),
# device=opt.device, repeat=False)
fake_data_dis = 1
# return fake_data_dis, fake_data_lm, real_data_clf, train_clf, test_clf, input_vocab, tgt
def prepare_loss(tgt, opt):
weight = torch.ones(len(tgt.vocab))
pad = tgt.vocab.stoi[tgt.pad_token]
loss = Perplexity(weight, pad)
if opt.cuda:
loss.cuda()
return loss
def prepare_model(opt, vocab_size, tgt):
dis_hidden_size = opt.hidden_size * opt.n_layers * (2 if opt.bidirectional else 1)
# Prepare loss
encoder = EncoderRNN(vocab_size, opt.max_len, opt.hidden_size,
bidirectional=opt.bidirectional, n_layers=opt.n_layers, variable_lengths=True)
decoder = DecoderRNN(vocab_size, opt.max_len, opt.hidden_size * 2 if opt.bidirectional else opt.hidden_size,
dropout_p=opt.dropout, n_layers=opt.n_layers, use_attention=False, bidirectional=opt.bidirectional,
eos_id=tgt.eos_id, sos_id=tgt.sos_id)
seq2seq = Seq2seq(encoder, decoder).to(opt.device)
# gen = Generator(dis_hidden_size, opt.z_size).to(opt.device)
# encoder_new = EncoderRNN(vocab_size, opt.max_len, opt.hidden_size,
# bidirectional=opt.bidirectional, n_layers=opt.n_layers,variable_lengths=True).to(opt.device)
#
#
# dis_clf = Discriminator(dis_hidden_size, opt.clf_layers).to(opt.device)
# rnn_clf = RNNclaissfier(encoder_new, dis_clf).to(opt.device)
#
# dis_gen = Discriminator(dis_hidden_size, opt.clf_layers).to(opt.device)
# opt_gen = optim.Adam(gen.parameters(), lr=opt.gen_lr)
# opt_dis_clf = optim.Adam(rnn_clf.parameters(), lr=opt.dis_dec_lr)
# opt_dis_gen = optim.Adam(dis_gen.parameters(), lr=opt.dis_gen_lr)
gen = 1
opt_gen = 1
rnn_clf = 1
    opt_dis_clf = 1
dis_gen = 1
opt_dis_gen = 1
return seq2seq, gen, opt_gen, rnn_clf, opt_dis_clf, dis_gen, opt_dis_gen
def build_parser():
parser = argparse.ArgumentParser()
# data
parser.add_argument('-fake_data_path', type=str,
default='/home/yichuan/course/seq2/data/YelpNYC/text.csv')
parser.add_argument('-real_data_path', type=str,
default='/home/yichuan/course/seq2/data/YelpNYC/real_data.csv')
parser.add_argument('-train_data_path', type=str,
default='/home/yichuan/course/seq2/data/YelpNYC/train_data.csv')
parser.add_argument('-test_data_path', type=str,
default='/home/yichuan/course/seq2/data/YelpNYC/test_data.csv')
# language model
parser.add_argument('-max_len', type=int, default=100)
parser.add_argument('-bidirectional', action='store_false', default=True)
parser.add_argument('-dropout', type=float, default=0.3)
parser.add_argument('-hidden_size', type=int, default=128)
parser.add_argument('-max_word', type=int, default=30000)
parser.add_argument('-n_layers', type=int, default=2)
# seq2seq model
parser.add_argument('-batch_size', type=int, default=20)
parser.add_argument('-check_point', type=int, default=100)
parser.add_argument('-print_every', type=int, default=100)
parser.add_argument('-expt_dir', type=str, default='./experiment')
parser.add_argument('-teach_force_ratio', type=float, default=0.4)
parser.add_argument('-resume', action='store_true', default=False)
# GAN discriminator
parser.add_argument('-clf_layers', type=int, default=3)
parser.add_argument('-z_size', type=int, default=128)
# learning rate
parser.add_argument('-dis_dec_lr', type=float, default=0.05)
parser.add_argument('-dis_gen_lr', type=float, default=0.005)
parser.add_argument('-gen_lr', type=float, default=0.005)
# epochs
parser.add_argument('-gan_epoch', type=int, default=10)
parser.add_argument('-gen_epoch', type=int, default=10)
parser.add_argument('-clf_epoch', type=int, default=10)
parser.add_argument('-dis_epoch', type=int, default=1)
parser.add_argument('-pre_clf_epochs', type=int, default=10)
parser.add_argument('-lm_epochs', type=int, default=20)
parser.add_argument('-batch_count', type=int, default=200)
# cuda
parser.add_argument('-cuda', action='store_false')
return parser
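# Example invocation (paths and values below are placeholders, not project data):
#   python fakeGen/train.py -fake_data_path /data/YelpNYC/text.csv \
#       -batch_size 32 -hidden_size 128 -lm_epochs 20
# Note that -bidirectional and -cuda use action='store_false': both are enabled by
# default, and passing the flag on the command line turns the option off.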
def main(parser):
opt = parser.parse_args()
opt.device = torch.device('cuda') if opt.cuda else torch.device('cpu')
# fake_data_dis, fake_data_lm, real_data_clf, train_clf, test_clf, vocab, tgt = prepare_data(opt)
fake_data_dis, tgt = prepare_data(opt)
seq2seq, gen, opt_gen, rnn_claissfier, classifier_opt, dis_gen, opt_dis_gen = \
prepare_model(opt, len(tgt.vocab), tgt=tgt)
# pre-train the LM model
loss_seq = prepare_loss(tgt, opt)
seq2seq = train_LM(opt, loss_seq, seq2seq, fake_data_dis)
exit()
# # pre-train the classify model
# pre_train_deceptive(rnn_claissfier, classifier_opt, train_clf, opt)
#
# # train the generator
# for epoch in range(opt.gan_epoch):
# train_gen(gen, opt_gen, dis_gen, opt)
#
# # train the discriminator
# train_discriminator(dis_gen, opt_dis_gen, seq2seq, gen, fake_data_dis, opt)
#
# # train the classification on simulate data and real review
# for epoch in range(opt.clf_epoch):
# # test the classifier
# clf_test(test_clf, rnn_claissfier)
# train_classifier(opt, real_data_clf, gen, seq2seq, rnn_claissfier, classifier_opt)
if __name__ == '__main__':
parser = build_parser()
main(parser)
| en | 0.287937 | 1.963654 | 2 |
hub/migrations/0002_auto_20210216_0854.py | LindaOuer/django3.1_InnovationHub | 0 | 6630632 | # Generated by Django 3.1.6 on 2021-02-16 07:54
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
import hub.models
class Migration(migrations.Migration):
dependencies = [
('hub', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='coach',
options={'verbose_name': 'Coach', 'verbose_name_plural': 'Coachs'},
),
migrations.AlterModelOptions(
name='student',
options={'verbose_name': 'Student', 'verbose_name_plural': 'Students'},
),
migrations.AddField(
model_name='project',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='project',
name='time_allocated',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1, 'The minimum time allowed is 1 hour'), django.core.validators.MaxValueValidator(10, 'The maximum time allowed is 10 hours')], verbose_name='Temps Alloué'),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, validators=[hub.models.is_Esprit_Email], verbose_name='Email'),
),
]
| en | 0.791602 | 1.854147 | 2 |
drf_util/views.py | RodiZaharadji/drf-util-1 | 0 | 6630633 | <filename>drf_util/views.py
from django.db.models import QuerySet
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, status
from rest_framework.decorators import permission_classes, api_view
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from drf_util.utils import add_related
health_check_response = openapi.Response('Health check')
@swagger_auto_schema(methods=['get'], responses={200: health_check_response})
@api_view(['get'])
@permission_classes([AllowAny])
def health_check(request):
return Response({'live': True})
class BaseCreateModelMixin:
def create(self, request, return_instance=False, *args, **kwargs):
serializer = self.get_serializer_create(data=request.data) # noqa
serializer.is_valid(raise_exception=True)
instance = self.perform_create(serializer, **kwargs)
if return_instance:
return instance
serializer_display = self.get_serializer(instance) # noqa
return Response(serializer_display.data, status=status.HTTP_201_CREATED)
def perform_create(self, serializer, **kwargs): # noqa
instance = serializer.save(**kwargs)
return instance
class BaseUpdateModelMixin:
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object() # noqa
serializer = self.get_serializer_create(instance, data=request.data, partial=partial) # noqa
serializer.is_valid(raise_exception=True)
instance = self.perform_update(serializer)
serializer_display = self.get_serializer(instance) # noqa
if getattr(instance, '_prefetched_objects_cache', None):
instance._prefetched_objects_cache = {}
return Response(serializer_display.data)
def perform_update(self, serializer): # noqa
instance = serializer.save()
return instance
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
class BaseViewSet(GenericViewSet):
queryset = None
query_serializer = None
serializer_class = None
serializer_create_class = None
serializer_by_action = {}
permission_classes_by_action = {}
autocomplete_field = None
def get_queryset(self) -> QuerySet:
queryset = super().get_queryset()
if self.serializer_class and callable(self.get_serializer_class()):
queryset = add_related(queryset, self.get_serializer())
return queryset
def get_serializer_by_action(self):
return self.serializer_by_action.get(self.action)
def get_serializer_class(self):
return self.get_serializer_by_action() or super().get_serializer_class()
def get_permissions(self):
try:
# return permission_classes depending on `action`
return [permission() for permission in self.permission_classes_by_action[self.action]]
except KeyError:
# action is not set return default permission_classes
default = self.permission_classes_by_action.get('default')
if default:
if hasattr(default, '__iter__'):
return [permission() for permission in default]
else:
return [default()] # noqa
return [permission() for permission in self.permission_classes]
def get_serializer_create(self, *args, **kwargs):
serializer_class = self.get_serializer_create_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
def get_query_serializer(self):
        if self.action in ['retrieve', 'post', 'patch']:
return None
return self.query_serializer
def get_serializer_create_class(self):
return self.get_serializer_by_action() or self.serializer_create_class or self.serializer_class
def get_object_id(self):
return self.kwargs.get(self.lookup_field)
class BaseListModelMixin(mixins.ListModelMixin):
filter_class = None
search_fields = ()
ordering_fields = '__all__'
ordering = ['-id']
class BaseReadOnlyViewSet(BaseListModelMixin, mixins.RetrieveModelMixin, BaseViewSet):
pass
class BaseModelItemViewSet(
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
BaseCreateModelMixin,
BaseUpdateModelMixin,
BaseViewSet
):
pass
class BaseModelViewSet(
BaseListModelMixin,
BaseModelItemViewSet
):
pass
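# Usage sketch (hypothetical models/serializers, not part of this package):
#
#   class ArticleViewSet(BaseModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleReadSerializer          # display serializer
#       serializer_create_class = ArticleWriteSerializer  # used by create()/update()
#       serializer_by_action = {'list': ArticleListSerializer}
#       permission_classes_by_action = {
#           'create': [IsAdminUser],
#           'default': [IsAuthenticated],
#       }
#
# get_serializer_class()/get_serializer_create_class() resolve the per-action
# serializer first, and get_permissions() falls back to the 'default' entry
# before the class-level permission_classes.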
| en | 0.215371 | 1.949055 | 2 |
pychron/processing/xml/primitives.py | aelamspychron/pychron | 1 | 6630634 | # ===============================================================================
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from datetime import datetime
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
# ============= standard library imports ========================
# ============= local library imports ==========================
from uncertainties import ufloat, nominal_value, std_dev
from pychron.experiment.utilities.identifier import make_runid
from pychron.processing.analyses.analysis import Analysis
from pychron.processing.analyses.view.main_view import MainView
class BaseRecordView(object):
def __init__(self, name):
self.name = name
class XMLProjectRecordView(BaseRecordView):
pass
class XMLSpectrometerRecord(BaseRecordView):
pass
class XMLIrradiationRecordView(BaseRecordView):
pass
# -------------------------------------------------
class XMLMassSpectrometer(object):
def __init__(self, elem):
exp = elem.xpath('Parameters/Experiment')[0]
self.name = exp.get('massSpectrometer')
class XMLMainView(MainView):
pass
class XMLAnalysisView(HasTraits):
main_view = Instance(XMLMainView)
selection_tool = None
def __init__(self, *args, **kw):
super(XMLAnalysisView, self).__init__(*args, **kw)
# self.main_view = XMLMainView(analysis_id=self.model.uuid)
# self.main_view.load(self.model)
def update_fontsize(self, a, s):
pass
def traits_view(self):
v = View(UItem('main_view', style='custom'))
return v
def _main_view_default(self):
mv = XMLMainView(self.model,
analysis_type=self.model.analysis_type,
analysis_id=self.model.uuid)
return mv
class XMLBaseValue(object):
def __init__(self, key, meas_elem):
self.name = key
self.value = 0
self.error = 0
@property
def uvalue(self):
return ufloat(self.value, self.error)
class XMLBlank(XMLBaseValue):
def __init__(self, key, meas_elem):
super(XMLBlank, self).__init__(key, meas_elem)
self.value = float(meas_elem.get('blank{}'.format(key)))
self.error = float(meas_elem.get('blank{}Sigma'.format(key)))
class XMLBaseline(XMLBaseValue):
pass
class XMLIsotope(XMLBaseValue):
def __init__(self, key, meas_elem):
self.name = key
self.value = float(meas_elem.get('intercept{}'.format(key)))
self.error = float(meas_elem.get('intercept{}Sigma'.format(key)))
self.fit_abbreviation = meas_elem.get('intercept{}RegressionType'.format(key))[0].upper()
self.detector = '---'
self.blank = XMLBlank(key, meas_elem)
self.baseline = XMLBaseline(key, meas_elem)
def get_intensity(self):
return ufloat(self.value, self.error)
def get_baseline_corrected_value(self):
return ufloat(self.value, self.error) - self.baseline.uvalue
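# The measurement element is expected to expose, per isotope key (e.g. '40Ar'),
# attributes named intercept40Ar, intercept40ArSigma, intercept40ArRegressionType,
# blank40Ar and blank40ArSigma (names inferred from the get() calls above), so
# XMLIsotope('40Ar', meas_elem) yields value/error plus an XMLBlank, while the
# attached XMLBaseline keeps the zero defaults from XMLBaseValue.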
class XMLAnalysisRecord(object):
selected_histories = None
def __init__(self, elem, meas_elem):
self.uuid = meas_elem.get('measurementNumber')
self.labnumber = XMLLabnumber(elem)
self.labnumber.identifier = self.uuid
self.record_id = self.uuid
self.aliquot = 0
self.step = ''
self.increment = 0
self.tag = ''
ds = meas_elem.get('measurementDateTime')
self.analysis_timestamp = datetime.strptime(ds, '%Y:%m:%d:%H:%M:%S.00')
self.rundate = self.analysis_timestamp
self.measurement = XMLMeasurement(elem, meas_elem)
self.extraction = XMLExtraction(meas_elem)
class XMLAnalysis(Analysis):
# selected_histories = None
analysis_view = Instance(XMLAnalysisView)
analysis_type = 'unknown'
def __init__(self, elem, meas_elem):
self.uuid = meas_elem.get('measurementNumber')
# self.labnumber = XMLLabnumber(elem)
# self.labnumber.identifier = self.uuid
self.labnumber = self.uuid
self.measurement = XMLMeasurement(elem, meas_elem)
self.extraction = XMLExtraction(meas_elem)
self.aliquot = 0
self.step = ''
self.increment = 0
self.tag = ''
ds = meas_elem.get('measurementDateTime')
self.analysis_timestamp = datetime.strptime(ds, '%Y:%m:%d:%H:%M:%S.00')
self.rundate = self.analysis_timestamp
self._make_isotopes(meas_elem)
ex = XMLExtraction(meas_elem)
exp = XMLExperiment(elem)
self.mass_spectrometer = self.measurement.mass_spectrometer.name
self.extraction_script_name = '---'
self.measurement_script_name = '---'
self.extract_device = '---'
self.position = '---'
self.xyz_position = '---'
self.extract_value = ex.extract_value
self.extract_units = ex.extract_units
self.duration = ex.extract_duration
self.cleanup = ex.cleanup_duration
self.beam_diameter = '---'
self.pattern = '---'
self.ramp_duration = '---'
self.ramp_rate = '---'
self.collection_time_zero_offset = '---'
self.extract_device = exp.extract_device
parm = elem.find('Parameters')
        # XML attribute values are strings, so cast before building the ufloat
        self.j = ufloat(float(parm.get('jValue')), float(parm.get('jValueSigma')))
self.ar39decayfactor = 1
self.ar37decayfactor = 1
self.data_reduction_tag = ''
ix = XMLIrradiationPosition(elem)
# self.irradiation_label = ix.level.irradiation.name
self.irradiation = ix.level.irradiation.name
self.irradiation_level = ix.level.name
self.irradiation_pos = ''
sx = XMLSample(elem)
self.project = sx.project.name
self.sample = sx.name
self.material = sx.material.name
self.comment = ''
self.sensitivity = 0
self.uage = self._make_ufloat(meas_elem, 'measuredAge')
self.age = nominal_value(self.uage)
self.age_err = std_dev(self.uage)
self.age_err_wo_j = std_dev(self.uage)
self.age_err_wo_j_irrad = std_dev(self.uage)
self.kca = self._make_ufloat(meas_elem, 'measuredKCaRatio')
self.uF = self._make_ufloat(meas_elem, 'corrected40ArRad39ArKRatio')
self.age_err_wo_j = 0
self.kcl = ufloat(0, 0)
        self.radiogenic_yield = ufloat(float(meas_elem.get('fraction40ArRadiogenic')), 0)
self.F_err_wo_irrad = 0
# self.Ar40/Ar39_decay_corrected=0
# self.Ar40/Ar37_decay_corrected=0
# self.Ar40/Ar36=0
# self.Ar38/Ar39_decay_corrected=0
# self.Ar37_decay_corrected/Ar39_decay_corrected=0
# self.Ar36/Ar39_decay_corrected=0
def calculate_age(self, force=False, **kw):
pass
    def _make_ufloat(self, meas_elem, key):
        # attribute values come back as strings from the XML element, so cast explicitly
        return ufloat(float(meas_elem.get(key)), float(meas_elem.get('{}Sigma'.format(key))))
def _analysis_view_default(self):
return XMLAnalysisView(model=self, analysis_id=self.uuid)
@property
def record_id(self):
return make_runid(self.uuid, self.aliquot, self.step)
def _make_isotopes(self, m):
isos = {}
for k, kk in (('Ar40', '40Ar'), ('Ar39', '39Ar'), ('Ar38', '38Ar'), ('Ar37', '37Ar'), ('Ar36', '36Ar')):
if m.get('intercept{}'.format(kk)):
isos[k] = XMLIsotope(kk, m)
self.isotopes = isos
# def __getattr__(self, item):
# if '/' in item:
# return ufloat(0, 0)
# elif item != 'analysis_view':
# print 'define {}'.format(item)
# return '---'
# else:
# return XMLAnalysisView(model=self, analysis_id=self.uuid)
class XMLExperiment(object):
def __init__(self, elem):
exp = elem.xpath('Parameters/Experiment')[0]
self.extract_device = exp.get('extractionMethod')
class XMLExtraction(object):
def __init__(self, meas_elem):
self.extract_value = float(meas_elem.get('temperature'))
self.extract_units = meas_elem.get('temperatureUnit')
self.cleanup_duration = float(meas_elem.get('isolationDuration'))
self.extract_duration = '---'
class XMLMeasurement(object):
def __init__(self, elem, meas_elem):
self.mass_spectrometer = XMLMassSpectrometer(elem)
class XMLLabnumber(object):
selected_flux_id = None
def __init__(self, elem):
self.identifier = elem.get('igsn')
pos = XMLIrradiationPosition(elem)
self.irradiation_position = pos
self.sample = XMLSample(elem)
class XMLSample(object):
def __init__(self, elem):
self.name = elem.get('sampleID')
self.igsn = elem.get('igsn')
self.lon = float(elem.get('longitude'))
self.lat = float(elem.get('latitude'))
exp = elem.xpath('Parameters/Experiment')[0]
self.material = XMLMaterial(exp)
self.project = XMLProject(exp)
class XMLMaterial(object):
def __init__(self, exp_elem):
self.name = exp_elem.get('sampleMaterialType')
class XMLProject(object):
def __init__(self, exp_elem):
self.name = exp_elem.get('projectName')
class XMLIrradiationPosition(object):
def __init__(self, elem):
self.position = '---'
self.level = XMLIrradiationLevel(elem)
class XMLIrradiationLevel(object):
def __init__(self, elem):
self.name = ''
self.irradiation = XMLIrradiation(elem)
class XMLIrradiation(object):
def __init__(self, elem):
irrad = elem.xpath('Parameters/Experiment/Irradiation')[0]
self.name = irrad.get('irradiationName')
# ============= EOF =============================================
| en | 0.54226 | 1.400866 | 1 |
feature/volume.py | dev-alberto/fitz_python | 0 | 6630635 | from feature.feature import EmptyFeature
import numpy as np
class Volume(EmptyFeature):
def __init__(self, raw_data_manager, history_lengh=None):
super().__init__(1, raw_data_manager, history_lengh=history_lengh)
def compute(self, data_dict):
volume = data_dict.get('volume')
return np.array(volume, dtype=object)
| from feature.feature import EmptyFeature
import numpy as np
class Volume(EmptyFeature):
def __init__(self, raw_data_manager, history_lengh=None):
super().__init__(1, raw_data_manager, history_lengh=history_lengh)
def compute(self, data_dict):
volume = data_dict.get('volume')
return np.array(volume, dtype=object)
| none | 1 | 2.909801 | 3 |
|
config_sync.py | tc45/FDM_config_sync | 0 | 6630636 | <reponame>tc45/FDM_config_sync<filename>config_sync.py
import argparse
import logging
import yaml
from fdm import FDMClient
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", help="Path to the configuration file", default="fdm.cfg")
parser.add_argument("--debug", "-d", help="Display debug logs", action="store_true")
return parser.parse_args()
def init_logger(log_level=logging.INFO):
log = logging.getLogger(__file__)
log.setLevel(log_level)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
return log
class ConfigSync:
def __init__(self, config, log):
self.log = log
self.config_file = config
self.log.info('Initializing ConfigSync class.')
self.config = self._parse_config(config)
self.fdm = self._init_fdm_client(self.config)
self.log.debug('configSync class initialization finished.')
def _parse_config(self, config_file):
self.log.info('parsing the configuration file.')
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
self.log.debug(f'The following parameters were received: {config}')
return config
def _init_fdm_client(self, config):
self.log.info('Initializing FDMClient class.')
host = config.get('fdm_host')
username = config.get('fdm_username')
password = config.get('fdm_password')
print(f'Username is {username} and password is {password}')
fdm = FDMClient(host, username=username, password=password, log=self.log)
self.log.info('Login to FDM.')
fdm.login()
return fdm
def _get_url_category(self, name):
category_dict = None
for category in self.url_categories:
category_name = category['name']
if category_name == name:
category_dict = {
'urlCategory': {
'name': category_name,
'id': category['id'],
'type': category['type']
},
'type': 'urlcategorymatcher'
}
break
return category_dict
def get_config(self):
access_rule_name = self.config['url_filtering']['rule_name']
self.log.info('Requesting access rule for URL filtering from FDM.')
self.access_rule = self.fdm.get_access_rule_by_name(access_rule_name)
def sync(self):
self.log.info('starting the config sync.')
self.log.info('Requesting URL categories from FDM.')
self.url_categories = self.fdm.get_url_categories()
self.access_rule['urlFilter']['urlCategories'] = []
self.log.info('Updating the access rule.')
for category in self.config['url_filtering']['url_categories']:
cat_dict = self._get_url_category(category)
if cat_dict:
self.access_rule['urlFilter']['urlCategories'].append(cat_dict)
self.log.info('Adding the configuration to FDM.')
self.fdm.put_access_rule(self.access_rule)
def deploy(self):
self.log.info('Starting with the configuration deployment.')
self.fdm.deploy()
self.log.info('Configuration deployment successful.')
self.log.info('Logging out of the FDM.')
self.fdm.logout()
if __name__ == "__main__":
args = parse_arguments()
if args.debug:
log = init_logger(logging.DEBUG)
else:
log = init_logger()
cs = ConfigSync(config=args.config, log=log)
# print(cs.fdm.token)
cs.get_config()
cs.sync()
cs.deploy()
| import argparse
import logging
import yaml
from fdm import FDMClient
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", help="Path to the configuration file", default="fdm.cfg")
parser.add_argument("--debug", "-d", help="Display debug logs", action="store_true")
return parser.parse_args()
def init_logger(log_level=logging.INFO):
log = logging.getLogger(__file__)
log.setLevel(log_level)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
return log
class ConfigSync:
def __init__(self, config, log):
self.log = log
self.config_file = config
self.log.info('Initializing ConfigSync class.')
self.config = self._parse_config(config)
self.fdm = self._init_fdm_client(self.config)
self.log.debug('configSync class initialization finished.')
def _parse_config(self, config_file):
self.log.info('parsing the configuration file.')
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
self.log.debug(f'The following parameters were received: {config}')
return config
def _init_fdm_client(self, config):
self.log.info('Initializing FDMClient class.')
host = config.get('fdm_host')
username = config.get('fdm_username')
password = config.get('fdm_password')
print(f'Username is {username} and password is {password}')
fdm = FDMClient(host, username=username, password=password, log=self.log)
self.log.info('Login to FDM.')
fdm.login()
return fdm
def _get_url_category(self, name):
category_dict = None
for category in self.url_categories:
category_name = category['name']
if category_name == name:
category_dict = {
'urlCategory': {
'name': category_name,
'id': category['id'],
'type': category['type']
},
'type': 'urlcategorymatcher'
}
break
return category_dict
def get_config(self):
access_rule_name = self.config['url_filtering']['rule_name']
self.log.info('Requesting access rule for URL filtering from FDM.')
self.access_rule = self.fdm.get_access_rule_by_name(access_rule_name)
def sync(self):
self.log.info('starting the config sync.')
self.log.info('Requesting URL categories from FDM.')
self.url_categories = self.fdm.get_url_categories()
self.access_rule['urlFilter']['urlCategories'] = []
self.log.info('Updating the access rule.')
for category in self.config['url_filtering']['url_categories']:
cat_dict = self._get_url_category(category)
if cat_dict:
self.access_rule['urlFilter']['urlCategories'].append(cat_dict)
self.log.info('Adding the configuration to FDM.')
self.fdm.put_access_rule(self.access_rule)
def deploy(self):
self.log.info('Starting with the configuration deployment.')
self.fdm.deploy()
self.log.info('Configuration deployment successful.')
self.log.info('Logging out of the FDM.')
self.fdm.logout()
if __name__ == "__main__":
args = parse_arguments()
if args.debug:
log = init_logger(logging.DEBUG)
else:
log = init_logger()
cs = ConfigSync(config=args.config, log=log)
# print(cs.fdm.token)
cs.get_config()
cs.sync()
cs.deploy() | de | 0.084149 | # print(cs.fdm.token) | 2.652246 | 3 |
avwx/structs.py | AirbusDriver/avwx-engine | 0 | 6630637 | """
Contains dataclasses to hold report data
"""
# stdlib
import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
class _LazyLoad:
source: Path
data: dict = None
def __init__(self, filename: str):
self.source = Path(__file__).parent.joinpath(f"{filename}.json")
def _load(self):
self.data = json.load(self.source.open())
def __getitem__(self, key: str) -> object:
if not self.data:
self._load()
return self.data[key]
def __contains__(self, key: str) -> bool:
if not self.data:
self._load()
return key in self.data
def __len__(self) -> int:
if not self.data:
self._load()
return len(self.data)
def __iter__(self):
if not self.data:
self._load()
for key in self.data:
yield key
def values(self) -> list:
if not self.data:
self._load()
return self.data.values()
AIRCRAFT = _LazyLoad("aircraft")
@dataclass
class Aircraft:
code: str
type: str
@classmethod
def from_icao(cls, code: str) -> "Aircraft":
"""
Load an Aircraft from an ICAO aircraft code
"""
try:
return cls(code=code, type=AIRCRAFT[code])
except KeyError:
raise ValueError(code + " is not a known aircraft code")
@dataclass
class Units:
altimeter: str
altitude: str
temperature: str
visibility: str
wind_speed: str
@dataclass
class Number:
repr: str
value: float
spoken: str
@dataclass
class Fraction(Number):
numerator: int
denominator: int
normalized: str
@dataclass
class Timestamp:
repr: str
dt: datetime
@dataclass
class Cloud:
repr: str
type: str = None
base: int = None
top: int = None
modifier: str = None
direction: str = None
@dataclass
class Location:
repr: str
station: str
direction: Number
distance: Number
@dataclass
class RemarksData:
dewpoint_decimal: float = None
temperature_decimal: float = None
@dataclass
class ReportData:
raw: str
station: str
time: Timestamp
remarks: str
@dataclass
class SharedData:
altimeter: Number
clouds: [Cloud]
flight_rules: str
other: [str]
sanitized: str
visibility: Number
wind_direction: Number
wind_gust: Number
wind_speed: Number
@dataclass
class MetarData(ReportData, SharedData):
dewpoint: Number
remarks_info: RemarksData
runway_visibility: [str]
temperature: Number
wind_variable_direction: [Number]
@dataclass
class TafLineData(SharedData):
end_time: Timestamp
icing: [str]
probability: Number
raw: str
start_time: Timestamp
turbulence: [str]
type: str
wind_shear: str
@dataclass
class TafData(ReportData):
forecast: [TafLineData]
start_time: Timestamp
end_time: Timestamp
max_temp: float = None
min_temp: float = None
alts: [str] = None
temps: [str] = None
@dataclass
class ReportTrans:
altimeter: str
clouds: str
other: str
visibility: str
@dataclass
class MetarTrans(ReportTrans):
dewpoint: str
remarks: dict
temperature: str
wind: str
@dataclass
class TafLineTrans(ReportTrans):
icing: str
turbulence: str
wind: str
wind_shear: str
@dataclass
class TafTrans:
forecast: [TafLineTrans]
max_temp: str
min_temp: str
remarks: dict
@dataclass
class Turbulence:
severity: str
floor: Number = None
ceiling: Number = None
@dataclass
class Icing(Turbulence):
type: str = None
@dataclass
class PirepData(ReportData):
aircraft: Aircraft = None
altitude: Number = None
clouds: [Cloud] = None
flight_visibility: Number = None
icing: Icing = None
location: Location = None
sanitized: str = None
temperature: Number = None
turbulence: Turbulence = None
type: str = None
wx: [str] = None
# @dataclass
# class AirepData(ReportData):
# pass
| """
Contains dataclasses to hold report data
"""
# stdlib
import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
class _LazyLoad:
source: Path
data: dict = None
def __init__(self, filename: str):
self.source = Path(__file__).parent.joinpath(f"{filename}.json")
def _load(self):
self.data = json.load(self.source.open())
def __getitem__(self, key: str) -> object:
if not self.data:
self._load()
return self.data[key]
def __contains__(self, key: str) -> bool:
if not self.data:
self._load()
return key in self.data
def __len__(self) -> int:
if not self.data:
self._load()
return len(self.data)
def __iter__(self):
if not self.data:
self._load()
for key in self.data:
yield key
def values(self) -> list:
if not self.data:
self._load()
return self.data.values()
AIRCRAFT = _LazyLoad("aircraft")
@dataclass
class Aircraft:
code: str
type: str
@classmethod
def from_icao(cls, code: str) -> "Aircraft":
"""
Load an Aircraft from an ICAO aircraft code
"""
try:
return cls(code=code, type=AIRCRAFT[code])
except KeyError:
raise ValueError(code + " is not a known aircraft code")
@dataclass
class Units:
altimeter: str
altitude: str
temperature: str
visibility: str
wind_speed: str
@dataclass
class Number:
repr: str
value: float
spoken: str
@dataclass
class Fraction(Number):
numerator: int
denominator: int
normalized: str
@dataclass
class Timestamp:
repr: str
dt: datetime
@dataclass
class Cloud:
repr: str
type: str = None
base: int = None
top: int = None
modifier: str = None
direction: str = None
@dataclass
class Location:
repr: str
station: str
direction: Number
distance: Number
@dataclass
class RemarksData:
dewpoint_decimal: float = None
temperature_decimal: float = None
@dataclass
class ReportData:
raw: str
station: str
time: Timestamp
remarks: str
@dataclass
class SharedData:
altimeter: Number
clouds: [Cloud]
flight_rules: str
other: [str]
sanitized: str
visibility: Number
wind_direction: Number
wind_gust: Number
wind_speed: Number
@dataclass
class MetarData(ReportData, SharedData):
dewpoint: Number
remarks_info: RemarksData
runway_visibility: [str]
temperature: Number
wind_variable_direction: [Number]
@dataclass
class TafLineData(SharedData):
end_time: Timestamp
icing: [str]
probability: Number
raw: str
start_time: Timestamp
turbulence: [str]
type: str
wind_shear: str
@dataclass
class TafData(ReportData):
forecast: [TafLineData]
start_time: Timestamp
end_time: Timestamp
max_temp: float = None
min_temp: float = None
alts: [str] = None
temps: [str] = None
@dataclass
class ReportTrans:
altimeter: str
clouds: str
other: str
visibility: str
@dataclass
class MetarTrans(ReportTrans):
dewpoint: str
remarks: dict
temperature: str
wind: str
@dataclass
class TafLineTrans(ReportTrans):
icing: str
turbulence: str
wind: str
wind_shear: str
@dataclass
class TafTrans:
forecast: [TafLineTrans]
max_temp: str
min_temp: str
remarks: dict
@dataclass
class Turbulence:
severity: str
floor: Number = None
ceiling: Number = None
@dataclass
class Icing(Turbulence):
type: str = None
@dataclass
class PirepData(ReportData):
aircraft: Aircraft = None
altitude: Number = None
clouds: [Cloud] = None
flight_visibility: Number = None
icing: Icing = None
location: Location = None
sanitized: str = None
temperature: Number = None
turbulence: Turbulence = None
type: str = None
wx: [str] = None
# @dataclass
# class AirepData(ReportData):
# pass
| en | 0.57032 | Contains dataclasses to hold report data # stdlib Load an Aircraft from an ICAO aircraft code # @dataclass # class AirepData(ReportData): # pass | 2.988961 | 3 |
lmdb/home/migrations/0006_auto_20161101_0622.py | huzaifafaruqui/Movies-Website | 11 | 6630638 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-01 06:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0005_movie_genre'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='title',
field=models.CharField(blank=True, max_length=30),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-01 06:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0005_movie_genre'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='title',
field=models.CharField(blank=True, max_length=30),
),
] | en | 0.819662 | # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-11-01 06:22 | 1.556215 | 2 |
jyotisha/panchaanga/writer/table/day_details.py | Prabhakaran-cbe/jyotisha | 40 | 6630639 | from indic_transliteration import sanscript
from jyotisha.panchaanga.spatio_temporal import City
from jyotisha.panchaanga.temporal import names, AngaType, era
from jyotisha.panchaanga.temporal.festival import rules
from jyotisha.panchaanga.temporal.festival.rules import RulesRepo
ujjain = City.get_city_from_db(name="Ujjain")
def to_table_dict(panchaanga, script=sanscript.DEVANAGARI):
final_dict = {"data": []}
rules_collection = rules.RulesCollection.get_cached(
repos_tuple=tuple(panchaanga.computation_system.festival_options.repos), julian_handling=panchaanga.computation_system.festival_options.julian_handling)
for daily_panchaanga in panchaanga.daily_panchaangas_sorted(skip_padding_days=True):
day_dict = {}
day_dict["gregorian"] = daily_panchaanga.date.get_date_str()
day_dict["islamic"] = daily_panchaanga.date.to_islamic_date().get_date_str()
day_dict["islamic_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.ISLAMIC_MONTH_DIR, script=script)
if not panchaanga.start_date.year > 1800:
day_dict["julian"] = daily_panchaanga.date.to_juluan_date().get_date_str()
day_dict["Indian_civil"] = daily_panchaanga.date.to_indian_civil_date().get_date_str()
day_dict["lunar"] = daily_panchaanga.get_date(month_type=RulesRepo.LUNAR_MONTH_DIR).get_date_str()
day_dict["lunar_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.LUNAR_MONTH_DIR, script=script)
if panchaanga.city.name == "Mysore" and panchaanga.start_date.year in range(1700, 1820):
lunar_month = daily_panchaanga.get_date(month_type=RulesRepo.LUNAR_MONTH_DIR).month
day_dict["lunar_month_tipu"] = names.get_tipu_month_str(month=lunar_month)
day_dict["lunar_year (maudUdI)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_TIPU_MAULUDI)
day_dict["lunar_samvatsara"] = daily_panchaanga.get_samvatsara(month_type=RulesRepo.LUNAR_MONTH_DIR).get_name(script=script)
day_dict["lunar_year (shaka)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_SHAKA)
day_dict["lunar_year (kali)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_KALI)
day_dict["tropical"] = daily_panchaanga.get_date(month_type=RulesRepo.TROPICAL_MONTH_DIR).get_date_str()
day_dict["tropical_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.TROPICAL_MONTH_DIR, script=script)
    day_dict["sidereal_solar"] = daily_panchaanga.get_date(month_type=RulesRepo.SIDEREAL_SOLAR_MONTH_DIR).get_date_str()
    day_dict["sidereal_solar_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.SIDEREAL_SOLAR_MONTH_DIR, script=script)  # distinct key; the line above stores the date
day_dict["tithis"] = daily_panchaanga.sunrise_day_angas.get_anga_data_str(anga_type=AngaType.TITHI, script=script, reference_jd=daily_panchaanga.julian_day_start)
day_dict["vaara"] = names.NAMES['VARA_NAMES']['sa'][script][daily_panchaanga.date.get_weekday()]
day_dict["lunar_nakshatras"] = daily_panchaanga.sunrise_day_angas.get_anga_data_str(anga_type=AngaType.NAKSHATRA, script=script, reference_jd=daily_panchaanga.julian_day_start)
day_dict["festivals"] = ", ".join([x.get_full_title(fest_details_dict=rules_collection.name_to_rule) for x in daily_panchaanga.festival_id_to_instance.values()])
final_dict["data"].append(day_dict)
return final_dict
| from indic_transliteration import sanscript
from jyotisha.panchaanga.spatio_temporal import City
from jyotisha.panchaanga.temporal import names, AngaType, era
from jyotisha.panchaanga.temporal.festival import rules
from jyotisha.panchaanga.temporal.festival.rules import RulesRepo
ujjain = City.get_city_from_db(name="Ujjain")
def to_table_dict(panchaanga, script=sanscript.DEVANAGARI):
final_dict = {"data": []}
rules_collection = rules.RulesCollection.get_cached(
repos_tuple=tuple(panchaanga.computation_system.festival_options.repos), julian_handling=panchaanga.computation_system.festival_options.julian_handling)
for daily_panchaanga in panchaanga.daily_panchaangas_sorted(skip_padding_days=True):
day_dict = {}
day_dict["gregorian"] = daily_panchaanga.date.get_date_str()
day_dict["islamic"] = daily_panchaanga.date.to_islamic_date().get_date_str()
day_dict["islamic_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.ISLAMIC_MONTH_DIR, script=script)
if not panchaanga.start_date.year > 1800:
day_dict["julian"] = daily_panchaanga.date.to_juluan_date().get_date_str()
day_dict["Indian_civil"] = daily_panchaanga.date.to_indian_civil_date().get_date_str()
day_dict["lunar"] = daily_panchaanga.get_date(month_type=RulesRepo.LUNAR_MONTH_DIR).get_date_str()
day_dict["lunar_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.LUNAR_MONTH_DIR, script=script)
if panchaanga.city.name == "Mysore" and panchaanga.start_date.year in range(1700, 1820):
lunar_month = daily_panchaanga.get_date(month_type=RulesRepo.LUNAR_MONTH_DIR).month
day_dict["lunar_month_tipu"] = names.get_tipu_month_str(month=lunar_month)
day_dict["lunar_year (maudUdI)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_TIPU_MAULUDI)
day_dict["lunar_samvatsara"] = daily_panchaanga.get_samvatsara(month_type=RulesRepo.LUNAR_MONTH_DIR).get_name(script=script)
day_dict["lunar_year (shaka)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_SHAKA)
day_dict["lunar_year (kali)"] = daily_panchaanga.get_year_number(month_type=RulesRepo.LUNAR_MONTH_DIR, era_id=era.ERA_KALI)
day_dict["tropical"] = daily_panchaanga.get_date(month_type=RulesRepo.TROPICAL_MONTH_DIR).get_date_str()
day_dict["tropical_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.TROPICAL_MONTH_DIR, script=script)
    day_dict["sidereal_solar"] = daily_panchaanga.get_date(month_type=RulesRepo.SIDEREAL_SOLAR_MONTH_DIR).get_date_str()
    day_dict["sidereal_solar_month"] = daily_panchaanga.get_month_str(month_type=RulesRepo.SIDEREAL_SOLAR_MONTH_DIR, script=script)  # distinct key; the line above stores the date
day_dict["tithis"] = daily_panchaanga.sunrise_day_angas.get_anga_data_str(anga_type=AngaType.TITHI, script=script, reference_jd=daily_panchaanga.julian_day_start)
day_dict["vaara"] = names.NAMES['VARA_NAMES']['sa'][script][daily_panchaanga.date.get_weekday()]
day_dict["lunar_nakshatras"] = daily_panchaanga.sunrise_day_angas.get_anga_data_str(anga_type=AngaType.NAKSHATRA, script=script, reference_jd=daily_panchaanga.julian_day_start)
day_dict["festivals"] = ", ".join([x.get_full_title(fest_details_dict=rules_collection.name_to_rule) for x in daily_panchaanga.festival_id_to_instance.values()])
final_dict["data"].append(day_dict)
return final_dict
| none | 1 | 2.310549 | 2 |
|
torchpruner/operator/operator.py | Ocean-627/torch-model-compression | 86 | 6630640 | import onnxruntime.backend
import onnx
import onnxruntime
import torch
from collections import OrderedDict
from onnx import AttributeProto, TensorProto, GraphProto, helper, shape_inference
import torchpruner.register as register
from onnx import numpy_helper
from onnx import version_converter
import numpy as np
import onnxruntime.backend
torch2onnx_type_mapping = {
"Float": TensorProto.FLOAT,
"UINT8": TensorProto.UINT8,
"INT8": TensorProto.INT8,
"UINT16": TensorProto.UINT16,
"INT16": TensorProto.INT16,
"INT32": TensorProto.INT32,
"Long": TensorProto.INT64,
"Bool": TensorProto.BOOL,
"Double": TensorProto.DOUBLE,
}
num2type_mapping = {
"1": "Float",
"2": "UINT8",
"3": "INT8",
"4": "UINT16",
"5": "INT16",
"6": "INT32",
"7": "Long",
"9": "Bool",
"11": "Double",
}
def _extract_shape(value_info):
shape_dict = OrderedDict()
for info in value_info:
name = info.name
shape_type = str(info.type.tensor_type.elem_type)
shape = info.type.tensor_type.shape
shape_list = []
for d in shape.dim:
shape_list.append(d.dim_value)
shape_dict[name] = (shape_type, shape_list)
return shape_dict
class OperatorNode(object):
def __init__(self, node):
import torchpruner.graph as g
# define the OperatorNode data structure
outputs = list(node.outputs())
self.name = None
op_kind = node.kind().split("::")
self.type = op_kind[1]
self.protocal = op_kind[0]
self.obj_list = node.scopeName().split(".")
if len(self.obj_list) == 1 and self.obj_list[0] == "":
self.obj_list = ["self"]
self.name = ".".join(self.obj_list)
self.name += "."
self.name += self.type
self.params = OrderedDict()
self.device = "CPU"
attlist = list(node.attributeNames())
for i in range(0, len(attlist)):
if isinstance(node[attlist[i]], torch.Tensor):
self.params[attlist[i]] = node[attlist[i]].numpy()
else:
self.params[attlist[i]] = node[attlist[i]]
# the operator node will be filled at the build graph
self.in_data: g.DataNode = []
self.out_data: g.DataNode = []
def set_device(self, device):
self.device = device.upper()
def __str__(self):
return_string = ""
for node in self.out_data:
return_string += str(node)
return_string += ", "
if len(return_string) != 0:
return_string = return_string[:-2]
return_string += " = "
return_string += self.protocal
return_string += "::"
return_string += self.type
return_string += "["
for key in self.params.keys():
return_string += key
return_string += "="
return_string += str(self.params[key])
return_string += ", "
if return_string[-1] != "[":
return_string = return_string[:-2]
return_string += "]("
for node in self.in_data:
return_string += "%" + node.name
return_string += ", "
if return_string[-1] != "(":
return_string = return_string[:-2]
return_string += ")"
return_string += ", scope: "
return_string += ".".join(self.obj_list)
return return_string
def __repr__(self):
return self.__str__()
def rank(self, node):
out_nodes = self.out_data
for i in range(0, len(out_nodes)):
out_node = out_nodes[i]
if id(node) == id(out_node):
return "out", i
in_nodes = self.in_data
for i in range(0, len(in_nodes)):
in_node = in_nodes[i]
if id(node) == id(in_node):
return "in", i
def flops(self):
return 0
def fill_shape(self):
need_fill = False
out_data_nodes = self.out_data
for node in out_data_nodes:
if node.kind != "NoneType" and node.type() is None:
need_fill = True
break
if need_fill:
out_data_node_names = []
for node in out_data_nodes:
out_data_node_names.append(node.name)
in_data_nodes = self.in_data
in_data_node_names = []
in_data_nodes_protos = []
for node in in_data_nodes:
in_data_node_names.append(node.name)
in_data_nodes_protos.append(
helper.make_tensor_value_info(
node.name,
torch2onnx_type_mapping[node.type()],
list(node.size()),
)
)
operator_params = {}
for key in self.params:
if isinstance(self.params[key], np.ndarray):
operator_params[key] = numpy_helper.from_array(self.params[key])
else:
operator_params[key] = self.params[key]
node_def = helper.make_node(
self.type, # node name
in_data_node_names, # inputs
out_data_node_names, # outputs
**operator_params
)
graph_def = helper.make_graph(
[node_def], "node-graph", in_data_nodes_protos, []
)
model = helper.make_model(
graph_def,
producer_name="node-model",
opset_imports=[helper.make_opsetid("", 11)],
)
try:
inferred_model = shape_inference.infer_shapes(model)
except Exception as e:
print(e)
print(model)
value_info = inferred_model.graph.value_info
shape_dict = _extract_shape(value_info)
shape_dict_keys = list(shape_dict.keys())
for i in range(0, len(out_data_node_names)):
name = out_data_node_names[i]
node = out_data_nodes[i]
if node.type() is None:
if name not in shape_dict_keys:
raise RuntimeError(
"Fail to predict the shape on operator name: '"
+ self.name
+ "', type: '"
+ self.type
+ "'"
)
else:
node._type = num2type_mapping[shape_dict[name][0]]
node._size = shape_dict[name][1]
def fill_value(self):
# fix the torch operator error
if self.type == "Equal":
if len(self.out_data) != 0:
self.out_data[0]._type = "Bool"
out_data_nodes = self.out_data
out_data_node_names = []
out_data_node_protos = []
for node in out_data_nodes:
if node.kind == "NoneType":
return
out_data_node_names.append(node.name)
out_data_node_protos.append(
helper.make_tensor_value_info(
node.name, torch2onnx_type_mapping[node.type()], list(node.size())
)
)
in_data_nodes = self.in_data
in_data_node_names = []
in_data_node_protos = []
feed_dict = {}
inputs = []
for node in in_data_nodes:
if node.kind == "NoneType":
continue
in_data_node_names.append(node.name)
in_data_node_protos.append(
helper.make_tensor_value_info(
node.name, torch2onnx_type_mapping[node.type()], list(node.size())
)
)
feed_dict[node.name] = node.data
inputs.append(node.data)
operator_params = {}
for key in self.params:
if isinstance(self.params[key], np.ndarray):
operator_params[key] = numpy_helper.from_array(self.params[key])
else:
operator_params[key] = self.params[key]
node_def = helper.make_node(
self.type, # node name
in_data_node_names, # inputs
out_data_node_names, # outputs
**operator_params
)
graph_def = helper.make_graph(
[node_def], "node-graph", in_data_node_protos, out_data_node_protos
)
model = helper.make_model(
graph_def,
producer_name="node-model",
opset_imports=[helper.make_opsetid("", 11)],
)
onnx.checker.check_model(model)
s = onnx._serialize(model)
sess = onnxruntime.backend.prepare(s, device=self.device)
results = onnxruntime.backend.run(sess, inputs)
for i in range(0, len(out_data_node_names)):
# name=out_data_node_names[i]
out_data_nodes[i].data = results[i]
if out_data_nodes[i]._size != list(results[i].shape):
out_data_nodes[i]._size = list(results[i].shape)
def analysis(self, node, mask):
raise NotImplementedError("The analysis is not complete")
| import onnxruntime.backend
import onnx
import onnxruntime
import torch
from collections import OrderedDict
from onnx import AttributeProto, TensorProto, GraphProto, helper, shape_inference
import torchpruner.register as register
from onnx import numpy_helper
from onnx import version_converter
import numpy as np
import onnxruntime.backend
torch2onnx_type_mapping = {
"Float": TensorProto.FLOAT,
"UINT8": TensorProto.UINT8,
"INT8": TensorProto.INT8,
"UINT16": TensorProto.UINT16,
"INT16": TensorProto.INT16,
"INT32": TensorProto.INT32,
"Long": TensorProto.INT64,
"Bool": TensorProto.BOOL,
"Double": TensorProto.DOUBLE,
}
num2type_mapping = {
"1": "Float",
"2": "UINT8",
"3": "INT8",
"4": "UINT16",
"5": "INT16",
"6": "INT32",
"7": "Long",
"9": "Bool",
"11": "Double",
}
def _extract_shape(value_info):
shape_dict = OrderedDict()
for info in value_info:
name = info.name
shape_type = str(info.type.tensor_type.elem_type)
shape = info.type.tensor_type.shape
shape_list = []
for d in shape.dim:
shape_list.append(d.dim_value)
shape_dict[name] = (shape_type, shape_list)
return shape_dict
class OperatorNode(object):
def __init__(self, node):
import torchpruner.graph as g
# define the OperatorNode data structure
outputs = list(node.outputs())
self.name = None
op_kind = node.kind().split("::")
self.type = op_kind[1]
self.protocal = op_kind[0]
self.obj_list = node.scopeName().split(".")
if len(self.obj_list) == 1 and self.obj_list[0] == "":
self.obj_list = ["self"]
self.name = ".".join(self.obj_list)
self.name += "."
self.name += self.type
self.params = OrderedDict()
self.device = "CPU"
attlist = list(node.attributeNames())
for i in range(0, len(attlist)):
if isinstance(node[attlist[i]], torch.Tensor):
self.params[attlist[i]] = node[attlist[i]].numpy()
else:
self.params[attlist[i]] = node[attlist[i]]
# the operator node will be filled at the build graph
self.in_data: g.DataNode = []
self.out_data: g.DataNode = []
def set_device(self, device):
self.device = device.upper()
def __str__(self):
return_string = ""
for node in self.out_data:
return_string += str(node)
return_string += ", "
if len(return_string) != 0:
return_string = return_string[:-2]
return_string += " = "
return_string += self.protocal
return_string += "::"
return_string += self.type
return_string += "["
for key in self.params.keys():
return_string += key
return_string += "="
return_string += str(self.params[key])
return_string += ", "
if return_string[-1] != "[":
return_string = return_string[:-2]
return_string += "]("
for node in self.in_data:
return_string += "%" + node.name
return_string += ", "
if return_string[-1] != "(":
return_string = return_string[:-2]
return_string += ")"
return_string += ", scope: "
return_string += ".".join(self.obj_list)
return return_string
def __repr__(self):
return self.__str__()
def rank(self, node):
out_nodes = self.out_data
for i in range(0, len(out_nodes)):
out_node = out_nodes[i]
if id(node) == id(out_node):
return "out", i
in_nodes = self.in_data
for i in range(0, len(in_nodes)):
in_node = in_nodes[i]
if id(node) == id(in_node):
return "in", i
def flops(self):
return 0
def fill_shape(self):
need_fill = False
out_data_nodes = self.out_data
for node in out_data_nodes:
if node.kind != "NoneType" and node.type() is None:
need_fill = True
break
if need_fill:
out_data_node_names = []
for node in out_data_nodes:
out_data_node_names.append(node.name)
in_data_nodes = self.in_data
in_data_node_names = []
in_data_nodes_protos = []
for node in in_data_nodes:
in_data_node_names.append(node.name)
in_data_nodes_protos.append(
helper.make_tensor_value_info(
node.name,
torch2onnx_type_mapping[node.type()],
list(node.size()),
)
)
operator_params = {}
for key in self.params:
if isinstance(self.params[key], np.ndarray):
operator_params[key] = numpy_helper.from_array(self.params[key])
else:
operator_params[key] = self.params[key]
node_def = helper.make_node(
self.type, # node name
in_data_node_names, # inputs
out_data_node_names, # outputs
**operator_params
)
graph_def = helper.make_graph(
[node_def], "node-graph", in_data_nodes_protos, []
)
model = helper.make_model(
graph_def,
producer_name="node-model",
opset_imports=[helper.make_opsetid("", 11)],
)
try:
inferred_model = shape_inference.infer_shapes(model)
except Exception as e:
print(e)
print(model)
value_info = inferred_model.graph.value_info
shape_dict = _extract_shape(value_info)
shape_dict_keys = list(shape_dict.keys())
for i in range(0, len(out_data_node_names)):
name = out_data_node_names[i]
node = out_data_nodes[i]
if node.type() is None:
if name not in shape_dict_keys:
raise RuntimeError(
"Fail to predict the shape on operator name: '"
+ self.name
+ "', type: '"
+ self.type
+ "'"
)
else:
node._type = num2type_mapping[shape_dict[name][0]]
node._size = shape_dict[name][1]
def fill_value(self):
# fix the torch operator error
if self.type == "Equal":
if len(self.out_data) != 0:
self.out_data[0]._type = "Bool"
out_data_nodes = self.out_data
out_data_node_names = []
out_data_node_protos = []
for node in out_data_nodes:
if node.kind == "NoneType":
return
out_data_node_names.append(node.name)
out_data_node_protos.append(
helper.make_tensor_value_info(
node.name, torch2onnx_type_mapping[node.type()], list(node.size())
)
)
in_data_nodes = self.in_data
in_data_node_names = []
in_data_node_protos = []
feed_dict = {}
inputs = []
for node in in_data_nodes:
if node.kind == "NoneType":
continue
in_data_node_names.append(node.name)
in_data_node_protos.append(
helper.make_tensor_value_info(
node.name, torch2onnx_type_mapping[node.type()], list(node.size())
)
)
feed_dict[node.name] = node.data
inputs.append(node.data)
operator_params = {}
for key in self.params:
if isinstance(self.params[key], np.ndarray):
operator_params[key] = numpy_helper.from_array(self.params[key])
else:
operator_params[key] = self.params[key]
node_def = helper.make_node(
self.type, # node name
in_data_node_names, # inputs
out_data_node_names, # outputs
**operator_params
)
graph_def = helper.make_graph(
[node_def], "node-graph", in_data_node_protos, out_data_node_protos
)
model = helper.make_model(
graph_def,
producer_name="node-model",
opset_imports=[helper.make_opsetid("", 11)],
)
onnx.checker.check_model(model)
s = onnx._serialize(model)
sess = onnxruntime.backend.prepare(s, device=self.device)
results = onnxruntime.backend.run(sess, inputs)
for i in range(0, len(out_data_node_names)):
# name=out_data_node_names[i]
out_data_nodes[i].data = results[i]
if out_data_nodes[i]._size != list(results[i].shape):
out_data_nodes[i]._size = list(results[i].shape)
def analysis(self, node, mask):
raise NotImplementedError("The analysis is not complete")
| en | 0.418201 | # define the OperatorNode data structure # the operator node will be filled at the build graph # node name # inputs # outputs # fix the torch operator error # node name # inputs # outputs # name=out_data_node_names[i] | 2.173624 | 2 |
tests/mixins.py | ShadowJonathan/txredisapi | 104 | 6630641 | # coding: utf-8
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
from twisted.internet import defer
import os
s = os.getenv("DBREDIS_1_PORT_6379_TCP_ADDR")
if s is not None:
REDIS_HOST = "dbredis_1"
else:
REDIS_HOST = "localhost"
REDIS_PORT = 6379
class RedisVersionCheckMixin(object):
@defer.inlineCallbacks
def checkVersion(self, major, minor, patch=0):
d = yield self.db.info("server")
if u'redis_version' not in d:
defer.returnValue(False)
ver = d[u'redis_version']
self.redis_version = ver
ver_list = [int(x) for x in ver.split(u'.')]
if len(ver_list) < 2:
defer.returnValue(False)
if len(ver_list) == 2:
ver_list.append(0)
if ver_list[0] > major:
defer.returnValue(True)
elif ver_list[0] == major:
if ver_list[1] > minor:
defer.returnValue(True)
elif ver_list[1] == minor:
if ver_list[2] >= patch:
defer.returnValue(True)
defer.returnValue(False)
class Redis26CheckMixin(RedisVersionCheckMixin):
def is_redis_2_6(self):
"""
Returns true if the Redis version >= 2.6
"""
return self.checkVersion(2, 6)
def _skipCheck(self):
if not self.redis_2_6:
skipMsg = "Redis version < 2.6 (found version: %s)"
raise unittest.SkipTest(skipMsg % self.redis_version)
| # coding: utf-8
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
from twisted.internet import defer
import os
s = os.getenv("DBREDIS_1_PORT_6379_TCP_ADDR")
if s is not None:
REDIS_HOST = "dbredis_1"
else:
REDIS_HOST = "localhost"
REDIS_PORT = 6379
class RedisVersionCheckMixin(object):
@defer.inlineCallbacks
def checkVersion(self, major, minor, patch=0):
d = yield self.db.info("server")
if u'redis_version' not in d:
defer.returnValue(False)
ver = d[u'redis_version']
self.redis_version = ver
ver_list = [int(x) for x in ver.split(u'.')]
if len(ver_list) < 2:
defer.returnValue(False)
if len(ver_list) == 2:
ver_list.append(0)
if ver_list[0] > major:
defer.returnValue(True)
elif ver_list[0] == major:
if ver_list[1] > minor:
defer.returnValue(True)
elif ver_list[1] == minor:
if ver_list[2] >= patch:
defer.returnValue(True)
defer.returnValue(False)
class Redis26CheckMixin(RedisVersionCheckMixin):
def is_redis_2_6(self):
"""
Returns true if the Redis version >= 2.6
"""
return self.checkVersion(2, 6)
def _skipCheck(self):
if not self.redis_2_6:
skipMsg = "Redis version < 2.6 (found version: %s)"
raise unittest.SkipTest(skipMsg % self.redis_version)
| en | 0.826061 | # coding: utf-8 # Copyright 2009 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Returns true if the Redis version >= 2.6 | 2.177031 | 2 |
google.py | ramanirudh/translate_using_googletrans | 1 | 6630642 |
## Program to translate using google translate
## Do not continuously translate. Give a break of atleast 30 minutes after every job
## Uses googletrans package available in python
## <NAME> ## 30 Dec 2019
## Filenames are hard-coded. To translate a file, please search "Input" comment line and type filename in the "open" function call. Do the same for "Output". Save the program and run in python
## I ran the program using python2, may work with python3 also
import io
from googletrans import Translator
translator = Translator()
## Input
file = io.open('xad','r')
## Output
outfile = io.open('xad_google.hi','w',encoding='utf-8')
for each in file:
trans = translator.translate(each,dest ='hi')
#print(trans.text)
outfile.write(trans.text)
outfile.write(u'\n')
outfile.close()
file.close()
|
## Program to translate using google translate
## Do not continuously translate. Give a break of atleast 30 minutes after every job
## Uses googletrans package available in python
## <NAME> ## 30 Dec 2019
## Filenames are hard-coded. To translate a file, please search "Input" comment line and type filename in the "open" function call. Do the same for "Output". Save the program and run in python
## I ran the program using python2, may work with python3 also
import io
from googletrans import Translator
translator = Translator()
## Input
file = io.open('xad','r')
## Output
outfile = io.open('xad_google.hi','w',encoding='utf-8')
for each in file:
trans = translator.translate(each,dest ='hi')
#print(trans.text)
outfile.write(trans.text)
outfile.write(u'\n')
outfile.close()
file.close()
| en | 0.712814 | ## Program to translate using google translate ## Do not continuously translate. Give a break of atleast 30 minutes after every job ## Uses googletrans package available in python ## <NAME> ## 30 Dec 2019 ## Filenames are hard-coded. To translate a file, please search "Input" comment line and type filename in the "open" function call. Do the same for "Output". Save the program and run in python ## I ran the program using python2, may work with python3 also ## Input ## Output #print(trans.text) | 2.995114 | 3 |
frappe/core/doctype/role_profile/test_role_profile.py | ssuda777/frappe | 1 | 6630643 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
import frappe
import unittest
test_dependencies = ['Role']
class TestRoleProfile(unittest.TestCase):
def test_make_new_role_profile(self):
new_role_profile = frappe.get_doc(dict(doctype='Role Profile', role_profile='Test 1')).insert()
self.assertEqual(new_role_profile.role_profile, 'Test 1')
# add role
new_role_profile.append("roles", {
"role": '_Test Role 2'
})
new_role_profile.save()
self.assertEqual(new_role_profile.roles[0].role, '_Test Role 2')
# clear roles
new_role_profile.roles = []
new_role_profile.save()
self.assertEqual(new_role_profile.roles, [])
| # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
import frappe
import unittest
test_dependencies = ['Role']
class TestRoleProfile(unittest.TestCase):
def test_make_new_role_profile(self):
new_role_profile = frappe.get_doc(dict(doctype='Role Profile', role_profile='Test 1')).insert()
self.assertEqual(new_role_profile.role_profile, 'Test 1')
# add role
new_role_profile.append("roles", {
"role": '_Test Role 2'
})
new_role_profile.save()
self.assertEqual(new_role_profile.roles[0].role, '_Test Role 2')
# clear roles
new_role_profile.roles = []
new_role_profile.save()
self.assertEqual(new_role_profile.roles, [])
| en | 0.720451 | # -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies and Contributors # See license.txt # add role # clear roles | 2.48884 | 2 |
googlecode-issues-exporter/issues.py | ballschin52/support-tools | 41 | 6630644 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to an issue service.
"""
import collections
import datetime
import json
import re
import sys
import HTMLParser
# Regular expression used by Google Code for auto-linking issue references,
# e.g. "issue #8" or "bug5".
GC_ISSUE_REF_RE = re.compile(r"""
(?P<prefix>\b(issue|bug)\s*)
(?P<project_name>\s+[-a-z0-9]+[:\#])?
(?P<number_sign>\#?)
(?P<issue_id>\d+)\b""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# Regular expression to match issue references generated by this tool and
# match GitHub's system. (e.g. "- **Blocking**: #1, #2, #3")
EX_ISSUE_REF_RE = re.compile(
r"- \*\*(?P<tag>([^\*]+))\*\*: #(?P<issues>([^\n]+))")
def RemapIssueIds(comment, id_mapping):
"""Rewrite a comment's text based on an ID mapping.
Args:
comment: A string with the comment text. e.g. 'Closes issue #42'.
id_mapping: A dictionary mapping Google Code to GitHub issue IDs.
e.g. { '42': '142' }
Returns:
The rewritten comment text.
"""
def replaceGoogleCodeIssueReferences(match):
# Ignore references to other projects.
if match.group('project_name'):
return match.group()
# Ignore issues not found in the ID mapping.
google_code_id = match.group('issue_id')
if not id_mapping or google_code_id not in id_mapping:
return match.group()
github_id = id_mapping[google_code_id]
return match.group().replace(google_code_id, github_id)
def replaceExportedIssueReferences(match):
# Parse the issues list and regenerate.
gc_issue_ids = match.group('issues').split(", #")
gh_issue_ids = []
for gc_issue_id in gc_issue_ids:
if id_mapping and gc_issue_id in id_mapping:
gh_issue_ids.append(id_mapping[gc_issue_id])
else:
gh_issue_ids.append(gc_issue_id)
return "- **%s**: #%s" % (
match.group('tag'), ", #".join(gh_issue_ids))
comment = GC_ISSUE_REF_RE.sub(replaceGoogleCodeIssueReferences, comment)
comment = EX_ISSUE_REF_RE.sub(replaceExportedIssueReferences, comment)
return comment
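# A minimal usage sketch for RemapIssueIds; the ID mapping below is
# hypothetical and would normally be built up while exporting issues.
#
#   >>> RemapIssueIds("Duplicate of issue #42.", {"42": "142"})
#   'Duplicate of issue #142.'
#   >>> RemapIssueIds("- **Blocking**: #7, #8", {"7": "17"})
#   '- **Blocking**: #17, #8'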
def _ParseIssueReferences(issue_ref_list):
"""Parses a list of issue references into a tuple of IDs added/removed.
For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ])
NOTE: We don't support cross-project issue references. Rather we
just assume the issue reference is within the same project.
"""
added = []
removed = []
for proj in issue_ref_list:
parts = proj.split(":")
proj_id = parts[1] if len(parts) >= 2 else proj[1:]
if proj[0] != "-":
added.append(proj_id)
else:
removed.append(proj_id)
return added, removed
class IdentityDict(dict):
def __missing__(self, key):
return key
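# IdentityDict is used as the fallback user map: unknown keys map to
# themselves. The addresses below are placeholders.
#
#   >>> users = IdentityDict({"alice@example.com": "gh-alice"})
#   >>> users["alice@example.com"]
#   'gh-alice'
#   >>> users["bob@example.com"]
#   'bob@example.com'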
def TryFormatDate(date):
"""Attempt to clean up a timestamp date."""
try:
if date.endswith(":"):
date = date[:len(date) - 1]
datetime_version = datetime.datetime.strptime(
date, "%Y-%m-%dT%H:%M:%S.%fZ")
return str(datetime_version)
except ValueError as ve:
return date
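# Example behaviour of TryFormatDate: well-formed Google Code timestamps are
# reformatted, anything else is passed through unchanged.
#
#   >>> TryFormatDate("2014-03-02T21:20:06.000Z")
#   '2014-03-02 21:20:06'
#   >>> TryFormatDate("Last week")
#   'Last week'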
def WrapText(text, max):
"""Inserts a newline if any line of a file is > max chars.
Note that the newline is inserted at the first whitespace
character, so there may be lines longer than max.
"""
char_list = list(text)
last_linebreak = 0
for i in range(0, len(char_list)):
if char_list[i] == '\n' or char_list[i] == '\r':
last_linebreak = i
if i - last_linebreak > max and char_list[i] == ' ':
# Replace ' ' with '\n'
char_list.pop(i)
char_list.insert(i, '\n')
last_linebreak = i
return ''.join(char_list)
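# Example behaviour of WrapText with a small wrap width: the first space that
# follows a run longer than `max` characters becomes a newline.
#
#   >>> WrapText("aaaa bbbb cccc", 8)
#   'aaaa bbbb\ncccc'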
class Error(Exception):
"""Base error class."""
class InvalidUserError(Error):
"""Error for an invalid user."""
class ProjectNotFoundError(Error):
"""Error for a non-existent project."""
class ServiceError(Error):
"""Error when communicating with the issue or user service."""
class UserService(object):
"""Abstract user operations.
Handles user operations on an user API.
"""
def IsUser(self, username):
"""Checks if the user exists.
Args:
username: The username to check.
Returns:
True if the username exists.
"""
raise NotImplementedError()
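# A concrete UserService wraps whatever user API the target issue tracker
# exposes. The class below is only an illustrative sketch backed by an
# in-memory set; it is not a client for any particular service.
class _StaticUserService(UserService):
  """Illustrative UserService that accepts a fixed set of usernames."""
  def __init__(self, known_usernames):
    self._known_usernames = set(known_usernames)
  def IsUser(self, username):
    return username in self._known_usernames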
class GoogleCodeIssue(object):
"""Google Code issue.
Handles parsing and viewing a Google Code issue.
"""
def __init__(self, issue, project_name, user_map):
"""Initialize the GoogleCodeIssue.
Args:
issue: The Google Code Issue as a dictionary.
project_name: The name of the project the issue belongs to.
user_map: A map from Google Code usernames to issue service names.
"""
self._issue = issue
self._project_name = project_name
self._user_map = user_map
def GetProjectName(self):
"""Returns the project name."""
return self._project_name
def GetUserMap(self):
"""Returns the user map."""
return self._user_map
def GetOwner(self):
"""Get the owner username of a Google Code issue.
This will ALWAYS be the person requesting the issue export.
"""
return self._user_map["user_requesting_export"]
def GetContentUpdatedOn(self):
"""Get the date the content was last updated from a Google Code issue.
Returns:
The time stamp when the issue content was last updated
"""
return self._issue["updated"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code issue.
Returns:
The time stamp when the issue content was created
"""
return self._issue["published"]
def GetId(self):
"""Get the id from a Google Code issue.
Returns:
The issue id
"""
return self._issue["id"]
def GetLabels(self):
"""Get the labels from a Google Code issue.
Returns:
A list of the labels of this issue.
"""
labels = self._issue.get("labels", [])
# Add status as a label.
if "status" in self._issue:
labels.append("Status-" + self._issue["status"])
return labels
def GetKind(self):
"""Get the kind from a Google Code issue.
Returns:
The issue kind, if none is found defaults to 'Defect'
"""
types = [t for t in self.GetLabels() if "Type-" in t]
if types:
return types[0][len("Type-"):]
return "Defect"
def GetPriority(self):
"""Get the priority from a Google Code issue.
Returns:
The issue priority, if none is found defaults to 'Medium'
"""
priorities = [p for p in self.GetLabels() if "Priority-" in p]
if priorities:
return priorities[0][len("Priority-"):]
return "Medium"
def GetAuthor(self):
"""Get the author's username of a Google Code issue.
Returns:
The Google Code username that the issue is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._issue:
return None
author = self._issue["author"]["name"]
return self._user_map[author]
def GetStatus(self):
"""Get the status from a Google Code issue.
Returns:
The issue status
"""
status = self._issue["status"].lower()
if status == "accepted":
status = "open"
return status
def GetTitle(self):
"""Get the title from a Google Code issue.
Returns:
The issue title
"""
title = self._issue["title"]
# It is not possible to create a Google Code issue without a title, but you
# can edit an issue to remove its title afterwards.
if title.isspace():
title = "<empty title>"
return title
def GetUpdatedOn(self):
"""Get the date the issue was last updated.
Returns:
The time stamp when the issue was last updated
"""
return self.GetCreatedOn()
def GetComments(self):
"""Get the list of comments for the issue (if any).
Returns:
The list of comments attached to the issue
"""
# The 0th comment is the issue's description. Also, filter out
# any deleted comments.
comments = self._issue["comments"]["items"][1:]
return [c for c in comments if not "deletedBy" in c]
def IsOpen(self):
"""Check if an issue is marked as open.
Returns:
True if the issue was open.
"""
return "state" in self._issue and self._issue["state"] == "open"
def GetDescription(self):
"""Returns the Description of the issue."""
# Just return the description of the underlying comment. However,
# we fudge a few things since metadata is stored differently for
# "the issue" (i.e. comment #0) and other comments.
comment_0_data = self._issue["comments"]["items"][0]
googlecode_comment = GoogleCodeComment(self, comment_0_data)
issue_description = googlecode_comment.GetDescription()
# Be careful not to run afoul of issue reference rewriting...
issue_header = "Originally reported on Google Code with ID %s\n" % (
self.GetId())
return issue_header + issue_description
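# A rough sketch of how GoogleCodeIssue is driven. The dictionary below is an
# abbreviated, hypothetical issue; real data comes from LoadIssueData and the
# user map from LoadUserData (both defined further down in this module).
#
#   issue_json = {
#       "id": 1,
#       "title": "Crash on startup",
#       "state": "open",
#       "status": "Accepted",
#       "published": "2014-03-02T21:20:06.000Z",
#       "updated": "2014-03-02T21:20:06.000Z",
#       "labels": ["Type-Defect", "Priority-High"],
#       "comments": {"items": [{"id": 0,
#                               "author": {"name": "reporter@example.com"},
#                               "content": "It crashes.",
#                               "published": "2014-03-02T21:20:06.000Z"}]},
#   }
#   user_map = IdentityDict({"user_requesting_export": "gh-owner"})
#   issue = GoogleCodeIssue(issue_json, "my-project", user_map)
#   issue.GetKind()      # "Defect"
#   issue.GetPriority()  # "High"
#   issue.IsOpen()       # True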
class GoogleCodeComment(object):
"""Google Code Comment.
Handles parsing and viewing a Google Code Comment.
"""
def __init__(self, googlecode_issue, comment, id_mapping=None):
"""Initialize the GoogleCodeComment.
Args:
googlecode_issue: A GoogleCodeIssue instance.
comment: The Google Code Comment as dictionary.
id_mapping: Mapping from Google Code issue IDs to their new locations.
"""
self._comment = comment
self._googlecode_issue = googlecode_issue
self._id_mapping = id_mapping
def GetContent(self):
"""Get the content from a Google Code comment.
Returns:
The issue comment
"""
return self._comment["content"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code comment.
Returns:
The time stamp when the issue comment content was created
"""
return self._comment["published"]
def GetId(self):
"""Get the id from a Google Code comment.
Returns:
The issue comment id
"""
return self._comment["id"]
def GetLabels(self):
"""Get the labels modified with the comment."""
if "updates" in self._comment:
if "labels" in self._comment["updates"]:
return self._comment["updates"]["labels"]
return []
def GetIssue(self):
"""Get the GoogleCodeIssue this comment belongs to.
Returns:
The issue id
"""
return self._googlecode_issue
def GetUpdatedOn(self):
"""Get the date the issue comment content was last updated.
Returns:
The time stamp when the issue comment content was last updated
"""
return self.GetCreatedOn()
def GetAuthor(self):
"""Get the author's username of a Google Code issue comment.
Returns:
The Google Code username that the issue comment is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._comment:
return None
author = self._comment["author"]["name"]
return self.GetIssue().GetUserMap()[author]
def GetDescription(self):
"""Returns the Description of the comment."""
author = self.GetAuthor()
comment_date = self.GetCreatedOn()
comment_text = self.GetContent()
comment_updates = {}
if "updates" in self._comment:
comment_updates = self._comment["updates"]
body = ""
if comment_text:
      # Google Takeout includes escaped HTML entities such as &gt; and &aacute;.
html_parser = HTMLParser.HTMLParser()
comment_text = html_parser.unescape(comment_text)
# Remove <b> tags, which Codesite automatically includes if issue body
# is based on a prompt.
comment_text = comment_text.replace("<b>", "")
comment_text = comment_text.replace("</b>", "")
# 82 instead of 80 in case it was already wrapped...
comment_text = WrapText(comment_text, 82)
body += "```\n" + comment_text + "\n```\n\n"
footer = "Reported by `%s` on %s\n" % (
author, TryFormatDate(comment_date))
if "status" in comment_updates:
footer += "- **Status changed**: `%s`\n" % (comment_updates["status"])
footer += self._GetLabelInfo()
footer += self._GetLinksToOtherIssues()
if "mergedInto" in comment_updates and comment_updates["mergedInto"]:
footer += "- **Merged into**: #%s\n" % (
comment_updates["mergedInto"])
# Add references to attachments as appropriate. (Do this last since it
# inserts a horizontal rule.)
footer += self._GetAttachmentInfo()
raw_comment_body = body + footer
return RemapIssueIds(raw_comment_body, self._id_mapping)
def _GetLabelInfo(self):
"""Returns Markdown text for a comment's labels as appropriate."""
if not self.GetLabels():
return ""
labels_added = []
labels_removed = []
for label in self.GetLabels():
if label.startswith("-"):
labels_removed.append(label[1:])
else:
labels_added.append(label)
label_info = ""
if labels_added:
label_info += "- **Labels added**: %s\n" % (", ".join(labels_added))
if labels_removed:
label_info += "- **Labels removed**: %s\n" % (", ".join(labels_removed))
return label_info
def _GetLinksToOtherIssues(self):
"""Returns Markdown text for a comment's links to other issues."""
if "updates" not in self._comment:
return ""
updates = self._comment["updates"]
ref_info = ""
if "blocking" in updates:
added, removed = _ParseIssueReferences(updates["blocking"])
if added:
ref_info += "- **Blocking**: #" + ", #".join(added) + "\n"
if removed:
ref_info += "- **No longer blocking**: #" + ", #".join(removed) + "\n"
if "blockedOn" in updates:
added, removed = _ParseIssueReferences(updates["blockedOn"])
if added:
ref_info += "- **Blocked on**: #" + ", #".join(added) + "\n"
if removed:
ref_info += ("- **No longer blocked on**: #" +
", #".join(removed) + "\n")
return ref_info
def _GetAttachmentInfo(self):
"""Returns Markdown text for a comment's attachments as appropriate."""
attachmentLines = []
attachments = self._comment["attachments"] if "attachments" in self._comment else []
for attachment in attachments:
if "isDeleted" in attachment:
# Deleted attachments won't be found on the issue mirror.
continue
link = "https://storage.googleapis.com/google-code-attachments/%s/issue-%d/comment-%d/%s" % (
self.GetIssue().GetProjectName(), self.GetIssue().GetId(),
self.GetId(), attachment["fileName"])
def has_extension(extension):
return attachment["fileName"].lower().endswith(extension)
is_image_attachment = False
for extension in [".png", ".jpg", ".jpeg", ".bmp", ".tif", ".gif"]:
is_image_attachment |= has_extension(".png")
if is_image_attachment:
line = " * *Attachment: %s<br>*" % (
attachment["fileName"], attachment["fileName"], link)
else:
line = " * *Attachment: [%s](%s)*" % (attachment["fileName"], link)
attachmentLines.append(line)
if len(attachmentLines) > 0:
return "\n<hr>\n" + "\n".join(attachmentLines)
return ""
class IssueService(object):
"""Abstract issue operations.
Handles creating and updating issues and comments on a user API.
"""
def GetIssues(self, state="open"):
"""Gets all of the issue for the repository with the given state.
Args:
state: The issue state to fetch; either 'open' or 'closed'.
Returns:
The list of all of the issues with the given state.
Raises:
IOError: An error occurred accessing previously created issues.
"""
raise NotImplementedError()
def GetComments(self, issue_number):
"""Gets all the comments for the issue with the given ID."""
raise NotImplementedError()
def CreateIssue(self, googlecode_issue):
"""Creates an issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
ServiceError: An error occurred creating the issue.
"""
raise NotImplementedError()
def EditIssue(self, googlecode_issue, issue_number):
"""Edits an existing issue."""
raise NotImplementedError()
def CloseIssue(self, issue_number):
"""Closes an issue.
Args:
issue_number: The issue number.
"""
raise NotImplementedError()
def CreateComment(self, issue_number, googlecode_comment):
"""Creates a comment on an issue.
Args:
issue_number: The issue number.
googlecode_comment: An instance of GoogleCodeComment
"""
raise NotImplementedError()
def EditComment(self, googlecode_issue, googlecode_comment, comment_number):
"""Edits an existing comment."""
raise NotImplementedError()
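# Illustrative sketch (not one of the real backends): a minimal in-memory
# IssueService, handy for dry-running the exporter without talking to a
# remote service. The dict shapes returned by GetIssues and GetComments are
# assumptions inferred from how IssueExporter uses them ("number", "title",
# "state", "comments" keys and comment "id"s).
class InMemoryIssueService(IssueService):
  def __init__(self):
    self._issues = {}    # issue number -> {"number", "title", "state", "comments"}
    self._comments = {}  # issue number -> list of rendered comment bodies
  def GetIssues(self, state="open"):
    return [i for i in self._issues.values() if i["state"] == state]
  def GetComments(self, issue_number):
    return [{"id": n} for n, _ in enumerate(self._comments[issue_number])]
  def CreateIssue(self, googlecode_issue):
    number = len(self._issues) + 1
    self._issues[number] = {"number": number, "state": "open",
                            "title": googlecode_issue.GetTitle(), "comments": 0}
    self._comments[number] = []
    return number
  def CreateComment(self, issue_number, googlecode_comment):
    self._comments[issue_number].append(googlecode_comment.GetDescription())
    self._issues[issue_number]["comments"] += 1
  def CloseIssue(self, issue_number):
    self._issues[issue_number]["state"] = "closed"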
def LoadIssueData(issue_file_path, project_name):
"""Loads issue data from a file.
Args:
issue_file_path: path to the file to load
project_name: name of the project to load
Returns:
Issue data as a list of dictionaries.
Raises:
ProjectNotFoundError: the project_name was not found in the file.
"""
with open(issue_file_path) as user_file:
user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name == project["name"]:
return project["issues"]["items"]
raise ProjectNotFoundError("Project %s not found" % project_name)
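# Illustrative sketch: the Takeout layout assumed by LoadIssueData above is a
# top-level "projects" list whose entries carry a "name" plus "issues" ->
# "items". The file shape here is inferred from the lookup code and written
# to a temporary file only so the function can be exercised end to end.
def _example_load_issue_data():
  import tempfile
  takeout = {"projects": [
    {"name": "my-project",
     "issues": {"items": [{"id": 1, "title": "First issue"}]}},
  ]}
  with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
    json.dump(takeout, f)
  return LoadIssueData(f.name, "my-project")  # -> [{"id": 1, "title": "First issue"}]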
def LoadUserData(user_file_path, user_service):
"""Loads user data from a file. If not present, the user name will
just return whatever is passed to it.
Args:
user_file_path: path to the file to load
user_service: an instance of UserService
"""
identity_dict = IdentityDict()
if not user_file_path:
return identity_dict
with open(user_file_path) as user_data:
user_json = user_data.read()
user_map = json.loads(user_json)["users"]
for username in user_map.values():
if not user_service.IsUser(username):
raise InvalidUserError("%s is not a User" % username)
identity_dict.update(user_map)
return identity_dict
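# Illustrative sketch: with no user file, LoadUserData falls back to an
# IdentityDict, so unmapped Google Code usernames simply pass through.
def _example_load_user_data():
  user_map = LoadUserData(None, user_service=None)  # user_service is unused here
  return user_map["alice@example.com"]  # -> "alice@example.com"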
class IssueExporter(object):
"""Issue Migration.
Handles uploading issues from Google Code to an issue service.
"""
def __init__(self, issue_service, user_service, issue_json_data,
project_name, user_map):
"""Initialize the IssueExporter.
Args:
issue_service: An instance of IssueService.
user_service: An instance of UserService.
project_name: The name of the project to export to.
issue_json_data: A data object of issues from Google Code.
user_map: A map from user email addresses to service usernames.
"""
self._issue_service = issue_service
self._user_service = user_service
self._issue_json_data = issue_json_data
self._project_name = project_name
self._user_map = user_map
# Specialized index of issues to quickly check what has been migrated to
# GitHub and, if so, determine its new issue ID. See Init(...).
self._issue_index = {}
self._prefix = "" # Output only.
self._issue_total = 0
self._issue_number = 0
self._comment_number = 0
self._comment_total = 0
self._skipped_issues = 0
# Mapping from Google Code issue ID to destination service issue ID.
self._id_mapping = {}
def Init(self, require_all_issues_exported=False):
"""Initialize the needed variables.
Args:
require_all_issues_exported: Bool. Require that all issues have
been exported. Used to ensure that rewriting comments won't fail.
"""
print "Building issue index."
self._issue_index = {}
index = self._issue_index
for issue in self._issue_json_data:
gc_issue = GoogleCodeIssue(issue, self._project_name, self._user_map)
if gc_issue.GetTitle() not in index:
index[gc_issue.GetTitle()] = []
index[gc_issue.GetTitle()].append({
"googlecode_id": gc_issue.GetId(),
"exported": False,
"exported_id": -1,
"comment_count": -1,
})
print "Determining which issues have already been exported."
open_issues = self._issue_service.GetIssues("open")
closed_issues = self._issue_service.GetIssues("closed")
all_exported_issues = open_issues + closed_issues
# Sort issues by GitHub ID. Since Google Code issues are exported in order,
# we can use the exported issues' chronology to resolve ambiguities between
# issues that share the same title. (GitHub issue number == ID.)
all_exported_issues = sorted(all_exported_issues,
key=lambda issue: issue["number"])
for exported_issue in all_exported_issues:
exported_issue_id = exported_issue["number"]
exported_issue_title = exported_issue["title"]
if exported_issue_title not in index:
print "Warning: GitHub issue #%s '%s' not in Google Takeout dump." % (
exported_issue_id, exported_issue_title)
continue
# Mark the issue as exported.
for idx in range(0, len(index[exported_issue_title])):
if not index[exported_issue_title][idx]["exported"]:
index[exported_issue_title][idx]["exported"] = True
index[exported_issue_title][idx]["exported_id"] = exported_issue_id
index[exported_issue_title][idx]["comment_count"] = (
exported_issue["comments"])
break
if idx >= len(index[exported_issue_title]):
print "Warning: Couldn't find the %sth issue titled '%s'." % (
idx, exported_issue_title)
# Build the ID map based on previously created issues. Only used if
# rewriting comments.
if not require_all_issues_exported:
return
print "Confirming all issues have been exported."
for title in index:
for issue in index[title]:
self._id_mapping[str(issue["googlecode_id"])] = str(issue["exported_id"])
if not issue["exported"]:
raise Exception(
"Issue #%s '%s' not found. Can't rewrite comments." % (
issue["googlecode_id"], title))
print "len(id_map) = %s, with %s total issues" % (
len(self._id_mapping), len(self._issue_json_data))
if len(self._id_mapping) < len(self._issue_json_data):
raise Exception("Not all issues have been exported.")
def _GetExportedIssue(self, googlecode_issue):
"""Return metadata about the exported Google Code issue."""
index = self._issue_index
issue_title = googlecode_issue.GetTitle()
issue_id = googlecode_issue.GetId()
if issue_title not in index:
raise Exception("Google Code issue '%s' not expected to be exported." % (
issue_title))
for idx in range(0, len(index[issue_title])):
if index[issue_title][idx]["googlecode_id"] == issue_id:
return index[issue_title][idx]
raise Exception("Unable to find Google Code issue #%s." % (issue_id))
def _HasIssueBeenExported(self, googlecode_issue):
"""Returns whether or not a Google Code issue has been exported."""
export_metadata = self._GetExportedIssue(googlecode_issue)
return export_metadata["exported"]
def _UpdateProgressBar(self):
"""Update issue count 'feed'.
This displays the current status of the script to the user.
"""
feed_string = ("\r%sIssue: %d/%d -> Comment: %d/%d " %
(self._prefix, self._issue_number, self._issue_total,
self._comment_number, self._comment_total))
sys.stdout.write(feed_string)
sys.stdout.flush()
def _CreateIssue(self, googlecode_issue):
"""Converts an issue from Google Code to an issue service.
This will take the Google Code issue and create a corresponding issue on
the issue service. If the issue on Google Code was closed it will also
be closed on the issue service.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number assigned by the service.
"""
return self._issue_service.CreateIssue(googlecode_issue)
def _CreateComments(self, comments, issue_number, googlecode_issue):
"""Converts a list of issue comment from Google Code to an issue service.
This will take a list of Google Code issue comments and create
corresponding comments on an issue service for the given issue number.
Args:
comments: A list of comment data dictionaries from the Takeout dump.
issue_number: The issue number on the destination service.
googlecode_issue: The GoogleCodeIssue the comments belong to.
"""
self._comment_total = len(comments)
self._comment_number = 0
for comment in comments:
googlecode_comment = GoogleCodeComment(googlecode_issue, comment)
self._comment_number += 1
self._UpdateProgressBar()
self._issue_service.CreateComment(issue_number, googlecode_comment)
def _RewriteComments(self, googlecode_issue, exported_issue_number):
"""Rewrite all comments in the issue to update issue ID references.
Args:
googlecode_issue: The Google Code issue to rewrite.
exported_issue_number: The issue ID on the **destination** system.
"""
id_mapping = self._id_mapping
comments = googlecode_issue.GetComments()
self._prefix = "Rewriting "
self._comment_total = len(comments)
self._comment_number = 0
self._issue_service.EditIssue(googlecode_issue, exported_issue_number)
# Get existing comments from the destination, necessary because we don't
# know the IDs used on the output side. (GitHub uses timestamps :P)
existing_comments = self._issue_service.GetComments(exported_issue_number)
for comment_idx in range(0, len(comments)):
if comment_idx >= len(existing_comments):
print "\nError: More comments on Google Code than on dest service?"
print "Google Code #%s vs. dest service #%s (%s comments vs. %s)" % (
googlecode_issue.GetId(), exported_issue_number,
len(comments), len(existing_comments))
break
comment = comments[comment_idx]
comment_number = existing_comments[comment_idx]["id"]
gc_comment = GoogleCodeComment(googlecode_issue, comment, id_mapping)
self._comment_number += 1
self._UpdateProgressBar()
self._issue_service.EditComment(
exported_issue_number, gc_comment, comment_number)
def _FixBlockingBlockedOn(self, issue_json):
"""Fix the issue JSON object to normalize how blocking/blocked-on are used.
There is a bug in how Google Takeout exports blocking/blocked-on status.
Each comment may have an update with a list of added/removed
blocked/blocking issues. However, comment #0, the "original issue state"
does not contain this information.
However, the issue does contain summary information. (i.e. a union of
initial state and all comment updates.)
This function figures out what should be in comment #0 so everything
actually makes sense when rendered.
"""
# Issue references we add to comment #0
# - References that are removed later, but not explicitly added.
# (assumed to have been added on comment #0).
# - References that are in the summary, but not explicitly added.
# (assumed to have been added on comment #0).
def IssueRefToString(issue_ref):
return issue_ref["projectId"] + ":" + str(issue_ref["issueId"])
def GetUnionReferences(kind_name):
"""The initial issue reference IDs."""
references = []
if kind_name in issue_json:
for reference in issue_json[kind_name]:
references.append(IssueRefToString(reference))
references, _ = _ParseIssueReferences(references)
return references
def DesiredReferences(union_references, kind_name):
"""Returns the desired references on commeng #0 for the kind."""
current_list = [] # List of references as we simulate the comments.
desired_list = union_references[:] # The desired list to output.
issue_comments = issue_json["comments"]["items"]
for comment in issue_comments:
if "updates" not in comment:
continue
updates = comment["updates"]
if kind_name in updates:
added, removed = _ParseIssueReferences(updates[kind_name])
# If the reference was added in this comment, we don't need
# to add it to comment #0 since you'll "see" the addition.
for added_ref in added:
current_list.append(added_ref)
if added_ref in union_references and added_ref in desired_list:
desired_list.remove(added_ref)
# If the reference was removed in this comment AND it wasn't
# previously added by a comment, then we should add it to the
# output list. (We infer the issue was created with it.)
for removed_ref in removed:
if removed_ref not in union_references and (
removed_ref not in current_list):
desired_list.append(removed_ref)
return desired_list
def AddToComment0(issue_references, kind_name):
if not issue_references:
return
comment_0_data = issue_json["comments"]["items"][0]
if "updates" not in comment_0_data:
comment_0_data["updates"] = {}
comment_0_updates = comment_0_data["updates"]
if kind_name not in comment_0_updates:
comment_0_updates[kind_name] = []
comment_0_updates[kind_name].extend(
["???:" + iid for iid in issue_references])
starting_blocking = GetUnionReferences("blocking")
desired_blocking = DesiredReferences(starting_blocking, "blocking")
AddToComment0(desired_blocking, "blocking")
starting_blockedon = GetUnionReferences("blockedOn")
desired_blockedon = DesiredReferences(starting_blockedon, "blockedOn")
AddToComment0(desired_blockedon, "blockedOn")
return issue_json
def Start(self, rewrite_comments=False):
"""Start the issue export process.
Args:
rewrite_comments: Bool. If set will rewrite the comments for previously
exported issues. Used to fix export problems and remap issue IDs.
"""
print "Starting issue export for '%s'" % (self._project_name)
self._issue_total = len(self._issue_json_data)
self._comment_total = 0
self._issue_number = 0
self._comment_number = 0
self._skipped_issues = 0
last_issue_skipped = False # Only used for formatting output.
for issue in self._issue_json_data:
self._FixBlockingBlockedOn(issue)
googlecode_issue = GoogleCodeIssue(
issue, self._project_name, self._user_map)
issue_title = googlecode_issue.GetTitle()
short_issue_title = (
issue_title[:16] + '...') if len(issue_title) > 18 else issue_title
self._issue_number += 1
# Check if the issue has already been posted.
if self._HasIssueBeenExported(googlecode_issue):
export_metadata = self._GetExportedIssue(googlecode_issue)
print "%sGoogle Code issue #%s already exported with ID #%s." % (
("\n" if not last_issue_skipped else ""),
export_metadata["googlecode_id"],
export_metadata["exported_id"])
last_issue_skipped = True
self._skipped_issues = self._skipped_issues + 1
# Verify all comments are present.
issue_comments = googlecode_issue.GetComments()
num_issue_comments = len(issue_comments)
num_existing_comments = export_metadata["comment_count"]
if num_issue_comments > num_existing_comments:
for idx in range(num_existing_comments, num_issue_comments):
comment_data = issue_comments[idx]
googlecode_comment = GoogleCodeComment(
googlecode_issue, comment_data)
self._issue_service.CreateComment(
export_metadata["exported_id"], googlecode_comment)
print " Added missing comment #%d" % (idx + 1)
if rewrite_comments:
self._RewriteComments(googlecode_issue, export_metadata["exported_id"])
print "" # Advanced past the "progress bar" line.
continue
# Post the issue for the first time.
self._UpdateProgressBar()
last_issue_skipped = False
posted_issue_id = self._CreateIssue(googlecode_issue)
comments = googlecode_issue.GetComments()
self._CreateComments(comments, posted_issue_id, googlecode_issue)
if not googlecode_issue.IsOpen():
self._issue_service.CloseIssue(posted_issue_id)
print "Finished!"
| <filename>googlecode-issues-exporter/issues.py<gh_stars>10-100
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to an issue service.
"""
import collections
import datetime
import json
import re
import sys
import HTMLParser
# Regular expression used by Google Code for auto-linking issue references,
# e.g. "issue #8" or "bug5".
GC_ISSUE_REF_RE = re.compile(r"""
(?P<prefix>\b(issue|bug)\s*)
(?P<project_name>\s+[-a-z0-9]+[:\#])?
(?P<number_sign>\#?)
(?P<issue_id>\d+)\b""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# Regular expression to match issue references generated by this tool and
# match GitHub's system. (e.g. "- **Blocking**: #1, #2, #3")
EX_ISSUE_REF_RE = re.compile(
r"- \*\*(?P<tag>([^\*]+))\*\*: #(?P<issues>([^\n]+))")
def RemapIssueIds(comment, id_mapping):
"""Rewrite a comment's text based on an ID mapping.
Args:
comment: A string with the comment text. e.g. 'Closes issue #42'.
id_mapping: A dictionary mapping Google Code to GitHub issue IDs.
e.g. { '42': '142' }
Returns:
The rewritten comment text.
"""
def replaceGoogleCodeIssueReferences(match):
# Ignore references to other projects.
if match.group('project_name'):
return match.group()
# Ignore issues not found in the ID mapping.
google_code_id = match.group('issue_id')
if not id_mapping or google_code_id not in id_mapping:
return match.group()
github_id = id_mapping[google_code_id]
return match.group().replace(google_code_id, github_id)
def replaceExportedIssueReferences(match):
# Parse the issues list and regenerate.
gc_issue_ids = match.group('issues').split(", #")
gh_issue_ids = []
for gc_issue_id in gc_issue_ids:
if id_mapping and gc_issue_id in id_mapping:
gh_issue_ids.append(id_mapping[gc_issue_id])
else:
gh_issue_ids.append(gc_issue_id)
return "- **%s**: #%s" % (
match.group('tag'), ", #".join(gh_issue_ids))
comment = GC_ISSUE_REF_RE.sub(replaceGoogleCodeIssueReferences, comment)
comment = EX_ISSUE_REF_RE.sub(replaceExportedIssueReferences, comment)
return comment
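# Illustrative check: a Google Code style reference and a previously exported
# reference line are both rewritten through the ID mapping.
assert RemapIssueIds("Closes issue #42", {"42": "142"}) == "Closes issue #142"
assert RemapIssueIds("- **Blocked on**: #42", {"42": "142"}) == "- **Blocked on**: #142"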
def _ParseIssueReferences(issue_ref_list):
"""Parses a list of issue references into a tuple of IDs added/removed.
For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ])
NOTE: We don't support cross-project issue references. Rather we
just assume the issue reference is within the same project.
"""
added = []
removed = []
for proj in issue_ref_list:
parts = proj.split(":")
proj_id = parts[1] if len(parts) >= 2 else proj[1:]
if proj[0] != "-":
added.append(proj_id)
else:
removed.append(proj_id)
return added, removed
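# Illustrative check, mirroring the docstring example above.
assert _ParseIssueReferences(["alpha:7", "beta:8", "-gamma:9"]) == (["7", "8"], ["9"])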
class IdentityDict(dict):
def __missing__(self, key):
return key
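# Illustrative check: IdentityDict is used as the fallback user map, so a
# username with no explicit mapping resolves to itself.
_example_user_map = IdentityDict({"alice@example.com": "alice-gh"})
assert _example_user_map["alice@example.com"] == "alice-gh"
assert _example_user_map["bob@example.com"] == "bob@example.com"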
def TryFormatDate(date):
"""Attempt to clean up a timestamp date."""
try:
if date.endswith(":"):
date = date[:len(date) - 1]
datetime_version = datetime.datetime.strptime(
date, "%Y-%m-%dT%H:%M:%S.%fZ")
return str(datetime_version)
except ValueError as ve:
return date
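# Illustrative check: well-formed Takeout timestamps are normalized; anything
# else is passed through untouched.
assert TryFormatDate("2014-03-04T18:30:00.000Z") == "2014-03-04 18:30:00"
assert TryFormatDate("sometime in 2014") == "sometime in 2014"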
def WrapText(text, max):
"""Inserts a newline if any line of a file is > max chars.
Note that the newline is inserted at the first whitespace
character, so there may be lines longer than max.
"""
char_list = list(text)
last_linebreak = 0
for i in range(0, len(char_list)):
if char_list[i] == '\n' or char_list[i] == '\r':
last_linebreak = i
if i - last_linebreak > max and char_list[i] == ' ':
# Replace ' ' with '\n'
char_list.pop(i)
char_list.insert(i, '\n')
last_linebreak = i
return ''.join(char_list)
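# Illustrative check: short text is returned unchanged, while long
# single-line text gains a newline at the first space past the limit.
assert WrapText("short line", 80) == "short line"
assert "\n" in WrapText("word " * 40, 80)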
class Error(Exception):
"""Base error class."""
class InvalidUserError(Error):
"""Error for an invalid user."""
class ProjectNotFoundError(Error):
"""Error for a non-existent project."""
class ServiceError(Error):
"""Error when communicating with the issue or user service."""
class UserService(object):
"""Abstract user operations.
Handles user operations on a user API.
"""
def IsUser(self, username):
"""Checks if the user exists.
Args:
username: The username to check.
Returns:
True if the username exists.
"""
raise NotImplementedError()
class GoogleCodeIssue(object):
"""Google Code issue.
Handles parsing and viewing a Google Code issue.
"""
def __init__(self, issue, project_name, user_map):
"""Initialize the GoogleCodeIssue.
Args:
issue: The Google Code Issue as a dictionary.
project_name: The name of the project the issue belongs to.
user_map: A map from Google Code usernames to issue service names.
"""
self._issue = issue
self._project_name = project_name
self._user_map = user_map
def GetProjectName(self):
"""Returns the project name."""
return self._project_name
def GetUserMap(self):
"""Returns the user map."""
return self._user_map
def GetOwner(self):
"""Get the owner username of a Google Code issue.
This will ALWAYS be the person requesting the issue export.
"""
return self._user_map["user_requesting_export"]
def GetContentUpdatedOn(self):
"""Get the date the content was last updated from a Google Code issue.
Returns:
The time stamp when the issue content was last updated
"""
return self._issue["updated"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code issue.
Returns:
The time stamp when the issue content was created
"""
return self._issue["published"]
def GetId(self):
"""Get the id from a Google Code issue.
Returns:
The issue id
"""
return self._issue["id"]
def GetLabels(self):
"""Get the labels from a Google Code issue.
Returns:
A list of the labels of this issue.
"""
labels = self._issue.get("labels", [])
# Add status as a label.
if "status" in self._issue:
labels.append("Status-" + self._issue["status"])
return labels
def GetKind(self):
"""Get the kind from a Google Code issue.
Returns:
The issue kind, if none is found defaults to 'Defect'
"""
types = [t for t in self.GetLabels() if "Type-" in t]
if types:
return types[0][len("Type-"):]
return "Defect"
def GetPriority(self):
"""Get the priority from a Google Code issue.
Returns:
The issue priority, if none is found defaults to 'Medium'
"""
priorities = [p for p in self.GetLabels() if "Priority-" in p]
if priorities:
return priorities[0][len("Priority-"):]
return "Medium"
def GetAuthor(self):
"""Get the author's username of a Google Code issue.
Returns:
The Google Code username that the issue is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._issue:
return None
author = self._issue["author"]["name"]
return self._user_map[author]
def GetStatus(self):
"""Get the status from a Google Code issue.
Returns:
The issue status
"""
status = self._issue["status"].lower()
if status == "accepted":
status = "open"
return status
def GetTitle(self):
"""Get the title from a Google Code issue.
Returns:
The issue title
"""
title = self._issue["title"]
# It is not possible to create a Google Code issue without a title, but you
# can edit an issue to remove its title afterwards.
if title.isspace():
title = "<empty title>"
return title
def GetUpdatedOn(self):
"""Get the date the issue was last updated.
Returns:
The time stamp when the issue was last updated
"""
return self.GetCreatedOn()
def GetComments(self):
"""Get the list of comments for the issue (if any).
Returns:
The list of comments attached to the issue
"""
# The 0th comment is the issue's description. Also, filter out
# any deleted comments.
comments = self._issue["comments"]["items"][1:]
return [c for c in comments if "deletedBy" not in c]
def IsOpen(self):
"""Check if an issue is marked as open.
Returns:
True if the issue was open.
"""
return "state" in self._issue and self._issue["state"] == "open"
def GetDescription(self):
"""Returns the Description of the issue."""
# Just return the description of the underlying comment. However,
# we fudge a few things since metadata is stored differently for
# "the issue" (i.e. comment #0) and other comments.
comment_0_data = self._issue["comments"]["items"][0]
googlecode_comment = GoogleCodeComment(self, comment_0_data)
issue_description = googlecode_comment.GetDescription()
# Be careful not to run afoul of issue reference rewriting...
issue_header = "Originally reported on Google Code with ID %s\n" % (
self.GetId())
return issue_header + issue_description
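# Illustrative sketch: labels double as typed metadata. "Type-*" and
# "Priority-*" prefixes feed GetKind/GetPriority, and the status is folded in
# as a "Status-*" label. The dict below is an assumed minimal subset of a
# Takeout issue, not the full schema.
_example_gc_issue = GoogleCodeIssue(
  {"id": 3, "status": "Accepted", "labels": ["Type-Enhancement", "Priority-Low"]},
  "my-project", IdentityDict())
assert _example_gc_issue.GetKind() == "Enhancement"
assert _example_gc_issue.GetPriority() == "Low"
assert "Status-Accepted" in _example_gc_issue.GetLabels()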
class GoogleCodeComment(object):
"""Google Code Comment.
Handles parsing and viewing a Google Code Comment.
"""
def __init__(self, googlecode_issue, comment, id_mapping=None):
"""Initialize the GoogleCodeComment.
Args:
googlecode_issue: A GoogleCodeIssue instance.
comment: The Google Code Comment as dictionary.
id_mapping: Mapping from Google Code issue IDs to their new locations.
"""
self._comment = comment
self._googlecode_issue = googlecode_issue
self._id_mapping = id_mapping
def GetContent(self):
"""Get the content from a Google Code comment.
Returns:
The issue comment
"""
return self._comment["content"]
def GetCreatedOn(self):
"""Get the creation date from a Google Code comment.
Returns:
The time stamp when the issue comment content was created
"""
return self._comment["published"]
def GetId(self):
"""Get the id from a Google Code comment.
Returns:
The issue comment id
"""
return self._comment["id"]
def GetLabels(self):
"""Get the labels modified with the comment."""
if "updates" in self._comment:
if "labels" in self._comment["updates"]:
return self._comment["updates"]["labels"]
return []
def GetIssue(self):
"""Get the GoogleCodeIssue this comment belongs to.
Returns:
The issue id
"""
return self._googlecode_issue
def GetUpdatedOn(self):
"""Get the date the issue comment content was last updated.
Returns:
The time stamp when the issue comment content was last updated
"""
return self.GetCreatedOn()
def GetAuthor(self):
"""Get the author's username of a Google Code issue comment.
Returns:
The Google Code username that the issue comment is authored by or the
repository owner if no mapping or email address exists.
"""
if "author" not in self._comment:
return None
author = self._comment["author"]["name"]
return self.GetIssue().GetUserMap()[author]
def GetDescription(self):
"""Returns the Description of the comment."""
author = self.GetAuthor()
comment_date = self.GetCreatedOn()
comment_text = self.GetContent()
comment_updates = {}
if "updates" in self._comment:
comment_updates = self._comment["updates"]
body = ""
if comment_text:
# Google Takeout includes escaped HTML entities such as "&gt;" and "&aacute;".
html_parser = HTMLParser.HTMLParser()
comment_text = html_parser.unescape(comment_text)
# Remove <b> tags, which Codesite automatically includes if issue body
# is based on a prompt.
comment_text = comment_text.replace("<b>", "")
comment_text = comment_text.replace("</b>", "")
# 82 instead of 80 in case it was already wrapped...
comment_text = WrapText(comment_text, 82)
body += "```\n" + comment_text + "\n```\n\n"
footer = "Reported by `%s` on %s\n" % (
author, TryFormatDate(comment_date))
if "status" in comment_updates:
footer += "- **Status changed**: `%s`\n" % (comment_updates["status"])
footer += self._GetLabelInfo()
footer += self._GetLinksToOtherIssues()
if "mergedInto" in comment_updates and comment_updates["mergedInto"]:
footer += "- **Merged into**: #%s\n" % (
comment_updates["mergedInto"])
# Add references to attachments as appropriate. (Do this last since it
# inserts a horizontal rule.)
footer += self._GetAttachmentInfo()
raw_comment_body = body + footer
return RemapIssueIds(raw_comment_body, self._id_mapping)
def _GetLabelInfo(self):
"""Returns Markdown text for a comment's labels as appropriate."""
if not self.GetLabels():
return ""
labels_added = []
labels_removed = []
for label in self.GetLabels():
if label.startswith("-"):
labels_removed.append(label[1:])
else:
labels_added.append(label)
label_info = ""
if labels_added:
label_info += "- **Labels added**: %s\n" % (", ".join(labels_added))
if labels_removed:
label_info += "- **Labels removed**: %s\n" % (", ".join(labels_removed))
return label_info
def _GetLinksToOtherIssues(self):
"""Returns Markdown text for a comment's links to other issues."""
if "updates" not in self._comment:
return ""
updates = self._comment["updates"]
ref_info = ""
if "blocking" in updates:
added, removed = _ParseIssueReferences(updates["blocking"])
if added:
ref_info += "- **Blocking**: #" + ", #".join(added) + "\n"
if removed:
ref_info += "- **No longer blocking**: #" + ", #".join(removed) + "\n"
if "blockedOn" in updates:
added, removed = _ParseIssueReferences(updates["blockedOn"])
if added:
ref_info += "- **Blocked on**: #" + ", #".join(added) + "\n"
if removed:
ref_info += ("- **No longer blocked on**: #" +
", #".join(removed) + "\n")
return ref_info
def _GetAttachmentInfo(self):
"""Returns Markdown text for a comment's attachments as appropriate."""
attachmentLines = []
attachments = self._comment["attachments"] if "attachments" in self._comment else []
for attachment in attachments:
if "isDeleted" in attachment:
# Deleted attachments won't be found on the issue mirror.
continue
link = "https://storage.googleapis.com/google-code-attachments/%s/issue-%d/comment-%d/%s" % (
self.GetIssue().GetProjectName(), self.GetIssue().GetId(),
self.GetId(), attachment["fileName"])
def has_extension(extension):
return attachment["fileName"].lower().endswith(extension)
is_image_attachment = False
for extension in [".png", ".jpg", ".jpeg", ".bmp", ".tif", ".gif"]:
is_image_attachment |= has_extension(extension)
if is_image_attachment:
line = " * *Attachment: %s<br>*" % (
attachment["fileName"], attachment["fileName"], link)
else:
line = " * *Attachment: [%s](%s)*" % (attachment["fileName"], link)
attachmentLines.append(line)
if len(attachmentLines) > 0:
return "\n<hr>\n" + "\n".join(attachmentLines)
return ""
class IssueService(object):
"""Abstract issue operations.
Handles creating and updating issues and comments on a user API.
"""
def GetIssues(self, state="open"):
"""Gets all of the issue for the repository with the given state.
Args:
state: The issue state to fetch; either 'open' or 'closed'.
Returns:
The list of all of the issues with the given state.
Raises:
IOError: An error occurred accessing previously created issues.
"""
raise NotImplementedError()
def GetComments(self, issue_number):
"""Gets all the comments for the issue with the given ID."""
raise NotImplementedError()
def CreateIssue(self, googlecode_issue):
"""Creates an issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
ServiceError: An error occurred creating the issue.
"""
raise NotImplementedError()
def EditIssue(self, googlecode_issue, issue_number):
"""Edits an existing issue."""
raise NotImplementedError()
def CloseIssue(self, issue_number):
"""Closes an issue.
Args:
issue_number: The issue number.
"""
raise NotImplementedError()
def CreateComment(self, issue_number, googlecode_comment):
"""Creates a comment on an issue.
Args:
issue_number: The issue number.
googlecode_comment: An instance of GoogleCodeComment
"""
raise NotImplementedError()
def EditComment(self, googlecode_issue, googlecode_comment, comment_number):
"""Edits an existing comment."""
raise NotImplementedError()
def LoadIssueData(issue_file_path, project_name):
"""Loads issue data from a file.
Args:
issue_file_path: path to the file to load
project_name: name of the project to load
Returns:
Issue data as a list of dictionaries.
Raises:
ProjectNotFoundError: the project_name was not found in the file.
"""
with open(issue_file_path) as user_file:
user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name == project["name"]:
return project["issues"]["items"]
raise ProjectNotFoundError("Project %s not found" % project_name)
def LoadUserData(user_file_path, user_service):
"""Loads user data from a file. If not present, the user name will
just return whatever is passed to it.
Args:
user_file_path: path to the file to load
user_service: an instance of UserService
"""
identity_dict = IdentityDict()
if not user_file_path:
return identity_dict
with open(user_file_path) as user_data:
user_json = user_data.read()
user_map = json.loads(user_json)["users"]
for username in user_map.values():
if not user_service.IsUser(username):
raise InvalidUserError("%s is not a User" % username)
identity_dict.update(user_map)
return identity_dict
class IssueExporter(object):
"""Issue Migration.
Handles uploading issues from Google Code to an issue service.
"""
def __init__(self, issue_service, user_service, issue_json_data,
project_name, user_map):
"""Initialize the IssueExporter.
Args:
issue_service: An instance of IssueService.
user_service: An instance of UserService.
project_name: The name of the project to export to.
issue_json_data: A data object of issues from Google Code.
user_map: A map from user email addresses to service usernames.
"""
self._issue_service = issue_service
self._user_service = user_service
self._issue_json_data = issue_json_data
self._project_name = project_name
self._user_map = user_map
# Specialized index of issues to quickly check what has been migrated to
# GitHub and, if so, determine its new issue ID. See Init(...).
self._issue_index = {}
self._prefix = "" # Output only.
self._issue_total = 0
self._issue_number = 0
self._comment_number = 0
self._comment_total = 0
self._skipped_issues = 0
# Mapping from Google Code issue ID to destination service issue ID.
self._id_mapping = {}
def Init(self, require_all_issues_exported=False):
"""Initialize the needed variables.
Args:
require_all_issues_exported: Bool. Require that all issues have
been exported. Used to ensure that rewriting comments won't fail.
"""
print "Building issue index."
self._issue_index = {}
index = self._issue_index
for issue in self._issue_json_data:
gc_issue = GoogleCodeIssue(issue, self._project_name, self._user_map)
if gc_issue.GetTitle() not in index:
index[gc_issue.GetTitle()] = []
index[gc_issue.GetTitle()].append({
"googlecode_id": gc_issue.GetId(),
"exported": False,
"exported_id": -1,
"comment_count": -1,
})
print "Determining which issues have already been exported."
open_issues = self._issue_service.GetIssues("open")
closed_issues = self._issue_service.GetIssues("closed")
all_exported_issues = open_issues + closed_issues
# Sort issues by GitHub ID. Since Google Code issues are exported in order,
# we can use the exported issues' chronology to resolve ambiguities between
# issues that share the same title. (GitHub issue number == ID.)
all_exported_issues = sorted(all_exported_issues,
key=lambda issue: issue["number"])
for exported_issue in all_exported_issues:
exported_issue_id = exported_issue["number"]
exported_issue_title = exported_issue["title"]
if exported_issue_title not in index:
print "Warning: GitHub issue #%s '%s' not in Google Takeout dump." % (
exported_issue_id, exported_issue_title)
continue
# Mark the issue as exported.
for idx in range(0, len(index[exported_issue_title])):
if not index[exported_issue_title][idx]["exported"]:
index[exported_issue_title][idx]["exported"] = True
index[exported_issue_title][idx]["exported_id"] = exported_issue_id
index[exported_issue_title][idx]["comment_count"] = (
exported_issue["comments"])
break
if idx >= len(index[exported_issue_title]):
print "Warning: Couldn't find the %sth issue titled '%s'." % (
idx, exported_issue_title)
# Build the ID map based on previously created issues. Only used if
# rewriting comments.
if not require_all_issues_exported:
return
print "Confirming all issues have been exported."
for title in index:
for issue in index[title]:
self._id_mapping[str(issue["googlecode_id"])] = str(issue["exported_id"])
if not issue["exported"]:
raise Exception(
"Issue #%s '%s' not found. Can't rewrite comments." % (
issue["googlecode_id"], title))
print "len(id_map) = %s, with %s total issues" % (
len(self._id_mapping), len(self._issue_json_data))
if len(self._id_mapping) < len(self._issue_json_data):
raise Exception("Not all issues have been exported.")
def _GetExportedIssue(self, googlecode_issue):
"""Return metadata about the exported Google Code issue."""
index = self._issue_index
issue_title = googlecode_issue.GetTitle()
issue_id = googlecode_issue.GetId()
if issue_title not in index:
raise Exception("Google Code issue '%s' not expected to be exported." % (
issue_title))
for idx in range(0, len(index[issue_title])):
if index[issue_title][idx]["googlecode_id"] == issue_id:
return index[issue_title][idx]
raise Exception("Unable to find Google Code issue #%s." % (issue_id))
def _HasIssueBeenExported(self, googlecode_issue):
"""Returns whether or not a Google Code issue has been exported."""
export_metadata = self._GetExportedIssue(googlecode_issue)
return export_metadata["exported"]
def _UpdateProgressBar(self):
"""Update issue count 'feed'.
This displays the current status of the script to the user.
"""
feed_string = ("\r%sIssue: %d/%d -> Comment: %d/%d " %
(self._prefix, self._issue_number, self._issue_total,
self._comment_number, self._comment_total))
sys.stdout.write(feed_string)
sys.stdout.flush()
def _CreateIssue(self, googlecode_issue):
"""Converts an issue from Google Code to an issue service.
This will take the Google Code issue and create a corresponding issue on
the issue service. If the issue on Google Code was closed it will also
be closed on the issue service.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number assigned by the service.
"""
return self._issue_service.CreateIssue(googlecode_issue)
def _CreateComments(self, comments, issue_number, googlecode_issue):
"""Converts a list of issue comment from Google Code to an issue service.
This will take a list of Google Code issue comments and create
corresponding comments on an issue service for the given issue number.
Args:
comments: A list of comment data dictionaries from the Takeout dump.
issue_number: The issue number on the destination service.
googlecode_issue: The GoogleCodeIssue the comments belong to.
"""
self._comment_total = len(comments)
self._comment_number = 0
for comment in comments:
googlecode_comment = GoogleCodeComment(googlecode_issue, comment)
self._comment_number += 1
self._UpdateProgressBar()
self._issue_service.CreateComment(issue_number, googlecode_comment)
def _RewriteComments(self, googlecode_issue, exported_issue_number):
"""Rewrite all comments in the issue to update issue ID references.
Args:
googlecode_issue: The Google Code issue to rewrite.
exported_issue_number: The issue ID on the **destination** system.
"""
id_mapping = self._id_mapping
comments = googlecode_issue.GetComments()
self._prefix = "Rewriting "
self._comment_total = len(comments)
self._comment_number = 0
self._issue_service.EditIssue(googlecode_issue, exported_issue_number)
# Get existing comments from the destination, necessary because we don't
# know the IDs used on the output side. (GitHub uses timestamps :P)
existing_comments = self._issue_service.GetComments(exported_issue_number)
for comment_idx in range(0, len(comments)):
if comment_idx >= len(existing_comments):
print "\nError: More comments on Google Code than on dest service?"
print "Google Code #%s vs. dest service #%s (%s comments vs. %s)" % (
googlecode_issue.GetId(), exported_issue_number,
len(comments), len(existing_comments))
break
comment = comments[comment_idx]
comment_number = existing_comments[comment_idx]["id"]
gc_comment = GoogleCodeComment(googlecode_issue, comment, id_mapping)
self._comment_number += 1
self._UpdateProgressBar()
self._issue_service.EditComment(
exported_issue_number, gc_comment, comment_number)
def _FixBlockingBlockedOn(self, issue_json):
"""Fix the issue JSON object to normalize how blocking/blocked-on are used.
There is a bug in how Google Takeout exports blocking/blocked-on status.
Each comment may have an update with a list of added/removed
blocked/blocking issues. However, comment #0, the "original issue state"
does not contain this information.
However, the issue does contain summary information. (i.e. a union of
initial state and all comment updates.)
This function figures out what should be in comment #0 so everything
actually makes sense when rendered.
"""
# Issue references we add to comment #0
# - References that are removed later, but not explicitly added.
# (assumed to have been added on comment #0).
# - References that are in the summary, but not explicitly added.
# (assumed to have been added on comment #0).
def IssueRefToString(issue_ref):
return issue_ref["projectId"] + ":" + str(issue_ref["issueId"])
def GetUnionReferences(kind_name):
"""The initial issue reference IDs."""
references = []
if kind_name in issue_json:
for reference in issue_json[kind_name]:
references.append(IssueRefToString(reference))
references, _ = _ParseIssueReferences(references)
return references
def DesiredReferences(union_references, kind_name):
"""Returns the desired references on commeng #0 for the kind."""
current_list = [] # List of references as we simulate the comments.
desired_list = union_references[:] # The desired list to output.
issue_comments = issue_json["comments"]["items"]
for comment in issue_comments:
if "updates" not in comment:
continue
updates = comment["updates"]
if kind_name in updates:
added, removed = _ParseIssueReferences(updates[kind_name])
# If the reference was added in this comment, we don't need
# to add it to comment #0 since you'll "see" the addition.
for added_ref in added:
current_list.append(added_ref)
if added_ref in union_references and added_ref in desired_list:
desired_list.remove(added_ref)
# If the reference was removed in this comment AND it wasn't
# previously added by a comment, then we should add it to the
# output list. (We infer the issue was created with it.)
for removed_ref in removed:
if removed_ref not in union_references and (
removed_ref not in current_list):
desired_list.append(removed_ref)
return desired_list
def AddToComment0(issue_references, kind_name):
if not issue_references:
return
comment_0_data = issue_json["comments"]["items"][0]
if "updates" not in comment_0_data:
comment_0_data["updates"] = {}
comment_0_updates = comment_0_data["updates"]
if kind_name not in comment_0_updates:
comment_0_updates[kind_name] = []
comment_0_updates[kind_name].extend(
["???:" + iid for iid in issue_references])
starting_blocking = GetUnionReferences("blocking")
desired_blocking = DesiredReferences(starting_blocking, "blocking")
AddToComment0(desired_blocking, "blocking")
starting_blockedon = GetUnionReferences("blockedOn")
desired_blockedon = DesiredReferences(starting_blockedon, "blockedOn")
AddToComment0(desired_blockedon, "blockedOn")
return issue_json
def Start(self, rewrite_comments=False):
"""Start the issue export process.
Args:
rewrite_comments: Bool. If set will rewrite the comments for previously
exported issues. Used to fix export problems and remap issue IDs.
"""
print "Starting issue export for '%s'" % (self._project_name)
self._issue_total = len(self._issue_json_data)
self._comment_total = 0
self._issue_number = 0
self._comment_number = 0
self._skipped_issues = 0
last_issue_skipped = False # Only used for formatting output.
for issue in self._issue_json_data:
self._FixBlockingBlockedOn(issue)
googlecode_issue = GoogleCodeIssue(
issue, self._project_name, self._user_map)
issue_title = googlecode_issue.GetTitle()
short_issue_title = (
issue_title[:16] + '...') if len(issue_title) > 18 else issue_title
self._issue_number += 1
# Check if the issue has already been posted.
if self._HasIssueBeenExported(googlecode_issue):
export_metadata = self._GetExportedIssue(googlecode_issue)
print "%sGoogle Code issue #%s already exported with ID #%s." % (
("\n" if not last_issue_skipped else ""),
export_metadata["googlecode_id"],
export_metadata["exported_id"])
last_issue_skipped = True
self._skipped_issues = self._skipped_issues + 1
# Verify all comments are present.
issue_comments = googlecode_issue.GetComments()
num_issue_comments = len(issue_comments)
num_existing_comments = export_metadata["comment_count"]
if num_issue_comments > num_existing_comments:
for idx in range(num_existing_comments, num_issue_comments):
comment_data = issue_comments[idx]
googlecode_comment = GoogleCodeComment(
googlecode_issue, comment_data)
self._issue_service.CreateComment(
export_metadata["exported_id"], googlecode_comment)
print " Added missing comment #%d" % (idx + 1)
if rewrite_comments:
self._RewriteComments(googlecode_issue, export_metadata["exported_id"])
print "" # Advanced past the "progress bar" line.
continue
# Post the issue for the first time.
self._UpdateProgressBar()
last_issue_skipped = False
posted_issue_id = self._CreateIssue(googlecode_issue)
comments = googlecode_issue.GetComments()
self._CreateComments(comments, posted_issue_id, googlecode_issue)
if not googlecode_issue.IsOpen():
self._issue_service.CloseIssue(posted_issue_id)
print "Finished!"
| en | 0.870394 | # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tool for uploading Google Code issues to an issue service. # Regular expression used by Google Code for auto-linking issue references, # e.g. "issue #8" or "bug5". (?P<prefix>\b(issue|bug)\s*) (?P<project_name>\s+[-a-z0-9]+[:\#])? (?P<number_sign>\#?) (?P<issue_id>\d+)\b # Regular expression to match issue references generated by this tool and # match GitHub's system. (e.g. "- **Blocking**: #1, #2, #3") #(?P<issues>([^\n]+))") Rewrite a comment's text based on an ID mapping. Args: comment: A string with the comment text. e.g. 'Closes issue #42'. id_mapping: A dictionary mapping Google Code to GitHub issue IDs. e.g. { '42': '142' } Returns: The rewritten comment text. # Ignore references to other projects. # Ignore issues not found in the ID mapping. # Parse the issues list and regenerate. #") #%s" % ( #".join(gh_issue_ids)) Parses a list of issue references into a tuple of IDs added/removed. For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ]) NOTE: We don't support cross-project issue references. Rather we just assume the issue reference is within the same project. Attempt to clean up a timestamp date. Inserts a newline if any line of a file is > max chars. Note that the newline is inserted at the first whitespace character, so there may be lines longer than max. # Replace ' ' with '\n' Base error class. Error for an invalid user. Error for a non-existent project. Error when communicating with the issue or user service. Abstract user operations. Handles user operations on an user API. Checks if the user exists. Args: username: The username to check. Returns: True if the username exists. Google Code issue. Handles parsing and viewing a Google Code issue. Initialize the GoogleCodeIssue. Args: issue: The Google Code Issue as a dictionary. project_name: The name of the project the issue belongs to. user_map: A map from Google Code usernames to issue service names. Returns the project name. Returns the user map. Get the owner username of a Google Code issue. This will ALWAYS be the person requesting the issue export. Get the date the content was last updated from a Google Code issue. Returns: The time stamp when the issue content was last updated Get the creation date from a Google Code issue. Returns: The time stamp when the issue content was created Get the id from a Google Code issue. Returns: The issue id Get the labels from a Google Code issue. Returns: A list of the labels of this issue. # Add status as a label. Get the kind from a Google Code issue. Returns: The issue kind, if none is found defaults to 'Defect' Get the priority from a Google Code issue. Returns: The issue priority, if none is found defaults to 'Medium' Get the author's username of a Google Code issue. Returns: The Google Code username that the issue is authored by or the repository owner if no mapping or email address exists. Get the status from a Google Code issue. 
Returns: The issue status Get the title from a Google Code issue. Returns: The issue title # It is not possible to create a Google Code issue without a title, but you # can edit an issue to remove its title afterwards. Get the date the issue was last updated. Returns: The time stamp when the issue was last updated Get the list of comments for the issue (if any). Returns: The list of comments attached to the issue # The 0th comment is the issue's description. Also, filter out # any deleted comments. Check if an issue is marked as open. Returns: True if the issue was open. Returns the Description of the issue. # Just return the description of the underlying comment. However, # we fudge a few things since metadata is stored differently for # "the issue" (i.e. comment #0) and other comments. # Be careful not to run afoul of issue reference rewriting... Google Code Comment. Handles parsing and viewing a Google Code Comment. Initialize the GoogleCodeComment. Args: googlecode_issue: A GoogleCodeIssue instance. comment: The Google Code Comment as dictionary. id_mapping: Mapping from Google Code issue IDs to their new locations. Get the content from a Google Code comment. Returns: The issue comment Get the creation date from a Google Code comment. Returns: The time stamp when the issue comment content was created Get the id from a Google Code comment. Returns: The issue comment id Get the labels modified with the comment. Get the GoogleCodeIssue this comment belongs to. Returns: The issue id Get the date the issue comment content was last updated. Returns: The time stamp when the issue comment content was last updated Get the author's username of a Google Code issue comment. Returns: The Google Code username that the issue comment is authored by or the repository owner if no mapping or email address exists. Returns the Description of the comment. # Google Takeout includes escaped HTML such as > and á. # Remove <b> tags, which Codesite automatically includes if issue body # is based on a prompt. # 82 instead of 80 in case it was already wrapped... #%s\n" % ( # Add references to attachments as appropriate. (Do this last since it # inserts a horizontal rule.) Returns Markdown text for a comment's labels as appropriate. Returns Markdown text for a comment's links to other issues. #" + ", #".join(added) + "\n" #" + ", #".join(removed) + "\n" #" + ", #".join(added) + "\n" #" + #".join(removed) + "\n") Returns Markdown text for a comment's attachments as appropriate. # Deleted attachments won't be found on the issue mirror. Abstract issue operations. Handles creating and updating issues and comments on an user API. Gets all of the issue for the repository with the given state. Args: state: The state of the repository can be either 'open' or 'closed'. Returns: The list of all of the issues with the given state. Raises: IOError: An error occurred accessing previously created issues. Gets all the comments for the issue with the given ID. Creates an issue. Args: googlecode_issue: An instance of GoogleCodeIssue Returns: The issue number of the new issue. Raises: ServiceError: An error occurred creating the issue. Edits an existing issue. Closes an issue. Args: issue_number: The issue number. Creates a comment on an issue. Args: issue_number: The issue number. googlecode_comment: An instance of GoogleCodeComment Edits an existing comment. Loads issue data from a file. Args: issue_file_path: path to the file to load project_name: name of the project to load Returns: Issue data as a list of dictionaries. 
Raises: ProjectNotFoundError: the project_name was not found in the file. Loads user data from a file. If not present, the user name will just return whatever is passed to it. Args: user_file_path: path to the file to load user_service: an instance of UserService Issue Migration. Handles the uploading issues from Google Code to an issue service. Initialize the IssueExporter. Args: issue_service: An instance of IssueService. user_service: An instance of UserService. project_name: The name of the project to export to. issue_json_data: A data object of issues from Google Code. user_map: A map from user email addresses to service usernames. # Specialized index of issues to quickly check what has been migrated to # GitHub and if so, determine it's new issue ID. See Init(...). # Output only. # Mapping from Google Code issue ID to destination service issue ID. Initialize the needed variables. Arg: require_all_issues_exported: Bool. Require that all issues have been exported. Used to ensure that rewritting comments won't fail. # Sort issues by GitHub ID, since Google Code issues will be exported in # order we can use the exported issue's chronology to resolve ambiguities # for issues with the same title. Yes, GitHub number == ID. #%s '%s' not in Google Takeout dump." % ( # Mark of the issue as exported. # Build the ID map based on previously created issue. Only used if # rewriting comments. #%s '%s' not found. Can't rewrite comments." % ( Return metadata about the exported Google Code issue. #%s." % (issue_id)) Returns whether or not a Google Code issue has been exported. Update issue count 'feed'. This displays the current status of the script to the user. Converts an issue from Google Code to an issue service. This will take the Google Code issue and create a corresponding issue on the issue service. If the issue on Google Code was closed it will also be closed on the issue service. Args: googlecode_issue: An instance of GoogleCodeIssue Returns: The issue number assigned by the service. Converts a list of issue comment from Google Code to an issue service. This will take a list of Google Code issue comments and create corresponding comments on an issue service for the given issue number. Args: comments: A list of comments (each comment is just a string). issue_number: The issue number. source_issue_id: The Google Code issue id. Rewrite all comments in the issue to update issue ID references. Args: googlecode_issue: The Google Code issue to rewrite. issue_number: The issue ID on the **destination** system. # Get existing comments from the destination, necessary because we don't # know the IDs used on the output side. (GitHub uses timestamps :P) #%s vs. dest service #%s (%s comments vs. %s)" % ( Fix the issue JSON object to normalize how blocking/blocked-on are used. There is a bug in how Google Takeout exports blocking/blocked-on status. Each comment may have an update with a list of added/removed blocked/blocking issues. However, comment #0, the "original issue state" does not contain this information. However, the issue does contain summary information. (i.e. a union of initial state and all comment updates. This function figures out what should be in comment #0 so everything actually makes sense when rendered. # Issue references we add to comment #0 # - References that are removed later, but not explicitly added. # (assumed to have been added on comment #0). # - References that are in the summary, but not explicitly added. # (assumed to have been added on comment #0). 
The initial issue reference IDs. Returns the desired references on commeng #0 for the kind. # List of references as we simulate the comments. # The desired list to output. # If the reference was added in this comment, we don't need # to add it to comment #0 since you'll "see" the addition. # If the reference was removed in this comment AND it wasn't # previously added by a comment, then we should add it to the # output list. (We infer the issue was created with it.) Start the issue export process. Args: rewrite_comments: Bool. If set will rewrite the comments for previously exported issues. Used to fix export problems and remap issue IDs. # Only used for formatting output. # Check if the issue has already been posted. #%s already exported with ID #%s." % ( # Verify all comments are present. #%d" % (idx + 1) # Advanced past the "progress bar" line. # Post the issue for the first time. | 2.510388 | 3 |
tests/test_helper.py | whitemike889/braintree_python | 1 | 6630645 | <reponame>whitemike889/braintree_python<gh_stars>1-10
import json
import os
import re
import random
import sys
import unittest
import warnings
import subprocess
import time
from urllib.parse import urlencode, quote_plus
from http.client import HTTPConnection
from base64 import encodebytes
import requests
from base64 import b64decode
from contextlib import contextmanager
from datetime import date, datetime, timedelta
from decimal import Decimal
from subprocess import Popen, PIPE
from nose.tools import make_decorator
from nose.tools import raises
from braintree import *
from braintree.exceptions import *
from braintree.test.credit_card_numbers import CreditCardNumbers
from braintree.test.nonces import Nonces
from braintree.testing_gateway import *
from braintree.util import *
def raises_with_regexp(expected_exception_class, regexp_to_match):
def decorate(func):
name = func.__name__
def generated_function(*args, **kwargs):
exception_string = None
try:
func(*args, **kwargs)
except expected_exception_class as e:
exception_string = str(e)
except:
raise
if exception_string is None:
message = "%s() did not raise %s" % (name, expected_exception_class.__name__)
raise AssertionError(message)
elif re.match(regexp_to_match, exception_string) is None:
message = "%s() exception message (%s) did not match (%s)" % \
(name, exception_string, regexp_to_match)
raise AssertionError(message)
return make_decorator(func)(generated_function)
return decorate
def reset_braintree_configuration():
Configuration.configure(
Environment.Development,
"integration_merchant_id",
"integration_public_key",
"integration_private_key"
)
reset_braintree_configuration()
class AdvancedFraudIntegrationMerchant:
def __enter__(self):
Configuration.configure(
Environment.Development,
"advanced_fraud_integration_merchant_id",
"advanced_fraud_integration_public_key",
"advanced_fraud_integration_private_key"
)
def __exit__(self, type, value, trace):
reset_braintree_configuration()
def showwarning(*_):
pass
warnings.showwarning = showwarning
class TestHelper(object):
default_merchant_account_id = "sandbox_credit_card"
non_default_merchant_account_id = "sandbox_credit_card_non_default"
non_default_sub_merchant_account_id = "sandbox_sub_merchant_account"
three_d_secure_merchant_account_id = "three_d_secure_merchant_account"
fake_amex_direct_merchant_account_id = "fake_amex_direct_usd"
fake_venmo_account_merchant_account_id = "fake_first_data_venmo_account"
us_bank_merchant_account_id = "us_bank_merchant_account"
another_us_bank_merchant_account_id = "another_us_bank_merchant_account"
adyen_merchant_account_id = "adyen_ma"
hiper_brl_merchant_account_id = "hiper_brl"
add_on_discount_plan = {
"description": "Plan for integration tests -- with add-ons and discounts",
"id": "integration_plan_with_add_ons_and_discounts",
"price": Decimal("9.99"),
"trial_duration": 2,
"trial_duration_unit": Subscription.TrialDurationUnit.Day,
"trial_period": True
}
billing_day_of_month_plan = {
"description": "Plan for integration tests -- with billing day of month",
"id": "integration_plan_with_billing_day_of_month",
"billing_day_of_month": 5,
"price": Decimal("8.88"),
}
trial_plan = {
"description": "Plan for integration tests -- with trial",
"id": "integration_trial_plan",
"price": Decimal("43.21"),
"trial_period": True,
"trial_duration": 2,
"trial_duration_unit": Subscription.TrialDurationUnit.Day
}
trialless_plan = {
"description": "Plan for integration tests -- without a trial",
"id": "integration_trialless_plan",
"price": Decimal("12.34"),
"trial_period": False
}
valid_token_characters = list("<KEY>")
text_type = str
raw_type = bytes
@staticmethod
def make_past_due(subscription, number_of_days_past_due=1):
Configuration.gateway().testing.make_past_due(subscription.id, number_of_days_past_due)
@staticmethod
def escrow_transaction(transaction_id):
Configuration.gateway().testing.escrow_transaction(transaction_id)
@staticmethod
def settle_transaction(transaction_id):
return Configuration.gateway().testing.settle_transaction(transaction_id)
@staticmethod
def settlement_confirm_transaction(transaction_id):
return Configuration.gateway().testing.settlement_confirm_transaction(transaction_id)
@staticmethod
def settlement_decline_transaction(transaction_id):
return Configuration.gateway().testing.settlement_decline_transaction(transaction_id)
@staticmethod
def settlement_pending_transaction(transaction_id):
return Configuration.gateway().testing.settlement_pending_transaction(transaction_id)
@staticmethod
def create_3ds_verification(merchant_account_id, params):
return Configuration.gateway().testing.create_3ds_verification(merchant_account_id, params)
@staticmethod
@contextmanager
def other_merchant(merchant_id, public_key, private_key):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
Configuration.merchant_id = merchant_id
Configuration.public_key = public_key
Configuration.private_key = private_key
try:
yield
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
@staticmethod
def includes(collection, expected):
for item in collection.items:
if item.id == expected.id:
return True
return False
@staticmethod
def in_list(collection, expected):
for item in collection:
if item == expected:
return True
return False
@staticmethod
def includes_status(collection, status):
for item in collection.items:
if item.status == status:
return True
return False
@staticmethod
def now_minus_offset(offset):
now = datetime.utcnow()
return (now - timedelta(hours=offset)).strftime("%Y-%m-%d")
@staticmethod
def unique(some_list):
return set(some_list)
@staticmethod
def __headers():
return {
"Accept": "application/xml",
"Content-type": "application/x-www-form-urlencoded",
}
@staticmethod
def generate_decoded_client_token(params=None):
client_token = None
if params:
client_token = ClientToken.generate(params)
else:
client_token = ClientToken.generate()
decoded_client_token = b64decode(client_token).decode()
return decoded_client_token
@staticmethod
def nonce_for_paypal_account(paypal_account_details):
client_token = json.loads(TestHelper.generate_decoded_client_token())
client = ClientApiHttp(Configuration.instantiate(), {
"authorization_fingerprint": client_token["authorizationFingerprint"]
})
_, nonce = client.get_paypal_nonce(paypal_account_details)
return nonce
@staticmethod
def random_token_block(x):
string = ""
for i in range(6):
string += random.choice(TestHelper.valid_token_characters)
return string
@staticmethod
def generate_valid_us_bank_account_nonce(routing_number="021000021", account_number="567891234"):
client_token = json.loads(TestHelper.generate_decoded_client_token())
headers = {
"Content-Type": "application/json",
"Braintree-Version": "2016-10-07",
"Authorization": "Bearer " + client_token["braintree_api"]["access_token"]
}
payload = {
"type": "us_bank_account",
"billing_address": {
"street_address": "123 Ave",
"region": "CA",
"locality": "San Francisco",
"postal_code": "94112"
},
"account_type": "checking",
"ownership_type": "personal",
"routing_number": routing_number,
"account_number": account_number,
"first_name": "Dan",
"last_name": "Schulman",
"ach_mandate": {
"text": "cl mandate text"
}
}
resp = requests.post(client_token["braintree_api"]["url"] + "/tokens", headers=headers, data=json.dumps(payload) )
respJson = json.loads(resp.text)
return respJson["data"]["id"]
@staticmethod
def generate_plaid_us_bank_account_nonce():
client_token = json.loads(TestHelper.generate_decoded_client_token())
headers = {
"Content-Type": "application/json",
"Braintree-Version": "2016-10-07",
"Authorization": "Bearer " + client_token["braintree_api"]["access_token"]
}
payload = {
"type": "plaid_public_token",
"public_token": "good",
"account_id": "plaid_account_id",
"ownership_type": "business",
"business_name": "PayPal, Inc.",
"billing_address": {
"street_address": "123 Ave",
"region": "CA",
"locality": "San Francisco",
"postal_code": "94112"
},
"ach_mandate": {
"text": "cl mandate text"
}
}
resp = requests.post(client_token["braintree_api"]["url"] + "/tokens", headers=headers, data=json.dumps(payload) )
respJson = json.loads(resp.text)
return respJson["data"]["id"]
@staticmethod
def generate_invalid_us_bank_account_nonce():
token = "<PASSWORD>"
for i in range(4):
token += "_" + TestHelper.random_token_block('d')
token += "_xxx"
return token
@staticmethod
def generate_three_d_secure_nonce(gateway, params):
url = gateway.config.base_merchant_path() + "/three_d_secure/create_nonce/" + TestHelper.three_d_secure_merchant_account_id
response = gateway.config.http().post(url, params)
return response["payment_method_nonce"]["nonce"]
@staticmethod
def create_disputed_transaction():
if hasattr(TestHelper, 'disputed_transaction'):
return TestHelper.disputed_transaction
disputed_transaction = Transaction.sale({
"amount": "10.00",
"credit_card": {
"number": CreditCardNumbers.Disputes.Chargeback,
"expiration_date": "04/2018"
}
})
for _ in range(1, 60):
transactions = Transaction.search([
TransactionSearch.id == disputed_transaction.transaction.id,
TransactionSearch.dispute_date == datetime.today()
])
if transactions.maximum_size == 1:
TestHelper.disputed_transaction = transactions.first
return TestHelper.disputed_transaction
else:
time.sleep(1)
raise ValueError('Disputed transaction could not be found')
@staticmethod
def create_grant(gateway, params):
config = gateway.config
response = config.http().post("/oauth_testing/grants", {
"grant": params
})
return response["grant"]["code"]
@staticmethod
def create_payment_method_grant_fixtures():
config = Configuration(
merchant_id="integration_merchant_public_id",
public_key="oauth_app_partner_user_public_key",
private_key="oauth_app_partner_user_private_key",
environment=Environment.Development
)
gateway = BraintreeGateway(config)
customer = gateway.customer.create().customer
credit_card = gateway.credit_card.create(
params={
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"first_name": "Jon",
"last_name": "Doe",
"postal_code": "95131"
}
}
).credit_card
oauth_app_gateway = BraintreeGateway(
client_id="client_id$development$integration_client_id",
client_secret="client_secret$development$integration_client_secret",
environment=Environment.Development
)
code = TestHelper.create_grant(oauth_app_gateway, {
"merchant_public_id": "integration_merchant_id",
"scope": "grant_payment_method"
})
access_token = oauth_app_gateway.oauth.create_token_from_code({
"code": code
}).credentials.access_token
granting_gateway = BraintreeGateway(
access_token=access_token,
)
return (granting_gateway, credit_card)
@staticmethod
def sample_notification_from_xml(xml):
gateway = Configuration.gateway()
payload = encodebytes(xml)
hmac_payload = Crypto.sha1_hmac_hash(gateway.config.private_key, payload)
signature = "%s|%s" % (gateway.config.public_key, hmac_payload)
return {'bt_signature': signature, 'bt_payload': payload}
class ClientApiHttp(Http):
def __init__(self, config, options):
self.config = config
self.options = options
self.http = Http(config)
@staticmethod
def create():
config = Configuration.instantiate()
client_token = TestHelper.generate_decoded_client_token()
authorization_fingerprint = json.loads(client_token)["authorizationFingerprint"]
return ClientApiHttp(config, {
"authorization_fingerprint": authorization_fingerprint,
"shared_customer_identifier": "fake_identifier",
"shared_customer_identifier_type": "testing"
})
def get(self, path):
return self._make_request("GET", path)
def post(self, path, params=None):
return self._make_request("POST", path, params)
def put(self, path, params=None):
return self._make_request("PUT", path, params)
def _make_request(self, http_verb, path, params=None):
http_strategy = self.config.http_strategy()
request_body = json.dumps(params) if params else None
return http_strategy.http_do(http_verb, path, self.__headers(), request_body)
def set_authorization_fingerprint(self, authorization_fingerprint):
self.options['authorization_fingerprint'] = authorization_fingerprint
def get_configuration(self):
encoded_fingerprint = quote_plus(self.options["authorization_fingerprint"])
url = "/merchants/%s/client_api/v1/configuration" % self.config.merchant_id
url += "?authorizationFingerprint=%s" % encoded_fingerprint
url += "&configVersion=3"
return self.get(url)
def get_cards(self):
encoded_fingerprint = quote_plus(self.options["authorization_fingerprint"])
url = "/merchants/%s/client_api/v1/payment_methods.json" % self.config.merchant_id
url += "?authorizationFingerprint=%s" % encoded_fingerprint
url += "&sharedCustomerIdentifier=%s" % self.options["shared_customer_identifier"]
url += "&sharedCustomerIdentifierType=%s" % self.options["shared_customer_identifier_type"]
return self.get(url)
def add_card(self, params):
url = "/merchants/%s/client_api/v1/payment_methods/credit_cards.json" % self.config.merchant_id
if 'authorization_fingerprint' in self.options:
params['authorizationFingerprint'] = self.options['authorization_fingerprint']
if 'shared_customer_identifier' in self.options:
params['sharedCustomerIdentifier'] = self.options['shared_customer_identifier']
if 'shared_customer_identifier_type' in self.options:
params['sharedCustomerIdentifierType'] = self.options['shared_customer_identifier_type']
return self.post(url, params)
def get_paypal_nonce(self, paypal_params):
url = "/merchants/%s/client_api/v1/payment_methods/paypal_accounts" % self.config.merchant_id
params = {"paypal_account": paypal_params}
if 'authorization_fingerprint' in self.options:
params['authorizationFingerprint'] = self.options['authorization_fingerprint']
status_code, response = self.post(url, params)
nonce = None
if status_code == 202:
nonce = json.loads(response)["paypalAccounts"][0]["nonce"]
return [status_code, nonce]
def get_credit_card_nonce(self, credit_card_params):
url = "/merchants/%s/client_api/v1/payment_methods/credit_cards" % self.config.merchant_id
params = {"credit_card": credit_card_params}
if 'authorization_fingerprint' in self.options:
params['authorizationFingerprint'] = self.options['authorization_fingerprint']
status_code, response = self.post(url, params)
nonce = None
if status_code in [201, 202]:
nonce = json.loads(response)["creditCards"][0]["nonce"]
return [status_code, nonce]
def __headers(self):
return {
"Content-type": "application/json",
"User-Agent": "Braintree Python " + version.Version, #pylint: disable=E0602
"X-ApiVersion": Configuration.api_version()
}
 | de | 0.211761 | #pylint: disable=E0602 | 1.980059 | 2 |
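A minimal sketch of how these fixtures are meant to be consumed from a nose test module, assuming the file is importable as tests.test_helper; the ValueError case is synthetic and only exercises the decorator:

from tests.test_helper import TestHelper, raises_with_regexp

@raises_with_regexp(ValueError, r"invalid literal for int\(\)")
def test_decorator_matches_the_exception_message():
    int("not-a-number")  # raises ValueError("invalid literal for int() ...")

def test_trialless_plan_has_no_trial():
    # plan fixtures are plain dicts defined on TestHelper
    assert TestHelper.trialless_plan["trial_period"] is False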
tilapia/lib/secret/keys/ed25519.py | huazhouwang/python_multichain_wallet | 2 | 6630646 | from typing import Tuple
from nacl import exceptions as nacl_exceptions
from nacl import signing
from tilapia.lib.basic.functional.require import require
from tilapia.lib.secret.interfaces import KeyInterface
class ED25519(KeyInterface):
def __init__(self, prvkey: bytes = None, pubkey: bytes = None):
super(ED25519, self).__init__(prvkey=prvkey, pubkey=pubkey)
self._prvkey = None
self._pubkey = None
if prvkey is not None:
require(len(prvkey) == 32, f"Length of prvkey should be 32 on ed25519, but now is {len(prvkey)}")
self._prvkey = signing.SigningKey(prvkey)
self._pubkey = self._prvkey.verify_key
else:
require(len(pubkey) == 32, f"Length of pubkey should be 32 on ed25519, but now is {len(pubkey)}")
self._pubkey = signing.VerifyKey(pubkey)
def get_pubkey(self, compressed=True) -> bytes:
return bytes(self._pubkey)
def get_prvkey(self) -> bytes:
require(self.has_prvkey())
return bytes(self._prvkey)
def verify(self, digest: bytes, signature: bytes) -> bool:
try:
_ = self._pubkey.verify(digest, signature)
except nacl_exceptions.BadSignatureError:
return False
else:
return True
def has_prvkey(self) -> bool:
return self._prvkey is not None
def sign(self, digest: bytes) -> Tuple[bytes, int]:
super().sign(digest)
return self._prvkey.sign(digest).signature, 0
| none | 1 | 2.102165 | 2 |
|
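A sign-and-verify round trip with the wrapper above, assuming tilapia and PyNaCl are installed; the 32-byte private key is throwaway test data, not a real wallet key:

import os
from tilapia.lib.secret.keys.ed25519 import ED25519

signer = ED25519(prvkey=os.urandom(32))
digest = b"\x00" * 32                            # ed25519 signs arbitrary byte strings
signature, recovery_id = signer.sign(digest)     # recovery_id is always 0 here

verifier = ED25519(pubkey=signer.get_pubkey())
assert verifier.verify(digest, signature)             # valid signature
assert not verifier.verify(b"\x01" * 32, signature)   # tampered digest fails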
src/movie_assistant_trainner.py | VP-DE-ML/if_a_movie_assistant | 0 | 6630647 | import tensorflow as tf
import json
import numpy as np
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
import pickle
import random
import os
from keras.preprocessing.text import Tokenizer
# from keras.utils.np_utils import to_categorical
from keras.utils import to_categorical
class MovieAssistantTrainer:
def __init__(self):
pass
def load_training_data(self, training_file):
data_file = open(training_file).read()
intents_file = json.loads(data_file)
samples_text = []
intents_labels = []
output_rows = []
for item in intents_file['intents']:
for pattern in item['userinputs']:
pattern = str(pattern).lower() # review
row = {}
row['text'] = pattern
row['intent'] = item['intent']
samples_text.append(pattern)
intents_labels.append(item['intent'])
output_rows.append(row)
unique_labels = np.unique(intents_labels)
print("Samples: ", len(output_rows))
print("Intents: ", len(intents_labels))
print("Unique intents: ", len(unique_labels))
return samples_text, intents_labels, output_rows, unique_labels
# Save model and components
def save_model(self, model, hist, tokenizer, unique_intents, sequence_maxlen, filename):
# Save Keras Tokenizer
path = "../lib/"
pickle.dump(tokenizer, open(path + filename + 'tokenizer.pkl', 'wb'))
print("Tokenizer saved")
# Save unique intents
pickle.dump(unique_intents, open(path + filename + 'intents.pkl', 'wb'))
print("Unique intents saved")
# Save model
model.save(path + filename + 'model.h5', hist)
print("Model Saved")
# Save weights
model.save_weights(path + filename + 'weights.hdf5')
print("Weights saved")
pickle.dump(sequence_maxlen, open(path + filename + 'sequence_maxlen.pkl', 'wb'))
print("vector length saved")
def create_embedding_layer(self, output_dimension, words_number, sequence_maxlen):
        # count = 0  (debug counter; its increment is commented out below)
words_list = []
embeddings_index = {}
with open('../input/pre_train_50d.txt', encoding="utf8") as pre_trained:
for line in pre_trained:
splitted_line = line.split()
word = splitted_line[0]
words_list.append(word)
coefs = np.asarray(splitted_line[1:], dtype='float32')
embeddings_index[word] = coefs
# count = count+1
pre_trained.close()
print("Found %s word vectors." % len(embeddings_index))
tokenizer = Tokenizer(num_words=words_number, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(words_list)
word_index = tokenizer.word_index
print("Vocabulary index: ", len(word_index))
# Prepare embedding matrix
hits = 0
misses = 0
embedding_matrix = np.zeros((len(word_index) + 1, output_dimension))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
# This includes the representation for "padding" and "OOV"
embedding_matrix[i] = embedding_vector
hits += 1
else:
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
embedding_layer = Embedding(input_dim=len(word_index) + 1, output_dim=output_dimension,
weights=[embedding_matrix], input_length=sequence_maxlen, trainable=True)
print("Emb created")
return tokenizer, embedding_layer
def create_model(self, embedding_layer, num_classes, sequence_maxlen, output_dimension):
model = Sequential()
model.add(embedding_layer)
model.add(LSTM(128, return_sequences=False, input_shape=(sequence_maxlen, output_dimension)))
model.add(Dense(num_classes, activation='softmax'))
print(model.summary())
return model
| en | 0.732383 | # from keras.utils.np_utils import to_categorical # review # Save model and components # Save Keras Tokenizer # Save unique intents # Save model # Save weights # count = 0 MOre behind # count = count+1 # Prepare embedding matrix # words not found in embedding index will be all-zeros. # This includes the representation for "padding" and "OOV" | 2.48919 | 2 |
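A rough end-to-end wiring of the trainer above, assuming src/ is on sys.path; the intents path, vocabulary size, 50-dimensional vectors, maxlen of 20 and the fit() hyper-parameters are illustrative assumptions:

from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from movie_assistant_trainner import MovieAssistantTrainer

trainer = MovieAssistantTrainer()
texts, labels, _, unique_labels = trainer.load_training_data('../input/intents.json')

sequence_maxlen, output_dim = 20, 50
tokenizer, embedding_layer = trainer.create_embedding_layer(output_dim, 10000, sequence_maxlen)

# Vectorize the user inputs and one-hot encode the intent labels.
X = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=sequence_maxlen)
label_to_idx = {label: i for i, label in enumerate(unique_labels)}
y = to_categorical([label_to_idx[l] for l in labels], num_classes=len(unique_labels))

model = trainer.create_model(embedding_layer, len(unique_labels), sequence_maxlen, output_dim)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X, y, epochs=50, batch_size=8)
trainer.save_model(model, history, tokenizer, unique_labels, sequence_maxlen, 'movie_assistant_')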
tools/sync_rses.py | faluchet/rucio | 0 | 6630648 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os.path
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
os.chdir(base_path)
import json # noqa: E402
import sys # noqa: E402
import traceback # noqa: E402
from rucio.client import Client # noqa: E402
from rucio.common.exception import Duplicate # noqa: E402
UNKNOWN = 3
CRITICAL = 2
WARNING = 1
OK = 0
def main(argv):
# parameters
if argv:
rse_repo_file = argv[0]
else:
rse_repo_file = 'etc/rse_repository.json'
json_data = open(rse_repo_file)
repo_data = json.load(json_data)
json_data.close()
c = Client()
for rse in repo_data:
try:
deterministic = repo_data[rse].get('deterministic', True)
volatile = repo_data[rse].get('volatile', False)
region_code = repo_data[rse].get('region_code')
country_name = repo_data[rse].get('country_name')
staging_area = repo_data[rse].get('staging_area')
continent = repo_data[rse].get('continent')
time_zone = repo_data[rse].get('time_zone')
ISP = repo_data[rse].get('ISP')
c.add_rse(rse, deterministic=deterministic, volatile=volatile,
region_code=region_code, country_name=country_name, staging_area=staging_area,
continent=continent, time_zone=time_zone, ISP=ISP)
except Duplicate:
print('%(rse)s already added' % locals())
except:
errno, errstr = sys.exc_info()[:2]
trcbck = traceback.format_exc()
print('Interrupted processing with %s %s %s.' % (errno, errstr, trcbck))
for p_id in repo_data[rse]['protocols']['supported']:
try:
p = repo_data[rse]['protocols']['supported'][p_id]
p['scheme'] = p_id
c.add_protocol(rse, p)
except ValueError as e:
print(rse, e)
except Duplicate as e:
print(rse, e)
except Exception:
errno, errstr = sys.exc_info()[:2]
trcbck = traceback.format_exc()
print('Interrupted processing for %s with %s %s %s.' % (rse, errno, errstr, trcbck))
if __name__ == '__main__':
main(sys.argv[1:])
 | en | 0.753751 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright European Organization for Nuclear Research (CERN) since 2012 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # noqa: E402 # noqa: E402 # noqa: E402 # noqa: E402 # noqa: E402 # parameters | 1.976728 | 2 |
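A sketch of the minimal etc/rse_repository.json layout the script above expects; the RSE name, attributes and protocol fields are illustrative assumptions, not an official schema:

import json

repo = {
    "SITE1_DISK": {
        "deterministic": True,
        "volatile": False,
        "country_name": "CH",
        "protocols": {
            "supported": {
                "srm": {                      # the key becomes the protocol scheme
                    "hostname": "srm.example.org",
                    "port": 8443,
                    "prefix": "/rucio/",
                },
            },
        },
    },
}
with open("etc/rse_repository.json", "w") as f:   # directory must already exist
    json.dump(repo, f, indent=2)
# then: python tools/sync_rses.py etc/rse_repository.json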
django_thermostat/pypelib/persistence/PersistenceEngine.py | jpardobl/django-thermostat | 0 | 6630649 | import os
import sys
import time
'''
@author: msune,omoya,CarolinaFernandez
@@organization: i2CAT, OFELIA FP7
Persistence engine
Implements driver-based persistence backend selection
'''
class PersistenceEngine():
#Default Class Attributes
_defaultParser = "RegexParser"
_defaultPersistence = "Django"
#Drivers
_drivers = ["Django","RAWFile"]
    #Fill with appropriate path
PATH_TO_DRIVERS="backends"
def __init__(self):
raise Exception("Static class cannot be instanciated")
@staticmethod
def _getDriver(driverName):
print "driver name: %s" %driverName
if driverName == "Django":
PATH = PersistenceEngine.PATH_TO_DRIVERS + '.django.Django'
try:
exec('from ' + PATH + ' import Django')
return Django
except:
raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
elif driverName == "RAWFile":
PATH = PersistenceEngine.PATH_TO_DRIVERS + '.rawfile.RAWFile'
try:
exec('from ' + PATH + ' import RAWFile')
return RAWFile
except:
raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
else:
raise Exception(driverName + ' not supported')
@staticmethod
def save(obj, pBackend, parser=None, **kwargs):
return PersistenceEngine._getDriver(pBackend).save(obj, parser, **kwargs)
@staticmethod
def load(tableName, pBackend, resolverMappings, parser=None, **kwargs):
return PersistenceEngine._getDriver(pBackend).load(tableName, resolverMappings, parser, **kwargs)
'''
Retrieves every Driver's PolicyRuleTable object for a given name.
This method should be seldom used.
'''
@staticmethod
def loadAll(tableName, pBackend):
return PersistenceEngine._getDriver(pBackend).loadAll(tableName)
'''
Deletes a Driver's PolicyRuleTable object for a given ID.
This method should be seldom used.
'''
@staticmethod
def delete(tableID, pBackend):
return PersistenceEngine._getDriver(pBackend).delete(tableID)
| en | 0.698223 | @author: msune,omoya,CarolinaFernandez @@organization: i2CAT, OFELIA FP7 Persistence engine Implementes driver-based persistence backend selection #Default Class Attributes #Drivers #Fill with appropiate path Retrieves every Driver's PolicyRuleTable object for a given name. This method should be seldom used. Deletes a Driver's PolicyRuleTable object for a given ID. This method should be seldom used. | 2.325096 | 2 |
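A hedged usage sketch for the static engine above; the backend-specific keyword arguments (for example a file name for the RAWFile driver) are assumptions, the real signatures live in the drivers under the backends package:

from django_thermostat.pypelib.persistence.PersistenceEngine import PersistenceEngine

# Load a rule table through the RAWFile backend and write it back out.
table = PersistenceEngine.load("thermostat_rules", "RAWFile",
                               resolverMappings={}, fileName="/tmp/rules.raw")
PersistenceEngine.save(table, "RAWFile", fileName="/tmp/rules.raw")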
custom_components/meross_lan/config_flow.py | gelokatil/meross_lan | 0 | 6630650 | """Config flow for Meross IoT local LAN integration."""
from time import time
import voluptuous as vol
from typing import OrderedDict
import json
try:
from pytz import common_timezones
except Exception:
common_timezones = None
from homeassistant import config_entries
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .merossclient import MerossHttpClient, MerossDeviceDescriptor, const as mc, get_productnametype
from .helpers import LOGGER, mqtt_is_loaded
from .const import (
DOMAIN,
CONF_HOST, CONF_DEVICE_ID, CONF_KEY,
CONF_PAYLOAD, CONF_DEVICE_TYPE,
CONF_PROTOCOL, CONF_PROTOCOL_OPTIONS,
CONF_POLLING_PERIOD, CONF_POLLING_PERIOD_DEFAULT,
CONF_TIME_ZONE,
CONF_TRACE, CONF_TRACE_TIMEOUT,
)
async def _http_discovery(host: str, key: str, hass) -> dict:
client = MerossHttpClient(host, key, async_get_clientsession(hass), LOGGER)
payload = (await client.async_request(mc.NS_APPLIANCE_SYSTEM_ALL)).get(mc.KEY_PAYLOAD)
payload.update((await client.async_request(mc.NS_APPLIANCE_SYSTEM_ABILITY)).get(mc.KEY_PAYLOAD))
return {
CONF_HOST: host,
CONF_PAYLOAD: payload,
CONF_KEY: key
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Meross IoT local LAN."""
_discovery_info: dict = None
_device_id: str = None
_host: str = None
_key: str = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
def async_get_options_flow(config_entry):
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
errors = {}
if user_input is None:
# we could get here from user flow start in UI
# or following dhcp discovery
if self._host is None:
# check we already configured the hub ..
if (DOMAIN not in self._async_current_ids()) and mqtt_is_loaded(self.hass):
return await self.async_step_hub()
else:
self._host = user_input[CONF_HOST]
self._key = user_input.get(CONF_KEY)
try:
discovery_info = await _http_discovery(self._host, self._key, self.hass)
return await self.async_step_discovery(discovery_info)
except Exception as e:
LOGGER.debug("Error (%s) connecting to meross device (host:%s)", str(e), self._host)
errors["base"] = "cannot_connect"
config_schema = {
vol.Required(CONF_HOST, description={"suggested_value": self._host}): str,
vol.Optional(CONF_KEY, description={"suggested_value": self._key}): str,
}
return self.async_show_form(step_id="user", data_schema=vol.Schema(config_schema), errors=errors)
async def async_step_discovery(self, discovery_info: DiscoveryInfoType):
await self._async_set_info(discovery_info)
return await self.async_step_device()
async def async_step_dhcp(self, discovery_info: DiscoveryInfoType):
"""Handle a flow initialized by DHCP discovery."""
LOGGER.debug("received dhcp discovery: %s", json.dumps(discovery_info))
self._host = discovery_info.get('ip')
self._macaddress = discovery_info.get('macaddress')
"""
we'll update the unique_id for the flow when we'll have the device_id
macaddress would have been a better choice since the beginning (...)
but I don't want to mess with ConfigEntry versioning right now
Here this is needed in case we cannot correctly identify the device
via our api and the dhcp integration keeps pushing us discoveries for
the same device
"""
await self.async_set_unique_id(self._macaddress, raise_on_progress=True)
"""
Check we already dont have the device registered.
This is probably overkill since the ConfigFlow will recognize
the duplicated unique_id sooner or later
"""
api = self.hass.data.get(DOMAIN)
if api is not None:
if api.has_device(self._host, self._macaddress):
return self.async_abort(reason='already_configured')
self._key = api.key
try:
# try device identification so the user/UI has a good context to start with
_discovery_info = await _http_discovery(self._host, self._key, self.hass)
await self._async_set_info(_discovery_info)
# now just let the user edit/accept the host address even if identification was fine
except Exception as e:
LOGGER.debug("Error (%s) connecting to meross device (host:%s)", str(e), self._host)
# forgive and continue if we cant discover the device...let the user work it out
return await self.async_step_user()
async def async_step_device(self, user_input=None):
data = self._discovery_info
if user_input is None:
config_schema = {}
return self.async_show_form(
step_id="device",
data_schema=vol.Schema(config_schema),
description_placeholders=self._placeholders
)
return self.async_create_entry(title=self._descriptor.type + " " + self._device_id, data=data)
async def async_step_hub(self, user_input=None):
#right now this is only used to setup MQTT Hub feature to allow discovery and mqtt message sub/pub
if user_input == None:
await self.async_set_unique_id(DOMAIN)
self._abort_if_unique_id_configured()
config_schema = { vol.Optional(CONF_KEY): str }
return self.async_show_form(step_id="hub", data_schema=vol.Schema(config_schema))
return self.async_create_entry(title="MQTT Hub", data=user_input)
async def _async_set_info(self, discovery_info: DiscoveryInfoType) -> None:
self._discovery_info = discovery_info
self._descriptor = MerossDeviceDescriptor(discovery_info.get(CONF_PAYLOAD, {}))
self._device_id = self._descriptor.uuid
await self.async_set_unique_id(self._device_id)
self._abort_if_unique_id_configured()
discovery_info[CONF_TIME_ZONE] = self._descriptor.timezone
if CONF_DEVICE_ID not in discovery_info:#this is coming from manual user entry or dhcp discovery
discovery_info[CONF_DEVICE_ID] = self._device_id
self._placeholders = {
CONF_DEVICE_TYPE: get_productnametype(self._descriptor.type),
CONF_DEVICE_ID: self._device_id,
CONF_PAYLOAD: ""#json.dumps(data.get(CONF_PAYLOAD, {}))
}
self.context["title_placeholders"] = self._placeholders
return
class OptionsFlowHandler(config_entries.OptionsFlow):
"""
Manage device options configuration
"""
def __init__(self, config_entry):
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
if self._config_entry.unique_id == DOMAIN:
return await self.async_step_hub(user_input)
return await self.async_step_device(user_input)
async def async_step_hub(self, user_input=None):
if user_input is not None:
data = dict(self._config_entry.data)
data[CONF_KEY] = user_input.get(CONF_KEY)
self.hass.config_entries.async_update_entry(self._config_entry, data=data)
return self.async_create_entry(title="", data=None)
config_schema = OrderedDict()
config_schema[
vol.Optional(
CONF_KEY,
description={ "suggested_value" : self._config_entry.data.get(CONF_KEY) }
)
] = str
return self.async_show_form(step_id="hub", data_schema=vol.Schema(config_schema))
async def async_step_device(self, user_input=None):
data = self._config_entry.data
if user_input is not None:
data = dict(data)
data[CONF_KEY] = user_input.get(CONF_KEY)
data[CONF_PROTOCOL] = user_input.get(CONF_PROTOCOL)
data[CONF_POLLING_PERIOD] = user_input.get(CONF_POLLING_PERIOD)
data[CONF_TIME_ZONE] = user_input.get(CONF_TIME_ZONE)
data[CONF_TRACE] = time() + CONF_TRACE_TIMEOUT if user_input.get(CONF_TRACE) else 0
self.hass.config_entries.async_update_entry(self._config_entry, data=data)
return self.async_create_entry(title=None, data=None)
config_schema = OrderedDict()
config_schema[
vol.Optional(
CONF_KEY,
description={"suggested_value": data.get(CONF_KEY)}
)
] = str
config_schema[
vol.Optional(
CONF_PROTOCOL,
description={"suggested_value": data.get(CONF_PROTOCOL)}
)
] = vol.In(CONF_PROTOCOL_OPTIONS)
config_schema[
vol.Optional(
CONF_POLLING_PERIOD,
default=CONF_POLLING_PERIOD_DEFAULT,
description={"suggested_value": data.get(CONF_POLLING_PERIOD)}
)
] = cv.positive_int
config_schema[
vol.Optional(
CONF_TIME_ZONE,
description={"suggested_value": data.get(CONF_TIME_ZONE)}
)
] = vol.In(common_timezones) if common_timezones is not None else str
config_schema[
vol.Optional(
CONF_TRACE,
# CONF_TRACE contains the trace 'end' time epoch if set
description={"suggested_value": data.get(CONF_TRACE, 0) > time()}
)
] = bool
descriptor = MerossDeviceDescriptor(data.get(CONF_PAYLOAD, {}))
return self.async_show_form(
step_id="device",
data_schema=vol.Schema(config_schema),
description_placeholders={
CONF_DEVICE_TYPE: get_productnametype(descriptor.type),
CONF_DEVICE_ID: data.get(CONF_DEVICE_ID),
CONF_HOST: data.get(CONF_HOST) or "MQTT",
CONF_PAYLOAD: ""#json.dumps(data.get(CONF_PAYLOAD, {}))
}
)
| """Config flow for Meross IoT local LAN integration."""
from time import time
import voluptuous as vol
from collections import OrderedDict
import json
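# pytz is optional: when it is available, common_timezones feeds the time zone selector in the options flow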
try:
from pytz import common_timezones
except Exception:
common_timezones = None
from homeassistant import config_entries
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .merossclient import MerossHttpClient, MerossDeviceDescriptor, const as mc, get_productnametype
from .helpers import LOGGER, mqtt_is_loaded
from .const import (
DOMAIN,
CONF_HOST, CONF_DEVICE_ID, CONF_KEY,
CONF_PAYLOAD, CONF_DEVICE_TYPE,
CONF_PROTOCOL, CONF_PROTOCOL_OPTIONS,
CONF_POLLING_PERIOD, CONF_POLLING_PERIOD_DEFAULT,
CONF_TIME_ZONE,
CONF_TRACE, CONF_TRACE_TIMEOUT,
)
async def _http_discovery(host: str, key: str, hass) -> dict:
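    """
    Query the device over HTTP for the NS_APPLIANCE_SYSTEM_ALL and NS_APPLIANCE_SYSTEM_ABILITY
    namespaces and merge both payloads into a single discovery dict; MerossDeviceDescriptor
    later parses this merged payload (uuid, type, time zone, ...) in _async_set_info.
    """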
client = MerossHttpClient(host, key, async_get_clientsession(hass), LOGGER)
payload = (await client.async_request(mc.NS_APPLIANCE_SYSTEM_ALL)).get(mc.KEY_PAYLOAD)
payload.update((await client.async_request(mc.NS_APPLIANCE_SYSTEM_ABILITY)).get(mc.KEY_PAYLOAD))
return {
CONF_HOST: host,
CONF_PAYLOAD: payload,
CONF_KEY: key
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Meross IoT local LAN."""
_discovery_info: dict = None
_device_id: str = None
_host: str = None
_key: str = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
def async_get_options_flow(config_entry):
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
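        """Handle the manual setup step: offer the MQTT hub when appropriate, otherwise identify a device by host and key."""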
errors = {}
if user_input is None:
# we could get here from user flow start in UI
# or following dhcp discovery
if self._host is None:
                # if the MQTT hub entry is not configured yet and MQTT is available, offer to set it up first
if (DOMAIN not in self._async_current_ids()) and mqtt_is_loaded(self.hass):
return await self.async_step_hub()
else:
self._host = user_input[CONF_HOST]
self._key = user_input.get(CONF_KEY)
try:
discovery_info = await _http_discovery(self._host, self._key, self.hass)
return await self.async_step_discovery(discovery_info)
except Exception as e:
LOGGER.debug("Error (%s) connecting to meross device (host:%s)", str(e), self._host)
errors["base"] = "cannot_connect"
config_schema = {
vol.Required(CONF_HOST, description={"suggested_value": self._host}): str,
vol.Optional(CONF_KEY, description={"suggested_value": self._key}): str,
}
return self.async_show_form(step_id="user", data_schema=vol.Schema(config_schema), errors=errors)
async def async_step_discovery(self, discovery_info: DiscoveryInfoType):
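        """Handle a discovery payload (or a successful manual identification) and move on to device confirmation."""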
await self._async_set_info(discovery_info)
return await self.async_step_device()
async def async_step_dhcp(self, discovery_info: DiscoveryInfoType):
"""Handle a flow initialized by DHCP discovery."""
LOGGER.debug("received dhcp discovery: %s", json.dumps(discovery_info))
self._host = discovery_info.get('ip')
self._macaddress = discovery_info.get('macaddress')
"""
we'll update the unique_id for the flow when we'll have the device_id
macaddress would have been a better choice since the beginning (...)
but I don't want to mess with ConfigEntry versioning right now
Here this is needed in case we cannot correctly identify the device
via our api and the dhcp integration keeps pushing us discoveries for
the same device
"""
await self.async_set_unique_id(self._macaddress, raise_on_progress=True)
"""
Check we already dont have the device registered.
This is probably overkill since the ConfigFlow will recognize
the duplicated unique_id sooner or later
"""
api = self.hass.data.get(DOMAIN)
if api is not None:
if api.has_device(self._host, self._macaddress):
return self.async_abort(reason='already_configured')
self._key = api.key
try:
# try device identification so the user/UI has a good context to start with
_discovery_info = await _http_discovery(self._host, self._key, self.hass)
await self._async_set_info(_discovery_info)
# now just let the user edit/accept the host address even if identification was fine
except Exception as e:
LOGGER.debug("Error (%s) connecting to meross device (host:%s)", str(e), self._host)
            # forgive and continue if we can't discover the device... let the user work it out
return await self.async_step_user()
async def async_step_device(self, user_input=None):
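        """Show the identified device for confirmation and create the config entry."""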
data = self._discovery_info
if user_input is None:
config_schema = {}
return self.async_show_form(
step_id="device",
data_schema=vol.Schema(config_schema),
description_placeholders=self._placeholders
)
return self.async_create_entry(title=self._descriptor.type + " " + self._device_id, data=data)
async def async_step_hub(self, user_input=None):
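        """Configure the single MQTT hub entry (key only)."""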
        # right now this is only used to set up the MQTT Hub feature, allowing discovery and MQTT message publish/subscribe
        if user_input is None:
await self.async_set_unique_id(DOMAIN)
self._abort_if_unique_id_configured()
config_schema = { vol.Optional(CONF_KEY): str }
return self.async_show_form(step_id="hub", data_schema=vol.Schema(config_schema))
return self.async_create_entry(title="MQTT Hub", data=user_input)
async def _async_set_info(self, discovery_info: DiscoveryInfoType) -> None:
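        """Parse the discovery payload into a device descriptor, set the flow unique_id and build the UI placeholders."""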
self._discovery_info = discovery_info
self._descriptor = MerossDeviceDescriptor(discovery_info.get(CONF_PAYLOAD, {}))
self._device_id = self._descriptor.uuid
await self.async_set_unique_id(self._device_id)
self._abort_if_unique_id_configured()
discovery_info[CONF_TIME_ZONE] = self._descriptor.timezone
        if CONF_DEVICE_ID not in discovery_info:  # this is coming from manual user entry or dhcp discovery
discovery_info[CONF_DEVICE_ID] = self._device_id
self._placeholders = {
CONF_DEVICE_TYPE: get_productnametype(self._descriptor.type),
CONF_DEVICE_ID: self._device_id,
CONF_PAYLOAD: ""#json.dumps(data.get(CONF_PAYLOAD, {}))
}
self.context["title_placeholders"] = self._placeholders
return
class OptionsFlowHandler(config_entries.OptionsFlow):
"""
Manage device options configuration
"""
def __init__(self, config_entry):
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
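        """Dispatch to the hub or device options step based on the entry unique_id."""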
if self._config_entry.unique_id == DOMAIN:
return await self.async_step_hub(user_input)
return await self.async_step_device(user_input)
async def async_step_hub(self, user_input=None):
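        """Edit the key used by the MQTT hub entry."""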
if user_input is not None:
data = dict(self._config_entry.data)
data[CONF_KEY] = user_input.get(CONF_KEY)
self.hass.config_entries.async_update_entry(self._config_entry, data=data)
return self.async_create_entry(title="", data=None)
config_schema = OrderedDict()
config_schema[
vol.Optional(
CONF_KEY,
description={ "suggested_value" : self._config_entry.data.get(CONF_KEY) }
)
] = str
return self.async_show_form(step_id="hub", data_schema=vol.Schema(config_schema))
async def async_step_device(self, user_input=None):
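        """Edit key, protocol, polling period, time zone and trace options for a device entry."""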
data = self._config_entry.data
if user_input is not None:
data = dict(data)
data[CONF_KEY] = user_input.get(CONF_KEY)
data[CONF_PROTOCOL] = user_input.get(CONF_PROTOCOL)
data[CONF_POLLING_PERIOD] = user_input.get(CONF_POLLING_PERIOD)
data[CONF_TIME_ZONE] = user_input.get(CONF_TIME_ZONE)
data[CONF_TRACE] = time() + CONF_TRACE_TIMEOUT if user_input.get(CONF_TRACE) else 0
self.hass.config_entries.async_update_entry(self._config_entry, data=data)
return self.async_create_entry(title=None, data=None)
config_schema = OrderedDict()
config_schema[
vol.Optional(
CONF_KEY,
description={"suggested_value": data.get(CONF_KEY)}
)
] = str
config_schema[
vol.Optional(
CONF_PROTOCOL,
description={"suggested_value": data.get(CONF_PROTOCOL)}
)
] = vol.In(CONF_PROTOCOL_OPTIONS)
config_schema[
vol.Optional(
CONF_POLLING_PERIOD,
default=CONF_POLLING_PERIOD_DEFAULT,
description={"suggested_value": data.get(CONF_POLLING_PERIOD)}
)
] = cv.positive_int
config_schema[
vol.Optional(
CONF_TIME_ZONE,
description={"suggested_value": data.get(CONF_TIME_ZONE)}
)
] = vol.In(common_timezones) if common_timezones is not None else str
config_schema[
vol.Optional(
CONF_TRACE,
# CONF_TRACE contains the trace 'end' time epoch if set
description={"suggested_value": data.get(CONF_TRACE, 0) > time()}
)
] = bool
descriptor = MerossDeviceDescriptor(data.get(CONF_PAYLOAD, {}))
return self.async_show_form(
step_id="device",
data_schema=vol.Schema(config_schema),
description_placeholders={
CONF_DEVICE_TYPE: get_productnametype(descriptor.type),
CONF_DEVICE_ID: data.get(CONF_DEVICE_ID),
CONF_HOST: data.get(CONF_HOST) or "MQTT",
CONF_PAYLOAD: ""#json.dumps(data.get(CONF_PAYLOAD, {}))
}
)
| en | 0.886231 | Config flow for Meross IoT local LAN integration. Handle a config flow for Meross IoT local LAN. # we could get here from user flow start in UI # or following dhcp discovery # check we already configured the hub .. Handle a flow initialized by DHCP discovery. we'll update the unique_id for the flow when we'll have the device_id macaddress would have been a better choice since the beginning (...) but I don't want to mess with ConfigEntry versioning right now Here this is needed in case we cannot correctly identify the device via our api and the dhcp integration keeps pushing us discoveries for the same device Check we already dont have the device registered. This is probably overkill since the ConfigFlow will recognize the duplicated unique_id sooner or later # try device identification so the user/UI has a good context to start with # now just let the user edit/accept the host address even if identification was fine # forgive and continue if we cant discover the device...let the user work it out #right now this is only used to setup MQTT Hub feature to allow discovery and mqtt message sub/pub #this is coming from manual user entry or dhcp discovery Manage device options configuration # CONF_TRACE contains the trace 'end' time epoch if set | 2.256241 | 2 |