# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._resource_management_client_enums import *
class AliasPathType(msrest.serialization.Model):
"""AliasPathType.
:ivar path: The path of an alias.
:vartype path: str
:ivar api_versions: The api versions.
:vartype api_versions: list[str]
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
}
def __init__(
self,
*,
path: Optional[str] = None,
api_versions: Optional[List[str]] = None,
**kwargs
):
"""
:keyword path: The path of an alias.
:paramtype path: str
:keyword api_versions: The api versions.
:paramtype api_versions: list[str]
"""
super(AliasPathType, self).__init__(**kwargs)
self.path = path
self.api_versions = api_versions
class AliasType(msrest.serialization.Model):
"""AliasType.
:ivar name: The alias name.
:vartype name: str
:ivar paths: The paths for an alias.
:vartype paths: list[~azure.mgmt.resource.resources.v2016_02_01.models.AliasPathType]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'paths': {'key': 'paths', 'type': '[AliasPathType]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
paths: Optional[List["AliasPathType"]] = None,
**kwargs
):
"""
:keyword name: The alias name.
:paramtype name: str
:keyword paths: The paths for an alias.
:paramtype paths: list[~azure.mgmt.resource.resources.v2016_02_01.models.AliasPathType]
"""
super(AliasType, self).__init__(**kwargs)
self.name = name
self.paths = paths
class BasicDependency(msrest.serialization.Model):
"""Deployment dependency information.
:ivar id: The ID of the dependency.
:vartype id: str
:ivar resource_type: The dependency resource type.
:vartype resource_type: str
:ivar resource_name: The dependency resource name.
:vartype resource_name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
resource_type: Optional[str] = None,
resource_name: Optional[str] = None,
**kwargs
):
"""
:keyword id: The ID of the dependency.
:paramtype id: str
:keyword resource_type: The dependency resource type.
:paramtype resource_type: str
:keyword resource_name: The dependency resource name.
:paramtype resource_name: str
"""
super(BasicDependency, self).__init__(**kwargs)
self.id = id
self.resource_type = resource_type
self.resource_name = resource_name
class DebugSetting(msrest.serialization.Model):
"""DebugSetting.
:ivar detail_level: The debug detail level.
:vartype detail_level: str
"""
_attribute_map = {
'detail_level': {'key': 'detailLevel', 'type': 'str'},
}
def __init__(
self,
*,
detail_level: Optional[str] = None,
**kwargs
):
"""
:keyword detail_level: The debug detail level.
:paramtype detail_level: str
"""
super(DebugSetting, self).__init__(**kwargs)
self.detail_level = detail_level
class Dependency(msrest.serialization.Model):
"""Deployment dependency information.
:ivar depends_on: The list of dependencies.
:vartype depends_on: list[~azure.mgmt.resource.resources.v2016_02_01.models.BasicDependency]
:ivar id: The ID of the dependency.
:vartype id: str
:ivar resource_type: The dependency resource type.
:vartype resource_type: str
:ivar resource_name: The dependency resource name.
:vartype resource_name: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
'id': {'key': 'id', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
}
def __init__(
self,
*,
depends_on: Optional[List["BasicDependency"]] = None,
id: Optional[str] = None,
resource_type: Optional[str] = None,
resource_name: Optional[str] = None,
**kwargs
):
"""
:keyword depends_on: The list of dependencies.
:paramtype depends_on: list[~azure.mgmt.resource.resources.v2016_02_01.models.BasicDependency]
:keyword id: The ID of the dependency.
:paramtype id: str
:keyword resource_type: The dependency resource type.
:paramtype resource_type: str
:keyword resource_name: The dependency resource name.
:paramtype resource_name: str
"""
super(Dependency, self).__init__(**kwargs)
self.depends_on = depends_on
self.id = id
self.resource_type = resource_type
self.resource_name = resource_name
class Deployment(msrest.serialization.Model):
"""Deployment operation parameters.
:ivar properties: The deployment properties.
:vartype properties: ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'DeploymentProperties'},
}
def __init__(
self,
*,
properties: Optional["DeploymentProperties"] = None,
**kwargs
):
"""
:keyword properties: The deployment properties.
:paramtype properties: ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentProperties
"""
super(Deployment, self).__init__(**kwargs)
self.properties = properties
class DeploymentExportResult(msrest.serialization.Model):
"""DeploymentExportResult.
:ivar template: The template content.
:vartype template: any
"""
_attribute_map = {
'template': {'key': 'template', 'type': 'object'},
}
def __init__(
self,
*,
template: Optional[Any] = None,
**kwargs
):
"""
:keyword template: The template content.
:paramtype template: any
"""
super(DeploymentExportResult, self).__init__(**kwargs)
self.template = template
class DeploymentExtended(msrest.serialization.Model):
"""Deployment information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The ID of the deployment.
:vartype id: str
:ivar name: Required. The name of the deployment.
:vartype name: str
:ivar properties: Deployment properties.
:vartype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
"""
_validation = {
'id': {'readonly': True},
'name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentPropertiesExtended'},
}
def __init__(
self,
*,
name: str,
properties: Optional["DeploymentPropertiesExtended"] = None,
**kwargs
):
"""
:keyword name: Required. The name of the deployment.
:paramtype name: str
:keyword properties: Deployment properties.
:paramtype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
"""
super(DeploymentExtended, self).__init__(**kwargs)
self.id = None
self.name = name
self.properties = properties
class DeploymentExtendedFilter(msrest.serialization.Model):
"""Deployment filter.
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
provisioning_state: Optional[str] = None,
**kwargs
):
"""
:keyword provisioning_state: The provisioning state.
:paramtype provisioning_state: str
"""
super(DeploymentExtendedFilter, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
class DeploymentListResult(msrest.serialization.Model):
"""List of deployments.
:ivar value: The list of deployments.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExtended]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentExtended]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["DeploymentExtended"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The list of deployments.
:paramtype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentExtended]
:keyword next_link: The URL to get the next set of results.
:paramtype next_link: str
"""
super(DeploymentListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DeploymentOperation(msrest.serialization.Model):
"""Deployment operation information.
:ivar id: Full deployment operation id.
:vartype id: str
:ivar operation_id: Deployment operation id.
:vartype operation_id: str
:ivar properties: Deployment properties.
:vartype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentOperationProperties
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'operation_id': {'key': 'operationId', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentOperationProperties'},
}
def __init__(
self,
*,
id: Optional[str] = None,
operation_id: Optional[str] = None,
properties: Optional["DeploymentOperationProperties"] = None,
**kwargs
):
"""
:keyword id: Full deployment operation id.
:paramtype id: str
:keyword operation_id: Deployment operation id.
:paramtype operation_id: str
:keyword properties: Deployment properties.
:paramtype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentOperationProperties
"""
super(DeploymentOperation, self).__init__(**kwargs)
self.id = id
self.operation_id = operation_id
self.properties = properties
class DeploymentOperationProperties(msrest.serialization.Model):
"""Deployment operation properties.
:ivar provisioning_state: The state of the provisioning.
:vartype provisioning_state: str
:ivar timestamp: The date and time of the operation.
:vartype timestamp: ~datetime.datetime
:ivar service_request_id: Deployment operation service request id.
:vartype service_request_id: str
:ivar status_code: Operation status code.
:vartype status_code: str
:ivar status_message: Operation status message.
:vartype status_message: any
:ivar target_resource: The target resource.
:vartype target_resource: ~azure.mgmt.resource.resources.v2016_02_01.models.TargetResource
:ivar request: The HTTP request message.
:vartype request: ~azure.mgmt.resource.resources.v2016_02_01.models.HttpMessage
:ivar response: The HTTP response message.
:vartype response: ~azure.mgmt.resource.resources.v2016_02_01.models.HttpMessage
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'service_request_id': {'key': 'serviceRequestId', 'type': 'str'},
'status_code': {'key': 'statusCode', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'object'},
'target_resource': {'key': 'targetResource', 'type': 'TargetResource'},
'request': {'key': 'request', 'type': 'HttpMessage'},
'response': {'key': 'response', 'type': 'HttpMessage'},
}
def __init__(
self,
*,
provisioning_state: Optional[str] = None,
timestamp: Optional[datetime.datetime] = None,
service_request_id: Optional[str] = None,
status_code: Optional[str] = None,
status_message: Optional[Any] = None,
target_resource: Optional["TargetResource"] = None,
request: Optional["HttpMessage"] = None,
response: Optional["HttpMessage"] = None,
**kwargs
):
"""
:keyword provisioning_state: The state of the provisioning.
:paramtype provisioning_state: str
:keyword timestamp: The date and time of the operation.
:paramtype timestamp: ~datetime.datetime
:keyword service_request_id: Deployment operation service request id.
:paramtype service_request_id: str
:keyword status_code: Operation status code.
:paramtype status_code: str
:keyword status_message: Operation status message.
:paramtype status_message: any
:keyword target_resource: The target resource.
:paramtype target_resource: ~azure.mgmt.resource.resources.v2016_02_01.models.TargetResource
:keyword request: The HTTP request message.
:paramtype request: ~azure.mgmt.resource.resources.v2016_02_01.models.HttpMessage
:keyword response: The HTTP response message.
:paramtype response: ~azure.mgmt.resource.resources.v2016_02_01.models.HttpMessage
"""
super(DeploymentOperationProperties, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
self.timestamp = timestamp
self.service_request_id = service_request_id
self.status_code = status_code
self.status_message = status_message
self.target_resource = target_resource
self.request = request
self.response = response
class DeploymentOperationsListResult(msrest.serialization.Model):
"""List of deployment operations.
:ivar value: The list of deployments.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentOperation]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["DeploymentOperation"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The list of deployments.
:paramtype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentOperation]
:keyword next_link: The URL to get the next set of results.
:paramtype next_link: str
"""
super(DeploymentOperationsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DeploymentProperties(msrest.serialization.Model):
"""Deployment properties.
All required parameters must be populated in order to send to Azure.
:ivar template: The template content. It can be a JObject or a well formed JSON string. Use
only one of Template or TemplateLink.
:vartype template: any
:ivar template_link: The template URI. Use only one of Template or TemplateLink.
:vartype template_link: ~azure.mgmt.resource.resources.v2016_02_01.models.TemplateLink
:ivar parameters: Deployment parameters. It can be a JObject or a well formed JSON string. Use
only one of Parameters or ParametersLink.
:vartype parameters: any
:ivar parameters_link: The parameters URI. Use only one of Parameters or ParametersLink.
:vartype parameters_link: ~azure.mgmt.resource.resources.v2016_02_01.models.ParametersLink
:ivar mode: Required. The deployment mode. Possible values include: "Incremental", "Complete".
:vartype mode: str or ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentMode
:ivar debug_setting: The debug setting of the deployment.
:vartype debug_setting: ~azure.mgmt.resource.resources.v2016_02_01.models.DebugSetting
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'template': {'key': 'template', 'type': 'object'},
'template_link': {'key': 'templateLink', 'type': 'TemplateLink'},
'parameters': {'key': 'parameters', 'type': 'object'},
'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'},
'mode': {'key': 'mode', 'type': 'str'},
'debug_setting': {'key': 'debugSetting', 'type': 'DebugSetting'},
}
def __init__(
self,
*,
mode: Union[str, "DeploymentMode"],
template: Optional[Any] = None,
template_link: Optional["TemplateLink"] = None,
parameters: Optional[Any] = None,
parameters_link: Optional["ParametersLink"] = None,
debug_setting: Optional["DebugSetting"] = None,
**kwargs
):
"""
:keyword template: The template content. It can be a JObject or a well formed JSON string. Use
only one of Template or TemplateLink.
:paramtype template: any
:keyword template_link: The template URI. Use only one of Template or TemplateLink.
:paramtype template_link: ~azure.mgmt.resource.resources.v2016_02_01.models.TemplateLink
:keyword parameters: Deployment parameters. It can be a JObject or a well formed JSON string.
Use only one of Parameters or ParametersLink.
:paramtype parameters: any
:keyword parameters_link: The parameters URI. Use only one of Parameters or ParametersLink.
:paramtype parameters_link: ~azure.mgmt.resource.resources.v2016_02_01.models.ParametersLink
:keyword mode: Required. The deployment mode. Possible values include: "Incremental",
"Complete".
:paramtype mode: str or ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentMode
:keyword debug_setting: The debug setting of the deployment.
:paramtype debug_setting: ~azure.mgmt.resource.resources.v2016_02_01.models.DebugSetting
"""
super(DeploymentProperties, self).__init__(**kwargs)
self.template = template
self.template_link = template_link
self.parameters = parameters
self.parameters_link = parameters_link
self.mode = mode
self.debug_setting = debug_setting
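# Illustrative sketch, not part of the generated models: building a Deployment
# payload from DeploymentProperties. Only one of template/template_link and one
# of parameters/parameters_link should be set; the URI and parameter values
# below are placeholders.
def _example_deployment() -> "Deployment":
    properties = DeploymentProperties(
        mode="Incremental",  # a plain string or a DeploymentMode value is accepted
        template_link=TemplateLink(uri="https://example.com/azuredeploy.json"),
        parameters={"siteName": {"value": "example-site"}},
    )
    return Deployment(properties=properties)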
class DeploymentPropertiesExtended(msrest.serialization.Model):
"""Deployment properties with additional details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: The state of the provisioning.
:vartype provisioning_state: str
:ivar correlation_id: The correlation ID of the deployment.
:vartype correlation_id: str
:ivar timestamp: The timestamp of the template deployment.
:vartype timestamp: ~datetime.datetime
:ivar outputs: Key/value pairs that represent deployment output.
:vartype outputs: any
:ivar providers: The list of resource providers needed for the deployment.
:vartype providers: list[~azure.mgmt.resource.resources.v2016_02_01.models.Provider]
:ivar dependencies: The list of deployment dependencies.
:vartype dependencies: list[~azure.mgmt.resource.resources.v2016_02_01.models.Dependency]
:ivar template: The template content. Use only one of Template or TemplateLink.
:vartype template: any
:ivar template_link: The URI referencing the template. Use only one of Template or
TemplateLink.
:vartype template_link: ~azure.mgmt.resource.resources.v2016_02_01.models.TemplateLink
:ivar parameters: Deployment parameters. Use only one of Parameters or ParametersLink.
:vartype parameters: any
:ivar parameters_link: The URI referencing the parameters. Use only one of Parameters or
ParametersLink.
:vartype parameters_link: ~azure.mgmt.resource.resources.v2016_02_01.models.ParametersLink
:ivar mode: The deployment mode. Possible values include: "Incremental", "Complete".
:vartype mode: str or ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentMode
:ivar debug_setting: The debug setting of the deployment.
:vartype debug_setting: ~azure.mgmt.resource.resources.v2016_02_01.models.DebugSetting
:ivar error: The deployment error.
:vartype error: ~azure.mgmt.resource.resources.v2016_02_01.models.ErrorResponse
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'correlation_id': {'key': 'correlationId', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'outputs': {'key': 'outputs', 'type': 'object'},
'providers': {'key': 'providers', 'type': '[Provider]'},
'dependencies': {'key': 'dependencies', 'type': '[Dependency]'},
'template': {'key': 'template', 'type': 'object'},
'template_link': {'key': 'templateLink', 'type': 'TemplateLink'},
'parameters': {'key': 'parameters', 'type': 'object'},
'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'},
'mode': {'key': 'mode', 'type': 'str'},
'debug_setting': {'key': 'debugSetting', 'type': 'DebugSetting'},
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
*,
provisioning_state: Optional[str] = None,
correlation_id: Optional[str] = None,
timestamp: Optional[datetime.datetime] = None,
outputs: Optional[Any] = None,
providers: Optional[List["Provider"]] = None,
dependencies: Optional[List["Dependency"]] = None,
template: Optional[Any] = None,
template_link: Optional["TemplateLink"] = None,
parameters: Optional[Any] = None,
parameters_link: Optional["ParametersLink"] = None,
mode: Optional[Union[str, "DeploymentMode"]] = None,
debug_setting: Optional["DebugSetting"] = None,
**kwargs
):
"""
:keyword provisioning_state: The state of the provisioning.
:paramtype provisioning_state: str
:keyword correlation_id: The correlation ID of the deployment.
:paramtype correlation_id: str
:keyword timestamp: The timestamp of the template deployment.
:paramtype timestamp: ~datetime.datetime
:keyword outputs: Key/value pairs that represent deployment output.
:paramtype outputs: any
:keyword providers: The list of resource providers needed for the deployment.
:paramtype providers: list[~azure.mgmt.resource.resources.v2016_02_01.models.Provider]
:keyword dependencies: The list of deployment dependencies.
:paramtype dependencies: list[~azure.mgmt.resource.resources.v2016_02_01.models.Dependency]
:keyword template: The template content. Use only one of Template or TemplateLink.
:paramtype template: any
:keyword template_link: The URI referencing the template. Use only one of Template or
TemplateLink.
:paramtype template_link: ~azure.mgmt.resource.resources.v2016_02_01.models.TemplateLink
:keyword parameters: Deployment parameters. Use only one of Parameters or ParametersLink.
:paramtype parameters: any
:keyword parameters_link: The URI referencing the parameters. Use only one of Parameters or
ParametersLink.
:paramtype parameters_link: ~azure.mgmt.resource.resources.v2016_02_01.models.ParametersLink
:keyword mode: The deployment mode. Possible values include: "Incremental", "Complete".
:paramtype mode: str or ~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentMode
:keyword debug_setting: The debug setting of the deployment.
:paramtype debug_setting: ~azure.mgmt.resource.resources.v2016_02_01.models.DebugSetting
"""
super(DeploymentPropertiesExtended, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
self.correlation_id = correlation_id
self.timestamp = timestamp
self.outputs = outputs
self.providers = providers
self.dependencies = dependencies
self.template = template
self.template_link = template_link
self.parameters = parameters
self.parameters_link = parameters_link
self.mode = mode
self.debug_setting = debug_setting
self.error = None
class DeploymentValidateResult(msrest.serialization.Model):
"""Information from validate template deployment response.
:ivar error: Validation error.
:vartype error:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails
:ivar properties: The template deployment properties.
:vartype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ResourceManagementErrorWithDetails'},
'properties': {'key': 'properties', 'type': 'DeploymentPropertiesExtended'},
}
def __init__(
self,
*,
error: Optional["ResourceManagementErrorWithDetails"] = None,
properties: Optional["DeploymentPropertiesExtended"] = None,
**kwargs
):
"""
:keyword error: Validation error.
:paramtype error:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails
:keyword properties: The template deployment properties.
:paramtype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.DeploymentPropertiesExtended
"""
super(DeploymentValidateResult, self).__init__(**kwargs)
self.error = error
self.properties = properties
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.resource.resources.v2016_02_01.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.resources.v2016_02_01.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorResponse]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ExportTemplateRequest(msrest.serialization.Model):
"""Export resource group template request parameters.
:ivar resources: The IDs of the resources to filter the export by. To export all resources,
supply an array with single entry '*'.
:vartype resources: list[str]
:ivar options: The export template options. A CSV-formatted list containing zero or more of the
following: 'IncludeParameterDefaultValue', 'IncludeComments',
'SkipResourceNameParameterization', 'SkipAllParameterization'.
:vartype options: str
"""
_attribute_map = {
'resources': {'key': 'resources', 'type': '[str]'},
'options': {'key': 'options', 'type': 'str'},
}
def __init__(
self,
*,
resources: Optional[List[str]] = None,
options: Optional[str] = None,
**kwargs
):
"""
:keyword resources: The IDs of the resources to filter the export by. To export all resources,
supply an array with single entry '*'.
:paramtype resources: list[str]
:keyword options: The export template options. A CSV-formatted list containing zero or more of
the following: 'IncludeParameterDefaultValue', 'IncludeComments',
'SkipResourceNameParameterization', 'SkipAllParameterization'.
:paramtype options: str
"""
super(ExportTemplateRequest, self).__init__(**kwargs)
self.resources = resources
self.options = options
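# Illustrative sketch, not part of the generated models: requesting an export of
# every resource in a group, combining two of the documented options as a
# comma-separated string.
def _example_export_request() -> "ExportTemplateRequest":
    return ExportTemplateRequest(
        resources=["*"],  # a single '*' entry exports all resources in the group
        options="IncludeParameterDefaultValue,IncludeComments",
    )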
class Resource(msrest.serialization.Model):
"""Resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword location: Resource location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class GenericResource(Resource):
"""Resource information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar plan: The plan of the resource.
:vartype plan: ~azure.mgmt.resource.resources.v2016_02_01.models.Plan
:ivar properties: The resource properties.
:vartype properties: any
:ivar kind: The kind of the resource.
:vartype kind: str
:ivar managed_by: Id of the resource that manages this resource.
:vartype managed_by: str
:ivar sku: The sku of the resource.
:vartype sku: ~azure.mgmt.resource.resources.v2016_02_01.models.Sku
:ivar identity: The identity of the resource.
:vartype identity: ~azure.mgmt.resource.resources.v2016_02_01.models.Identity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'properties': {'key': 'properties', 'type': 'object'},
'kind': {'key': 'kind', 'type': 'str'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
plan: Optional["Plan"] = None,
properties: Optional[Any] = None,
kind: Optional[str] = None,
managed_by: Optional[str] = None,
sku: Optional["Sku"] = None,
identity: Optional["Identity"] = None,
**kwargs
):
"""
:keyword location: Resource location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword plan: The plan of the resource.
:paramtype plan: ~azure.mgmt.resource.resources.v2016_02_01.models.Plan
:keyword properties: The resource properties.
:paramtype properties: any
:keyword kind: The kind of the resource.
:paramtype kind: str
:keyword managed_by: Id of the resource that manages this resource.
:paramtype managed_by: str
:keyword sku: The sku of the resource.
:paramtype sku: ~azure.mgmt.resource.resources.v2016_02_01.models.Sku
:keyword identity: The identity of the resource.
:paramtype identity: ~azure.mgmt.resource.resources.v2016_02_01.models.Identity
"""
super(GenericResource, self).__init__(location=location, tags=tags, **kwargs)
self.plan = plan
self.properties = properties
self.kind = kind
self.managed_by = managed_by
self.sku = sku
self.identity = identity
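# Illustrative sketch, not part of the generated models: a GenericResource body
# combining the optional Plan, Sku and Identity sub-models. All values are
# placeholders, not real offers or SKUs.
def _example_generic_resource() -> "GenericResource":
    return GenericResource(
        location="westus",
        tags={"env": "dev"},
        plan=Plan(name="standard", publisher="example-publisher", product="example-product"),
        sku=Sku(name="S1", tier="Standard", capacity=1),
        identity=Identity(type="SystemAssigned"),
    )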
class GenericResourceExpanded(GenericResource):
"""Resource information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar plan: The plan of the resource.
:vartype plan: ~azure.mgmt.resource.resources.v2016_02_01.models.Plan
:ivar properties: The resource properties.
:vartype properties: any
:ivar kind: The kind of the resource.
:vartype kind: str
:ivar managed_by: Id of the resource that manages this resource.
:vartype managed_by: str
:ivar sku: The sku of the resource.
:vartype sku: ~azure.mgmt.resource.resources.v2016_02_01.models.Sku
:ivar identity: The identity of the resource.
:vartype identity: ~azure.mgmt.resource.resources.v2016_02_01.models.Identity
:ivar created_time: The created time of the resource. This is only present if requested via the
$expand query parameter.
:vartype created_time: ~datetime.datetime
:ivar changed_time: The changed time of the resource. This is only present if requested via the
$expand query parameter.
:vartype changed_time: ~datetime.datetime
:ivar provisioning_state: The provisioning state of the resource. This is only present if
requested via the $expand query parameter.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created_time': {'readonly': True},
'changed_time': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'properties': {'key': 'properties', 'type': 'object'},
'kind': {'key': 'kind', 'type': 'str'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'changed_time': {'key': 'changedTime', 'type': 'iso-8601'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
plan: Optional["Plan"] = None,
properties: Optional[Any] = None,
kind: Optional[str] = None,
managed_by: Optional[str] = None,
sku: Optional["Sku"] = None,
identity: Optional["Identity"] = None,
**kwargs
):
"""
:keyword location: Resource location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword plan: The plan of the resource.
:paramtype plan: ~azure.mgmt.resource.resources.v2016_02_01.models.Plan
:keyword properties: The resource properties.
:paramtype properties: any
:keyword kind: The kind of the resource.
:paramtype kind: str
:keyword managed_by: Id of the resource that manages this resource.
:paramtype managed_by: str
:keyword sku: The sku of the resource.
:paramtype sku: ~azure.mgmt.resource.resources.v2016_02_01.models.Sku
:keyword identity: The identity of the resource.
:paramtype identity: ~azure.mgmt.resource.resources.v2016_02_01.models.Identity
"""
super(GenericResourceExpanded, self).__init__(location=location, tags=tags, plan=plan, properties=properties, kind=kind, managed_by=managed_by, sku=sku, identity=identity, **kwargs)
self.created_time = None
self.changed_time = None
self.provisioning_state = None
class GenericResourceFilter(msrest.serialization.Model):
"""Resource filter.
:ivar resource_type: The resource type.
:vartype resource_type: str
:ivar tagname: The tag name.
:vartype tagname: str
:ivar tagvalue: The tag value.
:vartype tagvalue: str
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'tagname': {'key': 'tagname', 'type': 'str'},
'tagvalue': {'key': 'tagvalue', 'type': 'str'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
tagname: Optional[str] = None,
tagvalue: Optional[str] = None,
**kwargs
):
"""
:keyword resource_type: The resource type.
:paramtype resource_type: str
:keyword tagname: The tag name.
:paramtype tagname: str
:keyword tagvalue: The tag value.
:paramtype tagvalue: str
"""
super(GenericResourceFilter, self).__init__(**kwargs)
self.resource_type = resource_type
self.tagname = tagname
self.tagvalue = tagvalue
class HttpMessage(msrest.serialization.Model):
"""HttpMessage.
:ivar content: HTTP message content.
:vartype content: any
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'object'},
}
def __init__(
self,
*,
content: Optional[Any] = None,
**kwargs
):
"""
:keyword content: HTTP message content.
:paramtype content: any
"""
super(HttpMessage, self).__init__(**kwargs)
self.content = content
class Identity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of resource.
:vartype tenant_id: str
:ivar type: The identity type. The only acceptable values to pass in are None and
"SystemAssigned". The default value is None.
:vartype type: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[str] = None,
**kwargs
):
"""
:keyword type: The identity type. The only acceptable values to pass in are None and
"SystemAssigned". The default value is None.
:paramtype type: str
"""
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
class ParametersLink(msrest.serialization.Model):
"""Entity representing the reference to the deployment parameters.
All required parameters must be populated in order to send to Azure.
    :ivar uri: Required. URI referencing the deployment parameters.
:vartype uri: str
:ivar content_version: If included it must match the ContentVersion in the template.
:vartype content_version: str
"""
_validation = {
'uri': {'required': True},
}
_attribute_map = {
'uri': {'key': 'uri', 'type': 'str'},
'content_version': {'key': 'contentVersion', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
content_version: Optional[str] = None,
**kwargs
):
"""
        :keyword uri: Required. URI referencing the deployment parameters.
:paramtype uri: str
:keyword content_version: If included it must match the ContentVersion in the template.
:paramtype content_version: str
"""
super(ParametersLink, self).__init__(**kwargs)
self.uri = uri
self.content_version = content_version
class Plan(msrest.serialization.Model):
"""Plan for the resource.
:ivar name: The plan ID.
:vartype name: str
:ivar publisher: The publisher ID.
:vartype publisher: str
:ivar product: The offer ID.
:vartype product: str
:ivar promotion_code: The promotion code.
:vartype promotion_code: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'publisher': {'key': 'publisher', 'type': 'str'},
'product': {'key': 'product', 'type': 'str'},
'promotion_code': {'key': 'promotionCode', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
publisher: Optional[str] = None,
product: Optional[str] = None,
promotion_code: Optional[str] = None,
**kwargs
):
"""
:keyword name: The plan ID.
:paramtype name: str
:keyword publisher: The publisher ID.
:paramtype publisher: str
:keyword product: The offer ID.
:paramtype product: str
:keyword promotion_code: The promotion code.
:paramtype promotion_code: str
"""
super(Plan, self).__init__(**kwargs)
self.name = name
self.publisher = publisher
self.product = product
self.promotion_code = promotion_code
class Provider(msrest.serialization.Model):
"""Resource provider information.
:ivar id: The provider id.
:vartype id: str
:ivar namespace: The namespace of the provider.
:vartype namespace: str
:ivar registration_state: The registration state of the provider.
:vartype registration_state: str
:ivar resource_types: The collection of provider resource types.
:vartype resource_types:
list[~azure.mgmt.resource.resources.v2016_02_01.models.ProviderResourceType]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'registration_state': {'key': 'registrationState', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ProviderResourceType]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
namespace: Optional[str] = None,
registration_state: Optional[str] = None,
resource_types: Optional[List["ProviderResourceType"]] = None,
**kwargs
):
"""
:keyword id: The provider id.
:paramtype id: str
:keyword namespace: The namespace of the provider.
:paramtype namespace: str
:keyword registration_state: The registration state of the provider.
:paramtype registration_state: str
:keyword resource_types: The collection of provider resource types.
:paramtype resource_types:
list[~azure.mgmt.resource.resources.v2016_02_01.models.ProviderResourceType]
"""
super(Provider, self).__init__(**kwargs)
self.id = id
self.namespace = namespace
self.registration_state = registration_state
self.resource_types = resource_types
class ProviderListResult(msrest.serialization.Model):
"""List of resource providers.
:ivar value: The list of resource providers.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.Provider]
:ivar next_link: The URL to get the next set of results.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Provider]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Provider"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The list of resource providers.
:paramtype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.Provider]
:keyword next_link: The URL to get the next set of results.
:paramtype next_link: str
"""
super(ProviderListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ProviderResourceType(msrest.serialization.Model):
"""Resource type managed by the resource provider.
:ivar resource_type: The resource type.
:vartype resource_type: str
    :ivar locations: The collection of locations in which this resource type can be created.
:vartype locations: list[str]
:ivar aliases: The aliases that are supported by this resource type.
:vartype aliases: list[~azure.mgmt.resource.resources.v2016_02_01.models.AliasType]
    :ivar api_versions: The api versions.
:vartype api_versions: list[str]
:ivar properties: The properties.
:vartype properties: dict[str, str]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'aliases': {'key': 'aliases', 'type': '[AliasType]'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'properties': {'key': 'properties', 'type': '{str}'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
locations: Optional[List[str]] = None,
aliases: Optional[List["AliasType"]] = None,
api_versions: Optional[List[str]] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword resource_type: The resource type.
:paramtype resource_type: str
        :keyword locations: The collection of locations in which this resource type can be created.
:paramtype locations: list[str]
:keyword aliases: The aliases that are supported by this resource type.
:paramtype aliases: list[~azure.mgmt.resource.resources.v2016_02_01.models.AliasType]
        :keyword api_versions: The api versions.
:paramtype api_versions: list[str]
:keyword properties: The properties.
:paramtype properties: dict[str, str]
"""
super(ProviderResourceType, self).__init__(**kwargs)
self.resource_type = resource_type
self.locations = locations
self.aliases = aliases
self.api_versions = api_versions
self.properties = properties
class ResourceGroup(msrest.serialization.Model):
"""Resource group information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The ID of the resource group.
:vartype id: str
:ivar name: The Name of the resource group.
:vartype name: str
:ivar properties: The resource group properties.
:vartype properties: ~azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroupProperties
:ivar location: Required. The location of the resource group. It cannot be changed after the
resource group has been created. Has to be one of the supported Azure Locations, such as West
US, East US, West Europe, East Asia, etc.
:vartype location: str
:ivar tags: A set of tags. The tags attached to the resource group.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ResourceGroupProperties'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
name: Optional[str] = None,
properties: Optional["ResourceGroupProperties"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword name: The Name of the resource group.
:paramtype name: str
:keyword properties: The resource group properties.
:paramtype properties:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroupProperties
:keyword location: Required. The location of the resource group. It cannot be changed after the
resource group has been created. Has to be one of the supported Azure Locations, such as West
US, East US, West Europe, East Asia, etc.
:paramtype location: str
:keyword tags: A set of tags. The tags attached to the resource group.
:paramtype tags: dict[str, str]
"""
super(ResourceGroup, self).__init__(**kwargs)
self.id = None
self.name = name
self.properties = properties
self.location = location
self.tags = tags
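# Illustrative sketch, not part of the generated models: the minimal body for
# creating a resource group, where only the location is required.
def _example_resource_group() -> "ResourceGroup":
    return ResourceGroup(location="westus", tags={"owner": "example-team"})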
class ResourceGroupExportResult(msrest.serialization.Model):
"""ResourceGroupExportResult.
:ivar template: The template content.
:vartype template: any
:ivar error: The error.
:vartype error:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails
"""
_attribute_map = {
'template': {'key': 'template', 'type': 'object'},
'error': {'key': 'error', 'type': 'ResourceManagementErrorWithDetails'},
}
def __init__(
self,
*,
template: Optional[Any] = None,
error: Optional["ResourceManagementErrorWithDetails"] = None,
**kwargs
):
"""
:keyword template: The template content.
:paramtype template: any
:keyword error: The error.
:paramtype error:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails
"""
super(ResourceGroupExportResult, self).__init__(**kwargs)
self.template = template
self.error = error
class ResourceGroupFilter(msrest.serialization.Model):
"""Resource group filter.
:ivar tag_name: The tag name.
:vartype tag_name: str
:ivar tag_value: The tag value.
:vartype tag_value: str
"""
_attribute_map = {
'tag_name': {'key': 'tagName', 'type': 'str'},
'tag_value': {'key': 'tagValue', 'type': 'str'},
}
def __init__(
self,
*,
tag_name: Optional[str] = None,
tag_value: Optional[str] = None,
**kwargs
):
"""
:keyword tag_name: The tag name.
:paramtype tag_name: str
:keyword tag_value: The tag value.
:paramtype tag_value: str
"""
super(ResourceGroupFilter, self).__init__(**kwargs)
self.tag_name = tag_name
self.tag_value = tag_value
class ResourceGroupListResult(msrest.serialization.Model):
"""List of resource groups.
All required parameters must be populated in order to send to Azure.
:ivar value: The list of resource groups.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroup]
:ivar next_link: Required. The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
next_link: str,
value: Optional[List["ResourceGroup"]] = None,
**kwargs
):
"""
:keyword value: The list of resource groups.
:paramtype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroup]
:keyword next_link: Required. The URL to get the next set of results.
:paramtype next_link: str
"""
super(ResourceGroupListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceGroupProperties(msrest.serialization.Model):
"""The resource group properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ResourceGroupProperties, self).__init__(**kwargs)
self.provisioning_state = None
class ResourceListResult(msrest.serialization.Model):
"""List of resource groups.
All required parameters must be populated in order to send to Azure.
:ivar value: The list of resources.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.GenericResourceExpanded]
:ivar next_link: Required. The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[GenericResourceExpanded]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
next_link: str,
value: Optional[List["GenericResourceExpanded"]] = None,
**kwargs
):
"""
:keyword value: The list of resources.
:paramtype value:
list[~azure.mgmt.resource.resources.v2016_02_01.models.GenericResourceExpanded]
:keyword next_link: Required. The URL to get the next set of results.
:paramtype next_link: str
"""
super(ResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceManagementErrorWithDetails(msrest.serialization.Model):
"""ResourceManagementErrorWithDetails.
All required parameters must be populated in order to send to Azure.
:ivar code: Required. The error code returned from the server.
:vartype code: str
:ivar message: Required. The error message returned from the server.
:vartype message: str
:ivar target: The target of the error.
:vartype target: str
:ivar details: Validation error.
:vartype details:
list[~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ResourceManagementErrorWithDetails]'},
}
def __init__(
self,
*,
code: str,
message: str,
target: Optional[str] = None,
details: Optional[List["ResourceManagementErrorWithDetails"]] = None,
**kwargs
):
"""
:keyword code: Required. The error code returned from the server.
:paramtype code: str
:keyword message: Required. The error message returned from the server.
:paramtype message: str
:keyword target: The target of the error.
:paramtype target: str
:keyword details: Validation error.
:paramtype details:
list[~azure.mgmt.resource.resources.v2016_02_01.models.ResourceManagementErrorWithDetails]
"""
super(ResourceManagementErrorWithDetails, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class ResourceProviderOperationDisplayProperties(msrest.serialization.Model):
"""Resource provider operation's display properties.
    :ivar publisher: Operation publisher.
:vartype publisher: str
:ivar provider: Operation provider.
:vartype provider: str
:ivar resource: Operation resource.
:vartype resource: str
:ivar operation: Operation.
:vartype operation: str
:ivar description: Operation description.
:vartype description: str
"""
_attribute_map = {
'publisher': {'key': 'publisher', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
publisher: Optional[str] = None,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
        :keyword publisher: Operation publisher.
:paramtype publisher: str
:keyword provider: Operation provider.
:paramtype provider: str
:keyword resource: Operation resource.
:paramtype resource: str
:keyword operation: Operation.
:paramtype operation: str
:keyword description: Operation description.
:paramtype description: str
"""
super(ResourceProviderOperationDisplayProperties, self).__init__(**kwargs)
self.publisher = publisher
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class ResourcesMoveInfo(msrest.serialization.Model):
"""Parameters of move resources.
:ivar resources: The ids of the resources.
:vartype resources: list[str]
:ivar target_resource_group: The target resource group.
:vartype target_resource_group: str
"""
_attribute_map = {
'resources': {'key': 'resources', 'type': '[str]'},
'target_resource_group': {'key': 'targetResourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
resources: Optional[List[str]] = None,
target_resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword resources: The ids of the resources.
:paramtype resources: list[str]
:keyword target_resource_group: The target resource group.
:paramtype target_resource_group: str
"""
super(ResourcesMoveInfo, self).__init__(**kwargs)
self.resources = resources
self.target_resource_group = target_resource_group
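# Illustrative sketch, not part of the generated models: a move request body.
# The subscription, group and resource IDs below are placeholders.
def _example_move_info() -> "ResourcesMoveInfo":
    return ResourcesMoveInfo(
        resources=[
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/src-rg"
            "/providers/Microsoft.Storage/storageAccounts/examplestorage"
        ],
        target_resource_group=(
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dst-rg"
        ),
    )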
class Sku(msrest.serialization.Model):
"""Sku for the resource.
:ivar name: The sku name.
:vartype name: str
:ivar tier: The sku tier.
:vartype tier: str
:ivar size: The sku size.
:vartype size: str
:ivar family: The sku family.
:vartype family: str
:ivar model: The sku model.
:vartype model: str
:ivar capacity: The sku capacity.
:vartype capacity: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'model': {'key': 'model', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
size: Optional[str] = None,
family: Optional[str] = None,
model: Optional[str] = None,
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: The sku name.
:paramtype name: str
:keyword tier: The sku tier.
:paramtype tier: str
:keyword size: The sku size.
:paramtype size: str
:keyword family: The sku family.
:paramtype family: str
:keyword model: The sku model.
:paramtype model: str
:keyword capacity: The sku capacity.
:paramtype capacity: int
"""
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.size = size
self.family = family
self.model = model
self.capacity = capacity
class SubResource(msrest.serialization.Model):
"""SubResource.
:ivar id: Resource Id.
:vartype id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
"""
:keyword id: Resource Id.
:paramtype id: str
"""
super(SubResource, self).__init__(**kwargs)
self.id = id
class TagCount(msrest.serialization.Model):
"""Tag count.
:ivar type: Type of count.
:vartype type: str
:ivar value: Value of count.
:vartype value: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
"""
:keyword type: Type of count.
:paramtype type: str
:keyword value: Value of count.
:paramtype value: str
"""
super(TagCount, self).__init__(**kwargs)
self.type = type
self.value = value
class TagDetails(msrest.serialization.Model):
"""Tag details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The tag ID.
:vartype id: str
:ivar tag_name: The tag name.
:vartype tag_name: str
:ivar count: The tag count.
:vartype count: ~azure.mgmt.resource.resources.v2016_02_01.models.TagCount
:ivar values: The list of tag values.
:vartype values: list[~azure.mgmt.resource.resources.v2016_02_01.models.TagValue]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'tag_name': {'key': 'tagName', 'type': 'str'},
'count': {'key': 'count', 'type': 'TagCount'},
'values': {'key': 'values', 'type': '[TagValue]'},
}
def __init__(
self,
*,
tag_name: Optional[str] = None,
count: Optional["TagCount"] = None,
values: Optional[List["TagValue"]] = None,
**kwargs
):
"""
:keyword tag_name: The tag name.
:paramtype tag_name: str
:keyword count: The tag count.
:paramtype count: ~azure.mgmt.resource.resources.v2016_02_01.models.TagCount
:keyword values: The list of tag values.
:paramtype values: list[~azure.mgmt.resource.resources.v2016_02_01.models.TagValue]
"""
super(TagDetails, self).__init__(**kwargs)
self.id = None
self.tag_name = tag_name
self.count = count
self.values = values
class TagsListResult(msrest.serialization.Model):
"""List of subscription tags.
All required parameters must be populated in order to send to Azure.
:ivar value: The list of tags.
:vartype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.TagDetails]
:ivar next_link: Required. The URL to get the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TagDetails]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
next_link: str,
value: Optional[List["TagDetails"]] = None,
**kwargs
):
"""
:keyword value: The list of tags.
:paramtype value: list[~azure.mgmt.resource.resources.v2016_02_01.models.TagDetails]
:keyword next_link: Required. The URL to get the next set of results.
:paramtype next_link: str
"""
super(TagsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class TagValue(msrest.serialization.Model):
"""Tag information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The tag ID.
:vartype id: str
:ivar tag_value: The tag value.
:vartype tag_value: str
:ivar count: The tag value count.
:vartype count: ~azure.mgmt.resource.resources.v2016_02_01.models.TagCount
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'tag_value': {'key': 'tagValue', 'type': 'str'},
'count': {'key': 'count', 'type': 'TagCount'},
}
def __init__(
self,
*,
tag_value: Optional[str] = None,
count: Optional["TagCount"] = None,
**kwargs
):
"""
:keyword tag_value: The tag value.
:paramtype tag_value: str
:keyword count: The tag value count.
:paramtype count: ~azure.mgmt.resource.resources.v2016_02_01.models.TagCount
"""
super(TagValue, self).__init__(**kwargs)
self.id = None
self.tag_value = tag_value
self.count = count
class TargetResource(msrest.serialization.Model):
"""Target resource.
:ivar id: The ID of the resource.
:vartype id: str
:ivar resource_name: The name of the resource.
:vartype resource_name: str
:ivar resource_type: The type of the resource.
:vartype resource_type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
resource_name: Optional[str] = None,
resource_type: Optional[str] = None,
**kwargs
):
"""
:keyword id: The ID of the resource.
:paramtype id: str
:keyword resource_name: The name of the resource.
:paramtype resource_name: str
:keyword resource_type: The type of the resource.
:paramtype resource_type: str
"""
super(TargetResource, self).__init__(**kwargs)
self.id = id
self.resource_name = resource_name
self.resource_type = resource_type
class TemplateHashResult(msrest.serialization.Model):
"""Result of the request to calculate template hash. It contains a string of minified template and its hash.
:ivar minified_template: The minified template string.
:vartype minified_template: str
:ivar template_hash: The template hash.
:vartype template_hash: str
"""
_attribute_map = {
'minified_template': {'key': 'minifiedTemplate', 'type': 'str'},
'template_hash': {'key': 'templateHash', 'type': 'str'},
}
def __init__(
self,
*,
minified_template: Optional[str] = None,
template_hash: Optional[str] = None,
**kwargs
):
"""
:keyword minified_template: The minified template string.
:paramtype minified_template: str
:keyword template_hash: The template hash.
:paramtype template_hash: str
"""
super(TemplateHashResult, self).__init__(**kwargs)
self.minified_template = minified_template
self.template_hash = template_hash
class TemplateLink(msrest.serialization.Model):
"""Entity representing the reference to the template.
All required parameters must be populated in order to send to Azure.
:ivar uri: Required. URI referencing the template.
:vartype uri: str
:ivar content_version: If included it must match the ContentVersion in the template.
:vartype content_version: str
"""
_validation = {
'uri': {'required': True},
}
_attribute_map = {
'uri': {'key': 'uri', 'type': 'str'},
'content_version': {'key': 'contentVersion', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
content_version: Optional[str] = None,
**kwargs
):
"""
:keyword uri: Required. URI referencing the template.
:paramtype uri: str
:keyword content_version: If included it must match the ContentVersion in the template.
:paramtype content_version: str
"""
super(TemplateLink, self).__init__(**kwargs)
self.uri = uri
self.content_version = content_version
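
# --- Usage sketch (illustrative; not part of the generated code above) ------
# A minimal example of how these msrest models are typically constructed and
# serialized on the client side. The URI and SKU values are placeholders;
# serialize() is inherited from msrest.serialization.Model and maps attributes
# through _attribute_map (e.g. content_version -> 'contentVersion').
if __name__ == '__main__':
    link = TemplateLink(uri='https://example.com/azuredeploy.json',
                        content_version='1.0.0.0')
    sku = Sku(name='Standard_LRS', tier='Standard', capacity=1)
    print(link.serialize())
    print(sku.serialize())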
|
from __future__ import print_function
import os
import sys
import re
import logging
import unicodedata
from codecs import open
from random import shuffle, randint, choice
replacement_pattern = re.compile(ur'\$\{(?P<token>[^}]+)\}')
# iOS 11 + Android 26
mobile_safe_pattern = re.compile(ur'[\u0000-\u0377\u037a-\u037f\u0384-\u038a\u038c\u038e-\u03a1\u03a3-\u052f\u0531-\u0556\u0559-\u055f\u0561-\u0587\u0589-\u058a\u058f\u0591-\u05c7\u05d0-\u05ea\u05f0-\u05f4\u0600-\u0604\u0606-\u061c\u061e-\u070d\u070f-\u074a\u074d-\u07b1\u07c0-\u07fa\u0800-\u082d\u0830-\u083e\u0840-\u085b\u085e\u08a0\u08a2-\u08ac\u08e4-\u08f9\u08fb-\u08fe\u0900-\u097f\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09fb\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0af1\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b77\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bfa\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c78-\u0c7f\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d75\u0d79-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df4\u0e01-\u0e3a\u0e3f-\u0e5b\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edd\u0f00-\u0f47\u0f49-\u0f6c\u0f71-\u0f8b\u0f90-\u0f97\u0f99-\u0fbc\u0fbe-\u0fcc\u0fce-\u0fd8\u1000-\u1021\u1023-\u1027\u1029-\u1032\u1036-\u1059\u10a0-\u10c5\u10d0-\u10fc\u1100-\u1112\u115f-\u1175\u119e\u11a8-\u11c2\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135f-\u137c\u1380-\u1399\u13a0-\u13f4\u1401-\u1676\u1680-\u169c\u16a0-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1736\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17dd\u17e0-\u17e9\u17f0-\u17f9\u1800-\u180e\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1940\u1944-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u19de-\u1a1b\u1a1e-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa0-\u1aad\u1b00-\u1b4b\u1b50-\u1b7c\u1b80-\u1bf3\u1bfc-\u1c37\u1c3b-\u1c49\u1c4d-\u1c7f\u1cc0-\u1cc7\u1d00-\u1dca\u1dcd\u1dfe-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffe\u2000-\u2027\u202a-\u205f\u206a-\u2071\u2074-\u208e\u2090-\u2094\u20a0-\u20bf\u20d0-\u20e1\u20e3-\u20f0\u2100-\u214e\u2150-\u2184\u2189\u2190-\u237a\u237c-\u237d\u2380-\u2383\u2388-\u238b\u2393-\u2395\u239b-\u23b9\u23ce-\u23d0\u23da-\u23e7\u23e9-\u23f3\u23f8-\u23fa\u2400-\u2424\u2440-\u244a\u246
0-\u269c\u26a0-\u26b2\u26bd-\u26be\u26c4-\u26c5\u26c8\u26ce-\u26cf\u26d1\u26d3-\u26d4\u26e2\u26e9-\u26ea\u26f0-\u26f5\u26f7-\u26fa\u26fd\u2701-\u275e\u2761-\u27c9\u27cb-\u27cd\u27d0-\u2aff\u2b05-\u2b07\u2b12-\u2b4c\u2b50-\u2b55\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2c70\u2c74-\u2c77\u2c79-\u2c7a\u2c7c-\u2c7d\u2c80-\u2cf3\u2cf9-\u2cff\u2d30-\u2d67\u2d6f-\u2d70\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2e18\u2e1c-\u2e1d\u2e2e\u2e30-\u2e31\u3000-\u3003\u3005-\u301f\u3021-\u3029\u3030\u303d\u3041-\u3094\u3099-\u309e\u30a0-\u30f6\u30fb-\u30fe\u3105-\u3129\u3131-\u318e\u3200-\u321c\u3220-\u3229\u3231-\u3232\u3239\u3260-\u327b\u327f\u3297\u3299\u32a3-\u32a8\u3303\u330d\u3314\u3318\u3322-\u3323\u3326-\u3327\u332b\u3336\u333b\u3349-\u334a\u334d\u3351\u3357\u337b-\u337e\u3380-\u3384\u3388-\u33ca\u33cd-\u33d3\u33d5-\u33d6\u33d8\u33db-\u33dd\u3400-\u4db5\u4e00-\u9fa5\ua000-\ua48c\ua490-\ua4c6\ua4d0-\ua62b\ua640-\ua69d\ua69f-\ua6f7\ua700-\ua721\ua727\ua789-\ua78c\ua792\ua7a4\ua800-\ua82b\ua830-\ua839\ua840-\ua877\ua880-\ua8c4\ua8ce-\ua8d9\ua900-\ua953\ua95f\ua980-\ua9cd\ua9cf-\ua9d9\ua9de-\ua9df\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa5c-\uaa5f\uaa80-\uaac2\uaadb-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabed\uabf0-\uabf9\uac00-\ud7a3\uf900-\ufa2d\ufb00-\ufb06\ufb1d-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbc1\ufbd3-\ufd3f\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfd\ufe00\ufe10-\ufe19\ufe20-\ufe26\ufe30-\ufe31\ufe33-\ufe46\ufe49-\ufe52\ufe54-\ufe57\ufe59-\ufe66\ufe68-\ufe6b\ufe70-\ufe74\ufe76-\ufefc\ufeff\uff01-\uff5e\uff61-\uff9f\uffe0-\uffe6\ufffc-\ufffd\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001013f\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010320-\U00010323\U00010330-\U0001034a\U00010380-\U0001039d\U0001039f-\U000103c3\U000103c8-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010857-\U0001085f\U00010900-\U0001091b\U0001091f-\U00010939\U0001093f\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f-\U00010a47\U00010a50-\U00010a58\U00010a60-\U00010a7f\U00010b00-\U00010b35\U00010b39-\U00010b55\U00010b58-\U00010b72\U00010b78-\U00010b7f\U00010c00-\U00010c48\U00011000-\U0001104d\U00011052-\U0001106f\U00011080-\U000110c1\U00012000-\U0001236e\U00012400-\U00012462\U00012470-\U00012473\U00013000-\U0001342e\U00016800-\U00016a38\U0001d173-\U0001d17a\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d7c9\U0001d7ce-\U0001d7ff\U0001f004\U0001f0cf\U0001f170-\U0001f171\U0001f17e-\U0001f17f\U0001f18e\U0001f191-\U0001f19a\U0001f1e6-\U0001f1ff\U0001f201-\U0001f202\U0001f21a\U0001f22f\U0001f232-\U0001f23a\U0001f250-\U0001f251\U0001f300-\U0001f321\U0001f324-\U0001f393\U0001f396-\U0001f397\U0001f399-\U0001f39b\U0001f39e-\U0001f3f0\U0001f3f3-\U0001f3f5\U0001f3f7-\U0001f4fd\U0001f4ff-\U0001f53
d\U0001f549-\U0001f54e\U0001f550-\U0001f567\U0001f56f-\U0001f570\U0001f573-\U0001f57a\U0001f587\U0001f58a-\U0001f58d\U0001f590\U0001f595-\U0001f596\U0001f5a4-\U0001f5a5\U0001f5a8\U0001f5b1-\U0001f5b2\U0001f5bc\U0001f5c2-\U0001f5c4\U0001f5d1-\U0001f5d3\U0001f5dc-\U0001f5de\U0001f5e1\U0001f5e3\U0001f5e8\U0001f5ef\U0001f5f3\U0001f5fa-\U0001f64f\U0001f680-\U0001f6c5\U0001f6cb-\U0001f6d2\U0001f6e0-\U0001f6e5\U0001f6e9\U0001f6eb-\U0001f6ec\U0001f6f0\U0001f6f3-\U0001f6f6\U0001f910-\U0001f91e\U0001f920-\U0001f927\U0001f930\U0001f933-\U0001f93a\U0001f93c-\U0001f93e\U0001f940-\U0001f945\U0001f947-\U0001f94b\U0001f950-\U0001f95e\U0001f980-\U0001f991\U0001f9c0\U000e0030-\U000e0039\U000e0061-\U000e007a\U000e007f]')
unicode_min, unicode_max = 0, 0x10ffff
unicode_cat_blacklist = ['Mn', 'Mc', 'Me', 'Cc', 'Cf', 'Cs', 'Co', 'Cn']
profile = {}
with open('profile.properties', encoding='utf-8') as infile:
for line in infile:
        key, value = line.strip().split('=', 1)  # split only on the first '=' so values may contain '='
profile[key] = value
adjectives = []
people = []
def load_words(filename):
if os.path.isfile(filename):
with open(filename) as f:
return f.read().splitlines()
else:
print(filename + ' not found. Generate it.')
sys.exit(1)
adjectives = load_words('adjectives.txt')
def _character():
while True:
i = randint(unicode_min, unicode_max)
c = unichr(i)
if unicodedata.category(c) not in unicode_cat_blacklist \
and mobile_safe_pattern.match(c):
return c
def _enclosing_character():
enclosing = [unichr(i) for i in xrange(unicode_min, unicode_max)
if unicodedata.category(unichr(i)) == 'Me']
return choice(enclosing)
def _indefinite_adjective():
adj = _adjective()
article = 'an' if adj.lower()[0] in 'aeiou' else 'a'
return '%s %s' % (article, adj)
def _adjective():
return choice(adjectives)
def _fix_capitalization(text, location):
return text[0].upper() + text[1:] if location == 0 else text
def _dispatch(match):
res = globals()['_%s' % match.group('token')]()
return _fix_capitalization(res, match.start())
def randomize(text):
return replacement_pattern.sub(_dispatch, text) if text else ''
def randomized_profile():
return {key: randomize(value) for key, value in profile.iteritems()}
def main():
print('\n'.join(['%s=%s' % (k, v)
for k, v in randomized_profile().iteritems()]).encode('utf-8'))
if __name__ == '__main__':
import sys
if '-v' in sys.argv:
logging.getLogger().setLevel(logging.DEBUG)
main()
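# --- Example (illustrative) --------------------------------------------------
# Each ${token} in a profile value is dispatched by _dispatch() to the
# module-level function _<token>(), so a profile.properties such as:
#
#   status=Feeling ${indefinite_adjective} today
#   mood=${adjective}
#
# might be randomized to "Feeling an ineffable today" / "spiffy", depending on
# the words available in adjectives.txt (file contents are assumed here).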
|
from django.db import models
class Item(models.Model):
name = models.CharField(max_length=100)
items = models.ManyToManyField('self', blank=True)
private = models.BooleanField(default=True)
def __unicode__(self):
return self.name
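# Usage sketch (illustrative): the self-referential ManyToManyField lets an
# Item contain other Items, e.g.
#
#   box = Item.objects.create(name="box")
#   pen = Item.objects.create(name="pen", private=False)
#   box.items.add(pen)   # symmetrical by default, so pen.items includes box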
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
from funcy import partial
from testing import *
from pyecs import *
from pycompupipe.components import GuiElement
import numpy as np  # np.testing.assert_almost_equal is used in the position-pipeline tests below
class TestGuiElement():
def test_usage(self):
e = Entity()
g = e.add_component(GuiElement())
e.fire_callbacks("awake")
assert g.position == (0,0)
assert g.size == (0,0)
assert g.anchor == (0,0)
assert g.rect() == (0,0,0,0)
def test_anchor1(self):
e = Entity()
g = e.add_component(GuiElement((0,0),(100,50),(0,0)))
e.fire_callbacks("awake")
assert g.position == (0,0)
assert g.size == (100,50)
assert g.anchor == (0,0)
assert g.rect() == (0,0,100,50)
def test_anchor2(self):
e = Entity()
g = e.add_component(GuiElement((0,0),(100,50),(0.5,0.5)))
e.fire_callbacks("awake")
assert g.position == (0,0)
assert g.size == (100,50)
assert g.anchor == (0.5,0.5)
assert g.rect() == (-50,-25,100,50)
def test_anchor3(self):
e = Entity()
g = e.add_component(GuiElement((0,0),(100,50),(1,1)))
e.fire_callbacks("awake")
assert g.position == (0,0)
assert g.size == (100,50)
assert g.anchor == (1,1)
assert g.rect() == (-100,-50,100,50)
def test_anchor4(self):
e = Entity()
g = e.add_component(GuiElement((0,0),(100,50),(0,1)))
e.fire_callbacks("awake")
assert g.position == (0,0)
assert g.size == (100,50)
assert g.anchor == (0,1)
assert g.rect() == (0,-50,100,50)
def test_relative_pos1(self):
e = Entity()
g = e.add_component(GuiElement((50,20),(100,50),(0,0)))
e2 = Entity()
g2 = e2.add_component(GuiElement((10,10),(5,5),(0,0),relative_position=True))
e2.fire_callbacks("awake")
e.add_entity(e2)
e.fire_callbacks("awake")
assert g2.rect() == (60,30,5,5)
def test_relative_pos2(self):
e = Entity()
g = e.add_component(GuiElement((50,20),(100,50),(0.5,0.5)))
e2 = Entity()
g2 = e2.add_component(GuiElement((10,10),(5,5),(0,0),relative_position=True))
e2.fire_callbacks("awake")
e.add_entity(e2)
e.fire_callbacks("awake")
assert g.rect() == (0,-5,100,50)
assert g2.rect() == (10,5,5,5)
def test_relative_pos3(self):
e = Entity()
g = e.add_component(GuiElement((50,20),(100,50),(0.5,0.5)))
e2 = Entity()
g2 = e2.add_component(GuiElement((10,10),(5,5),(1,1),relative_position=True))
e2.fire_callbacks("awake")
e.add_entity(e2)
e.fire_callbacks("awake")
assert g.rect() == (0,-5,100,50)
assert g2.rect() == (5,0,5,5)
@forEach("x",partial(generateRandomNormals,0,1),5)
@forEach("y",partial(generateRandomNormals,0,1),5)
@forEach("w",partial(generateUniformRandoms,0,1),5)
@forEach("h",partial(generateUniformRandoms,0,1),5)
@forEach("i",partial(generateUniformRandoms,-1,2),5)
@forEach("j",partial(generateUniformRandoms,-1,2),5)
def test_is_in(self,x,y,w,h,i,j):
g = GuiElement((x,y),(w,h),(0,0))
        if 0 <= i <= 1 and 0 <= j <= 1:
assert g.is_in((x+i*w,y+j*h)) == True
else:
assert g.is_in((x+i*w,y+j*h)) == False
def test_snap_to_grid1(self):
g = GuiElement((0,0),(16,16),(0,0),snap_to_grid=16)
assert g.rect() == (0,0,16,16)
def test_snap_to_grid2(self):
g = GuiElement((10,10),(16,16),(0,0),snap_to_grid=16)
assert g.rect() == (0,0,16,16)
def test_snap_to_grid3(self):
g = GuiElement((16,16),(16,16),(0,0),snap_to_grid=16)
assert g.rect() == (16,16,16,16)
def test_snap_to_grid4(self):
g = GuiElement((16,16),(20,20),(0,0),snap_to_grid=16)
assert g.rect() == (16,16,16,16)
def test_snap_to_grid5(self):
g = GuiElement((16,16),(24,24),(0,0),snap_to_grid=16)
assert g.rect() == (16,16,32,32)
def test_snap_to_grid6(self):
g = GuiElement((16,16),(32,32),(0,0),snap_to_grid=16)
assert g.rect() == (16,16,32,32)
@forEach("x",partial(generateRandomNormals,0,1),10)
@forEach("y",partial(generateRandomNormals,0,1),10)
def test_position_pipeline(self,x,y):
e = Entity()
g = GuiElement((x,y),(0,0),(0,0))
e.add_component(g)
assert e.fire_callbacks_pipeline("position") == (x,y)
@forEach("x",partial(generateRandomNormals,0,1),10)
@forEach("y",partial(generateRandomNormals,0,1),10)
def test_position_pipeline_inner_anchor(self,x,y):
e = Entity()
g = GuiElement((x,y),(20,10),(0.5,0.5))
# anchor (0.5,0.5) means (x,y) is position of center of GuiElement
e.add_component(g)
# no inner anchor means we want top-left of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position"),
(x-10,y-5))
        # explicitly request top-left of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0,0)),
(x-10,y-5))
        # explicitly request center-left of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0,0.5)),
(x-10,y))
        # explicitly request bottom-left of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0,1)),
(x-10,y+5))
        # explicitly request top-center of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0.5,0)),
(x,y-5))
        # explicitly request center of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0.5,0.5)),
(x,y))
        # explicitly request bottom-center of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(0.5,1)),
(x,y+5))
        # explicitly request top-right of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(1,0)),
(x+10,y-5))
        # explicitly request center-right of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(1,0.5)),
(x+10,y))
        # explicitly request bottom-right of gui-element
np.testing.assert_almost_equal(
e.fire_callbacks_pipeline("position",(1,1)),
(x+10,y+5))
|
"""
Playing games with a package's __name__ can cause a NullPointerException.
"""
import support
import test049p
del test049p.__name__
hasattr(test049p, 'missing')
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import contextlib
import os
import re
import shutil
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IncompleteCustomScalaIntegrationTest(PantsRunIntegrationTest):
@contextlib.contextmanager
def tmp_custom_scala(self, path_suffix):
"""Temporarily create a BUILD file in the root for custom scala testing."""
if os.path.exists(self.tmp_build_file_path):
      raise RuntimeError('BUILD file exists; failing to avoid overwriting it. '
                         'Ensure that the file does not exist from a previous run.')
path = os.path.join(self.target_path, path_suffix)
try:
# Bootstrap the BUILD file.
shutil.copyfile(path, self.tmp_build_file_path)
# And create an empty scalastyle config.
with self.tmp_scalastyle_config() as scalastyle_config_option:
yield scalastyle_config_option
finally:
os.remove(self.tmp_build_file_path)
@contextlib.contextmanager
def tmp_scalastyle_config(self):
with temporary_dir(root_dir=get_buildroot()) as scalastyle_dir:
path = os.path.join(scalastyle_dir, 'config.xml')
safe_file_dump(path, '''<scalastyle/>''')
yield '--lint-scalastyle-config={}'.format(path)
def pants_run(self, options=None):
if options is None:
options = []
full_options = options + ['clean-all', 'compile', 'lint', self.target_path]
return self.run_pants(full_options)
def run_repl(self, target, program, options=None):
"""Run a repl for the given target with the given input, and return stdout_data."""
command = ['repl']
if options:
command.extend(options)
command.extend([target, '--quiet'])
return self.run_pants(command=command, stdin_data=program)
@classmethod
def hermetic(cls):
return True
def setUp(self):
self.target_path = 'testprojects/src/scala/org/pantsbuild/testproject/custom_scala_platform'
self.tmp_build_file_path = 'BUILD.CustomScalaIntegTests'
def test_working_210(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['-ldebug', '--scala-version=2.10', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('Bootstrapping scalastyle_2_10', pants_run.stdout_data), pants_run.stdout_data
def test_working_211(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['-ldebug', '--scala-version=2.11', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('Bootstrapping scalastyle_2_11', pants_run.stdout_data), pants_run.stdout_data
def test_working_212(self):
with self.tmp_scalastyle_config() as scalastyle_config_option:
pants_run = self.pants_run(options=['-ldebug', '--scala-version=2.12', scalastyle_config_option])
self.assert_success(pants_run)
assert re.search('Bootstrapping scalastyle_2_12', pants_run.stdout_data), pants_run.stdout_data
def test_repl_working_custom_211(self):
with self.tmp_custom_scala('custom_211_scalatools.build') as scalastyle_config_option:
pants_run = self.run_repl(
'testprojects/src/scala/org/pantsbuild/testproject/custom_scala_platform',
dedent("""
import org.pantsbuild.testproject.custom_scala_platform
            Hello.main(Seq("World").toArray)
"""),
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
# Make sure this didn't happen:
# FAILURE: No bootstrap callback registered for //:scala-repl in scala
self.assert_success(pants_run)
def test_working_custom_211(self):
with self.tmp_custom_scala('custom_211_scalatools.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'-ldebug',
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_success(pants_run)
assert not re.search('Bootstrapping scalastyle_2_10', pants_run.stdout_data)
assert not re.search('Bootstrapping scalastyle_2_11', pants_run.stdout_data)
def test_working_custom_212(self):
with self.tmp_custom_scala('custom_212_scalatools.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'-ldebug',
'--scala-version=custom',
'--scala-suffix-version=2.12',
scalastyle_config_option,
]
)
self.assert_success(pants_run)
assert not re.search('Bootstrapping scalastyle_2_11', pants_run.stdout_data)
assert not re.search('Bootstrapping scalastyle_2_12', pants_run.stdout_data)
def test_missing_compiler(self):
with self.tmp_custom_scala('custom_211_missing_compiler.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_failure(pants_run)
assert "Unable to bootstrap tool: 'scalac'" in pants_run.stdout_data
def test_missing_runtime(self):
with self.tmp_custom_scala('custom_211_missing_runtime.build') as scalastyle_config_option:
pants_run = self.pants_run(
options=[
'--scala-version=custom',
'--scala-suffix-version=2.11',
scalastyle_config_option,
]
)
self.assert_failure(pants_run)
|
import os
import json
# from sys import last_traceback
# from typing_extensions import OrderedDict
from flask import Flask, request, redirect, url_for, session, render_template
from authlib.integrations.flask_client import OAuth
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# dotenv setup
from dotenv import load_dotenv
load_dotenv()
# App config
app = Flask(__name__)
# Session config
app.secret_key = os.getenv("APP_SECRET_KEY")
app.config["SESSION_COOKIE_NAME"] = "google-login-session"
app.config["PERMANENT_SESSION_LIFETIME"] = timedelta(minutes=10)
# PostgreSQL config
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///test.db"
# app.config[
# "SQLALCHEMY_DATABASE_URI"
# ] = "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{table}".format(
# user=os.getenv("POSTGRES_USER"),
# passwd=os.getenv("POSTGRES_PASSWORD"),
# host=os.getenv("POSTGRES_HOST"),
# port=5432,
# table=os.getenv("POSTGRES_DB"),
# )
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# oAuth Setup
oauth = OAuth(app)
google = oauth.register(
name="google",
client_id=os.getenv("GOOGLE_CLIENT_ID"),
client_secret=os.getenv("GOOGLE_CLIENT_SECRET"),
access_token_url="https://accounts.google.com/o/oauth2/token",
access_token_params=None,
authorize_url="https://accounts.google.com/o/oauth2/auth",
authorize_params=None,
api_base_url="https://www.googleapis.com/oauth2/v1/",
userinfo_endpoint="https://openidconnect.googleapis.com/v1/userinfo",
client_kwargs={"scope": "openid email profile"},
)
# ----------------------------------------------------------------
# db logic and helpers
# User model
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(), unique=True, nullable=False)
name = db.Column(db.String(20), default=None)
# 1 -> many relationship with User -> Destination
trips = db.relationship("Trip", backref="person", lazy=True)
def __init__(self, email, name):
self.email = email
self.name = name
def __repr__(self):
return f"Person('{self.email}, '{self.name}')"
# Trip Model
class Trip(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
person_id = db.Column(db.Integer, db.ForeignKey("person.id"), nullable=False)
destination = db.relationship("Destination", backref="trip", lazy=True)
def __init__(self, name, person_id):
self.name = name
self.person_id = person_id
def __repr__(self):
return f"Trip('{self.name}', '{self.person_id}')"
# Destination model
class Destination(db.Model):
id = db.Column(db.Integer, primary_key=True)
order = db.Column(db.Integer)
place_id = db.Column(db.String())
area_name = db.Column(db.String())
lat = db.Column(db.String())
lng = db.Column(db.String())
trip_id = db.Column(db.Integer, db.ForeignKey("trip.id"), nullable=False)
def __init__(self, order, place_id, area_name, lat, lng, trip_id):
self.order = order
self.place_id = place_id
self.area_name = area_name
self.lat = lat
self.lng = lng
self.trip_id = trip_id
def __repr__(self):
return f"Destinations('{self.order}', '{self.place_id}','{self.area_name}','{self.lat}','{self.lng}','{self.trip_id}')"
# stores user information into db
def addUser(userInfo):
email = userInfo["email"]
name = userInfo["name"]
error = None
if not email:
error = "email is required."
# Will only store email if email does not already exist in db
if Person.query.filter_by(email=email).first() is None:
if error is None:
new_user = Person(email=email, name=name)
db.session.add(new_user)
db.session.commit()
print(f"User {email}, {name} created successfully")
else:
return error, 418
else:
return
# returns the user object
def getUser():
user = {}
user["email"] = session["email"]
user["name"] = session["name"]
user["picture"] = session["picture"]
return user
def addDest(order, place_id, area_name, lat, lng, trip_id):
newDest = Destination(
order=order,
place_id=place_id,
area_name=area_name,
lat=lat,
lng=lng,
trip_id=trip_id,
)
    trip = Trip.query.filter_by(id=trip_id).first()
print(trip.destination)
db.session.add(newDest)
db.session.commit()
# checks if the user has any trips present in db
def checkTrips(userInfo):
email = userInfo["email"]
user = Person.query.filter_by(email=email).first()
# Populating with dummy data
# trip1 = Trip(name="NY", person_id=user.id)
# trip2 = Trip(name="SF", person_id=user.id)
# db.session.add(trip1)
# db.session.add(trip2)
# db.session.commit()
# trip = Trip.query.first()
# dest1 = Destination(order="1", address="123 Test Ave", trip_id=trip.id)
# db.session.add(dest1)
# db.session.commit()
trips = user.trips
return trips
# adds a trip into the db
def addTrip(email, trip_name):
user = Person.query.filter_by(email=email).first()
newTrip = Trip(trip_name, person_id=user.id)
db.session.add(newTrip)
db.session.commit()
# deletes a destination entry by id
def deleteUserInfo(userId):
delete = Destination.query.filter_by(id=userId).first()
db.session.delete(delete)
@app.route("/")
def landing_page():
return render_template("landing.html")
@app.route("/trips")
def trips_page():
user = getUser()
# checks if user has any trips stored
trips = checkTrips(user)
# if does then stores it into user_info dict
user["trips"] = trips
return render_template("trips.html", user=user)
@app.route("/planner/<trip_id>")
def planner_page(trip_id):
# TODO pull up destinations here
# destinations = getDest(trip_id)
return render_template("planner.html")
@app.route("/login")
def login():
google = oauth.create_client("google") # create the google oauth client
redirect_uri = url_for("authorize", _external=True)
return google.authorize_redirect(redirect_uri)
@app.route("/authorize")
def authorize():
google = oauth.create_client("google") # create the google oauth client
token = (
google.authorize_access_token()
) # Access token from google (needed to get user info)
    resp = google.get("userinfo")  # userinfo contains the fields you specified in the scope
user_info = resp.json()
# store email into db
added = addUser(user_info)
# print(user_info)
# stores the user email, name, and picture into session storage
session["email"] = user_info["email"]
session["name"] = user_info["name"]
session["picture"] = user_info["picture"]
user_info["added"] = added
return redirect("/trips")
@app.route("/logout")
def logout():
for key in list(session.keys()):
session.pop(key)
return redirect("/")
# api route that returns destinations by trip_id
@app.route("/api/<trip_id>/destinations")
def getDestinations(trip_id):
    trip = Trip.query.filter_by(id=trip_id).first()
destination = trip.destination
str = ""
# TODO: convert to json
for value in destination:
str += (
f"order: {value.order} dest_id: {value.dest_id}, trip_id: {value.trip_id} "
)
return str
# api route that creates a new trip and routes to trip page
@app.route("/api/create_trip/<trip_name>")
def createTrip(trip_name):
email = session["email"]
addTrip(email, trip_name)
return redirect("/login")
@app.route("/api/destination/<trip_id>", methods=["POST"])
def createDestination(trip_id):
# if get send data, if post save data
if request.method == "POST":
json_data = request.data
# print(json_data)
json_list = json.loads(json_data)
for p in json_list:
order = p["order"]
place_id = p["location_data"]["place_id"]
area_name = p["location_data"]["area_name"]
lat = p["location_data"]["coordinate"]["location"]["lat"]
lng = p["location_data"]["coordinate"]["location"]["lng"]
addDest(order, place_id, area_name, lat, lng, trip_id)
        # json_data contains an array of destinations
# data model {order: val, location_data: {place_id: val, area_name: val, coordinate: {location: {lat: val, lng: val}}}}
# Destination
# - Order number
# - place_id string
# - area_name string
# - lat string
# - lng string
# addDest(order, dest_id, trip_id)
    return redirect(f"/planner/{trip_id}")
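# Example request body for POST /api/destination/<trip_id> (illustrative; the
# place_id and coordinates below are made up, the shape follows the data-model
# comment above):
#
# [
#   {"order": 1,
#    "location_data": {"place_id": "abc123",
#                      "area_name": "New York",
#                      "coordinate": {"location": {"lat": "40.71", "lng": "-74.00"}}}}
# ]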
@app.before_first_request
def before_req_func():
# db.drop_all()
db.create_all()
if __name__ == "__main__":
db.init_app(app)
app.run(host="0.0.0.0")
|
from pynput.keyboard import Key, Listener
currently_pressed_key = None
def on_press(key):
global currently_pressed_key
if key == currently_pressed_key:
print('{0} repeated'.format(key))
else:
print('{0} pressed'.format(key))
currently_pressed_key = key
def on_release(key):
global currently_pressed_key
    print('{0} released'.format(key))
currently_pressed_key = None
if key == Key.esc:
# Stop listener
return False
# Collect events until released
with Listener(
on_press=on_press,
on_release=on_release) as listener:
    listener.join()
|
"""
lazy - Decorators and utilities for lazy evaluation in Python
Alberto Bertogli ([email protected])
"""
class _LazyWrapper:
"""Lazy wrapper class for the decorator defined below.
It's closely related so don't use it.
We don't use a new-style class, otherwise we would have to implement
stub methods for __getattribute__, __hash__ and lots of others that
are inherited from object by default. This works too and is simple.
I'll deal with them when they become mandatory.
"""
def __init__(self, f, args, kwargs):
self._override = True
self._isset = False
self._value = None
self._func = f
self._args = args
self._kwargs = kwargs
self._override = False
def _checkset(self):
if not self._isset:
self._override = True
self._value = self._func(*self._args, **self._kwargs)
self._isset = True
self._checkset = lambda: True
self._override = False
def __getattr__(self, name):
if self.__dict__['_override']:
return self.__dict__[name]
self._checkset()
return self._value.__getattribute__(name)
def __setattr__(self, name, val):
if name == '_override' or self._override:
self.__dict__[name] = val
return
self._checkset()
setattr(self._value, name, val)
return
def lazy(f):
"Lazy evaluation decorator"
def newf(*args, **kwargs):
return _LazyWrapper(f, args, kwargs)
    return newf
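
# --- Usage sketch (illustrative; not part of the original module) -----------
# The decorated callable is not executed until an attribute of its result is
# first accessed; the wrapper then computes and caches the value.
if __name__ == '__main__':
    @lazy
    def load_config():
        print("loading...")    # only runs on first attribute access
        return {"retries": 3}

    cfg = load_config()        # nothing has been loaded yet
    print(cfg.get("retries"))  # triggers load_config(), then prints 3
|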
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test object owner of comments."""
from collections import OrderedDict
import ddt
from ggrc.models import Assessment, AccessControlList, Revision, all_models
from integration.ggrc import api_helper
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
@ddt.ddt
class TestCommentAdmin(TestCase):
"""Test Admin role on comments."""
def setUp(self):
"""Setup test case."""
super(TestCommentAdmin, self).setUp()
self.api = api_helper.Api()
self.response = self.client.get("/login")
@staticmethod
def _get_assessment_import_data(audit_slug, assignees=None, creators=None):
"""Return import Assessment data block"""
data_block = [
OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit_slug),
("Title*", "Assessment title 1"),
("Description", "Some desc 1"),
("Notes", "notes 1"),
("State*", "NOT STARTED"),
("Recipients", "Verifiers, Assignees"),
("Send by default", "Yes"),
("Evidence URL", "http://i.imgur.com/Lppr347.jpg")
]),
]
if assignees:
data_block[0]["Assignees*"] = assignees
if creators:
data_block[0]["Creators*"] = creators
return data_block
# pylint: disable=too-many-locals
@ddt.data(
("Administrator", "assignees,creators", "[email protected]"),
("Creator", "assignees,creators", "[email protected]"),
("Editor", "assignees,creators", "[email protected]"),
("Reader", "assignees,creators", "[email protected]"),
)
@ddt.unpack
def test_admin_role(self, role, assessment_roles, email):
"""Test comment Admin and its revision of assessment comment."""
person = factories.PersonFactory(email=email)
person_id = person.id
creator_role = all_models.Role.query.filter(
all_models.Role.name == role
).one()
rbac_factories.UserRoleFactory(role=creator_role, person=person)
audit = factories.AuditFactory()
assessment_roles = dict.fromkeys(assessment_roles.split(','), email)
data_block = self._get_assessment_import_data(audit.slug,
**assessment_roles)
response = self.import_data(*data_block)
self._check_csv_response(response, {})
asmt1 = Assessment.query.filter_by(title="Assessment title 1").first()
self.api.set_user(all_models.Person.query.get(person_id))
request_data = [{
"comment": {
"description": "<p>{}</p>".format("some comment"),
"context": None,
"assignee_type": "Assignees,Verifiers,Creators",
},
}]
# logged user will be set as comment admin
self.api.post(all_models.Comment, request_data)
comment = all_models.Comment.query.first()
self.api.put(asmt1, {
"actions": {"add_related": [{"id": comment.id,
"type": "Comment"}]},
})
acr_comment_id = all_models.AccessControlRole.query.filter_by(
object_type="Comment",
name="Admin"
).first().id
acl = AccessControlList.query.filter_by(
object_id=comment.id,
object_type=comment.type,
ac_role_id=acr_comment_id
).first()
self.assertTrue(acl, "ACL row is not created")
revision = Revision.query.filter_by(
resource_id=acl.id,
resource_type=acl.type
).first()
self.assertTrue(revision, "Revision of ACL is not created")
|
chave = 9999
while True:
chave = input().split()
if chave == ["0"]:
break
medida1, medida2, percent = map(int, chave)
terreno = ((medida1 * medida2) * 100 / percent) ** 0.5
print(int(terreno))
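# Worked example (illustrative interpretation): if the input line is
# "20 15 12" -- a 20 x 15 built area occupying 12% of a square plot -- then
# terreno = sqrt(20 * 15 * 100 / 12) = sqrt(2500) = 50, and "50" is printed.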
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes representing the monitoring interface for tasks or devices."""
import base64
import httplib2
import json
import logging
import socket
import traceback
from googleapiclient import discovery
from googleapiclient import errors
from infra_libs import httplib2_utils
from infra_libs.ts_mon.common import interface
from infra_libs.ts_mon.common import http_metrics
from infra_libs.ts_mon.common import pb_to_popo
from infra_libs.ts_mon.protos import metrics_pb2
try: # pragma: no cover
from oauth2client import gce
except ImportError: # pragma: no cover
# Try oauth2client 3.0.0 location.
from oauth2client.contrib import gce
from oauth2client.client import GoogleCredentials
from oauth2client.file import Storage
# Special string that can be passed through as the credentials path to use the
# default Appengine or GCE service account.
APPENGINE_CREDENTIALS = ':appengine'
GCE_CREDENTIALS = ':gce'
class CredentialFactory(object):
"""Base class for things that can create OAuth2Credentials."""
@classmethod
def from_string(cls, path):
"""Creates an appropriate subclass from a file path or magic string."""
if path == APPENGINE_CREDENTIALS:
return AppengineCredentials()
if path == GCE_CREDENTIALS:
return GCECredentials()
return FileCredentials(path)
def create(self, scopes):
raise NotImplementedError
class GCECredentials(CredentialFactory):
def create(self, scopes):
return gce.AppAssertionCredentials(scopes)
class AppengineCredentials(CredentialFactory):
def create(self, scopes): # pragma: no cover
# This import doesn't work outside appengine, so delay it until it's used.
try: # pragma: no cover
from oauth2client import appengine
except ImportError: # pragma: no cover
# Try oauth2client 3.0.0 location.
from oauth2client.contrib import appengine
return appengine.AppAssertionCredentials(scopes)
class FileCredentials(CredentialFactory):
def __init__(self, path):
self.path = path
def create(self, scopes):
with open(self.path, 'r') as fh:
data = json.load(fh)
if data.get('type', None):
credentials = GoogleCredentials.from_stream(self.path)
credentials = credentials.create_scoped(scopes)
return credentials
return Storage(self.path).get()
class DelegateServiceAccountCredentials(CredentialFactory):
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
def __init__(self, service_account_email, base):
self.base = base
self.service_account_email = service_account_email
def create(self, scopes):
logging.info('Delegating to service account %s', self.service_account_email)
http = httplib2_utils.InstrumentedHttp('actor-credentials')
http = self.base.create([self.IAM_SCOPE]).authorize(http)
return httplib2_utils.DelegateServiceAccountCredentials(
http, self.service_account_email, scopes)
class Monitor(object):
"""Abstract base class encapsulating the ability to collect and send metrics.
This is a singleton class. There should only be one instance of a Monitor at
a time. It will be created and initialized by process_argparse_options. It
must exist in order for any metrics to be sent, although both Targets and
Metrics may be initialized before the underlying Monitor. If it does not exist
at the time that a Metric is sent, an exception will be raised.
send() can be either synchronous or asynchronous. If synchronous, it needs to
make the HTTP request, wait for a response and return None.
If asynchronous, send() should start the request and immediately return some
object which is later passed to wait() once all requests have been started.
failed() will return a bool indicating whether the last send failed.
"""
_SCOPES = []
def send(self, metric_pb):
raise NotImplementedError()
def wait(self, state): # pragma: no cover
pass
def failed(self):
raise NotImplementedError()
class HttpsMonitor(Monitor):
_SCOPES = ['https://www.googleapis.com/auth/prodxmon']
def __init__(self, endpoint, credential_factory, http=None, ca_certs=None):
self._endpoint = endpoint
self._failed = False
credentials = credential_factory.create(self._SCOPES)
if http is None:
http = httplib2_utils.RetriableHttp(
httplib2_utils.InstrumentedHttp('acq-mon-api', ca_certs=ca_certs),
max_tries=2)
self._http = credentials.authorize(http)
def encode_to_json(self, metric_pb):
return json.dumps({'payload': pb_to_popo.convert(metric_pb)})
def send(self, metric_pb):
body = self.encode_to_json(metric_pb)
try:
resp, content = self._http.request(self._endpoint,
method='POST',
body=body,
headers={'Content-Type': 'application/json'})
if resp.status == 200:
self._failed = False
else:
logging.warning('HttpsMonitor.send received status %d: %s', resp.status,
content)
self._failed = True
except (ValueError, errors.Error,
socket.timeout, socket.error, socket.herror, socket.gaierror,
httplib2.HttpLib2Error):
logging.exception('HttpsMonitor.send failed')
self._failed = True
def failed(self):
return self._failed
class DebugMonitor(Monitor):
"""Class which writes metrics to logs or a local file for debugging."""
def __init__(self, filepath=None):
if filepath is None:
self._fh = None
else:
self._fh = open(filepath, 'a')
def send(self, metric_pb):
text = str(metric_pb)
logging.info('Flushing monitoring metrics:\n%s', text)
if self._fh is not None:
self._fh.write(text + '\n\n')
self._fh.flush()
def failed(self):
return False
class NullMonitor(Monitor):
"""Class that doesn't send metrics anywhere."""
def send(self, metric_pb):
pass
def failed(self):
return False
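
# --- Usage sketch (illustrative; not part of the original module) -----------
# CredentialFactory.from_string() maps a file path or one of the magic strings
# (':appengine', ':gce') to a credential factory; HttpsMonitor uses it to
# authorize its HTTP client. The endpoint and credentials path are placeholders.
def _example_monitor():  # hypothetical helper, not used by the library
    creds = CredentialFactory.from_string('/path/to/service_account.json')
    # For local debugging, DebugMonitor('/tmp/metrics.log') could be used instead.
    return HttpsMonitor('https://example.com/prodxmon', creds)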
|
class LinksDataProvider:
def __init__(self, links):
self.links = links if links else []
def get_links(self):
return self.links
|
from collections import ChainMap
import arcade
from arcade import Texture
from arcade.experimental.uistyle import UIFlatButtonStyle
from arcade.gui.widgets import UIInteractiveWidget, Surface
class UITextureButton(UIInteractiveWidget):
"""
A button with an image for the face of the button.
:param float x: x coordinate of bottom left
:param float y: y coordinate of bottom left
:param float width: width of widget. Defaults to texture width if not specified.
:param float height: height of widget. Defaults to texture height if not specified.
:param Texture texture: texture to display for the widget.
:param Texture texture_hovered: different texture to display if mouse is hovering over button.
:param Texture texture_pressed: different texture to display if mouse button is pressed while hovering over button.
:param str text: text to add to the button.
:param style: style information for the button.
:param float scale: scale the button, based on the base texture size.
:param size_hint: Tuple of floats (0.0-1.0), how much space of the parent should be requested
:param size_hint_min: min width and height in pixel
:param size_hint_max: max width and height in pixel
"""
def __init__(
self,
x: float = 0,
y: float = 0,
width: float = None,
height: float = None,
texture: Texture = None,
texture_hovered: Texture = None,
texture_pressed: Texture = None,
text: str = "",
scale: float = None,
size_hint=None,
size_hint_min=None,
size_hint_max=None,
style=None,
**kwargs,
):
if width is None and texture is not None:
width = texture.width
if height is None and texture is not None:
height = texture.height
if scale is not None and texture is not None:
height = texture.height * scale
width = texture.width * scale
super().__init__(
x,
y,
width,
height,
size_hint=size_hint,
size_hint_min=size_hint_min,
size_hint_max=size_hint_max,
)
self._tex = texture
self._tex_hovered = texture_hovered
self._tex_pressed = texture_pressed
self._style = style or {}
self._text = text
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.trigger_render()
@property
def texture(self):
return self._tex
@texture.setter
def texture(self, value: Texture):
self._tex = value
self.trigger_render()
@property
def texture_hovered(self):
return self._tex_hovered
@texture_hovered.setter
def texture_hovered(self, value: Texture):
self._tex_hovered = value
self.trigger_render()
@property
def texture_pressed(self):
return self._tex_pressed
@texture_pressed.setter
def texture_pressed(self, value: Texture):
self._tex_pressed = value
self.trigger_render()
def do_render(self, surface: Surface):
self.prepare_render(surface)
tex = self._tex
if self.pressed and self._tex_pressed:
tex = self._tex_pressed
elif self.hovered and self._tex_hovered:
tex = self._tex_hovered
if tex:
surface.draw_texture(0, 0, self.width, self.height, tex)
if self.text:
text_margin = 2
font_size = self._style.get("font_size", 15)
font_color = self._style.get("font_color", arcade.color.WHITE)
border_width = self._style.get("border_width", 2)
# border_color = self._style.get("border_color", None)
# bg_color = self._style.get("bg_color", (21, 19, 21))
start_x = self.width // 2
start_y = self.height // 2 + 4
if self.pressed:
start_y -= 2
arcade.draw_text(
text=self.text,
start_x=start_x,
start_y=start_y,
font_size=font_size,
color=font_color,
align="center",
anchor_x="center",
anchor_y="center",
width=self.width - 2 * border_width - 2 * text_margin,
)
class UIFlatButton(UIInteractiveWidget):
"""
A text button, with support for background color and a border.
:param float x: x coordinate of bottom left
:param float y: y coordinate of bottom left
:param float width: width of widget. Defaults to texture width if not specified.
:param float height: height of widget. Defaults to texture height if not specified.
:param str text: text to add to the button.
:param style: Used to style the button
"""
def __init__(
self,
x: float = 0,
y: float = 0,
width: float = 100,
height: float = 50,
text="",
size_hint=None,
size_hint_min=None,
size_hint_max=None,
style=None,
**kwargs,
):
super().__init__(
x,
y,
width,
height,
size_hint=size_hint,
size_hint_min=size_hint_min,
size_hint_max=size_hint_max,
style=ChainMap(style or {}, UIFlatButtonStyle()),
) # type: ignore
self._text = text
def do_render(self, surface: Surface):
self.prepare_render(surface)
state = "pressed" if self.pressed else "hovered" if self.hovered else "normal"
# Render button
font_name = self.style.get(f"{state}_font_name")
font_size = self.style.get(f"{state}_font_size")
font_color = self.style.get(f"{state}_font_color")
border_width = self.style.get(f"{state}_border_width")
border_color = self.style.get(f"{state}_border")
bg_color = self.style.get(f"{state}_bg")
        # render BG (which is not the widget's background)
if bg_color:
surface.clear(bg_color)
        # render button border (which is not the widget's border)
if border_color and border_width:
arcade.draw_xywh_rectangle_outline(
border_width,
border_width,
self.content_width - 2 * border_width,
self.content_height - 2 * border_width,
color=border_color,
border_width=border_width,
)
# render text
if self.text and font_color:
start_x = self.content_width // 2
start_y = self.content_height // 2
text_margin = 2
arcade.draw_text(
text=self.text,
start_x=start_x,
start_y=start_y,
font_name=font_name, # type: ignore
font_size=font_size, # type: ignore
color=font_color, # type: ignore
align="center",
anchor_x="center",
anchor_y="center",
width=self.content_width - 2 * border_width - 2 * text_margin,
)
# TODO Replace with arcade Property
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.trigger_render()
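
# --- Usage sketch (illustrative; follows typical arcade.gui wiring) ----------
# Widgets defined above are normally added to a UIManager, which dispatches
# mouse events and draws them. Window size, text and handler are placeholders.
def _example_ui():  # hypothetical helper, not part of this module
    import arcade.gui

    window = arcade.Window(400, 300, "Button demo")
    manager = arcade.gui.UIManager()
    manager.enable()

    button = UIFlatButton(x=150, y=125, width=100, height=50, text="Click me")

    @button.event("on_click")
    def _on_click(event):
        print("clicked!")

    manager.add(button)
    return window, manager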
|
# -*- coding: utf-8 -*-
import wx
import OnlineStatus
from datetime import datetime
class DialogPanel(wx.Panel):
def __init__(self, parent, photo, title, friendphoto, text, date, status):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.Size(290, -1), wx.TAB_TRAVERSAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.avatar = wx.StaticBitmap(self, wx.ID_ANY, photo, wx.DefaultPosition, wx.DefaultSize, 0)
sizer.Add(self.avatar, 0, wx.TOP | wx.LEFT | wx.BOTTOM, 5)
sizer2 = wx.BoxSizer(wx.VERTICAL)
sizer6 = wx.BoxSizer(wx.HORIZONTAL)
toppanel = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.Size(170, -1), wx.TAB_TRAVERSAL)
topsizer = wx.BoxSizer( wx.HORIZONTAL )
self.title = wx.StaticText(toppanel, wx.ID_ANY, cutstr(title, 22), wx.DefaultPosition, wx.DefaultSize, 0)
self.title.Wrap(-1)
self.title.SetFont(wx.Font(11, wx.MODERN, wx.NORMAL, wx.FONTWEIGHT_NORMAL, face="Tahoma"))
topsizer.Add(self.title, 0, wx.ALL, 5)
self.onlinestatus = OnlineStatus.OnlineStatus(toppanel, wx.DefaultPosition, False)
topsizer.Add(self.onlinestatus, 0, wx.TOP, 11)
toppanel.SetSizer(topsizer)
sizer6.Add(toppanel, 1, wx.EXPAND, 0)
self.date = wx.StaticText(self, wx.ID_ANY, self.GetDate(date), wx.DefaultPosition, wx.Size(70, -1), 0)
self.date.SetFont(wx.Font(8, wx.MODERN, wx.NORMAL, wx.FONTWEIGHT_NORMAL, face="Tahoma"))
self.date.SetForegroundColour(wx.Color(114, 114, 112))
sizer6.Add(self.date, 0, wx.TOP, 5)
sizer2.Add(sizer6, 1, wx.EXPAND, 0)
sizer4 = wx.BoxSizer(wx.HORIZONTAL)
if friendphoto != None:
self.friendphoto = wx.StaticBitmap( self, wx.ID_ANY, friendphoto, wx.DefaultPosition, wx.Size(25, 25), 0)
sizer4.Add(self.friendphoto, 0, wx.LEFT | wx.BOTTOM, 5)
else:
self.friendphoto = None
self.textpanel = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
sizer5 = wx.BoxSizer(wx.VERTICAL)
self.text = wx.StaticText(self.textpanel, wx.ID_ANY, cutstr(text, 43), wx.DefaultPosition, wx.DefaultSize, 0)
self.text.Wrap(-1)
#self.text.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.FONTWEIGHT_NORMAL, face="Tahoma"))
self.text.SetForegroundColour(wx.Color(104, 104, 102))
sizer5.Add(self.text, 0, wx.TOP | wx.LEFT | wx.BOTTOM, 5)
self.textpanel.SetSizer(sizer5)
#self.textpanel.Layout()
sizer4.Add(self.textpanel, 1, wx.ALL, 0)
sizer2.Add(sizer4, 1, wx.EXPAND, 0)
sizer.Add(sizer2, 1, wx.EXPAND, 0)
self.SetStatus(status)
self.SetSizer(sizer)
self.Layout()
def GetDate(self, date):
time = datetime.fromtimestamp(date)
now = datetime.now()
startday = datetime(now.year, now.month, now.day, hour=0, minute=0, second=0)
endday = datetime(now.year, now.month, now.day, hour=23, minute=59, second=59)
        if startday <= time <= endday:  # if the message was sent today
return " " + time.strftime('%H:%M:%S') # return time
else:
return time.strftime('%Y.%m.%d') #return date
def SetStatus(self, status):
if self.friendphoto == None and not status:
            # if the last message was sent by the user and the conversation partner has not read it yet
pass
elif self.friendphoto != None and not status:
            # if the message was sent by the conversation partner but the user has not read it yet
self.SetBackgroundColour(wx.Color(237, 241, 245))
elif status == True:
            # if the messages have been read
self.SetBackgroundColour(wx.Color(255, 255, 255))
def SetOnline(self, online):
self.onlinestatus.SetOnline(online)
def cutstr(string, number):
if len(string) > number:
return string[:number]
else:
return string
if __name__ == "__main__":
class TestFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(310, 212), wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
bSizer1 = wx.BoxSizer(wx.VERTICAL)
self.dialog = DialogPanel(self, wx.Bitmap("C:\\Projects\\vk_messenger\\2G3ZSYjqBWw.jpg"), "Hello world!", wx.Bitmap("C:\\Projects\\vk_messenger\\2G3ZSYjqBWw.jpg"), u"1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0", 1387711111, False)
bSizer1.Add(self.dialog, 0, wx.EXPAND, 0)
self.SetSizer(bSizer1)
self.Layout()
self.Centre(wx.BOTH)
class MyApp(wx.App):
def OnInit(self):
self.testframe = TestFrame(None)
self.testframe.Show(True)
self.SetTopWindow(self.testframe)
return True
app = MyApp(0)
app.MainLoop()
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.models import User
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render, redirect
from .models import CarDealer, CarModel, CarMake
from .restapis import get_dealers_from_cf, get_dealer_reviews_from_cf, post_review_request_to_cf, get_dealer_by_id
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from datetime import datetime
import logging
import json
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
# Create an `about` view to render a static about page
# def about(request):
# ...
# Create a `contact` view to return a static contact page
#def contact(request):
def registration_request(request):
context = {}
if request.method == 'GET':
return render(request, 'djangoapp/user_registration_bootstrap.html', context)
elif request.method == 'POST':
# Check if user exists
username = request.POST['username']
password = request.POST['psw']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
User.objects.get(username=username)
user_exist = True
        except User.DoesNotExist:
logger.error("New user")
if not user_exist:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
password=password)
login(request, user)
return redirect("djangoapp:index")
else:
context['message'] = "User already exists."
return render(request, 'djangoapp/user_registration_bootstrap.html', context)
def login_request(request):
context = {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['psw']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
#return render(request, 'djangoapp/dealer_details.html', context)
return redirect('djangoapp:index')
else:
context['message'] = "Invalid username or password."
return render(request, 'djangoapp/user_login_bootstrap.html', context)
else:
return render(request, 'djangoapp/user_login_bootstrap.html', context)
def logout_request(request):
logout(request)
return redirect('djangoapp:index')
def about_request(request):
context={}
return render(request, 'djangoapp/about.html', context)
def contact_request(request):
context={}
return render(request, 'djangoapp/contact.html', context)
# ...
def index_request(request):
context={}
return render(request, 'djangoapp/index.html', context)
def get_dealerships(request, state=None):
""" retrieve and presents data of dealers """
context={}
if request.method == "GET":
context["dealership_list"] = get_dealers_from_cf(state) # Get dealers from the URL
#dealer_names = ' '.join([dealer.short_name for dealer in dealerships]) # Concat all dealer's short name
#return HttpResponse(dealer_names) # Return a list of dealer short name
return render(request, 'djangoapp/index.html', context)
def get_reviews(request,dealer_id=0):
""" retrieve and presents data of reviews """
context={}
if request.method == "GET":
context["review_list"] = get_dealer_reviews_from_cf(dealer_id)
context["dealer"] = get_dealer_by_id(dealer_id)
if len(context["review_list"]) == 0:
context["review_list_is_empty"]=True
else:
context["review_list_is_empty"]=False
#reviews from the URL
#reviewers_names = ' '.join([review.name for review in reviews]) # Concat all reviewers's name
#return HttpResponse(reviewers_names) # Return a list of reviewers short name
return render(request, 'djangoapp/dealer_details.html', context)
def add_review(request, dealer_id):
context={}
context["dealer_id"] = dealer_id
if request.method == "GET":
context["dealer"] = get_dealer_by_id(dealer_id)
context["cars"] = CarModel.objects.filter(dealerId=dealer_id) # REFACTOR : name of delear ID somewhere
return render(request, 'djangoapp/add_review.html', context)
if request.method == "POST":
user = request.user
if user.is_authenticated:
json_payload_dict={}
review={}
#required fields
review["name"] = user.username
review["dealership"] = dealer_id
#review["purchase"]=request.POST["purchasecheck"]
checked_values = request.POST.getlist("purchasecheck")
if len(checked_values)==1:
review["purchase"]= True
else:
review["purchase"]= False
review["review"]=request.POST["content"]# REFACTOR : rename in HTML form
#optionals
car_object=CarModel.objects.filter(id=request.POST["car"])
review["car_make"]=car_object[0].name
review["car_model"]=car_object[0].type # REFACTOR : mode of type
review["car_year"]=car_object[0].year.strftime("%Y")
review["purchase_date"]=request.POST["purchasedate"]
# out of form
review["another"]=None
            # not persistent
review["time"] = datetime.utcnow().isoformat()
response = post_review_request_to_cf(review)
#TODO error analysis
return redirect("djangoapp:dealer_details", dealer_id)
# Create a `get_dealer_details` view to render the reviews of a dealer
# def get_dealer_details(request, dealer_id):
# ...
# Create a `add_review` view to submit a review
# def add_review(request, dealer_id):
# ...
|
from typing import Tuple
from .utils import (
SafetyException,
_calculate_M,
_calculate_x,
_generate_random_bytes,
_get_srp_generator,
_get_srp_prime,
_Hash,
_pad,
_to_int,
)
class EvidenceException(Exception):
"""
Exception raised when server evidence key does not match.
"""
pass
def generate_a_pair() -> Tuple[int, int]:
"""
Generate private ephemeral a and public key A.
Returns:
Tuple (private a (int), public A (int))
"""
prime = _get_srp_prime()
generator = _get_srp_generator()
    a = _to_int(_generate_random_bytes(32))  # RFC 5054 recommends 256 bits
A = pow(generator, a, prime)
return a, A
def process_challenge(
identity: str,
password: str,
salt: bytes,
a: int,
A: int,
B: int,
) -> Tuple[bytes, bytes]:
"""
Takes in salt and public value B to respond to SRP challenge with
message M. Also returns session key for later authentication that
the server is legit.
Args:
identity (str): the identity to process
        password (str): the password to process
        salt (bytes): the salt provided by the server
        a (int): the ephemeral value a generated by the client
A (int): the public value A generated by the client
B (int): the public value B from the server
Returns:
Tuple (message (bytes), session_key (bytes))
Raises:
SafetyException: if fails to pass SRP-6a safety checks
"""
prime = _get_srp_prime()
width = prime.bit_length()
generator = _get_srp_generator()
padded_generator = _pad(generator, width)
padded_A = _pad(A, width)
padded_B = _pad(B, width)
# u - random scrambling param
u = _to_int(_Hash(padded_A, padded_B))
# x - private key
x = _calculate_x(salt, identity, password)
# k - multiplier
k = _to_int(_Hash(prime, padded_generator))
# SRP-6a safety checks
if B == 0:
raise SafetyException("Public value B is 0. Auth Failed.")
if u == 0:
raise SafetyException("Scrambler u is 0. Auth Failed.")
# Premaster secret, S = (B - k*(generator^x)) ^ (a + u*x)
t1 = B - k * pow(generator, x, prime)
t2 = a + u * x
# Calculate shared session key
S = pow(t1, t2, prime)
session_key = _Hash(S)
# Shared message to server
M = _calculate_M(
generator,
prime,
identity,
salt,
A,
B,
session_key,
)
return M, session_key
def verify_session(
A: int,
M: bytes,
session_key: bytes,
server_H_AMK: bytes,
) -> bytes:
"""
Verify session with server evidence key H_AMK.
Args:
A (int): the public A value generated by the client
M (bytes): the message the client sends to the server
session_key (bytes): the strong private session key generated by the client
server_H_AMK (bytes): the evidence key returned by the server
"""
client_H_AMK = _Hash(A, M, session_key)
if client_H_AMK != server_H_AMK:
raise EvidenceException("Evidence keys do not match. Auth Failed.")
return client_H_AMK
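# Illustrative client-side flow (a sketch; the server exchange is elided and the
# placeholder values are assumptions, only the call order comes from this module):
#   a, A = generate_a_pair()                  # 1. create ephemeral pair, send A and identity
#   salt, B = ...                             # 2. received from the server
#   M, session_key = process_challenge(identity, password, salt, a, A, B)
#   server_H_AMK = ...                        # 3. server's evidence key for message M
#   verify_session(A, M, session_key, server_H_AMK)  # raises EvidenceException on mismatch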
|
#!/usr/bin/python
# coding: utf-8
from wand.image import Image
from PIL import Image as PI
import pyocr
import pyocr.builders
import io
import sys
import argparse
import time
from tesserocr import PyTessBaseAPI, PSM, RIL
import tesserocr
import os
import re
class LocalOCR(object):
def __init__(self, ocr_language):
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
sys.exit(1)
self.tool = tools[0]
print("OCR tool: %s" % self.tool)
try:
langs = self.tool.get_available_languages()
self.lang = langs[0]
if ocr_language in langs:
self.lang = ocr_language
print("OCR selected language: %s (available: %s)" % (self.lang.upper(), ", ".join(langs)))
except Exception as e:
print("{}".format(e))
def process(self, pdf_filename, pdf_resolution, imageformat, do_orientation):
final_text = ""
image_pdf = Image(filename=pdf_filename, resolution=pdf_resolution)
image_page = image_pdf.convert(imageformat)
page = 1
process_start = time.time()
for img in image_page.sequence:
img_per_page = Image(image=img)
img_per_page.type = 'grayscale'
img_per_page.depth = 8
img_per_page.density = pdf_resolution
try:
img_per_page.level(black=0.3, white=1.0, gamma=1.5, channel=None)
except AttributeError as e:
print("Update Wand library: %s" % e)
img_per_page.save(filename="buffer.png")
page_start = time.time()
txt = self.image2txt_pyocr(img_per_page.make_blob(imageformat), do_orientation)
page_elaboration = time.time() - page_start
print("page %s - size %s - process %2d sec. - text %s" %
(page, img_per_page.size, page_elaboration, len(txt)))
final_text += "%s\n" % txt
page += 1
img.destroy()
process_end = time.time() - process_start
print("Total elaboration time: %s" % process_end)
return final_text
def image2txt_pyocr(self, image, do_orientation):
txt = ""
orientation = ""
img_per_page = PI.open(io.BytesIO(image))
if do_orientation is True:
try:
if self.tool.can_detect_orientation():
orientation = self.tool.detect_orientation(img_per_page, lang=self.lang)
angle = orientation["angle"]
if angle != 0:
img_per_page.rotate(orientation["angle"])
except pyocr.PyocrException as exc:
print("Orientation detection failed: {}".format(exc))
print("Orientation: {}".format(orientation))
try:
txt = self.tool.image_to_string(
img_per_page, lang=self.lang,
builder=pyocr.builders.TextBuilder()
)
except pyocr.error.TesseractError as e:
print("{}".format(e))
return txt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process input PDF file to CSV by OCR')
parser.add_argument('pdf_filename', nargs='?', default='INPUT.pdf',
help='Input PDF file')
    parser.add_argument('pdf_resolution', nargs='?', default=300, type=int,
                        help='Input PDF dpi resolution')
parser.add_argument('ocr_language', nargs='?', default='ita',
help='OCR language')
parser.add_argument('ocr_imageformat', nargs='?', default='png',
help='OCR image format')
parser.add_argument('ocr_do_orientation', nargs='?', default=True,
help='OCR do orientation test')
parser.add_argument('text_output', nargs='?', default="output.txt",
help='OCR text output')
args = parser.parse_args()
if not args.pdf_filename:
print('--filename is mandatory')
sys.exit(1)
    p = LocalOCR(args.ocr_language)
print("1. TEXT file \"%s\" not found - Process PDF file \"%s\"" % (args.text_output, args.pdf_filename))
output = p.process(args.pdf_filename, args.pdf_resolution, args.ocr_imageformat, args.ocr_do_orientation)
print("2 Writing TEXT output file \"%s\"" % args.text_output)
    with open(args.text_output, "w", encoding="utf-8") as out_file:
        out_file.write(output)
|
"""Module to manage background export task."""
import logging
import os
from background_task import background
from apps.users.models import User
from .export import JekyllSiteExport
from .models import Manifest
LOGGER = logging.getLogger(__name__)
@background(schedule=1)
def github_export_task(
manifest_pid, version, github_repo=None,
user_id=None, owner_ids=None, deep_zoom=False):
"""Background GitHub export
:param manifest_pid: Manifest pid
:type manifest_pid: str
:param version: IIIF API version
:type version: str
:param github_repo: GitHub repo name, defaults to None
:type github_repo: str, optional
:param user_id: ID of exporter's user, defaults to None
:type user_id: UUID, optional
:param owner_ids: List of annotation owners, defaults to None
:type owner_ids: list, optional
:param deep_zoom: If True, include deep zoom in export, defaults to False
:type deep_zoom: bool, optional
"""
LOGGER.info('Background github export started.')
# need manifest ID
# need deep_zoom
manifest = Manifest.objects.get(pid=manifest_pid)
user = User.objects.get(id=user_id)
jekyll_exporter = JekyllSiteExport(
manifest,
version,
github_repo=github_repo,
deep_zoom=deep_zoom,
owners=owner_ids,
user=user
)
jekyll_exporter.github_export(user.email)
LOGGER.info('Background github export finished.')
@background(schedule=1)
def download_export_task(
manifest_pid, version, github_repo=None,
user_id=None, owner_ids=None, deep_zoom=False):
"""Background download export.
:param manifest_pid: Manifest pid
:type manifest_pid: str
:param version: IIIF API version
:type version: str
:param github_repo: GitHub repo name, defaults to None
:type github_repo: str, optional
:param user_id: ID of exporter's user, defaults to None
:type user_id: UUID, optional
:param owner_ids: List of annotation owners, defaults to None
:type owner_ids: list, optional
:param deep_zoom: If True, include deep zoom in export, defaults to False
:type deep_zoom: bool, optional
"""
LOGGER.info('Background download export started.')
# need manifest ID
# need deep_zoom
manifest = Manifest.objects.get(pid=manifest_pid)
user = User.objects.get(id=user_id)
jekyll_exporter = JekyllSiteExport(
manifest,
version,
github_repo=github_repo,
deep_zoom=deep_zoom,
owners=owner_ids,
user=user
)
zipfile_name = jekyll_exporter.download_export(user.email, manifest)
delete_download_task(zipfile_name)
LOGGER.info('Background download export finished.')
@background(schedule=86400)
def delete_download_task(download_path):
"""Background delete download task.
:param download_path: System path for download.
:type download_path: str
"""
LOGGER.info('Background download deletion started.')
os.remove(download_path)
LOGGER.info('Background download deletion finished.')
|
import random
import pygame
pygame.init()
class Button():
def __init__(self):
self.textBoxes = {}
#----Clicked In----
def clickedIn(self,x,y,width,height):
global mouse_state, mouse_x, mouse_y
if mouse_state == 1 and mouse_x >= x and mouse_x <= (x + width) and mouse_y >= y and mouse_y <= (y + height):
return True
#----Clicked Out----
def clickedOut(self,x,y,width,height):
global mouse_state, mouse_x, mouse_y
if mouse_state == 1 and mouse_x < x or mouse_state == 1 and mouse_x > (x + width) or mouse_state == 1 and mouse_y < y or mouse_state == 1 and mouse_y > (y + height):
return True
#----Hovering----
def hovering(self,x,y,width,height):
global mouse_state, mouse_x, mouse_y
if mouse_state == 0 and mouse_x >= x and mouse_x <= (x + width) and mouse_y >= y and mouse_y <= (y + height):
return True
#----Click Button----
def clickButton(self,x,y,width,height,normalColor,hoverColor,textFont,text,textColor,stateHolding = False,stateVariable = 0,state = 1):
if not self.clickedIn(x,y,width,height) and not self.hovering(x,y,width,height):
pygame.draw.rect(screen,normalColor,(x,y,width,height))
elif self.hovering(x,y,width,height):
pygame.draw.rect(screen,hoverColor,(x,y,width,height))
if stateHolding == True and stateVariable == state:
pygame.draw.rect(screen,hoverColor,(x,y,width,height))
buttonText = textFont.render(text,True,textColor)
buttonText_x = buttonText.get_rect().width
buttonText_y = buttonText.get_rect().height
screen.blit(buttonText,(((x + (width / 2)) - (buttonText_x / 2)),((y + (height / 2)) - (buttonText_y / 2))))
if self.clickedIn(x,y,width,height):
return True
WHITE = (255,255,255)
GREY = (127,127,127)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
DGREEN = (0,127,0)
font = pygame.font.SysFont('Comic Sans MS',20)
size = (600,700)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Snake")
done = False
clock = pygame.time.Clock()
scale = 30
class Snake():
def __init__(self):
self.alive = True
self.length = 1
self.tail = []
self.x = 0
self.y = 0
self.xV = 0
self.yV = 1
self.tick = 0
def draw(self):
for section in self.tail:
pygame.draw.rect(screen,WHITE,(((section[0]) * scale),((section[1]) * scale) + 100,scale,scale))
def update(self):
if self.alive == True:
if self.tick == 10:
self.x += self.xV
self.y += self.yV
for segment in self.tail:
if segment[0] == self.x and segment[1] == self.y:
self.alive = False
self.tick = 0
self.tail.append((self.x,self.y))
else:
self.tick += 1
while len(self.tail) > self.length:
self.tail.pop(0)
if self.x == -1:
self.alive = False
self.x = 0
if self.x == (size[0] / scale):
self.alive = False
self.x = (size[0] / scale) - 1
if self.y == -1:
self.alive = False
self.y = 0
if self.y == (size[1] - 100) / scale:
self.alive = False
self.y = ((size[1] - 100) / scale) - 1
def reset(self):
self.alive = True
self.length = 1
self.tail.clear()
self.x = 0
self.y = 0
self.xV = 0
self.yV = 1
self.tick = 0
class Food():
def __init__(self):
        self.x = random.randrange((size[0] // scale) - 1)
        self.y = random.randrange(((size[1] - 100) // scale) - 1)
def draw(self):
pygame.draw.rect(screen,RED,((self.x * scale),(self.y * scale) + 100,scale,scale))
def update(self):
if snake.x == self.x and snake.y == self.y:
self.reset()
snake.length += 1
def reset(self):
        self.x = random.randrange((size[0] // scale) - 1)
        self.y = random.randrange(((size[1] - 100) // scale) - 1)
class Utility():
def __init__(self):
return
def draw(self):
text = font.render("Length: " + str(snake.length),True,BLACK)
text_y = text.get_rect().height
screen.blit(text,(90,(50 - (text_y / 2))))
text = font.render("Alive: " + str(snake.alive),True,BLACK)
text_y = text.get_rect().height
screen.blit(text,(size[0] - 210,(50 - (text_y / 2))))
pygame.draw.line(screen,BLACK,(0,100),(size[0],100),7)
if snake.alive == False:
if button.clickButton((size[0] / 2) - 75,25,150,50,GREEN,DGREEN,font,"Play Again",WHITE):
snake.reset()
food.reset()
for i in range(int(size[0] / scale) - 1):
pygame.draw.line(screen,BLACK,(0,(100 + (i * scale) + scale)),(size[0],(100 + (i * scale) + scale)),3)
pygame.draw.line(screen,BLACK,(((i * scale) + scale),100),(((i * scale) + scale),size[1]),3)
def update(self):
return
button = Button()
snake = Snake()
food = Food()
utility = Utility()
mouse_state = 0
mouse_x, mouse_y = 0, 0
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_state = 1
pygame.mouse.set_pos(mouse_x,mouse_y + 1)
else:
mouse_state = 0
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
snake.yV = 0
snake.xV = -1
if event.key == pygame.K_RIGHT or event.key == pygame.K_d:
snake.yV = 0
snake.xV = 1
if event.key == pygame.K_UP or event.key == pygame.K_w:
snake.xV = 0
snake.yV = -1
if event.key == pygame.K_DOWN or event.key == pygame.K_s:
snake.xV = 0
snake.yV = 1
mouse_x = pygame.mouse.get_pos()[0]
mouse_y = pygame.mouse.get_pos()[1]
pygame.display.set_caption("Snake, FPS: " + str(clock.get_fps()))
screen.fill(GREY)
snake.update()
food.update()
utility.update()
food.draw()
snake.draw()
utility.draw()
pygame.display.flip()
clock.tick(50)
pygame.quit() |
from Coordinate import Coordinate
from datetime import *
import time
# Class node
class Node(object):
# Number of pickup/delivery nodes
n_nodes = 1
# Number of depots
d_nodes = 1
def __init__(self, type, id, x, y, demand):
self.type = type
self.id = id
self.coord = Coordinate(x, y)
self.arrival_t = 0
self.load = {'p1': 0,
'p2': 0,
'p3': 0,
'p4': 0,
's1': 0,
's2': 0,
's3': 0}
self.id_next = None
self.demand = demand
def get_demand(self):
return self.demand
def get_load(self):
return self.load
def get_type(self):
return self.type
def set_type(self, type):
self.type = type
def get_id_next(self):
return self.id_next
def set_id_next(self, id_next):
self.id_next = id_next
def set_arrival_t(self, arrival_t):
self.arrival_t = arrival_t
def set_vehicle(self, vehicle):
self.vehicle = vehicle
def set_load(self, load):
self.load = load
def get_id(self):
return self.id
def get_arrival_t(self):
return self.arrival_t
def get_load_0(self):
return {id:int(self.load[id]) for id in self.load.keys() if int(self.load[id])>0}
def get_coord(self):
return self.coord
    @classmethod
    def increment_id(cls):
        Node.n_nodes = Node.n_nodes + 1
    @classmethod
    def increment_id_depot(cls):
        Node.d_nodes = Node.d_nodes + 1
    @classmethod
    def get_n_nodes(cls):
        return Node.n_nodes
    @classmethod
    def get_d_nodes(cls):
        return Node.d_nodes
    @classmethod
    def factory_node(cls, type, id, x, y, demand):
        if type == 'DL':
            return NodeDL(type, id, x, y, demand)
        elif type == 'PK':
            return NodePK(type, id, x, y, demand)
        elif type == 'DP':
            return NodeDepot(type, id, x, y, demand)
        else:
            return None
    @classmethod
    def copy_node(cls, node):
        return Node.factory_node(node.get_type(), node.get_id(), node.get_coord().get_x(), node.get_coord().get_y(), node.get_demand())
def __str__(self):
return " "+str(self.get_id()) + str(self.coord) + " " + str({id:int(self.demand[id]) for id in self.demand.keys() if int(self.demand[id])!=0 })
# Pickup node
class NodePK(Node):
def __init__(self, type, id, x, y, demand):
new_id = id
if new_id == None:
new_id = "pk" + str(Node.get_n_nodes())
Node.increment_id()
Node.__init__(self, type, new_id, x, y, demand)
def set_arrival_t(self, arrival_t):
self.arrival_t = arrival_t
def set_vehicle(self, vehicle):
self.vehicle = vehicle
def set_load(self, load):
self.load = load
def __str__(self):
return '|PK|' + super().__str__() + ' - LOAD: ' + str({id:int(self.load[id]) for id in self.load.keys() if int(self.load[id])>0}) + ' - ARR: ' + datetime.fromtimestamp(int(self.arrival_t)).strftime('%Y-%m-%d %H:%M')
# Delivery node
class NodeDL(Node):
def __init__(self, type, id, x, y, demand):
new_id = id
if new_id == None:
new_id = "dl" + str(Node.get_n_nodes())
Node.increment_id()
Node.__init__(self, type, new_id, x, y, demand)
def __str__(self):
return '|DL|' + super().__str__() + ' - LOAD: ' + str({id:int(self.load[id]) for id in self.load.keys() if int(self.load[id])>0}) + ' - ARR: ' + datetime.fromtimestamp(int(self.arrival_t)).strftime('%Y-%m-%d %H:%M')
# Departure/arrival node
class NodeDepot(Node):
def __init__(self, type, id, x, y, demand):
new_id = id
if new_id == None:
new_id = "dp" + str(Node.get_d_nodes())
Node.increment_id_depot()
Node.__init__(self, type, new_id, x, y, demand)
def __str__(self):
# + self.load + self.vehicle + self.arrival_t
arr = datetime.fromtimestamp(int(self.arrival_t)).strftime('%Y-%m-%d %H:%M')
if(int(self.arrival_t)==0):
arr = '0'
return '|->|' + super().__str__() + ' - LOAD: ' + str({id:int(self.load[id]) for id in self.load.keys() if int(self.load[id])>0}) + ' - ARR: ' + arr
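# Example usage (illustrative values, not part of the original module):
#   Node.factory_node('PK', None, 10.0, 20.0,
#                     {'p1': 1, 'p2': 0, 'p3': 0, 'p4': 0, 's1': 0, 's2': 0, 's3': 0})
# builds a NodePK with an auto-generated id ("pk<n>"), while copy_node() clones an existing node.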
|
def sum_func(a, *args):
s = a+sum(args)
print(s)
sum_func(10)
sum_func(10,20)
sum_func(10,20,30)
sum_func(10, 20, 30, 40)
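# Printed output: 10, 30, 60, 100 (the first argument plus the sum of any extra arguments).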
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import base_doc
from .baseradec import BaseRADecFrame, doc_components
__all__ = ['ICRS']
@format_doc(base_doc, components=doc_components, footer="")
class ICRS(BaseRADecFrame):
"""
A coordinate or frame in the ICRS system.
If you're looking for "J2000" coordinates, and aren't sure if you want to
use this or `~astropy.coordinates.FK5`, you probably want to use ICRS. It's
more well-defined as a catalog coordinate and is an inertial system, and is
very close (within tens of milliarcseconds) to J2000 equatorial.
For more background on the ICRS and related coordinate transformations, see
the references provided in the :ref:`astropy:astropy-coordinates-seealso`
section of the documentation.
"""
|
# coding: utf-8
# In[21]:
from functools import reduce
# In[24]:
def factor(x):
listx=[z for z in range(1,x+1)]
fun_la=lambda a,b: a*b
zz=reduce(fun_la,listx)
print(zz)
# In[26]:
factor(4)
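# factor(4) prints 24, i.e. 4! computed by reducing multiplication over [1, 2, 3, 4].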
# In[ ]:
|
import argparse
import abc
from typing import List, Any, Optional, Dict
from pedtools.commands.hookspecs import PedtoolsPlugin
# From https://stackoverflow.com/questions/44542605/python-how-to-get-all-default-values-from-argparse
def get_argparse_defaults(parser):
defaults = {}
for action in parser._actions:
if not action.required and action.dest != "help":
defaults[action.dest] = action.default
return defaults
def get_argparse_required(parser):
required = []
for action in parser._actions:
if action.required:
required.append(action.dest)
return required
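# For example, for a parser built with add_argument('--out', default='a.txt') and
# add_argument('name'), get_argparse_defaults(parser) == {'out': 'a.txt'} and
# get_argparse_required(parser) == ['name'] (positional arguments are required).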
def non_runnable(funcobj: Any) -> Any:
"""A decorator indicating non runnable action
This attribute remains unless overridden by the implemented action
"""
funcobj.__isnotrunnable__ = True
return funcobj
class PedtoolsAction(metaclass=abc.ABCMeta):
""" Base class to define actions
If the child object doesn't implement the action method, print help is assumed
    Defines base flags and methods that can be overridden by the children
"""
def __init__(self) -> None:
self._hook: Optional[PedtoolsPlugin] = None
@non_runnable
    def action(self, config: dict):  # needs to be implemented in the child objects, else help is printed
pass
@abc.abstractmethod
def help_description(self) -> Optional[str]:
raise NotImplementedError
def action_flags(self) -> List[argparse.ArgumentParser]:
return []
def group_description(self) -> Optional[str]:
return None
def register_subparsers(self) -> List[argparse.ArgumentParser]:
# define common shared arguments
base_subparser = argparse.ArgumentParser(add_help=False)
base_subparser.add_argument(
            '--cite', action='store_true', help='Print citable reference for this module')
additional_parsers = self.action_flags()
additional_parsers.append(base_subparser)
return additional_parsers
def get_config_parameters(self):
all_parsers = self.register_subparsers()
defaults = {}
required = []
for parser in all_parsers:
defaults.update(get_argparse_defaults(parser))
required = required + get_argparse_required(parser)
return (required, defaults)
def add_hook(self, hook: PedtoolsPlugin) -> None:
self._hook = hook
def run_action(self, config: dict):
config = self.pre_action(config)
self.action(config)
self.post_action(config)
def pre_action(self, config: dict) -> dict:
if self._hook:
configs = self._hook.pedtoolsclient_add_pre_action(
config=config)
final_config = {}
for c in configs:
final_config.update(c)
return final_config
return config
def post_action(self, config: dict):
if self._hook:
self._hook.pedtoolsclient_add_post_action(
config=config)
|
names = ['BBB','OOO','PPP','CCC']
salaries = [3000,2000,4500,8000]
aDict = dict(zip(names,salaries))
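# zip pairs names with salaries in order, so aDict == {'BBB': 3000, 'OOO': 2000, 'PPP': 4500, 'CCC': 8000}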
print(aDict) |
expected_output = {
"lisp_router_instances": {
0: {
"lisp_router_id": {
"site_id": "unspecified",
"xtr_id": "0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7",
},
"lisp_router_instance_id": 0,
"service": {
"ipv6": {
"delegated_database_tree": False,
"etr": {
"accept_mapping_data": "disabled, verify disabled",
"enabled": True,
"encapsulation": "lisp",
"map_cache_ttl": "1d00h",
"use_petrs": {"10.10.10.10": {"use_petr": "10.10.10.10"}},
"mapping_servers": {
"10.166.13.13": {
"ms_address": "10.166.13.13",
"uptime": "00:00:35",
},
"10.64.4.4": {
"ms_address": "10.64.4.4",
"uptime": "17:49:58",
},
},
"proxy_etr_router": False,
},
"instance_id": {
"101": {
"database": {
"dynamic_database_limit": 65535,
"dynamic_database_size": 0,
"inactive_deconfig_away_size": 0,
"route_import_database_limit": 1000,
"route_import_database_size": 0,
"static_database_limit": 65535,
"static_database_size": 1,
"total_database_mapping_size": 1,
},
"eid_table": "vrf red",
"itr": {
"local_rloc_last_resort": "10.16.2.2",
"use_proxy_etr_rloc": "10.10.10.10",
},
"map_cache": {
"imported_route_count": 0,
"imported_route_limit": 1000,
"map_cache_size": 2,
"persistent_map_cache": False,
"static_mappings_configured": 0,
},
"map_request_source": "derived from EID destination",
"mapping_servers": {
"10.166.13.13": {
"ms_address": "10.166.13.13",
"uptime": "00:00:35",
},
"10.64.4.4": {
"ms_address": "10.64.4.4",
"uptime": "17:49:58",
},
},
"site_registration_limit": 0,
}
},
"itr": {
"enabled": True,
"map_resolvers": {
"10.166.13.13": {"map_resolver": "10.166.13.13"},
"10.64.4.4": {"map_resolver": "10.64.4.4"},
},
"max_smr_per_map_cache_entry": "8 more specifics",
"multiple_smr_suppression_time": 20,
"proxy_itr_router": False,
"solicit_map_request": "accept and process",
},
"locator_status_algorithms": {
"ipv4_rloc_min_mask_len": 0,
"ipv6_rloc_min_mask_len": 0,
"lsb_reports": "process",
"rloc_probe_algorithm": False,
"rloc_probe_on_member_change": False,
"rloc_probe_on_route_change": "N/A (periodic probing disabled)",
},
"locator_table": "default",
"map_cache": {
"map_cache_activity_check_period": 60,
"map_cache_fib_updates": "established",
"map_cache_limit": 1000,
},
"map_resolver": {"enabled": False},
"map_server": {"enabled": False},
"mobility_first_hop_router": False,
"nat_traversal_router": False,
"service": "ipv6",
}
},
}
}
}
|
#!/bin/python
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
def twoStrings(s1, s2):
#Converting the strings to a set in both cases.
s1 = set(s1)
s2 = set(s2)
    common_substring = set.intersection(s1, s2)  # common characters; a common substring exists iff a common character exists
    isEmpty = (common_substring == set())  # True when no characters are shared
#Returns YES or NO depending on whether there is common substring in s1 and s2.
if isEmpty:
return "NO"
else:
return "YES"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
    q = int(input())
    for q_itr in range(q):
        s1 = input()
        s2 = input()
result = twoStrings(s1, s2)
fptr.write(result + '\n')
fptr.close()
|
import sys
sys.path.append("../")
from shapely.geometry import LineString
import datetime as dt
import Utilities
import plots
import doc
import numpy as np
import streamlit as st
from hydralit import HydraHeadApp
import logging
class JamClass(HydraHeadApp):
def __init__(self, data, fps):
self.data = data
self.frames = np.unique(self.data[:, 1])
self.peds = np.unique(data[:, 0]).astype(int)
self.fps = fps
def init_sidebar(self):
st.sidebar.header("🐌 Jam")
# choose_jam_duration = st.sidebar.checkbox(
# "▶️ Show",
# value=False,
# help="Plot change of the number of pedestrian in jam versus time",
# key="jam_duration",
# )
jam_speed = st.sidebar.slider(
"Min jam speed / m/s",
0.1,
1.0,
0.5,
help="An agent slower that this speed is in jam",
key="jVmin",
)
min_jam_time = st.sidebar.slider(
"Min jam duration / s",
1,
180,
1,
help="A jam lasts at least that long",
key="jTmin",
)
min_jam_agents = st.sidebar.slider(
"Min agents in jam",
2,
200,
20,
help="A jam has at least so many agents",
key="jNmin",
)
return jam_speed, min_jam_time, min_jam_agents
def run(self):
info = st.expander("Documentation: Jam definitions (click to expand)")
with info:
doc.doc_jam()
jam_speed, min_jam_time, min_jam_agents = JamClass.init_sidebar(self)
logging.info("calculate jam")
logging.info(f"jam speed {jam_speed}")
logging.info(f"min jam agents {min_jam_agents}")
logging.info(f"min jam time {min_jam_time}")
c1, c2 = st.columns((1, 1))
pl2 = c1.empty()
pl = c2.empty()
precision = c1.slider(
"Precision",
0,
int(10 * self.fps),
help="Condition on the length of jam durations (in frame)",
)
nbins = c2.slider(
"Number of bins", 5, 40, value=10, help="Number of bins", key="lifetime"
)
pl3 = c1.empty()
pl4 = c1.empty()
nbins2 = pl4.slider(
"Number of bins", 5, 40, value=10, help="Number of bins", key="waiting"
)
## lifetime
jam_frames = Utilities.jam_frames(self.data, jam_speed)
with Utilities.profile("jam_lifetime"):
lifetime, chuncks, max_lifetime, from_to = Utilities.jam_lifetime(
self.data, jam_frames[10:], min_jam_agents, self.fps, precision
            )  # skip the first frames because, in simulations, people initially stand still
## duration
logging.info(f"waiting time with {min_jam_time}")
with Utilities.profile("jam_waiting_time"):
waiting_time = Utilities.jam_waiting_time(
self.data, jam_speed, min_jam_time, self.fps, precision
)
if not waiting_time.size:
wtimes = np.array([])
else:
wtimes = waiting_time[:, 1]
with Utilities.profile("Rendering Jam figures"):
## plots
fig1 = plots.plot_jam_lifetime(
self.frames,
lifetime,
self.fps,
max_lifetime,
from_to,
min_jam_agents,
)
hist = plots.plot_jam_lifetime_hist(chuncks, self.fps, nbins)
pl2.plotly_chart(fig1, use_container_width=True)
pl.plotly_chart(hist, use_container_width=True)
# --
hist = plots.plot_jam_waiting_hist(wtimes, self.fps, nbins2)
pl3.plotly_chart(hist, use_container_width=True)
|
from h2pubsub import Server
Server(address='0.0.0.0', port=443).run_forever()
|
from __future__ import print_function
import tensorflow as tf
import keras
from tensorflow.keras.models import load_model
from keras import backend as K
from keras.layers import Input
import numpy as np
import subprocess
from tensorloader import TensorLoader as tl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, roc_curve, auc, precision_recall_curve,average_precision_score, confusion_matrix
import pandas as pd
from sklearn import impute
import argparse
import os
import time
#Step 0: Process arguments
parser = argparse.ArgumentParser(description='CoRE-ATAC Prediction Tool')
parser.add_argument("datadirectory")
parser.add_argument("basename")
parser.add_argument("model")
parser.add_argument("outputfile")
parser.add_argument('--pf', dest='pf', type=str, default="",
                    help='Destination of PEAS features')
parser.add_argument('--le', dest='le', type=str, default="",
                    help='Destination of LabelEncoder')
parser.add_argument('--swapchannels', default=False, action='store_true', dest='swap')
args = parser.parse_args()
datadirectory = args.datadirectory
basename = args.basename
model = args.model
outputfile = args.outputfile
featurefile = args.pf
labelencoder = args.le
swapchannels = args.swap
def predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels):
model = load_model(model)
if featurefile == "":
featurefile = "/CoRE-ATAC/PEAS/features.txt"
if labelencoder == "":
labelencoder = "/CoRE-ATAC/PEAS/labelencoder.txt"
#Step 1: Load the data
start_time = time.time()
seqdata,sigdata,annot,summitpeaks,peaks = tl.readTensors(basename, datadirectory, 600, sequence=True, signal=True)
peasfeatures = tl.getPEASFeatures(datadirectory+"/peak_features/"+basename+"_features.txt", featurefile, labelencoder, peaks)
#num_classes = 4
peasfeatures = np.expand_dims(peasfeatures, axis=2)
sigseqdata = tl.getSeqSigTensor(seqdata, sigdata)
print("--- Data loaded in %s seconds ---" % (time.time() - start_time))
x_test_sigseq = sigseqdata
if swapchannels == False:
x_test_sigseq = np.moveaxis(x_test_sigseq, 1, -1) #Originally had channels first, but CPU tensorflow requires channels last
x_test_peas = peasfeatures
#Step 2: Make predictions
start_time = time.time()
sig_predictions, peas_predictions, predictions = model.predict([x_test_sigseq, x_test_peas])
print("--- Data predicted in %s seconds ---" % (time.time() - start_time))
#Write the output file:
columns = ["Chr", "Start", "End", "Promoter Probability", "Enhancer Probability", "Insulator Probability", "Other Probability"]
pd.DataFrame(np.concatenate((peaks, predictions), axis=1), columns=columns).to_csv(outputfile, header=None, index=None, sep="\t")
predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: schema.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='schema.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cschema.proto\"b\n\x08Location\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x12\n\npostalCode\x18\x02 \x01(\t\x12\x0c\n\x04\x63ity\x18\x03 \x01(\t\x12\x13\n\x0b\x63ountryCode\x18\x04 \x01(\t\x12\x0e\n\x06region\x18\x05 \x01(\t\"9\n\x07Profile\x12\x0f\n\x07network\x18\x01 \x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\"\xaf\x01\n\x06\x42\x61sics\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12\x0f\n\x07picture\x18\x03 \x01(\t\x12\r\n\x05\x65mail\x18\x04 \x01(\t\x12\r\n\x05phone\x18\x05 \x01(\t\x12\x0f\n\x07website\x18\x06 \x01(\t\x12\x0f\n\x07summary\x18\x07 \x01(\t\x12\x1b\n\x08location\x18\x08 \x01(\x0b\x32\t.Location\x12\x1a\n\x08profiles\x18\t \x03(\x0b\x32\x08.Profile\"\x83\x01\n\x04Work\x12\x0f\n\x07\x63ompany\x18\x01 \x01(\t\x12\x10\n\x08position\x18\x02 \x01(\t\x12\x0f\n\x07website\x18\x03 \x01(\t\x12\x11\n\tstartDate\x18\x04 \x01(\t\x12\x0f\n\x07\x65ndDate\x18\x05 \x01(\t\x12\x0f\n\x07summary\x18\x06 \x01(\t\x12\x12\n\nhighlights\x18\x07 \x03(\t\"\x8d\x01\n\tVolunteer\x12\x14\n\x0corganization\x18\x01 \x01(\t\x12\x10\n\x08position\x18\x02 \x01(\t\x12\x0f\n\x07website\x18\x03 \x01(\t\x12\x11\n\tstartDate\x18\x04 \x01(\t\x12\x0f\n\x07\x65ndDate\x18\x05 \x01(\t\x12\x0f\n\x07summary\x18\x06 \x01(\t\x12\x12\n\nhighlights\x18\x07 \x03(\t\"\x83\x01\n\tEducation\x12\x13\n\x0binstitution\x18\x01 \x01(\t\x12\x0c\n\x04\x61rea\x18\x02 \x01(\t\x12\x11\n\tstudyType\x18\x03 \x01(\t\x12\x11\n\tstartDate\x18\x04 \x01(\t\x12\x0f\n\x07\x65ndDate\x18\x05 \x01(\t\x12\x0b\n\x03gpa\x18\x06 \x01(\t\x12\x0f\n\x07\x63ourses\x18\x07 \x03(\t\"F\n\x05\x41ward\x12\r\n\x05title\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61te\x18\x02 \x01(\t\x12\x0f\n\x07\x61warder\x18\x03 \x01(\t\x12\x0f\n\x07summary\x18\x04 \x01(\t\"e\n\x0bPublication\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tpublisher\x18\x02 \x01(\t\x12\x13\n\x0breleaseDate\x18\x03 \x01(\t\x12\x0f\n\x07website\x18\x04 \x01(\t\x12\x0f\n\x07summary\x18\x05 \x01(\t\"6\n\x05Skill\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05level\x18\x02 \x01(\t\x12\x10\n\x08keywords\x18\x03 \x03(\t\"-\n\x08Language\x12\x10\n\x08language\x18\x01 \x01(\t\x12\x0f\n\x07\x66luency\x18\x02 \x01(\t\"*\n\x08Interest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08keywords\x18\x02 \x03(\t\",\n\tReference\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\treference\x18\x02 \x01(\t\"\xa2\x02\n\x04Main\x12\x17\n\x06\x62\x61sics\x18\x01 \x01(\x0b\x32\x07.Basics\x12\x13\n\x04work\x18\x02 \x03(\x0b\x32\x05.Work\x12\x1d\n\tvolunteer\x18\x03 \x03(\x0b\x32\n.Volunteer\x12\x1d\n\teducation\x18\x04 \x03(\x0b\x32\n.Education\x12\x16\n\x06\x61wards\x18\x05 \x03(\x0b\x32\x06.Award\x12\"\n\x0cpublications\x18\x06 \x03(\x0b\x32\x0c.Publication\x12\x16\n\x06skills\x18\x07 \x03(\x0b\x32\x06.Skill\x12\x1c\n\tlanguages\x18\x08 \x03(\x0b\x32\t.Language\x12\x1c\n\tinterests\x18\t \x03(\x0b\x32\t.Interest\x12\x1e\n\nreferences\x18\n \x03(\x0b\x32\n.Referenceb\x06proto3'
)
_LOCATION = _descriptor.Descriptor(
name='Location',
full_name='Location',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='Location.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postalCode', full_name='Location.postalCode', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='city', full_name='Location.city', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='countryCode', full_name='Location.countryCode', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='region', full_name='Location.region', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=114,
)
_PROFILE = _descriptor.Descriptor(
name='Profile',
full_name='Profile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='network', full_name='Profile.network', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='username', full_name='Profile.username', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='Profile.url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=173,
)
_BASICS = _descriptor.Descriptor(
name='Basics',
full_name='Basics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Basics.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label', full_name='Basics.label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='picture', full_name='Basics.picture', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='Basics.email', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='phone', full_name='Basics.phone', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='website', full_name='Basics.website', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='Basics.summary', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location', full_name='Basics.location', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='profiles', full_name='Basics.profiles', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=351,
)
_WORK = _descriptor.Descriptor(
name='Work',
full_name='Work',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='company', full_name='Work.company', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position', full_name='Work.position', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='website', full_name='Work.website', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='startDate', full_name='Work.startDate', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='endDate', full_name='Work.endDate', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='Work.summary', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='highlights', full_name='Work.highlights', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=485,
)
_VOLUNTEER = _descriptor.Descriptor(
name='Volunteer',
full_name='Volunteer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='organization', full_name='Volunteer.organization', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position', full_name='Volunteer.position', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='website', full_name='Volunteer.website', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='startDate', full_name='Volunteer.startDate', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='endDate', full_name='Volunteer.endDate', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='Volunteer.summary', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='highlights', full_name='Volunteer.highlights', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=488,
serialized_end=629,
)
_EDUCATION = _descriptor.Descriptor(
name='Education',
full_name='Education',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='institution', full_name='Education.institution', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='area', full_name='Education.area', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='studyType', full_name='Education.studyType', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='startDate', full_name='Education.startDate', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='endDate', full_name='Education.endDate', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gpa', full_name='Education.gpa', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='courses', full_name='Education.courses', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=632,
serialized_end=763,
)
_AWARD = _descriptor.Descriptor(
name='Award',
full_name='Award',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='Award.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='date', full_name='Award.date', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='awarder', full_name='Award.awarder', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='Award.summary', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=765,
serialized_end=835,
)
_PUBLICATION = _descriptor.Descriptor(
name='Publication',
full_name='Publication',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Publication.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='publisher', full_name='Publication.publisher', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='releaseDate', full_name='Publication.releaseDate', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='website', full_name='Publication.website', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='Publication.summary', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=837,
serialized_end=938,
)
_SKILL = _descriptor.Descriptor(
name='Skill',
full_name='Skill',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Skill.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='level', full_name='Skill.level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keywords', full_name='Skill.keywords', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=994,
)
_LANGUAGE = _descriptor.Descriptor(
name='Language',
full_name='Language',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='language', full_name='Language.language', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fluency', full_name='Language.fluency', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=996,
serialized_end=1041,
)
_INTEREST = _descriptor.Descriptor(
name='Interest',
full_name='Interest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Interest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keywords', full_name='Interest.keywords', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1043,
serialized_end=1085,
)
_REFERENCE = _descriptor.Descriptor(
name='Reference',
full_name='Reference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Reference.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reference', full_name='Reference.reference', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1087,
serialized_end=1131,
)
_MAIN = _descriptor.Descriptor(
name='Main',
full_name='Main',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='basics', full_name='Main.basics', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='work', full_name='Main.work', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='volunteer', full_name='Main.volunteer', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='education', full_name='Main.education', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='awards', full_name='Main.awards', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='publications', full_name='Main.publications', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='skills', full_name='Main.skills', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='languages', full_name='Main.languages', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='interests', full_name='Main.interests', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='references', full_name='Main.references', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1134,
serialized_end=1424,
)
_BASICS.fields_by_name['location'].message_type = _LOCATION
_BASICS.fields_by_name['profiles'].message_type = _PROFILE
_MAIN.fields_by_name['basics'].message_type = _BASICS
_MAIN.fields_by_name['work'].message_type = _WORK
_MAIN.fields_by_name['volunteer'].message_type = _VOLUNTEER
_MAIN.fields_by_name['education'].message_type = _EDUCATION
_MAIN.fields_by_name['awards'].message_type = _AWARD
_MAIN.fields_by_name['publications'].message_type = _PUBLICATION
_MAIN.fields_by_name['skills'].message_type = _SKILL
_MAIN.fields_by_name['languages'].message_type = _LANGUAGE
_MAIN.fields_by_name['interests'].message_type = _INTEREST
_MAIN.fields_by_name['references'].message_type = _REFERENCE
DESCRIPTOR.message_types_by_name['Location'] = _LOCATION
DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
DESCRIPTOR.message_types_by_name['Basics'] = _BASICS
DESCRIPTOR.message_types_by_name['Work'] = _WORK
DESCRIPTOR.message_types_by_name['Volunteer'] = _VOLUNTEER
DESCRIPTOR.message_types_by_name['Education'] = _EDUCATION
DESCRIPTOR.message_types_by_name['Award'] = _AWARD
DESCRIPTOR.message_types_by_name['Publication'] = _PUBLICATION
DESCRIPTOR.message_types_by_name['Skill'] = _SKILL
DESCRIPTOR.message_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.message_types_by_name['Interest'] = _INTEREST
DESCRIPTOR.message_types_by_name['Reference'] = _REFERENCE
DESCRIPTOR.message_types_by_name['Main'] = _MAIN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), {
'DESCRIPTOR' : _LOCATION,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Location)
})
_sym_db.RegisterMessage(Location)
Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), {
'DESCRIPTOR' : _PROFILE,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Profile)
})
_sym_db.RegisterMessage(Profile)
Basics = _reflection.GeneratedProtocolMessageType('Basics', (_message.Message,), {
'DESCRIPTOR' : _BASICS,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Basics)
})
_sym_db.RegisterMessage(Basics)
Work = _reflection.GeneratedProtocolMessageType('Work', (_message.Message,), {
'DESCRIPTOR' : _WORK,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Work)
})
_sym_db.RegisterMessage(Work)
Volunteer = _reflection.GeneratedProtocolMessageType('Volunteer', (_message.Message,), {
'DESCRIPTOR' : _VOLUNTEER,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Volunteer)
})
_sym_db.RegisterMessage(Volunteer)
Education = _reflection.GeneratedProtocolMessageType('Education', (_message.Message,), {
'DESCRIPTOR' : _EDUCATION,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Education)
})
_sym_db.RegisterMessage(Education)
Award = _reflection.GeneratedProtocolMessageType('Award', (_message.Message,), {
'DESCRIPTOR' : _AWARD,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Award)
})
_sym_db.RegisterMessage(Award)
Publication = _reflection.GeneratedProtocolMessageType('Publication', (_message.Message,), {
'DESCRIPTOR' : _PUBLICATION,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Publication)
})
_sym_db.RegisterMessage(Publication)
Skill = _reflection.GeneratedProtocolMessageType('Skill', (_message.Message,), {
'DESCRIPTOR' : _SKILL,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Skill)
})
_sym_db.RegisterMessage(Skill)
Language = _reflection.GeneratedProtocolMessageType('Language', (_message.Message,), {
'DESCRIPTOR' : _LANGUAGE,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Language)
})
_sym_db.RegisterMessage(Language)
Interest = _reflection.GeneratedProtocolMessageType('Interest', (_message.Message,), {
'DESCRIPTOR' : _INTEREST,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Interest)
})
_sym_db.RegisterMessage(Interest)
Reference = _reflection.GeneratedProtocolMessageType('Reference', (_message.Message,), {
'DESCRIPTOR' : _REFERENCE,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Reference)
})
_sym_db.RegisterMessage(Reference)
Main = _reflection.GeneratedProtocolMessageType('Main', (_message.Message,), {
'DESCRIPTOR' : _MAIN,
'__module__' : 'schema_pb2'
# @@protoc_insertion_point(class_scope:Main)
})
_sym_db.RegisterMessage(Main)
# @@protoc_insertion_point(module_scope)
|
import unittest
import EvenFibonacciNumbers
class TestEvenFib(unittest.TestCase):
def test_base(self):
# 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987
self.assertEqual(EvenFibonacciNumbers.sum_even_fib(55), 2+8+34)
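    def test_limit_between_even_terms(self):
        # Added hedged check (assumes the same limit semantics as the test above):
        # the next even Fibonacci number is 144, so a limit of 100 yields the same sum.
        self.assertEqual(EvenFibonacciNumbers.sum_even_fib(100), 2+8+34)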
if __name__ == '__main__':
unittest.main()
|
from models.contact import Contact
import random
import string
import getopt
import sys
import os.path
import jsonpickle
__author__ = 'pzqa'
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number-of-contacts=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)
n = 5
f = "data/contact.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
contact_empty = [Contact(f_name='', m_name='', l_name='', n_name='',
title='', company='', address_one='',
home='', mobile='', work='',
fax='', email_one='',
email_two='', email_three='',
homepage='', bday='-', bmonth='-', byear='',
aday='-', amonth='-', ayear='',
address_two='', phone_two='',
notes='')]
contact_random = [Contact(f_name=random_string("f_name", 10), m_name=random_string("m_name", 10),
l_name=random_string("l_name", 10), n_name=random_string("n_name", 10),
title=random_string("title", 10), company=random_string("company", 10),
address_one=random_string("address_one", 10),
home=random_string("home", 10), mobile=random_string("mobile", 10),
work=random_string("work", 10), fax=random_string("fax", 10),
email_one=random_string("email_one", 10),
email_two=random_string("email_two", 10),
email_three=random_string("email_three", 10),
homepage=random_string("homepage", 10), bday='-', bmonth='-', byear='',
aday='-', amonth='-', ayear='',
address_two=random_string("address_two", 10),
phone_two=random_string("phone_two", 12),
notes=random_string("notes", 50)) for i in range(n)]
test_data = contact_empty + contact_random
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(test_data))
|
from browser import document as doc
from browser import window
from browser import alert
from browser.html import *
# globals #########################
refr = False
geo = window.navigator.geolocation
watchid = 0
img = doc["world_map"]
container = doc["container"]
print(img.abs_left, img.abs_top)
projection = window.Robinson.new(img.offsetWidth, img.offsetHeight)
# functions ###########################
def navi(pos):
xyz = pos.coords
ul = UL(id="nav")
ul <= LI('lat: %s' % xyz.latitude)
ul <= LI('lon: %s' % xyz.longitude)
point = projection.project(xyz.latitude, xyz.longitude)
print("point", point.x, point.y)
x = img.abs_left + int(img.offsetWidth / 2) + int(point.x)
y = img.abs_top + int(img.offsetHeight / 2) - int(point.y)
print(x, y)
div = DIV("x", style={"position": "absolute",
"top": y,
"left": x,
"background-color": "red",
"zIndex": 99})
container <= div
def nonavi(error):
print(error)
def navirefresh(ev):
global refr, watchid
    refr = not refr
    if refr:
doc["switch"].className = "switch on"
watchid = geo.watchPosition(navi, nonavi)
else:
doc["switch"].className = "switch"
geo.clearWatch(watchid)
# the setup
if geo:
geo.getCurrentPosition(navi, nonavi)
doc["switch"].className = "switch"
doc["switch"].bind('click', navirefresh)
else:
alert('geolocation not supported')
|
import pandas as pd
# a list of strings
x = ['Python', 'Pandas','numpy']
# Calling DataFrame constructor on list
df = pd.DataFrame(x)
print(df)
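# A hedged follow-up sketch: the same list with an explicit column name,
# so the printed frame shows a labelled header instead of the default 0.
df_named = pd.DataFrame(x, columns=['package'])
print(df_named)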
|
"""Top level for tools."""
from .autocorrelation import compute_morans_i
from .branch_length_estimator import IIDExponentialBayesian, IIDExponentialMLE
from .coupling import compute_evolutionary_coupling
from .parameter_estimators import (
estimate_missing_data_rates,
estimate_mutation_rate,
)
from .small_parsimony import fitch_count, fitch_hartigan, score_small_parsimony
from .topology import compute_cophenetic_correlation, compute_expansion_pvalues
from .tree_metrics import (
calculate_likelihood_continuous,
calculate_likelihood_discrete,
calculate_parsimony,
)
|
import sys
import argparse
import os.path
import numpy as np
import numpy.linalg
def norm(x):
return numpy.linalg.norm(x, ord=np.inf)
def main():
parser = argparse.ArgumentParser(
description='compare a numpy .npz file to a reference file')
parser.add_argument('data', metavar='A.npz', help='file to test')
parser.add_argument('refv', metavar='B.npz', help='reference file')
args = parser.parse_args()
if os.path.isdir(args.refv):
refv = os.path.join(args.refv, os.path.basename(args.data))
else:
refv = args.refv
try:
with np.load(args.data) as data, np.load(refv) as ref:
dataset = set(data.files)
refset = set(ref.files)
equalset = set()
for key in sorted(refset & dataset):
a = data[key]
b = ref[key]
if a.shape != b.shape:
print('* {}: different shape {} {}.'.format(
key, a.shape, b.shape))
continue
if np.all(a == b):
equalset.add(key)
else:
try:
err = norm(a-b)
except TypeError:
print('* {}: differ'.format(key))
else:
print('* {}: |diff|_inf = {}'.format(key, err))
if refset - dataset:
print('* missing vars in {}:'.format(args.data))
for k in sorted(refset - dataset):
print(' {}'.format(k))
if dataset - refset:
print('* extra vars in {}:'.format(args.data))
for k in sorted(dataset - refset):
print(' {}'.format(k))
if equalset == dataset:
print("Files '{}' and '{}' are identical.".format(
args.data, refv))
sys.exit(0)
else:
print('* other {} vars identical.'.format(len(equalset)))
sys.exit(1)
except IOError as exp:
print(exp)
if __name__ == '__main__':
main()
|
import asyncio
import datetime
import time
from discord.ext.tasks import loop
from discord.ext import commands
import asyncpraw
from asyncprawcore.exceptions import AsyncPrawcoreException
import asyncpraw.exceptions
from modules.reddit_feed.reddit_post import RedditPost
import config
# Reddit feed settings
CHECK_INTERVAL = 5 # seconds to wait before checking again
SUBMISSION_LIMIT = 5 # number of submissions to check
# initialize AsyncPraw reddit api
reddit = asyncpraw.Reddit(
client_id=config.CLIENT_ID,
client_secret=config.CLIENT_SECRET,
password=config.PASSWORD,
user_agent=f"{config.ME} Bot",
username=config.ME,
)
class RedditFeedCog(commands.Cog, name="Reddit Feed"):
"""Checks for `resend` command and starts Reddit feed loop to check submissions"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
"""When discord is connected"""
# Start Reddit loop
self.reddit_feed.start()
def is_in_guild(guild_id):
"""check that command is in a guild"""
async def predicate(ctx):
return ctx.guild and ctx.guild.id == guild_id
return commands.check(predicate)
@commands.command(name="resend")
@commands.has_permissions(administrator=True)
@is_in_guild(config.GUILD_ID)
async def resend(self, ctx):
"""Command to resend the last post again.
Invoked with !resend"""
# log command in console
print("Received resend command")
# respond to command
await ctx.send("Resending last announcement!")
# check for last submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=1):
# process submission
await RedditPost(self.bot, submission).process_post()
@loop(seconds=CHECK_INTERVAL)
async def reddit_feed(self):
"""loop every few seconds to check for new submissions"""
try:
# check for new submission in subreddit
subreddit = await reddit.subreddit(config.SUB)
async for submission in subreddit.new(limit=SUBMISSION_LIMIT):
# check if the post has been seen before
if not submission.saved:
# save post to mark as seen
await submission.save()
# process submission
await RedditPost(self.bot, submission).process_post()
except AsyncPrawcoreException as err:
print(f"EXCEPTION: AsyncPrawcoreException. {err}")
            await asyncio.sleep(10)
@reddit_feed.before_loop
async def reddit_feed_init(self):
"""print startup info before reddit feed loop begins"""
print(f"Logged in: {str(datetime.datetime.now())[:-7]}")
print(f"Timezone: {time.tzname[time.localtime().tm_isdst]}")
print(f"Subreddit: {config.SUB}")
print(f"Checking {SUBMISSION_LIMIT} posts every {CHECK_INTERVAL} seconds")
def setup(bot):
bot.add_cog(RedditFeedCog(bot))
|
from amadeus.client.decorator import Decorator
from amadeus.reference_data.urls._checkin_links import CheckinLinks
class Urls(Decorator, object):
def __init__(self, client):
Decorator.__init__(self, client)
self.checkin_links = CheckinLinks(client)
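# Hedged usage sketch (assumes the standard amadeus Client wiring), e.g.:
#   amadeus.reference_data.urls.checkin_links.get(airlineCode='BA')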
|
# Copyright 2011-2015 Chris Behrens
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Plugin Library"""
import functools
import os
import re
import sys
import time
import log
import utils
LOG = log.get_logger()
_plugin_instances = []
_plugin_hooks = {}
def _reset_variables():
"""Local function to init some variables that are common between
load and reload
"""
global _plugin_instances
global _plugin_hooks
_plugin_instances = []
_plugin_hooks = {}
for x in _hook_names:
_plugin_hooks[x] = []
def hook_add(hookname, arg, poll_timer=60):
"""Generic decorator to add hooks. Generally, this is not called
directly by plugins. Decorators that plugins use are automatically
generated below with the setattrs you'll see
"""
def wrap(f):
if hookname == "poll":
@utils.spawn
def _f(self, *args, **kwargs):
while True:
f(self, *args, **kwargs)
time.sleep(poll_timer)
setattr(_f, "_is_%s_hook" % hookname, True)
_f._hook_arg = arg
return _f
else:
setattr(f, "_is_%s_hook" % hookname, True)
f._hook_arg = arg
return f
return wrap
def hook_get(hookname):
"""Function to return the list of hooks of a particular type. Genearlly
this is not called directly. Callers tend to use the dynamically
generated calls 'hook_get_*' that are created below with the setattrs
"""
return _plugin_hooks[hookname]
def active_get(hookname):
"""Function to return the list of hook arguments. Genearlly
this is not called directly. Callers tend to use the dynamically
generated calls 'active_get_*' that are created below with the
setattrs
"""
return ", ".join(sorted([x[2] for x in _plugin_hooks[hookname]]))
_hook_names = ["keyword", "command", "msg_regex", "poll"]
_reset_variables()
_this_mod = sys.modules[__name__]
for x in _hook_names:
# Dynamically create the decorators and functions for various hooks
setattr(_this_mod, "hook_add_%s" % x, functools.partial(hook_add, x))
setattr(_this_mod, "hook_get_%ss" % x, functools.partial(hook_get, x))
setattr(_this_mod, "active_%ss" % x, functools.partial(active_get, x))
class PluginMetaClass(type):
"""The metaclass that makes all of the plugin magic work. All subclassing
gets caught here, which we can use to have plugins automagically
register themselves
"""
def __init__(cls, name, bases, attrs):
"""Catch subclassing. If the class doesn't yet have _plugin_classes,
it means it's the Plugin class itself, otherwise it's a class
that's been subclassed from Plugin (ie, a real plugin class)
"""
if not hasattr(cls, "_plugin_classes"):
cls._plugin_classes = []
else:
cls._plugin_classes.append(cls)
cls.__name__ = name
class Plugin(object):
"""The class that all plugin classes should inherit from"""
__metaclass__ = PluginMetaClass
def __init__(self, session, *args, **kwargs):
"""Default constructor for Plugin. Stores the client instance, etc"""
self.session = session
self.name = self.__class__.__name__
def _init_plugins(*args, **kwargs):
"""Create instances of the plugin classes and create a cache
of their hook functions
"""
for cls in Plugin._plugin_classes:
# Create instance of 'p'
instance = cls(*args, **kwargs)
# Store the instance
_plugin_instances.append(instance)
# Setup _keyword_hooks by looking at all of the attributes
# in the class and finding the ones that have a _is_*_hook
# attribute
for attr_name in dir(instance):
attr = getattr(instance, attr_name)
for hook_key in _hook_names:
if getattr(attr, "_is_%s_hook" % hook_key, False):
hook_arg = getattr(attr, "_hook_arg", None)
# Append (module, method, arg) tuple
_plugin_hooks[hook_key].append((attr.__module__, attr,
hook_arg))
def load_user_plugin(plugin, *args, **kwargs):
"""Load a user plugin"""
sys.path.append(utils.get_home_directory() + "plugins")
user_plugins = os.listdir(utils.get_directory("plugins"))
for user_plugin in user_plugins:
if user_plugin.endswith(".py"):
user_plugin = user_plugin[:-3]
if plugin == user_plugin:
try:
__import__(plugin, globals(), locals(), [plugin])
except Exception, exc:
LOG.error(exc)
def load_plugins(*args, **kwargs):
"""Module function that loads plugins from a particular directory"""
config = utils.get_config()
plugin_names = config.get("plugins", type="list")
for plugin_name in plugin_names:
load_user_plugin(plugin_name, *args, **kwargs)
try:
__import__("pyhole.plugins", globals(), locals(), [plugin_name])
except Exception, exc:
LOG.error(exc)
_init_plugins(*args, **kwargs)
def reload_plugins(*args, **kwargs):
"""Module function that'll reload all of the plugins"""
config = utils.get_config()
# Terminate running poll instances
for plugin in _plugin_instances:
for attr_name in dir(plugin):
attr = getattr(plugin, attr_name)
if getattr(attr, "_is_poll_hook", False):
# TODO(jk0): Doing this kills the entire process. We need to
# figure out how to kill it properly. Until this is done,
# reloading will not work with polls.
# attr().throw(KeyboardInterrupt)
pass
# When the modules are reloaded, the meta class will append
# all of the classes again, so we need to make sure this is empty
Plugin._plugin_classes = []
_reset_variables()
# Now reload all of the plugins
plugins_to_reload = []
plugindir = "pyhole.plugins"
local_plugin_dir = utils.get_home_directory() + "plugins"
# Reload existing plugins
for mod, val in sys.modules.items():
if plugindir in mod and val and mod != plugindir:
mod_file = val.__file__
if not os.path.isfile(mod_file):
continue
for p in config.get("plugins", type="list"):
if plugindir + "." + p == mod:
plugins_to_reload.append(mod)
if local_plugin_dir in str(val):
plugins_to_reload.append(mod)
for plugin in plugins_to_reload:
try:
reload(sys.modules[plugin])
except Exception, exc:
LOG.error(exc)
# Load new plugins
load_plugins(*args, **kwargs)
def active_plugins():
"""Get the loaded plugin names"""
return ", ".join(sorted([x.__name__ for x in Plugin._plugin_classes]))
def active_plugin_classes():
"""Get the loaded plugin classes"""
return Plugin._plugin_classes
def run_hook_command(session, mod_name, func, message, arg, **kwargs):
"""Make a call to a plugin hook."""
try:
if arg:
session.log.debug("Calling: %s.%s(\"%s\")" % (mod_name,
func.__name__, arg))
else:
session.log.debug("Calling: %s.%s(None)" % (mod_name,
func.__name__))
func(message, arg, **kwargs)
except Exception, exc:
session.log.exception(exc)
def run_hook_polls(session):
"""Run polls in the background."""
message = None
for mod_name, func, cmd in hook_get_polls():
run_hook_command(session, mod_name, func, message, cmd)
def run_msg_regexp_hooks(session, message, private):
"""Run regexp hooks."""
msg = message.message
for mod_name, func, msg_regex in hook_get_msg_regexs():
match = re.search(msg_regex, msg, re.I)
if match:
run_hook_command(session, mod_name, func, message, match,
private=private)
def run_keyword_hooks(session, message, private):
"""Run keyword hooks."""
msg = message.message
words = msg.split(" ")
for mod_name, func, kwarg in hook_get_keywords():
for word in words:
match = re.search("^%s(.+)" % kwarg, word, re.I)
if match:
run_hook_command(session, mod_name, func, message,
match.group(1), private=private)
def run_command_hooks(session, message, private):
"""Run command hooks."""
msg = message.message
for mod_name, func, cmd in hook_get_commands():
session.addressed = False
if private:
match = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), msg,
re.I)
if match:
run_hook_command(session, mod_name, func, message,
match.group(1), private=private,
addressed=session.addressed)
if msg.startswith(session.command_prefix):
# Strip off command prefix
msg_rest = msg[len(session.command_prefix):]
else:
# Check for command starting with nick being addressed
msg_start_upper = msg[:len(session.nick) + 1].upper()
if msg_start_upper == session.nick.upper() + ":":
# Get rest of string after "nick:" and white spaces
msg_rest = re.sub("^\s+", "",
msg[len(session.nick) + 1:])
else:
continue
session.addressed = True
match = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), msg_rest, re.I)
if match:
run_hook_command(session, mod_name, func, message, match.group(1),
private=private,
addressed=session.addressed)
def poll_messages(session, message, private=False):
"""Watch for known commands."""
session.addressed = False
run_command_hooks(session, message, private)
run_keyword_hooks(session, message, private)
run_msg_regexp_hooks(session, message, private)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.distributions.flat import Flat
from beanmachine.ppl.distributions.unit import Unit
__all__ = ["Flat", "Unit"]
|
from generators.common.Helper import AttributeKind
from .MakoStaticClassGenerator import MakoStaticClassGenerator
class MakoTypeGenerator(MakoStaticClassGenerator):
"""
Generic Mako generator for atomic type schemas.
"""
def __init__(self, helper, name: str, schema, class_schema, template_path: str, file_extension: str):
super().__init__(template_path + 'Type.mako',
helper.get_generated_class_name(name, class_schema, schema) + file_extension, helper, schema,
class_schema)
class_schema['name'] = name[0].lower() + name[1:]
self.name = name
self.attribute_name = self.class_schema['name']
self.size = self.class_schema['size']
self.generated_class_name = helper.get_generated_class_name(name, class_schema, schema)
self.constructor_param_type = helper.get_generated_type(self.schema, self.class_schema)
self.attribute_kind = helper.get_attribute_kind(self.class_schema)
self.attribute_type = helper.get_generated_type(self.schema, self.class_schema)
self.comments = helper.get_comments_from_attribute(self.class_schema)
self.AttributeKind = AttributeKind
|
from .linear_covariance import LinearCovariance
from .linear import LinearDuo, LinearNoRho
|
'''This module contains a wrapper for the mlflow logging framework. When used in the deep
learning tools, it will automatically log the local and global parameters of the training run,
like architecture, learning rate etc.
Mlflow will automatically create a folder called mlruns with the results.
If you want a graphical overview use 'mlflow ui' (In a shell, not python).
It will run a server on default port 5000, which you can access in browser.
Usage:
from utils.logging.log import Log, Autolog
logger = Log(training_config)
logger.log_hyperparameters({'Architecture': 'CNN', 'Learning rate': 0.001})
logger.log_metric('Loss', 0.1)
or
with Log(training_config) as logger:
logger.log_metric('Loss', 0.1)
'''
import mlflow
from config.config import global_config
class Log():
def __init__(self,
train_config: dict = None,
run_id: str = None,
experiment_id: str = None,
run_name: str = None,
nested: bool = False) -> None:
mlflow.end_run()
mlflow.start_run(run_id=run_id,
experiment_id=experiment_id,
run_name=run_name,
nested=nested)
self.log_hyperparameters(global_config)
if train_config:
for key in train_config:
if key == 'MODEL':
mlflow.log_param(key, type(train_config[key]))
else:
mlflow.log_param(key, train_config[key])
    def log_hyperparameters(self, params: dict, *args) -> None:
        mlflow.log_params(params, *args)
    def log_metric(self, key: str, value: float, *args) -> None:
        mlflow.log_metric(key, value, *args)
    def log_metrics(self, metrics: dict, *args) -> None:
        mlflow.log_metrics(metrics, *args)
def access_run(self):
return mlflow.active_run()
def end_run(self) -> None:
mlflow.end_run()
def __enter__(self) -> None:
return self
def __exit__(self, exception_type, exception_value, traceback) -> None:
self.end_run()
def help(self) -> str:
return """This Log module is a wrapper of mlflow.
The Hyperparameters are values that differentiate the runs from each other and do not change during a run.
Like the batch size.
Metrics are values that change during the run. Like the loss value.
There are versions of log_param() and log_metric() that log multiple values in a dictionary at once.
They are called log_params() and log_metrics() (s appended).
It makes sense to call the Log Object including the training_config provided in the training scripts.
Most important parameters are already included there and are useful to be logged.
If you want to view the logged results you should start `mlflow ui` in a terminal.
Then you can access it via your browser. The default port is 5000.
The runs will be saved in a directory called mlruns which will be created in the directory where you
start the training run.
For more information check out the mlflow documentation."""
|
class Question:
def __init__(self,q_text,q_ans):
self.text = q_text
self.answer = q_ans |
###############################################################################
#
# Package: NetMsgs
#
# File: NetMsgsBase.py
#
"""
NetMsgs Base Data Module
"""
## \file
## \package NetMsgs.NetMsgsBase
##
## $LastChangedDate: 2012-07-23 14:06:10 -0600 (Mon, 23 Jul 2012) $
## $Rev: 2098 $
##
## \brief NetMsgs Base Data Module
##
## \sa
## \htmlonly
## <a href="../pydoc/NetMsgs.NetMsgsBase.html">PyDoc Generated Documentation</a>
## \endhtmlonly
##
## \author Robin Knight ([email protected])
##
## \copyright
## \h_copy 2009-2017. RoadNarrows LLC.\n
## http://www.roadnarrows.com\n
## All Rights Reserved
##
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
###############################################################################
import sys
from NetMsgs.NetMsgsCore import *
## Message Encoding Type Enumeration
NMEncoding = ['flat', 'itv'] # future , 'cli']
## Message Byte Ordering Type Enumeration
NMEndian = ['big', 'little', 'native']
##
## Built-In message field types, keyed by XML field type name.
## code - message field type byte code
## desc - short description
## flen - packed message field length (bytes)
## comp - complexity. one of: simple compound na
## T - C/C++ type specifier
## pre - member name prefix (quasi-Hungarian)
##
NMBuiltInFieldTypes = {
'pad': { # not really a field type
'code': NMFTypePad,
'desc': "pad byte",
'flen': 1,
'comp': 'na',
'T': '',
'pre': '',
},
'char': {
'code': NMFTypeChar,
'desc': "8-bit character",
'flen': NMFVAL_LEN_CHAR,
'comp': 'simple',
'T': 'char',
'pre': 'c',
},
'u8': {
'code': NMFTypeU8,
'desc': "unsigned 8-bit integer",
'flen': NMFVAL_LEN_U8,
'comp': 'simple',
'T': 'byte_t',
'pre': 'by',
},
's8': {
'code': NMFTypeS8,
'desc': "signed 8-bit integer",
'flen': NMFVAL_LEN_S8,
'comp': 'simple',
'T': 'signed char',
'pre': 'hhi',
},
'bool': {
'code': NMFTypeBool,
'desc': "boolean 0=false, 1(non-zero)=true",
'flen': NMFVAL_LEN_BOOL,
'comp': 'simple',
'T': 'bool_t',
'pre': 'b',
},
'u16': {
'code': NMFTypeU16,
'desc': "unsigned 16-bit integer",
'flen': NMFVAL_LEN_U16,
'comp': 'simple',
'T': 'ushort_t',
'pre': 'hu',
},
's16': {
'code': NMFTypeS16,
'desc': "signed 16-bit integer",
'flen': NMFVAL_LEN_S16,
'comp': 'simple',
'T': 'short',
'pre': 'hi',
},
'u32': {
'code': NMFTypeU32,
'desc': "unsigned 32-bit integer",
'flen': NMFVAL_LEN_U32,
'comp': 'simple',
'T': 'uint_t',
'pre': 'u',
},
's32': {
'code': NMFTypeS32,
'desc': "signed 32-bit integer",
'flen': NMFVAL_LEN_S32,
'comp': 'simple',
'T': 'int',
'pre': 'i',
},
'u64': {
'code': NMFTypeU64,
'desc': "unsigned 64-bit integer",
'flen': NMFVAL_LEN_U64,
'comp': 'simple',
'T': 'ulonglong_t',
'pre': 'llu',
},
's64': {
'code': NMFTypeS64,
'desc': "signed 64-bit integer",
'flen': NMFVAL_LEN_S64,
'comp': 'simple',
'T': 'long long',
'pre': 'lli',
},
'f32': {
'code': NMFTypeF32,
'desc': "32-bit floating-point number",
'flen': NMFVAL_LEN_F32,
'comp': 'simple',
'T': 'float',
'pre': 'hf',
},
'f64': {
'code': NMFTypeF64,
'desc': "64-bit floating-point number",
'flen': NMFVAL_LEN_F64,
'comp': 'simple',
'T': 'double',
'pre': 'f',
},
'p32': {
'code': NMFTypeP32,
'desc': "32-bit pointer",
'flen': NMFVAL_LEN_P32,
'comp': 'simple',
'T': 'void *',
'pre': 'p',
},
'p64': {
'code': NMFTypeP64,
'desc': "64-bit pointer",
'flen': NMFVAL_LEN_P64,
'comp': 'simple',
'T': 'void *',
'pre': 'p',
},
'string': {
'code': NMFTypeString,
'desc': "char[] string",
'flen': 'variable',
'comp': 'compound',
'T': 'char',
'pre': 's',
},
'struct': {
'code': NMFTypeStruct,
'desc': "structure",
'flen': 'variable',
'comp': 'compound',
'T': 'struct',
'pre': 'st',
},
'vector': {
'code': NMFTypeVector,
'desc': "vector - one dimensional array",
'flen': 'variable',
'comp': 'compound',
'T': '',
'pre': 'vec',
},
}
##
#
# Aliases
#
NMBuiltInFieldTypes['byte'] = NMBuiltInFieldTypes['u8']
NMBuiltInFieldTypes['schar'] = NMBuiltInFieldTypes['s8']
NMBuiltInFieldTypes['ushort'] = NMBuiltInFieldTypes['u16']
NMBuiltInFieldTypes['short'] = NMBuiltInFieldTypes['s16']
NMBuiltInFieldTypes['uint'] = NMBuiltInFieldTypes['u32']
NMBuiltInFieldTypes['int'] = NMBuiltInFieldTypes['s32']
NMBuiltInFieldTypes['ulonglong'] = NMBuiltInFieldTypes['u64']
NMBuiltInFieldTypes['longlong'] = NMBuiltInFieldTypes['s64']
NMBuiltInFieldTypes['pointer'] = NMBuiltInFieldTypes['p32']
NMBuiltInFieldTypes['longpointer'] = NMBuiltInFieldTypes['p64']
NMBuiltInFieldTypes['float'] = NMBuiltInFieldTypes['f32']
NMBuiltInFieldTypes['double'] = NMBuiltInFieldTypes['f64']
## Get NetMsgs field type code given the XML field type.
NMFCode = lambda xmlftype: NMBuiltInFieldTypes[xmlftype]['code']
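# For example, NMFCode('u16') evaluates to NMBuiltInFieldTypes['u16']['code'],
# i.e. the NMFTypeU16 byte code defined in NetMsgsCore.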
## The full set of XML ftype values
NMAliasMap = {
  'byte': 'u8', 'schar': 's8', 'ushort': 'u16', 'short': 's16',
'uint': 'u32', 'int': 's32', 'ulonglong': 'u64', 'longlong': 's64',
'pointer': 'p32', 'longpointer': 'p64', 'float': 'f32', 'double': 'f64',
}
## Special DB dictionary order key
NMKeyOrder = '>'
## Special DB pad field key
NMKeyPad = '#'
## XML ftype attribute vector suffix string
NMVectorSuffix = '[]'
## List of simple field types by XML ftype
NMFTypeSimple = [
'char', 'u8', 's8', 'bool', 'u16', 's16', 'u32', 's32', 'u64',
's64', 'f32', 'f64', 'p32', 'p64', ]
## List of simple field types by field type code
NMFTypeCodeSimple = [
NMFCode('char'), NMFCode('u8'), NMFCode('s8'), NMFCode('bool'),
NMFCode('u16'), NMFCode('s16'), NMFCode('u32'), NMFCode('s32'),
NMFCode('u64'), NMFCode('s64'), NMFCode('f32'), NMFCode('f64'),
NMFCode('p32'), NMFCode('p64') ]
## List of compound field types by XML ftype
NMFTypeCompound = [ 'string', 'struct', 'vector' ]
## List of compound field types by field type code
NMFTypeCodeCompound = [NMFCode('string'), NMFCode('struct'), NMFCode('vector')]
## List of number field types by XML ftype
NMFTypeNumber = [
'u8', 's8', 'u16', 's16', 'u32', 's32', 'u64', 's64', 'f32', 'f64' ]
## Field type code to XML file type map
NMFTypeCode2Xml = {
NMFCode('bool'): 'bool', NMFCode('char'): 'char',
NMFCode('u8'): 'u8', NMFCode('s8'): 's8',
NMFCode('u16'): 'u16', NMFCode('s16'): 's16',
NMFCode('u32'): 'u32', NMFCode('s32'): 's32',
NMFCode('u64'): 'u64', NMFCode('s64'): 's64',
NMFCode('f32'): 'f32', NMFCode('f64'): 'f64',
NMFCode('p32'): 'p32', NMFCode('p64'): 'p64',
NMFCode('string'): 'string', NMFCode('pad'): 'pad',
NMFCode('struct'): 'struct', NMFCode('vector'): 'vector'
}
## Field Header Lengths keyed by message encoding
NMFHdrLen = {
'flat': {'simple': 0, 'string': 0, 'struct': 0, 'vector': 0},
'itv': {
'simple': NMITV_FHDR_SIZE_SIMPLE, 'string': NMITV_FHDR_SIZE_STRING,
'struct': NMITV_FHDR_SIZE_STRUCT, 'vector': NMITV_FHDR_SIZE_VECTOR},
}
## No field id value
NMFIdNone = NMFID_NONE
## Default pad count
NMPadDftCount = 1
## Pad field value
NMPadFVal = NMFTypePadTr
## Maximum and default string maximum length
NMStringMaxCount = NMFVAL_LEN_MAX_STRING
## Maximum and default vector maximum item count
NMVectorMaxCount = NMFVAL_LEN_MAX_VECTOR
## space quickie
space = lambda indent: "%*s" % (indent, '')
#--
def StrError(ecode):
""" Get the error string describing the NetMsgs error code.
      The absolute value of the error code is taken prior to retrieving the
string. An unknown or out-of-range error code will be mapped to
NM_ECODE_BADEC.
Parameters:
ecode - NetMsgs error code.
Return:
The appropriate error code string.
"""
sErr = nmStrError(ecode)
if not sErr:
sErr = 'Error'
return sErr
##
#------------------------------------------------------------------------------
# CLASS: NetMsgsError
#------------------------------------------------------------------------------
class NetMsgsError(Exception):
""" NetMsgs Exception Class. """
def __init__(self, msg='XML Parser Error'):
""" Raise exception.
Parameters:
msg - Exception message string.
"""
Exception.__init__(self, msg)
##
#-------------------------------------------------------------------------------
# Support Utilities
#-------------------------------------------------------------------------------
#--
def IsIdentifier(token):
""" Returns True if token is a valid identifier, else False.
Parameters:
token - Parsed token.
"""
if not token:
return False
c = token[0]
if not c.isalpha() and c != "_":
return False
for c in token[1:]:
if not c.isalnum() and c != "_":
return False
return True
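# For example, IsIdentifier("msg_id") is True, while IsIdentifier("2fast") and
# IsIdentifier("") are both False.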
##
#--
def PrettyPrintCols(fp, cursor, *args, **kwargs):
""" Pretty print argument strings aligned to column.
Parameters:
cursor - Current column cursor position.
args - List of argument (pos, s) 2-tuples.
kwargs - Print control keywords.
"""
while len(args) >= 2:
linecont = kwargs.get('linecont', '')
force = kwargs.get('force', False)
pos = args[0]
s = args[1]
args = args[2:]
if (pos <= cursor) and (cursor > 0):
if not force or cursor > 78:
fp.write("%s\n" % (linecont))
cursor = 0
else:
fp.write(" ")
cursor += 1
if pos > cursor:
fp.write(space(pos-cursor))
cursor = pos
fp.write("%s" % (s))
cursor += len(s)
return cursor
##
#--
def PrintBuf(buf, count=None, preface='', nlfreq=None, indent=0, col=0,
fp=sys.stderr):
""" Pretty print binary buffer to opened file stream.
Parameters:
buf - Buffer to print.
count - Number of bytes to print.
preface - Optional buffer preface string.
nlfreq - Newline frequency (None for no newlines).
ident - Indentation column alignment.
col - Current column position.
fp - Output file pointer.
"""
if preface:
s = "%s: " % (preface)
col += len(s)
fp.write(s)
if count is None:
count = len(buf)
if (count > 0) and (col < indent):
fp.write(space(indent-col))
i = 0
while i < count:
c = buf[i]
if nlfreq and ((i % nlfreq) == 0) and (i > 0):
fp.write("\n%s" % space(indent))
col = indent
fp.write("0x%02x " % (ord(c)))
i += 1
fp.write('\n')
##
#--
def PrintBits(val, msbit, count=None, preface='', fp=sys.stderr):
""" Pretty print bits in value to opened file stream.
Parameters:
val - Bits to print.
msbit - Starting most significant bit, zero based.
count - Number of bits to print (None = msbit+1).
preface - Optional buffer preface string.
fp - Output file pointer.
"""
if preface:
s = "%s: " % (preface)
fp.write(s)
if count is None:
count = msbit + 1
i = 0
while i < count:
if ((msbit % 8) == 7) and (i > 0):
fp.write(' ')
if (val >> msbit) & 0x01:
fp.write('1')
else:
fp.write('0')
msbit -= 1
i += 1
##
#--
def _atval(val):
""" Convert value to string equivalent.
String values starting with an '@' are treated as variables, not strings.
The '@' is stripped.
Parameters:
val - ['@']value.
Return:
Converted value.
"""
if (type(val) == str) and (len(val) > 0) and (val[0] == '@'):
return val[1:]
else:
return repr(val)
##
## return number of spaces given column position and indentation
_nspaces = lambda col,indent: col<indent and indent-col or 0
#--
def PrettyPrintAssignExpr(name, val, col=0, indent=0, fp=sys.stderr):
""" Pretty print name = value.
Parameters:
nam - Variable name
val - Variable value.
col - Current column position.
indent - Indentation.
fp - Opened file pointer.
"""
sp = _nspaces(col, indent)
lhs = "%s%s = " % (space(sp), name)
fp.write(lhs)
PrettyPrintVal(val, col=len(lhs), indent=indent, fp=fp)
fp.write('\n')
##
#--
def PrettyPrintVal(val, col=0, indent=0, fp=sys.stderr):
""" Pretty print value.
Parameters:
val - Variable value.
col - Current column position.
indent - Indentation.
fp - Opened file pointer.
Return:
New column position
"""
if type(val) == dict:
return PrettyPrintDict(val, col=col, indent=indent, fp=fp)
elif type(val) == list:
return PrettyPrintList(val, col=col, indent=indent, fp=fp)
else:
sp = _nspaces(col, indent)
v = _atval(val)
fp.write("%s%s" % (space(sp), v))
return col + sp + len(v)
##
#--
def PrettyPrintDict(d, col=0, indent=0, fp=sys.stderr):
""" Pretty print dictionary in sorted, indented clarity.
Parameters:
d - The dictionary.
col - Current column position.
indent - Indentation.
fp - Opened file pointer.
Return:
New column position
"""
sp = _nspaces(col, indent)
s = repr(d)
if col + sp + len(s) < 80:
fp.write('%s{' % space(sp))
col = col + sp + 1
for k in sorted(d):
key = _atval(k)
fp.write("%s:" % key)
col += len(key) + 1
col = PrettyPrintVal(d[k], col=col, indent=indent, fp=fp)
fp.write(', ')
col += 2
fp.write("}")
return col + 1
else:
fp.write('%s{\n' % space(sp))
col = 0
indent += 2
for k in sorted(d):
key = _atval(k)
key = "%s%s: " % (space(indent), key)
fp.write(key)
PrettyPrintVal(d[k], col=len(key), indent=indent, fp=fp)
fp.write(',\n')
indent -= 2
fp.write("%s}" % (space(indent)))
return indent + 1
##
#--
def PrettyPrintList(l, col=0, indent=0, fp=sys.stderr):
""" Pretty print list.
Parameters:
l - The list.
col - Current column position.
indent - Indentation.
fp - Opened file pointer.
Return:
New column position
"""
sp = _nspaces(col, indent)
issimple = True
for v in l:
if (type(v) == dict) or (type(v) == list):
issimple = False
break
if issimple:
fp.write('%s[ ' % space(sp))
col = col + sp + 2
for v in l:
      if col > 60: # heuristically 'safe'
fp.write('\n%s ' % space(indent))
col = col + sp + 2
col = PrettyPrintVal(v, col=col, indent=indent, fp=fp)
fp.write(', ')
col += 2
fp.write(' ]')
return col + 2
else:
fp.write('%s[\n' % space(sp))
col = 0
indent += 2
for v in l:
PrettyPrintVal(v, col=col, indent=indent, fp=fp)
fp.write(',\n')
indent -= 2
fp.write("%s]" % (space(indent)))
return indent + 1
##
|
#! /usr/bin/env python
import rospy
# Provides callback functions for the start and stop buttons
class NodeController(object):
'''
    Containing both proxy and gui instances, this class gives control of
a node on both ROS & GUI sides.
'''
# these values need to synch with member variables.
# Eg. self.gui isn't legal.
__slots__ = ['_proxy', '_gui']
def __init__(self, proxy, gui):
'''
@type proxy: rqt_launch.NodeProxy
@type gui: QWidget
'''
self._proxy = proxy
self._gui = gui
self._gui.set_node_controller(self)
def start_stop_slot(self, signal):
'''
Works as a slot particularly intended to work for
QAbstractButton::toggled(checked). Internally calls
NodeController.start / stop depending on `signal`.
@type signal: bool
'''
if self._proxy.is_running():
self.stop()
rospy.logdebug('---start_stop_slot stOP')
else:
self.start()
rospy.logdebug('==start_stop_slot StART')
def start(self, restart=True):
'''
Start a ROS node as a new _process.
'''
rospy.logdebug('Controller.start restart={}'.format(restart))
# Should be almost unreachable under current design where this 'start'
# method only gets called when _proxy.is_running() returns false.
if self._proxy.is_running():
if not restart:
# If the node is already running and restart isn't desired,
# do nothing further.
return
#TODO: Need to consider...is stopping node here
# (i.e. in 'start' method) good?
self.stop()
# If the launch_prefix has changed, then the _process must be recreated
if (self._proxy.config.launch_prefix !=
self._gui._lineEdit_launch_args.text()):
self._proxy.config.launch_prefix = \
self._gui._lineEdit_launch_args.text()
self._proxy.recreate_process()
self._gui.set_node_started(False)
self._gui.label_status.set_starting()
self._proxy.start_process()
self._gui.label_status.set_running()
self._gui.label_spawncount.setText("({})".format(
self._proxy.get_spawn_count()))
def stop(self):
'''
Stop a ROS node's _process.
'''
#TODO: Need to check if the node is really running.
#if self._proxy.is_running():
self._gui.set_node_started(True)
self._gui.label_status.set_stopping()
self._proxy.stop_process()
self._gui.label_status.set_stopped()
def check_process_status(self):
if self._proxy.has_died():
rospy.logerr("Process died: {}".format(
self._proxy.get_proc_name()))
self._proxy.stop_process()
self._gui.set_node_started(True)
if self._proxy._process.exit_code == 0:
self._gui.label_status.set_stopped()
else:
self._gui.label_status.set_died()
# Checks if it should be respawned
if self._gui.respawn_toggle.isChecked():
rospy.loginfo("Respawning _process: {}".format(
self._proxy._process.name))
self._gui.label_status.set_starting()
self._proxy.start_process()
self._gui.label_status.set_running()
self._gui.label_spawncount.setText("({})".format(
self._proxy._process.spawn_count))
def get_node_widget(self):
'''
@rtype: QWidget
'''
return self._gui
def is_node_running(self):
return self._proxy.is_running()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 09:25:17 2020
@author: krishan
"""
class Mammal(object):
def __init__(self):
print('warm-blooded animal.')
class Dog(Mammal):
def __init__(self):
print('Dog has four legs.')
d1 = Dog()
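# Note: this prints only 'Dog has four legs.' because Dog.__init__ overrides
# Mammal.__init__ without calling super().__init__().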
|
from django.contrib import admin
from kitsune.questions.models import QuestionLocale
class QuestionLocaleAdmin(admin.ModelAdmin):
list_display = ("locale",)
ordering = ("locale",)
filter_horizontal = ("products",)
admin.site.register(QuestionLocale, QuestionLocaleAdmin)
|
import cv2
import os
from tqdm import tqdm
import config
from utility import *
for _ in tqdm(os.listdir(config.TRAIN_IMAGES_DIR)):
_ = config.TRAIN_IMAGES_DIR + "/" + _
img = image_resize(_, config.MAX_PIXEL)
image_write(img, _.replace(config.TRAIN_IMAGES_DIR, config.RESIZED_TRAIN_DIR))
for _ in tqdm(os.listdir(config.TEST_IMAGES_DIR)):
try:
_ = config.TEST_IMAGES_DIR + "/" + _
img = image_resize(_, config.MAX_PIXEL)
image_write(img, _.replace(config.TEST_IMAGES_DIR, config.RESIZED_TEST_DIR))
except:
print(_)
for _ in tqdm(os.listdir(config.VALID_IMAGES_DIR)):
_ = config.VALID_IMAGES_DIR + "/" + _
img = image_resize(_, config.MAX_PIXEL)
image_write(img, _.replace(config.VALID_IMAGES_DIR, config.RESIZED_VALID_DIR)) |
#!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
try:
import CUBRIDdb
except:
pass
import logging
from lib.core.common import getSafeExString
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: https://github.com/CUBRID/cubrid-python
User guide: https://github.com/CUBRID/cubrid-python/blob/develop/README.md
API: https://www.python.org/dev/peps/pep-0249/
License: BSD License
"""
def connect(self):
self.initConnection()
try:
self.connector = CUBRIDdb.connect(hostname=self.hostname, username=self.user, password=self.password, database=self.db, port=self.port, connect_timeout=conf.timeout)
except CUBRIDdb.DatabaseError as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except CUBRIDdb.DatabaseError as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
return None
def execute(self, query):
try:
self.cursor.execute(query)
except CUBRIDdb.DatabaseError as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % getSafeExString(ex))
except CUBRIDdb.Error as ex:
raise SqlmapConnectionException(getSafeExString(ex))
self.connector.commit()
def select(self, query):
self.execute(query)
return self.fetchall()
|
import glob, os, re, sys
import stagger
from stagger.id3 import *
import shutil
ultimate_regex = [
'''# Shit
^
(?P<Artist>.+)
\s-\s
(?P<ReleaseYear>\d{4})
\.
(?P<ReleaseMonth>\d{2})
\.
(?P<ReleaseDay>\d{2})
\s-\s
(?P<Title>.+)
[.]+?''']
walkdir = './ipod'
sortedDir = './sorted'
# Scan every directory for file
for dirName, subdirList, fileList in os.walk(walkdir):
for file in fileList:
# Get the full path of the scanned file
fullFilePath = os.path.join(os.path.abspath(dirName), file)
# The file must be a mp3 file and larger than zero in size
if file.lower().endswith('.mp3') and os.stat(fullFilePath).st_size>0:
# Use stagger to get tags from mp3 file
mp3file = stagger.read_tag(fullFilePath)
# Here we check if we can use id3 tags
# to get our artist, title, album and track
if TPE2 in mp3file:
tpe2 = mp3file[TPE2].text[0]
else:
tpe2 = ''
if TIT2 in mp3file:
tit2 = mp3file[TIT2].text[0]
else:
tit2 = ''
if TALB in mp3file:
talb = mp3file[TALB].text[0]
else:
talb = ''
if TRCK in mp3file:
trck = mp3file[TRCK].text[0]
else:
trck = ''
            # Temporary variables to hold artist, title, album and track
            tmp_artist = mp3file.artist if mp3file.artist != '' else tpe2
            tmp_title = mp3file.title if mp3file.title != '' else tit2
            tmp_album = mp3file.album if mp3file.album != '' else talb
            tmp_track = mp3file.track if mp3file.track else trck
            # Here we check if the tags are empty. If so, the text is replaced.
            # We also replace bad chars in the filename (for linux at least)
            artist = str(tmp_artist if tmp_artist != '' else 'unknown artist').replace(':', '').replace('/', '')
            title = str(tmp_title if tmp_title != '' else 'unknown title').replace(':', '').replace('/', '')
            album = str(tmp_album if tmp_album != '' else '').replace(':', '').replace('/', '')
track = tmp_track if tmp_track else ''
            # If the artist is unknown and the album name is empty,
            # we just use the title as our filename
if artist == 'unknown artist' and album == '':
newName = mp3file.title
# Else use what we got from reading the tags
else:
                if track != '':
newName = "%02d - %s - %s" % (track, artist, title)
else:
newName = "%s - %s" % (artist, title)
newName = "%s.mp3" % newName
# Create the new folder path as in ./sorted/Artist/Album/
fullNewPath = os.path.join(sortedDir, os.path.join(artist, album))
# Create the new filename and file path, ./sorted/Artist/Album/track number - Artist - Title
fullNewName = os.path.join(fullNewPath, newName)
# Create the new path
if not os.path.exists(fullNewPath):
try:
os.makedirs(fullNewPath)
except:
print("Error making path %s", fullNewPath)
# If the file doesn't exist we move it
if not os.path.exists(fullNewName):
try:
shutil.move(fullFilePath, fullNewName)
except:
print("Error moving %s to %s" % (fullFilePath, fullNewName) )
|
# Reads in the training csv, sorts it first by
# ip, then by click_time, then writes it out
# in spark-friendly parquet format
import pyspark
spark = pyspark.sql.SparkSession.builder.getOrCreate()
df = spark.read.csv('../data/train.csv', header=True, inferSchema=True)
df_sort = df.sort(['ip','click_time'])
df_sort.write.parquet('../data/sorted_train.parquet',
mode='overwrite')
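# The sorted data can later be loaded back with
# spark.read.parquet('../data/sorted_train.parquet')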
spark.stop()
|
import numpy as np
class maze(object):
"""
Implements a 2d maze environment represented as an array.
    The agent moves on the array and receives rewards based on its position
at the end of each turn.
"""
def __init__(self, grid):
self.board = grid
self.position = np.zeros(2) #start at top left
self.actions = [np.array([-1,0]), np.array([1,0]),np.array([0,-1]),np.array([0,1])]
self.total_reward = 0
self.observation_size = np.size(self.board)
self.observation_shape = (1,)
self.num_actions = len(self.actions)
def collect_reward(self):
reward = self.board[int(self.position[0]),int(self.position[1])]
self.total_reward += reward
return reward
def observe(self):
#print(self.position)
return int(self.position[0] * len(self.board) + self.position[1])
def perform_action(self, action_id):
self.position += self.actions[action_id]
for i in range(len(self.position)):
self.position[i] = max(self.position[i], 0)
self.position[i] = min(self.position[i], len(self.board)-1)
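
# A minimal usage sketch (not part of the original class): the grid values and the
# action sequence below are illustrative assumptions, chosen so the agent walks to
# the bottom-right cell and collects its reward there.
if __name__ == '__main__':
    grid = np.array([[0.0, -1.0, 0.0],
                     [0.0,  0.0, 1.0],
                     [0.0,  0.0, 5.0]])
    env = maze(grid)
    for action_id in (1, 3, 1, 3):  # down, right, down, right
        env.perform_action(action_id)
        env.collect_reward()
    print(env.observe(), env.total_reward)  # expected: state 8, total reward 5.0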
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import SkipTest
from devops.helpers.helpers import _wait
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers.decorators import create_diagnostic_snapshot
from fuelweb_test.helpers import os_actions
from fuelweb_test import logger
from fuelweb_test import settings as hlp_data
from fuelweb_test.tests import base_test_case as base_test_data
@test(groups=["upgrade"])
class UpgradeFuelMaster(base_test_data.TestBasic):
@test(groups=["upgrade_simple"])
@log_snapshot_on_error
def upgrade_simple_env(self):
"""Upgrade simple deployed cluster with ceph
Scenario:
1. Revert snapshot with simple ceph env
2. Run upgrade on master
3. Check that upgrade was successful
4. Add another compute node
5. Re-deploy cluster
6. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'ceph_multinode_compact'):
raise SkipTest()
self.env.revert_snapshot("ceph_multinode_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
self.fuel_web.update_nodes(
cluster_id, {'slave-04': ['compute']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
user='ceph1', tenant='ceph1', passwd='ceph1')
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=10, networks_count=1, timeout=300)
self.fuel_web.run_ostf(cluster_id=cluster_id)
create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")
self.env.make_snapshot("upgrade_simple")
@test(groups=["upgrade_ha"])
@log_snapshot_on_error
def upgrade_ha_env(self):
"""Upgrade ha deployed cluster
Scenario:
1. Revert snapshot with neutron gre ha env
2. Run upgrade on master
3. Check that upgrade was successful
4. Check cluster is operable
5. Create new simple Vlan cluster
6. Deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre_ha'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre_ha")
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_nailgun_upgrade_migration()
self.fuel_web.run_ostf(
cluster_id=cluster_id)
available_releases_after = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
added_release = [id for id in available_releases_after
if id not in available_releases_before]
self.env.bootstrap_nodes(self.env.nodes().slaves[5:7])
data = {
'tenant': 'novaSimpleVlan',
'user': 'novaSimpleVlan',
'password': 'novaSimpleVlan'
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE_SIMPLE,
settings=data,
release_id=added_release[0]
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-06': ['controller'],
'slave-07': ['compute']
}
)
self.fuel_web.update_vlan_network_fixed(
cluster_id, amount=8, network_size=32)
self.fuel_web.deploy_cluster_wait(cluster_id)
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id),
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(
os_conn, smiles_count=6, networks_count=8, timeout=300)
self.fuel_web.verify_network(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("upgrade_ha")
@test(groups=["deploy_ha_after_upgrade"])
@log_snapshot_on_error
def deploy_ha_after_upgrade(self):
"""Upgrade and deploy new ha cluster
Scenario:
1. Revert snapshot with simple ceph env
2. Run upgrade on master
3. Check that upgrade was successful
4. Re-deploy cluster
5. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'ceph_multinode_compact'):
raise SkipTest()
self.env.revert_snapshot("ceph_multinode_compact")
cluster_id = self.fuel_web.get_last_created_cluster()
available_releases_before = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.TARBALL_PATH),
'/var')
checkers.run_script(self.env.get_admin_remote(), '/var',
'upgrade.sh', password=
hlp_data.KEYSTONE_CREDS['password'])
checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
phrase='*** UPGRADE DONE SUCCESSFULLY')
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_FROM,
hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
self.fuel_web.assert_nailgun_upgrade_migration()
available_releases_after = self.fuel_web.get_releases_list_for_os(
release_name=hlp_data.OPENSTACK_RELEASE)
added_release = [id for id in available_releases_after
if id not in available_releases_before]
self.env.bootstrap_nodes(self.env.nodes().slaves[3:9])
segment_type = 'vlan'
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=hlp_data.DEPLOYMENT_MODE_HA,
settings={
"net_provider": 'neutron',
"net_segment_type": segment_type
},
release_id=added_release[0]
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-04': ['controller'],
'slave-05': ['controller'],
'slave-06': ['controller'],
'slave-07': ['compute'],
'slave-08': ['compute'],
'slave-09': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_ha_after_upgrade")
@test(groups=["rollback"])
class RollbackFuelMaster(base_test_data.TestBasic):
@test(groups=["rollback_automatic_ha"])
@log_snapshot_on_error
def rollback_automatically_ha_env(self):
"""Rollback manually simple deployed cluster
Scenario:
1. Revert snapshot with simple neutron gre ha env
2. Add raise exception to openstack.py file
3. Run upgrade on master
4. Check that rollback starts automatically
5. Check that cluster was not upgraded
6. Add 1 cinder node and re-deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre_ha'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre_ha")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
self.fuel_web.modify_python_file(self.env.get_admin_remote(),
"61i \ \ \ \ \ \ \ \ raise errors."
"ExecutedErrorNonZeroExitCode('{0}')"
.format('Some bad error'),
'/var/upgrade/site-packages/'
'fuel_upgrade/engines/'
'openstack.py')
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
password=
hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
_wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
self.env.nodes().slaves[0]), timeout=120)
logger.debug("all services are up now")
self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
self.fuel_web.update_nodes(
cluster_id, {'slave-06': ['cinder']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("rollback_automatic_ha")
@test(groups=["rollback_automatic_simple"])
@log_snapshot_on_error
def rollback_automatically_simple_env(self):
"""Rollback automatically simple deployed cluster
Scenario:
1. Revert snapshot with simple neutron gre env
2. Add raise exception to docker_engine.py file
3. Run upgrade on master
4. Check that rollback starts automatically
        5. Check that cluster was not upgraded and run OSTF
6. Add 1 cinder node and re-deploy cluster
7. Run OSTF
"""
if not self.env.get_virtual_environment().has_snapshot(
'deploy_neutron_gre'):
raise SkipTest()
self.env.revert_snapshot("deploy_neutron_gre")
cluster_id = self.fuel_web.get_last_created_cluster()
checkers.upload_tarball(self.env.get_admin_remote(),
hlp_data.TARBALL_PATH, '/var')
checkers.check_tarball_exists(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH),
'/var')
checkers.untar(self.env.get_admin_remote(),
os.path.basename(hlp_data.
TARBALL_PATH), '/var')
self.fuel_web.modify_python_file(self.env.get_admin_remote(),
"98i \ \ \ \ \ \ \ \ raise errors."
"ExecutedErrorNonZeroExitCode('{0}')"
.format('Some bad error'),
'/var/upgrade/site-packages/'
'fuel_upgrade/engines/'
'docker_engine.py')
        # we expect exit code 255 here because the upgrade is
        # expected to fail and the script exits with status 255
checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
password=
hlp_data.KEYSTONE_CREDS['password'],
rollback=True, exit_code=255)
checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
checkers.check_upgraded_containers(self.env.get_admin_remote(),
hlp_data.UPGRADE_FUEL_TO,
hlp_data.UPGRADE_FUEL_FROM)
logger.debug("all containers are ok")
_wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
self.env.nodes().slaves[0]), timeout=120)
logger.debug("all services are up now")
self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
self.fuel_web.update_nodes(
cluster_id, {'slave-04': ['cinder']},
True, False
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("rollback_automatic_simple")
|
import data_connector
from flask import jsonify, Flask, request
from webargs import fields
from webargs.flaskparser import use_args
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
@app.errorhandler(422)
def validation_failed(e):
return jsonify(error=400, description=str(e.description), messages=str(e.data["messages"])), 400
@app.route('/<preferences>/recommended_locations', methods=["POST"])
def recommend_locations(preferences):
print(list(preferences))
try:
return jsonify(data_connector.fetch_recommended_locations(list(preferences))), 201
except Exception as e:
return jsonify(error=400, description=str(e)), 400
if __name__ == '__main__':
app.run(host='0.0.0.0', port='5050')
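# Example request (sketch): with the defaults above the service listens on port 5050;
# "abc" below is an arbitrary value for the <preferences> path segment.
#   curl -X POST http://localhost:5050/abc/recommended_locations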
|
def parentfun():
    myaaa = 1
    def childfun():
        myaaa = 10  # local to childfun; it shadows parentfun's myaaa rather than rebinding it
        print(myaaa)
    childfun()
    print(myaaa)
if __name__=="__main__":
    parentfun()
    # childfun is only defined inside parentfun, so it cannot be called here
|
import itertools
import networkx as nx
def _tokenize(f):
token = []
for line in f:
if line == '\n':
yield token
token = []
else:
token.append(line)
def _filter_terms(tokens):
for token in tokens:
if token[0] == '[Term]\n':
yield token[1:]
def _parse_terms(terms):
for term in terms:
obsolete = False
node = {}
parents = []
for line in term:
if line.startswith('id:'):
id = line[4:-1]
elif line.startswith('name:'):
node['name'] = line[6:-1]
elif line.startswith('namespace:'):
node['namespace'] = line[11:-1]
elif line.startswith('is_a:'):
parents.append(line[6:16])
elif line.startswith('relationship: part_of'):
parents.append(line[22:32])
elif line.startswith('is_obsolete'):
obsolete = True
break
if not obsolete:
edges = [(p, id) for p in parents] # will reverse edges later
yield (id, node), edges
else:
continue
_filename = 'db/go-basic.obo'
def ontology(file):
""" read ontology from file
    :param file: file path or file handle
"""
O = nx.DiGraph()
if isinstance(file, str):
f = open(file)
we_opened_file = True
else:
f = file
we_opened_file = False
try:
tokens = _tokenize(f)
terms = _filter_terms(tokens)
entries = _parse_terms(terms)
nodes, edges = zip(*entries)
O.add_nodes_from(nodes)
O.add_edges_from(itertools.chain.from_iterable(edges))
O.graph['roots'] = {data['name'] : n for n, data in O.nodes.items()
if data['name'] == data['namespace']}
finally:
if we_opened_file:
f.close()
for root in O.graph['roots'].values():
for n, depth in nx.shortest_path_length(O, root).items():
node = O.nodes[n]
node['depth'] = min(depth, node.get('depth', float('inf')))
return O.reverse()
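
# A minimal usage sketch (assumption: the GO basic OBO file referenced by _filename
# above exists on disk). Loads the ontology and prints its size and root terms.
if __name__ == '__main__':
    go = ontology(_filename)
    print(go.number_of_nodes(), 'terms,', go.number_of_edges(), 'edges')
    print('roots:', sorted(go.graph['roots']))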
|
# -*- coding: utf-8 -*-
# version: Python 3.7.0
from yyCrawler.yyLogin import YyLogin
import requests, sys, csv, logging, os
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
'Referer': 'https://www.yy.com/i/income'
}
url = 'https://www.yy.com/zone/income/anchorIncomeItems.json'
logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s')
logger = logging.getLogger()
def get_json(cookies):
res = requests.get(url=url, cookies=cookies, headers=headers).json()
for i in res['data']:
temp = [i['yearMonth'], i['totalDiamond'], i['outDiamond'], i['commission'], i['frozenCommission'],
i['settleIncome']]
logger.info("解析数据:{}".format(temp))
csv.writer(pf).writerow(temp)
def main():
    user_name = '2329990863'  # fill in the username here
    user_pwd = 'zixichuanmei245'  # fill in the password here
hy_cn = YyLogin()
cookies = hy_cn.login(user_name, user_pwd)
if not cookies:
logger.info("用户名密码输入错误或者网路连接超时......退出程序!")
sys.exit(1)
logger.info("登录成功!并获取数据...")
get_json(cookies)
if __name__ == '__main__':
filepath = r'.\result.csv'
if os.path.isfile(filepath): os.remove(filepath)
pf = open(filepath, 'a', encoding="utf-8-sig", newline='')
    csv.writer(pf).writerow(['Month', 'Blue diamond income', 'Blue diamond spending', 'Pending commission', 'Frozen commission', 'Settled income'])
main()
pf.close()
logger.info("保存result.csv文件成功.")
|
import psycopg2
import pytest
from .tools import exec_file
def test_insert_already_exist_region():
with pytest.raises(psycopg2.ProgrammingError):
exec_file("insert_already_exist_departement")
|
import subprocess
from .conversion import Conversion
from ..helpers import replace_extension
class CommandConversion(Conversion):
command_name = None
output_extension = None
output_flag = False
def __call__(self, source_path):
target_path = replace_extension(self.output_extension, source_path)
if self.output_flag:
args = [self.command_name, source_path, '-o', target_path]
else:
args = [self.command_name, source_path, target_path]
rt = subprocess.check_call(args)
return [target_path]
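
# Illustrative subclass (not part of the original module): a concrete conversion is
# declared by filling in the class attributes. pandoc is assumed to be on PATH; it
# matches the output_flag=True call style above ("pandoc <source> -o <target>").
class PandocPdfConversion(CommandConversion):
    command_name = 'pandoc'
    output_extension = 'pdf'
    output_flag = True

# Usage (sketch): PandocPdfConversion()('notes.md') would run
# "pandoc notes.md -o notes.pdf", assuming replace_extension swaps the suffix.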
|
#!/usr/bin/env python
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
import subprocess
#########################################################################################
# GPG wrappers.
def verify_package(name):
"""Helper method to verify if the package is trusted.
Args:
name (str): Name of the package (*.deb.gpg).
Returns:
        bool: True if the package signature is valid against the trusted keyring else False.
"""
p = subprocess.Popen(["gpg", "--keyring", "/etc/apt/trusted.gpg",
"--verify", name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
return (p.returncode == 0)
def decrypt_package(name, decrypted_name):
"""Helper method to decrypt the package.
Args:
name (str): Path to the package.
decrypted_name (str): Destination path of the decrypted package.
Returns:
bool: True if successfully decrypted else False.
"""
p = subprocess.Popen(["gpg", "--batch",
"--keyring", "/etc/apt/trusted.gpg",
"--output", decrypted_name,
"--decrypt", name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
return (p.returncode == 0)
def install_debian(package_name):
"""Helper method to install debian packages.
Args:
package_name (str): Path to the package.
Returns:
        bool: True if successfully installed else False.
"""
p = subprocess.Popen(["dpkg", "--install", package_name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
return (p.returncode == 0)
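
# Minimal usage sketch (assumptions: gpg and dpkg are available, /etc/apt/trusted.gpg
# holds the signing key, and "package.deb.gpg" is a hypothetical encrypted package).
if __name__ == '__main__':
    encrypted, decrypted = 'package.deb.gpg', 'package.deb'
    if verify_package(encrypted) and decrypt_package(encrypted, decrypted):
        print('installed' if install_debian(decrypted) else 'install failed')
    else:
        print('package is not trusted or could not be decrypted')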
|
from _mock_data.url import internal_url
from browserist import Browser
def test_window_fullscreen(browser_default: Browser) -> None:
browser = browser_default
browser.open.url(internal_url.EXAMPLE_COM)
default_width, default_height = browser.window.get.size()
browser.window.fullscreen()
fullscreen_width, fullscreen_height = browser.window.get.size()
assert default_width <= fullscreen_width and default_height <= fullscreen_height
|
import UnityEngine
all_objects = UnityEngine.Object.FindObjectsOfType(UnityEngine.GameObject)
for go in all_objects:
if go.name[-1] != '_':
go.name = go.name + '_' |
import sys
import json
from conllu import parse
import pdb
def get_grand_head(block, idx):
head = block[idx]['head'] - 1
grandhead = block[head]['head'] - 1
grandlabel = block[head]['deprel']
return grandhead, grandlabel
def compute(adp_data, pred_block, gold_block):
uas, las, total = 0, 0, 0
for idx, grand_idx in adp_data:
#assert(pred_block[idx]['upostag'] == 'ADP')
assert(gold_block[idx]['upostag'] == 'ADP')
pred_grandhead, pred_grandlabel = get_grand_head(pred_block, idx)
gold_grandhead, gold_grandlabel = get_grand_head(gold_block, idx)
try:
assert(gold_grandhead == grand_idx)
except AssertionError:
#pdb.set_trace()
pass
if pred_grandhead == gold_grandhead:
uas += 1
            # the obl relation was introduced in UDv2; in UDv1 it was subsumed under nmod
if (pred_grandlabel == gold_grandlabel) or \
(pred_grandlabel == "nmod" and gold_grandlabel == "obl"):
las += 1
else:
print(f"pred {pred_grandlabel} true {gold_grandlabel}")
total += 1
return uas, las, total
def parse_conllu(path):
with open(path) as f1:
data = f1.read()
return parse(data)
def parse_json(path):
with open(path) as f1:
data = json.load(f1)
return data
if __name__ == "__main__":
pred_conllu = sys.argv[1]
gold_conllu = sys.argv[2]
json_file = sys.argv[3]
pred_blocks = parse_conllu(pred_conllu)
gold_blocks = parse_conllu(gold_conllu)
json_data = parse_json(json_file)
total_uas, total_las, total_total = 0, 0, 0
for i, (pred_block, gold_block) in enumerate(zip(pred_blocks, gold_blocks)):
uas, las, total = compute(json_data[str(i)], pred_block, gold_block)
total_uas += uas
total_las += las
total_total += total
uas_final = (total_uas/ total_total)*100
las_final = (total_las/ total_total)*100
print(f"UAS {uas_final:.2f} LAS {las_final:.2f}")
|
import gevent
import gevent.monkey
gevent.monkey.patch_socket()
gevent.monkey.patch_select()
gevent.monkey.patch_ssl()
from gevent.pool import Pool as GPool
import re
import os
import sys
import logging
from io import open
from contextlib import contextmanager
from re import search as re_search
from fnmatch import fnmatch
from binascii import unhexlify
import vpk
from steam import webapi
from steam.exceptions import SteamError
from steam.enums import EResult, EDepotFileFlag
from steam.client import EMsg, MsgProto
from steam.client.cdn import decrypt_manifest_gid_2
from steamctl.clients import CachingSteamClient, CTLDepotManifest, CTLDepotFile
from steamctl.utils.web import make_requests_session
from steamctl.utils.format import fmt_size, fmt_datetime
from steamctl.utils.tqdm import tqdm, fake_tqdm
from steamctl.commands.webapi import get_webapi_key
from steamctl.utils.storage import ensure_dir, sanitizerelpath
webapi._make_requests_session = make_requests_session
LOG = logging.getLogger(__name__)
# overload VPK with a missing method
class c_VPK(vpk.VPK):
def c_iter_index(self):
if self.tree:
index = self.tree.items()
else:
index = self.read_index_iter()
for path, metadata in index:
yield path, metadata
# find and cache paths to vpk depot files, and set them up to be read directly from CDN
class ManifestFileIndex(object):
def __init__(self, manifests):
self.manifests = manifests
self._path_cache = {}
def _locate_file_mapping(self, path):
ref = self._path_cache.get(path, None)
if ref:
return ref
else:
self._path_cache[path] = None
for manifest in self.manifests:
try:
foundfile = next(manifest.iter_files(path))
except StopIteration:
continue
else:
self._path_cache[path] = ref = (manifest, foundfile.file_mapping)
return ref
def index(self, pattern=None, raw=True):
for manifest in self.manifests:
for filematch in manifest.iter_files(pattern):
filepath = filematch.filename_raw if raw else filematch.filename
self._path_cache[filepath] = (manifest, filematch.file_mapping)
def file_exists(self, path):
        return self._locate_file_mapping(path) is not None
def get_file(self, path, *args, **kwargs):
ref = self._locate_file_mapping(path)
if ref:
return CTLDepotFile(*ref)
raise SteamError("File not found: {}".format(path))
def get_vpk(self, path):
return c_VPK(path, fopen=self.get_file)
# vpkfile download task
def vpkfile_download_to(vpk_path, vpkfile, target, no_make_dirs, pbar):
relpath = sanitizerelpath(vpkfile.filepath)
if no_make_dirs:
relpath = os.path.join(target, # output directory
os.path.basename(relpath)) # filename from vpk
else:
relpath = os.path.join(target, # output directory
                               vpk_path[:-4],             # vpk path without the extension (e.g. pak01_dir)
relpath) # vpk relative path
filepath = os.path.abspath(relpath)
ensure_dir(filepath)
LOG.info("Downloading VPK file to {} ({}, crc32:{})".format(relpath,
fmt_size(vpkfile.file_length),
vpkfile.crc32,
))
with open(filepath, 'wb') as fp:
for chunk in iter(lambda: vpkfile.read(16384), b''):
fp.write(chunk)
if pbar:
pbar.update(len(chunk))
@contextmanager
def init_clients(args):
s = CachingSteamClient()
if args.cell_id is not None:
s.cell_id = args.cell_id
cdn = s.get_cdnclient()
    # short-circuit everything if we were passed manifest file(s) directly
if getattr(args, 'file', None):
manifests = []
for file_list in args.file:
for fp in file_list:
manifest = CTLDepotManifest(cdn, args.app or -1, fp.read())
manifest.name = os.path.basename(fp.name)
manifests.append(manifest)
yield None, None, manifests
return
# for everything else we need SteamClient and CDNClient
if not args.app:
raise SteamError("No app id specified")
# only login when we may need it
if (not args.skip_login # user requested no login
and (not args.app or not args.depot or not args.manifest or
args.depot not in cdn.depot_keys)
):
result = s.login_from_args(args)
if result == EResult.OK:
LOG.info("Login to Steam successful")
else:
raise SteamError("Failed to login: %r" % result)
else:
LOG.info("Skipping login")
if getattr(args, 'no_manifests', None):
manifests = []
# when app, depot, and manifest are specified, we can just go to CDN
elif args.app and args.depot and args.manifest:
# we can only decrypt if SteamClient is logged in, or we have depot key cached
if args.skip_login and args.depot not in cdn.depot_keys:
decrypt = False
else:
decrypt = True
# load the manifest
try:
manifests = [cdn.get_manifest(args.app, args.depot, args.manifest, decrypt=decrypt)]
except SteamError as exp:
if exp.eresult == EResult.AccessDenied:
raise SteamError("This account doesn't have access to the app depot", exp.eresult)
elif 'HTTP Error 404' in str(exp):
raise SteamError("Manifest not found on CDN")
else:
raise
# if only app is specified, or app and depot, we need product info to figure out manifests
else:
# no license, means no depot keys, and possibly not product info
if not args.skip_licenses:
LOG.info("Checking licenses")
if s.logged_on and not s.licenses and s.steam_id.type != s.steam_id.EType.AnonUser:
s.wait_event(EMsg.ClientLicenseList, raises=False, timeout=10)
cdn.load_licenses()
if args.app not in cdn.licensed_app_ids:
raise SteamError("No license available for App ID: %s" % args.app, EResult.AccessDenied)
# check if we need to invalidate the cache data
if not args.skip_login:
LOG.info("Checking change list")
s.check_for_changes()
# handle any filtering on depot list
def depot_filter(depot_id, info):
if args.depot is not None:
if args.depot != depot_id:
return False
if args.os != 'any':
if args.os[-2:] == '64':
os, arch = args.os[:-2], args.os[-2:]
else:
os, arch = args.os, None
config = info.get('config', {})
if 'oslist' in config and (os not in config['oslist'].split(',')):
return False
if 'osarch' in config and config['osarch'] != arch:
return False
return True
if args.skip_login:
if cdn.has_cached_app_depot_info(args.app):
LOG.info("Using cached app info")
else:
raise SteamError("No cached app info. Login to Steam")
branch = args.branch
password = args.password
LOG.info("Getting manifests for %s branch", repr(branch))
# enumerate manifests
manifests = []
for manifest in cdn.get_manifests(args.app, branch=branch, password=password, filter_func=depot_filter, decrypt=False):
if (not args.skip_licenses
and manifest.depot_id not in cdn.licensed_depot_ids
and manifest.depot_id not in cdn.licensed_app_ids):
LOG.error("No license for depot: %r" % manifest)
continue
if manifest.filenames_encrypted:
if not args.skip_login:
try:
manifest.decrypt_filenames(cdn.get_depot_key(manifest.app_id, manifest.depot_id))
except Exception as exp:
LOG.error("Failed to decrypt manifest %s (depot %s): %s", manifest.gid, manifest.depot_id, str(exp))
if not args.skip_licenses:
continue
manifests.append(manifest)
LOG.debug("Got manifests: %r", manifests)
yield s, cdn, manifests
# clean and exit
cdn.save_cache()
s.disconnect()
def cmd_depot_info(args):
try:
with init_clients(args) as (_, cdn, manifests):
for i, manifest in enumerate(manifests, 1):
print("App ID:", manifest.app_id)
print("Depot ID:", manifest.metadata.depot_id)
print("Depot Name:", manifest.name if manifest.name else 'Unnamed Depot')
print("Manifest GID:", manifest.metadata.gid_manifest)
print("Created On:", fmt_datetime(manifest.metadata.creation_time))
print("Size:", fmt_size(manifest.metadata.cb_disk_original))
print("Compressed Size:", fmt_size(manifest.metadata.cb_disk_compressed))
nchunks = sum((len(file.chunks) for file in manifest.payload.mappings))
unique_chunks = manifest.metadata.unique_chunks
print("Unique/Total chunks:", unique_chunks, "/", nchunks, "({:.2f}%)".format(((1-(unique_chunks / nchunks))*100) if nchunks else 0))
print("Encrypted Filenames:", repr(manifest.metadata.filenames_encrypted))
print("Number of Files:", len(manifest.payload.mappings))
if cdn:
depot_info = cdn.app_depots.get(manifest.app_id, {}).get(str(manifest.metadata.depot_id))
if depot_info:
print("Config:", depot_info.get('config', '{}'))
if 'dlcappid' in depot_info:
print("DLC AppID:", depot_info['dlcappid'])
print("Branch:", args.branch)
print("Open branches:", ', '.join(depot_info.get('manifests', {}).keys()))
print("Protected branches:", ', '.join(depot_info.get('encryptedmanifests', {}).keys()))
if i != len(manifests):
print("-"*40)
except SteamError as exp:
LOG.error(str(exp))
return 1 # error
def cmd_depot_list(args):
def print_file_info(filepath, info=None):
# filepath filtering
if args.name and not fnmatch(filepath, args.name):
return
if args.regex and not re_search(args.regex, filepath):
return
# output
if info:
print("{} - {}".format(filepath, info))
else:
print(filepath)
try:
with init_clients(args) as (_, _, manifests):
fileindex = ManifestFileIndex(manifests)
# pre-index vpk file to speed up lookups
if args.vpk:
fileindex.index('*.vpk')
for manifest in manifests:
LOG.debug("Processing: %r", manifest)
if manifest.filenames_encrypted:
LOG.error("Manifest %s (depot %s) filenames are encrypted.", manifest.gid, manifest.depot_id)
continue
for mapping in manifest.payload.mappings:
                    # ignore symlinks and directories
if mapping.linktarget or mapping.flags & EDepotFileFlag.Directory:
continue
filepath = mapping.filename.rstrip('\x00 \n\t')
# filepath filtering
if ( (not args.name and not args.regex)
or (args.name and fnmatch(filepath, args.name))
or (args.regex and re_search(args.regex, filepath))
):
# print out for manifest file
if not args.long:
print(filepath)
else:
print("{} - size:{:,d} sha1:{}".format(
filepath,
mapping.size,
mapping.sha_content.hex(),
)
)
# list files inside vpk
if args.vpk and filepath.endswith('.vpk'):
# fast skip VPKs that can't possibly match
if args.name and ':' in args.name:
pre = args.name.split(':', 1)[0]
if not fnmatch(filepath, pre):
continue
if args.regex and ':' in args.regex:
pre = args.regex.split(':', 1)[0]
if not re_search(pre + '$', filepath):
continue
# scan VPKs, but skip data only ones
if filepath.endswith('_dir.vpk') or not re.search("_\d+\.vpk$", filepath):
LOG.debug("Scanning VPK file: %s", filepath)
try:
fvpk = fileindex.get_vpk(filepath)
except ValueError as exp:
LOG.error("VPK read error: %s", str(exp))
else:
for vpkfile_path, (_, crc32, _, _, _, size) in fvpk.c_iter_index():
complete_path = "{}:{}".format(filepath, vpkfile_path)
if ( (not args.name and not args.regex)
or (args.name and fnmatch(complete_path, args.name))
or (args.regex and re_search(args.regex, complete_path))
):
if args.long:
print("{} - size:{:,d} crc32:{}".format(
complete_path,
size,
crc32,
)
)
else:
print(complete_path)
except SteamError as exp:
LOG.error(str(exp))
return 1 # error
def cmd_depot_download(args):
pbar = fake_tqdm()
pbar2 = fake_tqdm()
try:
with init_clients(args) as (_, _, manifests):
fileindex = ManifestFileIndex(manifests)
# pre-index vpk file to speed up lookups
if args.vpk:
fileindex.index('*.vpk')
# calculate total size
total_files = 0
total_size = 0
LOG.info("Locating and counting files...")
for manifest in manifests:
for depotfile in manifest:
if not depotfile.is_file:
continue
filepath = depotfile.filename_raw
# list files inside vpk
if args.vpk and filepath.endswith('.vpk'):
# fast skip VPKs that can't possibly match
if args.name and ':' in args.name:
pre = args.name.split(':', 1)[0]
if not fnmatch(filepath, pre):
continue
if args.regex and ':' in args.regex:
pre = args.regex.split(':', 1)[0]
if not re_search(pre + '$', filepath):
continue
# scan VPKs, but skip data only ones
if filepath.endswith('_dir.vpk') or not re.search("_\d+\.vpk$", filepath):
LOG.debug("Scanning VPK file: %s", filepath)
try:
fvpk = fileindex.get_vpk(filepath)
except ValueError as exp:
LOG.error("VPK read error: %s", str(exp))
else:
for vpkfile_path, (_, _, _, _, _, size) in fvpk.c_iter_index():
complete_path = "{}:{}".format(filepath, vpkfile_path)
if args.name and not fnmatch(complete_path, args.name):
continue
if args.regex and not re_search(args.regex, complete_path):
continue
total_files += 1
total_size += size
# account for depot files
if args.name and not fnmatch(filepath, args.name):
continue
if args.regex and not re_search(args.regex, filepath):
continue
total_files += 1
total_size += depotfile.size
if not total_files:
raise SteamError("No files found to download")
# enable progress bar
if not args.no_progress and sys.stderr.isatty():
pbar = tqdm(desc='Data ', mininterval=0.5, maxinterval=1, miniters=1024**3*10, total=total_size, unit='B', unit_scale=True)
pbar2 = tqdm(desc='Files', mininterval=0.5, maxinterval=1, miniters=10, total=total_files, position=1, unit=' file', unit_scale=False)
gevent.spawn(pbar.gevent_refresh_loop)
gevent.spawn(pbar2.gevent_refresh_loop)
# download files
tasks = GPool(6)
for manifest in manifests:
if pbar2.n == total_files:
break
LOG.info("Processing manifest (%s) '%s' ..." % (manifest.gid, manifest.name or "<Unknown>"))
for depotfile in manifest:
if pbar2.n == total_files:
break
if not depotfile.is_file:
continue
filepath = depotfile.filename_raw
if args.vpk and filepath.endswith('.vpk'):
# fast skip VPKs that can't possibly match
if args.name and ':' in args.name:
pre = args.name.split(':', 1)[0]
if not fnmatch(filepath, pre):
continue
if args.regex and ':' in args.regex:
pre = args.regex.split(':', 1)[0]
if not re_search(pre + '$', filepath):
continue
# scan VPKs, but skip data only ones
if filepath.endswith('_dir.vpk') or not re.search("_\d+\.vpk$", filepath):
LOG.debug("Scanning VPK file: %s", filepath)
try:
fvpk = fileindex.get_vpk(filepath)
except ValueError as exp:
LOG.error("VPK read error: %s", str(exp))
else:
for vpkfile_path, metadata in fvpk.c_iter_index():
complete_path = "{}:{}".format(filepath, vpkfile_path)
if args.name and not fnmatch(complete_path, args.name):
continue
if args.regex and not re_search(args.regex, complete_path):
continue
tasks.spawn(vpkfile_download_to,
depotfile.filename,
fvpk.get_vpkfile_instance(vpkfile_path,
fvpk._make_meta_dict(metadata)),
args.output,
no_make_dirs=args.no_directories,
pbar=pbar,
)
pbar2.update(1)
# break out of vpk file loop
if pbar2.n == total_files:
break
# break out of depotfile loop
if pbar2.n == total_files:
break
# filepath filtering
if args.name and not fnmatch(filepath, args.name):
continue
if args.regex and not re_search(args.regex, filepath):
continue
tasks.spawn(depotfile.download_to, args.output,
no_make_dirs=args.no_directories,
pbar=pbar,
verify=(not args.skip_verify),
)
pbar2.update(1)
# wait on all downloads to finish
tasks.join()
gevent.sleep(0.5)
except KeyboardInterrupt:
pbar.close()
LOG.info("Download canceled")
return 1 # error
except SteamError as exp:
pbar.close()
pbar.write(str(exp))
return 1 # error
else:
pbar.close()
if not args.no_progress:
pbar2.close()
pbar2.write('\n')
LOG.info('Download complete')
from hashlib import sha1
def calc_sha1_for_file(path):
checksum = sha1()
with open(path, 'rb') as fp:
for chunk in iter(lambda: fp.read(16384), b''):
checksum.update(chunk)
return checksum.digest()
def cmd_depot_diff(args):
try:
with init_clients(args) as (_, _, manifests):
targetdir = args.TARGETDIR
fileindex = {}
for manifest in manifests:
LOG.debug("Scanning manifest: %r", manifest)
for mfile in manifest.iter_files():
if not mfile.is_file:
continue
if args.name and not fnmatch(mfile.filename_raw, args.name):
continue
if args.regex and not re_search(args.regex, mfile.filename_raw):
continue
if args.show_extra:
fileindex[mfile.filename] = mfile.file_mapping
if args.hide_missing and args.hide_mismatch:
continue
full_filepath = os.path.join(targetdir, mfile.filename)
if os.path.isfile(full_filepath):
# do mismatch, checksum checking
size = os.path.getsize(full_filepath)
if size != mfile.size:
print("Mismatch (size):", full_filepath)
continue
# valve sets the checksum for empty files to all nulls
if size == 0:
                            checksum = b'\x00' * 20
                        else:
                            checksum = calc_sha1_for_file(full_filepath)
                        if checksum != mfile.file_mapping.sha_content:
print("Mismatch (checksum):", full_filepath)
elif not args.hide_missing:
print("Missing file:", full_filepath)
# walk file system and show files not in manifest(s)
if args.show_extra:
for cdir, _, files in os.walk(targetdir):
for filename in files:
filepath = os.path.join(cdir, filename)
rel_filepath = os.path.relpath(filepath, targetdir)
if rel_filepath.lower() not in fileindex:
print("Not in manifest:", filepath)
except KeyboardInterrupt:
return 1 # error
except SteamError as exp:
LOG.error(exp)
return 1 # error
def _decrypt_gid(egid, key):
try:
gid = decrypt_manifest_gid_2(unhexlify(egid), unhexlify(key))
except Exception as exp:
if 'unpack requires a buffer' in str(exp):
print(' ', egid, '- incorrect decryption key')
else:
print(' ', egid, '- Error: ', str(exp))
else:
print(' ', egid, '=', gid)
def cmd_depot_decrypt_gid(args):
args.cell_id = 0
args.no_manifests = True
args.skip_login = False
args.depot = None
valid_gids = []
for egid in args.manifest_gid:
if not re.match(r'[0-9A-Za-z]{32}$', egid):
LOG.error("Skipping invalid gid: %s", egid)
else:
valid_gids.append(egid)
if not valid_gids:
LOG.error("No valid gids left to check")
return 1 # error
# offline: decrypt gid via decryption key
if args.key:
if not re.match(r'[0-9A-Za-z]{64}$', args.key):
LOG.error("Invalid decryption key (format: hex encoded, a-z0-9, 64 bytes)")
return 1 # error
for egid in valid_gids:
_decrypt_gid(egid, args.key)
return
# online: use branch password to fetch decryption key and attempt to decrypt gid
with init_clients(args) as (s, _, _):
resp = s.send_job_and_wait(MsgProto(EMsg.ClientCheckAppBetaPassword),
{'app_id': args.app, 'betapassword': args.password})
if resp.eresult == EResult.OK:
LOG.debug("Unlocked following beta branches: %s",
', '.join(map(lambda x: x.betaname.lower(), resp.betapasswords)))
for entry in resp.betapasswords:
print("Password is valid for branch:", entry.betaname)
for egid in valid_gids:
_decrypt_gid(egid, entry.betapassword)
else:
raise SteamError("App beta password check failed.", EResult(resp.eresult))
|
import base64
import hashlib
from .helpers import get_config, get_session, write_signature_placeholder, APIError, subject_dn_ as default_subject_dn, get_digest, pkcs11_aligned, write_stream_object
from .selfsigned import cert2asn
import datetime
from asn1crypto import cms, algos, core, pem, tsp, x509, util, ocsp, pdf
from cryptography import x509 as cryptox509
from cryptography.x509 import ocsp as cryptoocsp
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, utils
from cryptography.hazmat.backends import default_backend
from . import BaseSigner
class GlobalSignSigner(BaseSigner):
def __init__(self, cfg_file, keypass=None, subject_dn=None):
self._ssl, self._api = get_config(cfg_file)
self._key_password = keypass or self._ssl['keypass']
self._url = self._api.get('url') + self._api.get('endpoint')
self._subject_dn = subject_dn or default_subject_dn
self._login_url = self._url + '/login'
self._certificate_path_url = self._url + '/certificate_path'
self._quota_signatures = self._url + '/quotas/signatures'
self._identity_url = self._url + '/identity'
self._validation_policy_url = self._url + '/validationpolicy'
self._quota_signatures_url = self._url + '/quotas/signatures'
self._signature_url = self._url + '/identity/{id}/sign/{digest}'
self._timestamp_url = self._url + '/timestamp/{digest}'
self._trustchain_url = self._url + '/trustchain'
def write_signature_placeholder(self, pdf):
return write_signature_placeholder(pdf)
def write_signature(self, pdf):
# Start a secure session so we don't have to set the same headers in every call
s = get_session(self._api.get('url'), **self._ssl)
s.headers.update({'Content-Type': 'application/json;charset=utf-8', 'Accept-Encoding': None})
private_key = None
with open(self._ssl['keyfile'], "rb") as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=self._key_password.encode('utf-8'),
backend=default_backend()
)
# Login in and get the access token
r = s.post(self._login_url,
json={'api_key': self._api.get('api_key'), 'api_secret': self._api.get('api_secret')},
)
if r.status_code != 200:
raise APIError('Cannot login: {}\n{}'.format(r.status_code, r.json()))
token = r.json()['access_token']
# Add an Authorization header with the access token
s.headers.update({"Authorization": "Bearer {}".format(token)})
r = s.post(self._identity_url, json=self._subject_dn)
if r.status_code != 200:
raise APIError('Cannot retrieve the id used to sign the identity requests: {}\n{}'.format(r.status_code, r.json()))
identity = r.json()
id = identity['id']
signing_cert = identity['signing_cert']
self._signing_ocsp_response_raw = base64.b64decode(identity['ocsp_response'].encode('ascii'))
r = s.get(self._trustchain_url)
if r.status_code != 200:
raise APIError('Cannot retrieve the trustchain: {}\n{}'.format(r.status_code, r.json()))
self._revocation_info = r.json()['ocsp_revocation_info']
self._trustchain_ocsp_revocation_info_raw = [base64.b64decode(o.encode('ascii')) for o in self._revocation_info]
self._dss_trustchain = []
self._trustchain_raw = [c.encode('ascii') for c in r.json()['trustchain']]
self._trustchain = []
for c in self._trustchain_raw:
self._dss_trustchain.append(
cryptox509.load_pem_x509_certificate(
c,
default_backend()
)
)
# Fetch the path to the ca (certificate authority)
r = s.get(self._certificate_path_url)
if r.status_code != 200:
raise APIError('Cannot retrieve the certificate path: {}\n{}'.format(r.status_code, r.json()))
ca = r.json()['path']
self._trustchain.append(
cryptox509.load_pem_x509_certificate(
ca.encode('ascii'),
default_backend()
)
)
response = ocsp.OCSPResponse.load(self._signing_ocsp_response_raw)
self._ocsp_response = response
digest = get_digest(pdf)
self._signing_cert_raw = signing_cert.encode('ascii')
signing_cert = cryptox509.load_pem_x509_certificate(
self._signing_cert_raw,
default_backend()
)
        # The _sign function builds the signature dictionary that is written into the PDF's signature contents
digital_signature = self._sign(None, private_key, signing_cert, self._trustchain, 'sha256', True, digest, None, False, "http://public-qlts.certum.pl/qts-17", identity['id'], s)
digital_signature = pkcs11_aligned(digital_signature) # ocsp_resp.public_bytes(serialization.Encoding.DER))
pdf.fileobj.write(digital_signature.encode('ascii'))
# Long Term Validation (LTV) stuff
dss_number = pdf.next_object_number()
certlist_number = dss_number + 1
ocsplist_number = dss_number + 2
expected_next_object_number = dss_number + 3
dss = b'<</Certs %d 0 R/OCSPs %d 0 R>>' % (certlist_number, ocsplist_number)
pdf.write_new_object(dss)
first_cert_number = dss_number + 3
# The +1 is because the signing cert is not in the trustchain list
cert_list_numbers = [first_cert_number + n for n in range(0, len(self._trustchain_raw) + 1)]
cert_list = b'['
for n in cert_list_numbers:
cert_list += b'%d 0 R ' % n
cert_list += b']'
pdf.write_new_object(cert_list)
first_ocsp_number = first_cert_number + len(cert_list_numbers)
ocsp_list_numbers = [first_ocsp_number + n for n in range(0, len(self._trustchain_ocsp_revocation_info_raw) + 1)]
ocsp_list = b'['
for n in ocsp_list_numbers:
ocsp_list += b'%d 0 R ' % n
ocsp_list += b']'
pdf.write_new_object(ocsp_list)
assert pdf.next_object_number() == expected_next_object_number
cert_numbers = []
for c in self._dss_trustchain:
cert_numbers.append(write_stream_object(pdf, c.public_bytes(serialization.Encoding.DER)))
cert_numbers.append(write_stream_object(pdf, signing_cert.public_bytes(serialization.Encoding.DER)))
assert cert_numbers == cert_list_numbers
ocsp_numbers = []
for o in self._trustchain_ocsp_revocation_info_raw:
ocsp_numbers.append(write_stream_object(pdf, o))
ocsp_numbers.append(write_stream_object(pdf, self._signing_ocsp_response_raw))
assert ocsp_numbers == ocsp_list_numbers
# A Document Timestamp is required to be Pades LTV compliant
tsp_number, tsp_offset, len_to_content = self._document_timestamp_placeholder(pdf, s)
timestamp_annot = b'<</F 132/Type/Annot/Subtype/Widget/Rect[0 0 0 0]/FT/Sig/DR<<>>/T(doctsp)/P 2 0 R/V %d 0 R>>' % tsp_number
tsp_annot_number = pdf.write_new_object(timestamp_annot)
params = b' /Version/1.7 /AcroForm <</Fields[%d 0 R %d 0 R] /SigFlags 3>>/DSS %d 0 R /Extensions<</ESIC <</BaseVersion/1.7/ExtensionLevel 1>>>>' % (pdf._signature_rect_number, tsp_annot_number, dss_number)
pdf.extend_dict(pdf.catalog, params)
pdf.finish() # This writes the trailer etc.
# Replace the timestamp placeholder
self._document_timestamp(pdf, s, tsp_offset, len_to_content)
def _sign(self, datau, key, signing_cert, trustchain, hashalgo, attrs=True, signed_value=None, hsm=None, pss=False, timestampurl=None, identity=None, s=None):
if signed_value is None:
signed_value = getattr(hashlib, hashalgo)(datau).digest()
signed_time = datetime.datetime.now(tz=util.timezone.utc)
esscert = signing_cert.public_bytes(serialization.Encoding.DER)
esscert = getattr(hashlib, hashalgo)(esscert).digest()
if hsm is not None:
keyid, cert = hsm.certificate()
cert = cert2asn(cert, False)
trustchain = []
else:
signing_cert = cert2asn(signing_cert)
certificates = []
for c in trustchain:
certificates.append(cert2asn(c))
certificates.append(signing_cert)
signer = {
'version': 'v1',
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': signing_cert.issuer,
'serial_number': signing_cert.serial_number,
}),
}),
'digest_algorithm': algos.DigestAlgorithm({'algorithm': hashalgo}),
'signature': signed_value,
}
signer['signature_algorithm'] = algos.SignedDigestAlgorithm({'algorithm': 'rsassa_pkcs1v15'})
if attrs:
if attrs is True:
signer['signed_attrs'] = [
cms.CMSAttribute({
'type': cms.CMSAttributeType('content_type'),
'values': ('data',),
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('message_digest'),
'values': (signed_value,),
}),
cms.CMSAttribute({
'type': cms.CMSAttributeType('signing_certificate_v2'),
'values': (
tsp.SigningCertificateV2({
'certs': [tsp.ESSCertIDv2({'cert_hash': esscert}),]
}),
)
})
#cms.CMSAttribute({
#'type': cms.CMSAttributeType('signing_time'),
#'values': (cms.Time({'utc_time': core.UTCTime(signed_time)}),)
#}),
]
else:
signer['signed_attrs'] = attrs
# TODO: Keep it all in one loop
ocsp_revocation = []
ocsp_revocation.append(
cms.RevocationInfoChoice({
'other': cms.OtherRevocationInfoFormat({
'other_rev_info_format': cms.OtherRevInfoFormatId('ocsp_response'),
'other_rev_info': self._ocsp_response
})
})
)
# TODO: Don't need this because I have a DSS now
#for rev in self._revocation_info:
#rev = base64.b64decode(rev)
#rev = ocsp.OCSPResponse.load(rev)
#ocsp_revocation.append(
#cms.RevocationInfoChoice({
#'other': cms.OtherRevocationInfoFormat({
#'other_rev_info_format': cms.OtherRevInfoFormatId('ocsp_response'),
#'other_rev_info': rev
#})
#})
#)
config = {
'version': 'v1',
'digest_algorithms': cms.DigestAlgorithms((
algos.DigestAlgorithm({'algorithm': hashalgo}),
)),
'encap_content_info': {
'content_type': 'data',
},
'certificates': certificates,
'crls': ocsp_revocation,
'signer_infos': [
signer,
],
}
datas = cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': cms.SignedData(config),
})
if attrs:
tosign = datas['content']['signer_infos'][0]['signed_attrs'].dump()
tosign = b'\x31' + tosign[1:]
else:
tosign = datau
tosign = getattr(hashlib, hashalgo)(tosign).digest()
# Fetch the actual signature
r = s.get(self._signature_url.format(id=identity, digest=tosign.hex().upper()))
if r.status_code != 200:
raise APIError('Cannot retrieve the signature: {}\n{}'.format(r.status_code, r.json()))
signed_value_signature = r.json()['signature']
signed_value_signature = bytes.fromhex(signed_value_signature)
signed_value = getattr(hashlib, hashalgo)(signed_value_signature).digest()
datas['content']['signer_infos'][0]['signature'] = signed_value_signature
# Use globalsigns timestamp
# TODO: uncomment next 17 lines to have timestamped signature
r = s.get(self._timestamp_url.format(digest=signed_value.hex().upper()))
if r.status_code != 200:
raise APIError('Cannot retrieve the timestamp: {}\n{}'.format(r.status_code, r.json()))
timestamp_token = r.json()['token']
timestamp_token = timestamp_token.encode('ascii')
timestamp_token = base64.b64decode(timestamp_token)
tsp_dict = cms.ContentInfo.load(timestamp_token)
tsp_attrs = [
cms.CMSAttribute({
'type': cms.CMSAttributeType('signature_time_stamp_token'),
'values': cms.SetOfContentInfo([
cms.ContentInfo({
'content_type': cms.ContentType('signed_data'),
'content': tsp_dict['content'],
})
])
})
]
datas['content']['signer_infos'][0]['unsigned_attrs'] = tsp_attrs
# TODO: OCSP stuff - probably not necessary since we have a DSS
#ocsp_seq = pdf.SequenceOfOCSPResponse((self._ocsp_response,))
#ocsp_arc = pdf.RevocationInfoArchival({'ocsp': ocsp_seq})
#revocation_info = pdf.SetOfRevocationInfoArchival()
#revocation_info.append(ocsp_arc)
#self._ocsp_response
#ocsp_attribute = cms.CMSAttribute({ # basic_ocsp_response
#'type': cms.CMSAttributeType('adobe_revocation_info_archival'),
#'values': pdf.SetOfRevocationInfoArchival([
#pdf.RevocationInfoArchival({
#'ocsp': pdf.SequenceOfOCSPResponse(self._ocsp_response)
#})#cert2asn(ocsp_resp.public_bytes(serialization.Encoding.DER), False)
#])
#}),
#datas['content']['signer_infos'][0]['unsigned_attrs'].append(ocsp_attribute)
return datas.dump()
def _document_timestamp_placeholder(self, pdf, session):
byterange_placeholder = b'/ByteRange[0 ********** ********** **********]'
tsp = b'<</Type /DocTimeStamp /Filter /Adobe.PPKLite /SubFilter /ETSI.RFC3161 /Contents <'
len_to_content = len(tsp)
tsp += b'0' * 16384 + b'>'
tsp += byterange_placeholder + b'>>'
tsp_number = pdf.write_new_object(tsp)
# Store the byte offset where the timestamp object starts
tsp_offset = pdf.new_objects_offsets[-1]
return tsp_number, tsp_offset, len_to_content
def _document_timestamp(self, pdf, session, tsp_offset, len_to_content):
from weasyprint.pdf import pdf_format
import os
byterange_string = '/ByteRange[0 {} {} {}]'
byterange = [0, 0, 0, 0]
fileobj = pdf.fileobj
fileobj.seek(tsp_offset)
next(fileobj) # Skip to object content line
        # len_to_content is the offset from the start of the object line to the Contents hex string
byterange[1] = fileobj.tell() + len_to_content - 1 # -1 to exclude the <
byterange[2] = byterange[1] + 16384 + 2
byterange[3] = fileobj.getbuffer().nbytes - byterange[2]
byterange_final = pdf_format(byterange_string, byterange[1], byterange[2], byterange[3])
byterange_final = byterange_final.ljust(46, b' ')
fileobj.seek(len_to_content + 16384 + 1, os.SEEK_CUR)
fileobj.write(byterange_final)
tsp_digest = self._hash(pdf, byterange)
r = session.get(self._timestamp_url.format(digest=tsp_digest.hex().upper()))
if r.status_code != 200:
raise APIError('Cannot retrieve the document timestamp: {}\n{}'.format(r.status_code, r.json()))
timestamp_token = r.json()['token']
timestamp_token = timestamp_token.encode('ascii')
timestamp_token = base64.b64decode(timestamp_token)
timestamp_token = pkcs11_aligned(timestamp_token) # ocsp_resp.public_bytes(serialization.Encoding.DER))
fileobj.seek(tsp_offset)
next(fileobj) # Skip to object content line
fileobj.seek(len_to_content, os.SEEK_CUR)
fileobj.write(timestamp_token.encode('ascii'))
def _hash(self, pdf, byterange):
buf = pdf.fileobj.getbuffer()
# Get the digest
hasher = hashlib.sha256()
hasher.update(buf[:byterange[1]])
hasher.update(buf[byterange[2]:])
# hasher maps a variable length bytestring to a fixed length bytestring
return hasher.digest()
|
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
"""
Correctly initialize the boundary variables left and right.
Only one rule: set up the boundary to include all possible
elements;
Decide return value. Is it return left or return left - 1?
Remember this: after exiting the while loop, left is the
minimal k satisfying the condition function;
Design the condition function. This is the most difficult
and most beautiful part. Needs lots of practice.
def binary_search(array) -> int:
def condition(value) -> bool:
pass
left, right = 0, len(array)
while left < right:
mid = left + (right - left) // 2
if condition(mid):
right = mid
else:
left = mid + 1
return left
"""
class Solution:
def firstBadVersion(self, n) -> int:
"""
:type n: int
:rtype: int
"""
left, right = 1, n
while left < right:
mid = left + (right - left) // 2
if isBadVersion(mid):
right = mid
else:
left = mid + 1
return left |
"""https://adventofcode.com/2020/day/15"""
import io
def part1(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
"""
Given your starting numbers, what will be the 2020th number spoken?
"""
return nth(stdin, stderr, 2020)
def part2(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
"""
Given your starting numbers, what will be the 30000000th number spoken?
"""
return nth(stdin, stderr, 30000000)
def nth(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper, end: int) -> int:
"""
Brute-force the nth number spoken given an input set of numbers.
"""
debug_interval = end // 100
numbers = parse(stdin)
number = next(reversed(numbers.keys()))
for turn in range(len(numbers), end):
(prev, last) = numbers.get(number)
if turn % debug_interval == 0:
stderr.write(
f"{turn}: {number} was previously spoken in turn {prev}"
)
if prev is None:
number = 0
else:
number = last - prev
if turn % debug_interval == 0:
stderr.write(f", so I say \"{number}\"\n")
numbers[number] = (numbers.get(number, (None, None))[1], turn)
return number
def parse(stdin: io.TextIOWrapper) -> dict:
    """
    Parse the input into a dict mapping each starting number to its
    (previously spoken turn, last spoken turn) pair.
    """
return {
int(v): (None, k)
for k, v in enumerate(stdin.read().strip().split(","))
}
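
# Minimal usage sketch: the puzzle's sample starting numbers "0,3,6" should give 436
# for part 1 (the published example answer); part 2 plays the same game out to
# 30,000,000 turns and takes noticeably longer.
if __name__ == "__main__":
    import sys
    print(part1(io.StringIO("0,3,6"), sys.stderr))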
|
"""
core/exceptions.py
~~~~~~~~~~~~~~~~~~
Common exception tools.
:copyright: (c) 2018 by {{cookiecutter.author}}.
"""
import sys
from collections import OrderedDict
from rest_framework.views import exception_handler as origin_exception_handler
def get_service_name(view):
"""Returns service name by view and stacktrace."""
service_name = '.'.join(
[view.__class__.__module__, view.__class__.__name__])
_, _, tb = sys.exc_info()
tb = getattr(tb, 'tb_next', tb)
lineno = getattr(tb, 'tb_lineno', '')
return ':'.join([service_name, str(lineno)])
def common_exception_handler(exc, context):
"""Add exception format with module and error name details."""
response = origin_exception_handler(exc, context)
if response is None:
return response
# Detail
if isinstance(response.data, dict):
detail = response.data.get('detail')
else:
detail = None
if not detail:
detail = response.data
if isinstance(detail, str):
detail = [detail]
# Result
response.data = OrderedDict([
('service_name', get_service_name(context.get('view'))),
('error_name', exc.__class__.__name__),
('detail', detail),
])
return response
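# Wiring sketch (assumption: this module lives at core/exceptions.py, as its header
# suggests): point DRF's EXCEPTION_HANDLER setting at common_exception_handler in
# the project settings:
#
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'core.exceptions.common_exception_handler',
#   }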
|
import os
import sys
import time
import logging
import signal
import json
import subprocess
import threading
from shutil import copyfile
from Queue import Queue
from Queue import Empty
logger = logging.getLogger(__name__)
class SnortUpdateThread(threading.Thread):
"""
threading.Thread to deal with writing to the snort file
"""
def __init__(self, queue, cb, rule_file, rule_template=None):
"""
"""
threading.Thread.__init__(self)
self._queue = queue
self.rule_file = rule_file
self.rule_template = rule_template
self.running = True
self.cb = cb
def run(self):
"""
"""
while self.running:
try:
rule = self._queue.get(timeout=1)
logger.info(str(rule))
self._process_rule(rule)
if self.cb is not None:
self.cb()
except Empty:
pass
except Exception as e:
print 'Exception processing rule: ' + e.message
print(type(e))
print(e)
def stop(self):
"""
"""
self.running = False
def _process_rule(self, rule):
# Process the rule queue
if rule['type'] == 'add':
self._add_rule(rule['rule'])
elif rule['type'] == 'delete':
self._delete_rule(rule['rule'])
elif rule['type'] == 'clear':
self._clear_rules()
def _check_rule(self, rule):
#TODO: Validate a rule && compare with current config file
pass
def _clear_rules(self):
# Clear all rules and replace with the template, or an empty file
# if the template does not exist.
logger.info('Clearing rules')
if self.rule_template is not None:
logger.info('Using the rule template ' + self.rule_template)
copyfile(self.rule_template, self.rule_file)
else:
with open(self.rule_file, 'w'):
pass
def _add_rule(self, rule):
# Append a literal Snort Rule to the rules file
logger.info('Adding a rule')
with open(self.rule_file, 'a') as f:
f.write(rule + '\n')
def _delete_rule(self, rule):
# Delete a literal Snort Rule from the rules file
lines = []
logger.info('Deleting a rule')
with open(self.rule_file, 'r') as f:
lines = f.readlines()
with open(self.rule_file, 'w') as f:
for line in lines:
if line != rule + '\n':
f.write(line)
class SnortDaemon(object):
"""
Instance of Snort
"""
def __init__(self, rule_file, interface='eth0', template=None):
"""
"""
self.rule_file = rule_file
self.rule_template = template
self.interface = interface
self._queue = Queue()
self._lock = threading.RLock()
def callback():
self.reload()
self._update_thread = SnortUpdateThread(self._queue,
callback,
rule_file,
rule_template=template)
self._update_thread.setDaemon(True)
def start(self):
"""
"""
with self._lock:
if not hasattr(self, '_proc'):
self._update_thread.start()
command = ['snort', '-i', self.interface, '-A', 'unsock', '-l', '/tmp', '-c', self.rule_file]
logger.info('Starting Snort')
f = open("snort.out", "w")
self._proc = subprocess.Popen(command, stdout=f, stderr=f)
def stop(self):
"""
"""
with self._lock:
if not hasattr(self, '_proc'):
return
try:
if self._proc.poll() is None:
self._proc.kill()
except Exception as e:
print 'Failed stopping snort process'
print(type(e))
print(e)
def _send_sighup(self):
"""
"""
self._proc.send_signal(signal.SIGHUP)
def reload(self):
"""
"""
logger.info("Reloading Snort")
self._send_sighup()
self.stop()
self.start()
def add_rule(self, rule):
"""
"""
self._queue.put({'type':'add', 'rule': rule})
def delete_rule(self, rule):
"""
"""
self._queue.put({'type':'delete', 'rule': rule})
def clear_rule(self):
"""
"""
self._queue.put({'type':'clear'})
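# Hypothetical usage sketch (not part of the original module): the rule-file
# path, interface, and rule text below are illustrative placeholders only.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    daemon = SnortDaemon('/etc/snort/rules/local.rules', interface='eth0')
    daemon.start()
    daemon.add_rule('alert icmp any any -> any any (msg:"ICMP seen"; sid:1000001; rev:1;)')
    time.sleep(5)
    daemon.stop()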
|
import torch
import io
import posixpath
class ClassificationSummary:
""" Simple class to keep track of summaries of a classification problem. """
def __init__(self, num_outcomes=2, device=None):
""" Initializes a new summary class with the given number of outcomes.
Parameters
----------
num_outcomes: the number of possible outcomes of the classification problem.
"""
self.recorded = torch.zeros(num_outcomes * num_outcomes, dtype=torch.int32, device=device)
self.num_outcomes = num_outcomes
@property
def prediction_matrix(self):
return self.recorded.view((self.num_outcomes, self.num_outcomes))
def record_statistics(self, labels, predictions):
""" Records statistics for a batch of predictions.
Parameters
----------
labels: an array of true labels in integer format. Each label must correspond to an
integer in 0 to num_outcomes - 1 inclusive.
predictions: an array of predicted labels. Must follow the same format as `labels`.
"""
# Flattened confusion-matrix index: row = prediction, column = label.
indices = (labels.int() + self.num_outcomes * predictions.int()).long()
self.recorded = self.recorded.scatter_add_(
0, indices, torch.ones_like(indices, dtype=torch.int32))
def reset_statistics(self):
""" Resets statistics recorded in this accumulator. """
self.recorded = torch.zeros_like(self.recorded)
def accuracy(self):
""" Compute the accuracy of the recorded problem. """
num_correct = self.prediction_matrix.diag().sum()
num_total = self.recorded.sum()
return num_correct.float() / num_total.float()
def confusion_matrix(self):
return self.prediction_matrix.float() / self.prediction_matrix.sum().float()
def cohen_kappa(self):
pm = self.prediction_matrix.float()
N = self.recorded.sum().float()
p_observed = pm.diag().sum() / N
p_expected = torch.dot(pm.sum(dim=0), pm.sum(dim=1)) / (N * N)
return 1 - (1 - p_observed) / (1 - p_expected)
def marginal_labels(self):
return self.prediction_matrix.sum(dim=0).float() / self.recorded.sum().float()
def marginal_predicted(self):
return self.prediction_matrix.sum(dim=1).float() / self.recorded.sum().float()
def write_tensorboard(self, writer, prefix="", **kwargs):
writer.add_scalar(posixpath.join(prefix, "kappa"), self.cohen_kappa(), **kwargs)
writer.add_scalar(posixpath.join(prefix, "accuracy"), self.accuracy(), **kwargs)
class ConditionalAccuracySummary:
def __init__(self, device=None):
self.device = device
self.count_correct = torch.tensor(0, dtype=torch.int32, device=self.device)
self.count_event = torch.tensor(0, dtype=torch.int32, device=self.device)
self.count_total = torch.tensor(0, dtype=torch.int32, device=self.device)
self.reset_statistics()
def reset_statistics(self):
self.count_correct = torch.tensor(0, dtype=torch.int32, device=self.device)
self.count_event = torch.tensor(0, dtype=torch.int32, device=self.device)
self.count_total = torch.tensor(0, dtype=torch.int32, device=self.device)
def accuracy(self):
return self.count_correct.float() / self.count_event.float()
def probability_event(self):
return self.count_event.float() / self.count_total.float()
def record_statistics(self, correct, mask):
self.count_event.add_(torch.sum(mask).int())
self.count_correct.add_(torch.sum(mask * correct).int())
self.count_total.add_(mask.shape[0])
def write_tensorboard(self, writer, prefix="", **kwargs):
writer.add_scalar(posixpath.join(prefix, "accuracy"), self.accuracy(), **kwargs)
writer.add_scalar(posixpath.join(prefix, "frequency"), self.probability_event(), **kwargs)
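# A minimal usage sketch (not part of the original module), assuming a binary
# classification problem with hand-picked labels and predictions:
if __name__ == "__main__":
    summary = ClassificationSummary(num_outcomes=2)
    labels = torch.tensor([0, 1, 1, 0])
    predictions = torch.tensor([0, 1, 0, 0])
    summary.record_statistics(labels, predictions)
    print(summary.accuracy())        # tensor(0.7500)
    print(summary.cohen_kappa())     # tensor(0.5000)
    print(summary.confusion_matrix())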
|
import random
import numpy as np
import py_progress_tracker as progress
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
@progress.track(
[
# Addition
{
"id": "x-plus-42-scalar",
"name": "x + 42 {Scalar}",
"parameters": {
"function": lambda x: x + 42,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 85,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-42-tensor-2x3",
"name": "x + 42 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x + 42,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 85,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-10-20-30-tensor-3",
"name": "x + [10, 20, 30] {Vector of Size 3}",
"parameters": {
"function": lambda x: x + np.array([10, 20, 30], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 97,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-10-20-30-tensor-2x3",
"name": "x + [10, 20, 30] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x + np.array([10, 20, 30], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 97,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-y-scalars",
"name": "x + y {Scalars}",
"parameters": {
"function": lambda x, y: x + y,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 27,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 100,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-y-tensor-2x3-and-scalar",
"name": "x + y {Tensor of Shape 2x3 and Scalar}",
"parameters": {
"function": lambda x, y: x + y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 27,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 100,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-y-tensors-2x3",
"name": "x + y {Tensors of Shape 2x3}",
"parameters": {
"function": lambda x, y: x + y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 27,
},
"y": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 100,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-plus-y-tensor-2x3-and-tensor-3",
"name": "x + y {Tensor of Shape 2x3 and Vector of Size 3}",
"parameters": {
"function": lambda x, y: x + y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 27,
},
"y": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 100,
},
},
"accuracy_alert_threshold": 100,
},
},
# Subtraction
{
"id": "x-minus-24-scalar",
"name": "x - 24 {Scalar}",
"parameters": {
"function": lambda x: x - 24,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 24,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "124-minus-x-scalar",
"name": "124 - x {Scalar}",
"parameters": {
"function": lambda x: 124 - x,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 124,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-24-tensor-2x3",
"name": "x - 24 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x - 24,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 24,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "124-minus-x-tensor-2x3",
"name": "124 - x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: 124 - x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 124,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-10-20-30-tensor-3",
"name": "x - [10, 20, 30] {Vector of Size 3}",
"parameters": {
"function": lambda x: x - np.array([10, 20, 30], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 30,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "100-90-80-minus-x-tensor-3",
"name": "[100, 90, 80] - x {Vector of Size 3}",
"parameters": {
"function": lambda x: np.array([100, 90, 80], dtype=np.uint8) - x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 80,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-10-20-30-tensor-2x3",
"name": "x - [10, 20, 30] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x - np.array([10, 20, 30], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 30,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "100-90-80-minus-x-tensor-2x3",
"name": "[100, 90, 80] - x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: np.array([100, 90, 80], dtype=np.uint8) - x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 80,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-y-scalars",
"name": "x - y {Scalars}",
"parameters": {
"function": lambda x, y: x - y,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 35,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 35,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-y-tensor-2x3-and-scalar",
"name": "x - y {Tensor of Shape 2x3 and Scalar}",
"parameters": {
"function": lambda x, y: x - y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 35,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 35,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-y-tensors-2x3",
"name": "x - y {Tensors of Shape 2x3}",
"parameters": {
"function": lambda x, y: x - y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 35,
"maximum": 127,
},
"y": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 35,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-minus-y-tensor-2x3-and-tensor-3",
"name": "x - y {Tensor of Shape 2x3 and Vector of Size 3}",
"parameters": {
"function": lambda x, y: x - y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 35,
"maximum": 127,
},
"y": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 35,
},
},
"accuracy_alert_threshold": 100,
},
},
# Multiplication
{
"id": "x-times-7-scalar",
"name": "x * 7 {Scalar}",
"parameters": {
"function": lambda x: x * 7,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 18,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-7-tensor-2x3",
"name": "x * 7 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x * 7,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 18,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-1-2-3-tensor-3",
"name": "x * [1, 2, 3] {Vector of Size 3}",
"parameters": {
"function": lambda x: x * np.array([1, 2, 3], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 42,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-1-2-3-tensor-2x3",
"name": "x * [1, 2, 3] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x * np.array([1, 2, 3], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 42,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-y-scalars",
"name": "x * y {Scalars}",
"parameters": {
"function": lambda x, y: x * y,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 5,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 25,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-y-tensor-and-scalar",
"name": "x * y {Tensor of Shape 2x3 and Scalar}",
"parameters": {
"function": lambda x, y: x * y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 5,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 25,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-y-tensor-and-scalar",
"name": "x * y {Tensors of Shape 2x3}",
"parameters": {
"function": lambda x, y: x * y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 5,
},
"y": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 25,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-times-y-tensor-and-scalar",
"name": "x * y {Tensor of Shape 2x3 and Vector of Size 3}",
"parameters": {
"function": lambda x, y: x * y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 5,
},
"y": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 25,
},
},
"accuracy_alert_threshold": 100,
},
},
# Floor Division
{
"id": "x-truediv-10-scalar",
"name": "x // 10 {Scalar}",
"parameters": {
"function": lambda x: x // 10,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "126-truediv-x-scalar",
"name": "126 // x {Scalar}",
"parameters": {
"function": lambda x: 126 // x,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 1,
"maximum": 126,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-10-tensor-2x3",
"name": "x // 10 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x // 10,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "126-truediv-x-tensor-2x3",
"name": "126 // x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: 126 // x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-5-10-15-tensor-3",
"name": "x // [5, 10, 15] {Vector of Size 3}",
"parameters": {
"function": lambda x: x // np.array([5, 10, 15], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "120-60-30-truediv-x-tensor-3",
"name": "[120, 60, 30] // x {Vector of Size 3}",
"parameters": {
"function": lambda x: np.array([120, 60, 30], dtype=np.uint8) // x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-5-10-15-tensor-2x3",
"name": "x // [5, 10, 15] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x // np.array([5, 10, 15], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "120-60-30-truediv-x-tensor-2x3",
"name": "[120, 60, 30] // x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: np.array([120, 60, 30], dtype=np.uint8) // x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-y-scalars",
"name": "x // y {Scalars}",
"parameters": {
"function": lambda x, y: x // y,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-y-tensor-2x3-and-scalar",
"name": "x // y {Tensor of Shape 2x3 and Scalar}",
"parameters": {
"function": lambda x, y: x // y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-y-tensors-2x3",
"name": "x // y {Tensors of Shape 2x3}",
"parameters": {
"function": lambda x, y: x // y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-truediv-y-tensor-2x3-and-tensor-3",
"name": "x // y {Tensor of Shape 2x3 and Vector of Size 3}",
"parameters": {
"function": lambda x, y: x // y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"shape": (3,),
"minimum": 1,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
# Dot Product
{
"id": "x-dot-2-3-1-tensor-3",
"name": "np.dot(x, [2, 3, 1]) {Vector of Size 3}",
"parameters": {
"function": lambda x: np.dot(x, np.array([2, 3, 1], dtype=np.uint8)),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 20,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "2-3-1-dot-x-tensor-3",
"name": "np.dot([2, 3, 1], x) {Vector of Size 3}",
"parameters": {
"function": lambda x: np.dot(np.array([2, 3, 1], dtype=np.uint8), x),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 20,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-dot-y-tensors-3",
"name": "np.dot(x, y) {Vectors of Size 3}",
"parameters": {
"function": lambda x, y: np.dot(x, y),
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 14,
},
"y": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 3,
},
},
"accuracy_alert_threshold": 100,
},
},
# Matrix Multiplication
{
"id": "x-matmul-c-tensor-2x3",
"name": "x @ [[1, 3], [3, 2], [2, 1]] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x @ np.array([[1, 3], [3, 2], [2, 1]], dtype=np.uint8),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 20,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "c-matmul-x-tensor-2x3",
"name": "[[1, 3], [3, 2], [2, 1]] @ x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: np.array([[1, 3], [3, 2], [2, 1]], dtype=np.uint8) @ x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 25,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-matmul-y-tensor-2x3-and-tensor-3x2",
"name": "x @ y {Tensor of Shape 2x3 and Tensor of Shape 3x2}",
"parameters": {
"function": lambda x, y: x @ y,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 15,
},
"y": {
"type": "encrypted",
"shape": (3, 2),
"minimum": 0,
"maximum": 4,
},
},
"accuracy_alert_threshold": 100,
},
},
# Negation
{
"id": "negative-x-plus-127-scalar",
"name": "-x + 127 {Scalar}",
"parameters": {
"function": lambda x: -x + 127,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "negative-x-plus-127-tensor-2x3",
"name": "-x + 127 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: -x + 127,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
# Power
{
"id": "x-to-the-power-of-2-scalar",
"name": "x ** 2 {Scalar}",
"parameters": {
"function": lambda x: x ** 2,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 11,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "2-to-the-power-of-x-scalar",
"name": "2 ** x {Scalar}",
"parameters": {
"function": lambda x: 2 ** x,
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 6,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-to-the-power-of-2-tensor-2x3",
"name": "x ** 2 {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x ** 2,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 11,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "2-to-the-power-of-x-tensor-2x3",
"name": "2 ** x {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: 2 ** x,
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 6,
},
},
"accuracy_alert_threshold": 100,
},
},
# Direct Table Lookup
{
"id": "single-table-lookup-5-bit-scalar",
"name": "Single Table Lookup (5-Bit) {Scalar}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 5) % 32 for i in range(32)])[x],
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 31,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "single-table-lookup-5-bit-tensor-2x3",
"name": "Single Table Lookup (5-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 5) % 32 for i in range(32)])[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 31,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "multi-table-lookup-5-bit-tensor-2x3",
"name": "Multi Table Lookup (5-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.MultiLookupTable(
[
[
hnp.LookupTable([((i ** 5) + 2) % 32 for i in range(32)]),
hnp.LookupTable([((i ** 5) * 3) % 32 for i in range(32)]),
hnp.LookupTable([((i ** 5) // 6) % 32 for i in range(32)]),
],
[
hnp.LookupTable([((i ** 5) // 2) % 32 for i in range(32)]),
hnp.LookupTable([((i ** 5) + 5) % 32 for i in range(32)]),
hnp.LookupTable([((i ** 5) * 4) % 32 for i in range(32)]),
],
]
)[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 31,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "single-table-lookup-6-bit-scalar",
"name": "Single Table Lookup (6-Bit) {Scalar}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 6) % 64 for i in range(64)])[x],
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 63,
},
},
"accuracy_alert_threshold": 99,
},
},
{
"id": "single-table-lookup-6-bit-tensor-2x3",
"name": "Single Table Lookup (6-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 6) % 64 for i in range(64)])[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 63,
},
},
"accuracy_alert_threshold": 99,
},
},
{
"id": "multi-table-lookup-6-bit-tensor-2x3",
"name": "Multi Table Lookup (6-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.MultiLookupTable(
[
[
hnp.LookupTable([((i ** 6) + 2) % 64 for i in range(64)]),
hnp.LookupTable([((i ** 6) * 3) % 64 for i in range(64)]),
hnp.LookupTable([((i ** 6) // 6) % 64 for i in range(64)]),
],
[
hnp.LookupTable([((i ** 6) // 2) % 64 for i in range(64)]),
hnp.LookupTable([((i ** 6) + 5) % 64 for i in range(64)]),
hnp.LookupTable([((i ** 6) * 4) % 64 for i in range(64)]),
],
]
)[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 63,
},
},
"accuracy_alert_threshold": 99,
},
},
{
"id": "single-table-lookup-7-bit-scalar",
"name": "Single Table Lookup (7-Bit) {Scalar}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 7) % 128 for i in range(128)])[x],
"inputs": {
"x": {
"type": "encrypted",
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 95,
},
},
{
"id": "single-table-lookup-7-bit-tensor-2x3",
"name": "Single Table Lookup (7-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.LookupTable([(i ** 7) % 128 for i in range(128)])[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 95,
},
},
{
"id": "multi-table-lookup-7-bit-tensor-2x3",
"name": "Multi Table Lookup (7-Bit) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: hnp.MultiLookupTable(
[
[
hnp.LookupTable([((i ** 7) + 2) % 128 for i in range(128)]),
hnp.LookupTable([((i ** 7) * 3) % 128 for i in range(128)]),
hnp.LookupTable([((i ** 7) // 6) % 128 for i in range(128)]),
],
[
hnp.LookupTable([((i ** 7) // 2) % 128 for i in range(128)]),
hnp.LookupTable([((i ** 7) + 5) % 128 for i in range(128)]),
hnp.LookupTable([((i ** 7) * 4) % 128 for i in range(128)]),
],
]
)[x],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 95,
},
},
# Manipulation
{
"id": "transpose-tensor-2x3",
"name": "np.transpose(x) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: np.transpose(x),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "reshape-to-1-3-1-2-1-tensor-2x3",
"name": "np.reshape(x, (1, 3, 1, 2, 1)) {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: np.reshape(x, (1, 3, 1, 2, 1)),
"inputs": {
"x": {
"type": "encrypted",
"shape": (1, 3, 1, 2, 1),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "flatten-tensor-2x3",
"name": "x.flatten() {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x.flatten(),
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
# Indexing
{
"id": "x-index-0-tensor-3",
"name": "x[0] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[0],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-1-tensor-3",
"name": "x[1] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-2-tensor-3",
"name": "x[2] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[2],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-minus-1-tensor-3",
"name": "x[-1] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[-1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-minus-2-tensor-3",
"name": "x[-2] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[-2],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-minus-3-tensor-3",
"name": "x[-3] {Vector of Size 3}",
"parameters": {
"function": lambda x: x[-3],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-0-and-0-tensor-2x3",
"name": "x[0, 0] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x[0, 0],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-minus-1-and-minus-1-tensor-2x3",
"name": "x[-1, -1] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x[-1, -1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-0-tensor-2x3",
"name": "x[0] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x[0],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-minus-1-tensor-2x3",
"name": "x[-1] {Tensor of Shape 2x3}",
"parameters": {
"function": lambda x: x[-1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (2, 3),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-y-tensor-5-and-scalar",
"name": "x[y] {Vector of Size 5 and Scalar}",
"parameters": {
"function": lambda x, y: x[y],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 4,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-y-and-z-tensor-5-and-scalars",
"name": "x[y] {Tensor of Shape 5x3 and Scalars}",
"parameters": {
"function": lambda x, y, z: x[y, z],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
"y": {
"type": "encrypted",
"minimum": 0,
"maximum": 4,
},
"z": {
"type": "encrypted",
"minimum": 0,
"maximum": 2,
},
},
"accuracy_alert_threshold": 100,
},
},
# Slicing
{
"id": "x-reversed-tensor-5",
"name": "x[::-1] {Vector of Size 5}",
"parameters": {
"function": lambda x: x[::-1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-colon-tensor-5",
"name": "x[:] {Vector of Size 5}",
"parameters": {
"function": lambda x: x[:],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-2-colon-tensor-5",
"name": "x[2:] {Vector of Size 5}",
"parameters": {
"function": lambda x: x[2:],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-colon-3-tensor-5",
"name": "x[:3] {Vector of Size 5}",
"parameters": {
"function": lambda x: x[:3],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-1-colon-3-tensor-5",
"name": "x[1:3] {Vector of Size 5}",
"parameters": {
"function": lambda x: x[1:3],
"inputs": {
"x": {
"type": "encrypted",
"shape": (5,),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-colon-and-1-tensor-3x2",
"name": "x[:, 1] {Tensor of Shape 3x2}",
"parameters": {
"function": lambda x: x[:, 1],
"inputs": {
"x": {
"type": "encrypted",
"shape": (3, 2),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
{
"id": "x-index-1-colon-3-and-1-colon-3-tensor-4x4",
"name": "x[1:3, 1:3] {Tensor of Shape 4x4}",
"parameters": {
"function": lambda x: x[1:3, 1:3],
"inputs": {
"x": {
"type": "encrypted",
"shape": (4, 4),
"minimum": 0,
"maximum": 127,
},
},
"accuracy_alert_threshold": 100,
},
},
]
)
def main(function, inputs, accuracy_alert_threshold):
inputset = []
for _ in range(128):
input_ = []
for description in inputs.values():
minimum = description["minimum"]
maximum = description["maximum"]
assert minimum >= 0
assert maximum <= 127
if "shape" in description:
shape = description["shape"]
input_.append(np.random.randint(minimum, maximum + 1, size=shape, dtype=np.uint8))
else:
input_.append(random.randint(minimum, maximum))
inputset.append(tuple(input_) if len(input_) > 1 else input_[0])
compiler = hnp.NPFHECompiler(
function,
{name: description["type"] for name, description in inputs.items()},
compilation_configuration=BENCHMARK_CONFIGURATION,
)
circuit = compiler.compile_on_inputset(inputset)
samples = []
expectations = []
for _ in range(128):
sample = []
for description in inputs.values():
minimum = description["minimum"]
maximum = description["maximum"]
assert minimum >= 0
assert maximum <= 127
if "shape" in description:
shape = description["shape"]
sample.append(np.random.randint(minimum, maximum + 1, size=shape, dtype=np.uint8))
else:
sample.append(random.randint(minimum, maximum))
samples.append(sample)
expectations.append(function(*sample))
correct = 0
for sample_i, expectation_i in zip(samples, expectations):
with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
result_i = circuit.encrypt_run_decrypt(*sample_i)
np_result_i = np.array(result_i, dtype=np.uint8)
np_expectation_i = np.array(expectation_i, dtype=np.uint8)
if np_result_i.shape == np_expectation_i.shape:
correct += np.sum(np_result_i == np_expectation_i) / np_result_i.size
accuracy = (correct / len(samples)) * 100
print(f"Accuracy (%): {accuracy:.4f}")
progress.measure(
id="accuracy-percent",
label="Accuracy (%)",
value=accuracy,
alert=("<", accuracy_alert_threshold),
)
|
import numpy as np
from skimage.transform import SimilarityTransform
from skimage.transform import AffineTransform
# from skimage.transform import warp
from skimage.transform._warps_cy import _warp_fast
def warp(img, tf, output_shape, mode='constant', order=0):
"""
This wrapper function is faster than skimage.transform.warp
"""
m = tf.params
img = img.transpose(2, 0, 1)
t_img = np.zeros(img.shape, img.dtype)
for i in range(t_img.shape[0]):
t_img[i] = _warp_fast(img[i], m, output_shape=output_shape[:2],
mode=mode, order=order)
t_img = t_img.transpose(1, 2, 0)
return t_img
def augment_color(img, ev, u, sigma=0.1, color_vec=None):
"""
https://github.com/sveitser/kaggle_diabetic/blob/master/data.py
"""
if color_vec is None:
if not sigma > 0.0:
color_vec = np.zeros(3, dtype=np.float32)
else:
color_vec = np.random.normal(0.0, sigma, 3)
alpha = color_vec.astype(np.float32) * ev
noise = np.dot(u, alpha.T)
return img + noise[:, np.newaxis, np.newaxis]
def im_affine_transform(img, scale, rotation, shear,
translation_y, translation_x, return_tform=False):
# Assumed img in c01. Convert to 01c for skimage
img = img.transpose(1, 2, 0)
# Normalize so that the param acts more like im_rotate, im_translate etc
scale = 1 / scale
translation_x = - translation_x
translation_y = - translation_y
# shift to center first so that image is rotated around center
center_shift = np.array((img.shape[0], img.shape[1])) / 2. - 0.5
tform_center = SimilarityTransform(translation=-center_shift)
tform_uncenter = SimilarityTransform(translation=center_shift)
rotation = np.deg2rad(rotation)
tform = AffineTransform(scale=(scale, scale), rotation=rotation,
shear=shear,
translation=(translation_x, translation_y))
tform = tform_center + tform + tform_uncenter
warped_img = warp(img, tform, output_shape=img.shape)
# Convert back from 01c to c01
warped_img = warped_img.transpose(2, 0, 1)
warped_img = warped_img.astype(img.dtype)
if return_tform:
return warped_img, tform
else:
return warped_img
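# Illustrative usage sketch (not part of the original module): augment a random
# c01 image; the shape and parameter values below are arbitrary placeholders.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    image_c01 = rng.rand(3, 64, 64).astype(np.float32)
    augmented = im_affine_transform(image_c01, scale=1.1, rotation=15,
                                    shear=0.0, translation_y=2, translation_x=-3)
    print(augmented.shape)  # (3, 64, 64)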
|
from __future__ import division, print_function
from .hygroup import HyGroup
class TrajectoryGroup(HyGroup):
"""
Class for processing and plotting multiple ``Trajectory`` instances.
:subclass: of ``HyGroup``.
"""
def __init__(self, trajectories):
"""
Initialize ``TrajectoryGroup`` object.
Parameters
----------
trajectories : list of ``Trajectory`` instances
``Trajectory`` instances that belong in the group.
"""
HyGroup.__init__(self, trajectories)
def __getitem__(self, index):
"""
Get ``Trajectory`` or ``TrajectoryGroup``.
Parameters
----------
index : int or slice
Returns
-------
``Trajectory`` or ``TrajectoryGroup`` depending if indexed
or sliced. Won't return a ``Cluster`` because those are
specially defined.
"""
newthing = self.trajectories[index]
if isinstance(newthing, list):
newthing = TrajectoryGroup(newthing)
return newthing
def __add__(self, other):
"""
Add a ``HyGroup`` to this ``TrajectoryGroup`` instance.
Parameters
----------
other : ``HyGroup``
Another ``TrajectoryGroup`` or ``Cluster``. May or may not
contain some of the same ``Trajectory`` instances
Returns
-------
A new ``TrajectoryGroup`` containing the union of the sets
of ``Trajectory`` instances.
"""
return TrajectoryGroup(HyGroup.__add__(self, other))
def __sub__(self, other):
"""
Subtract a ``HyGroup`` from this ``TrajectoryGroup`` instance.
Parameters
----------
other : ``HyGroup``
Another ``TrajectoryGroup`` or ``Cluster``
Returns
-------
A new ``TrajectoryGroup`` containing the set difference between
the sets of ``Trajectory`` instances.
"""
return TrajectoryGroup(HyGroup.__sub__(self, other))
def pop(self, ind=-1, trajid=None):
"""
Remove Trajectory object(s) from self.
Shortcut to self.trajectories.pop() that updates the
self.trajcount and the list of trajids.
Parameters
----------
ind : int
The positional argument of the ``Trajectory``
to remove.
trajid : string or list of strings
The identifier(s) of the ``Trajectory`` object(s)
to remove from ``self``. Overrides ``ind`` if not None.
Returns
-------
popped : ``Trajectory`` or ``TrajectoryGroup``
A``Trajectory`` or ``TrajectoryGroup`` consisting of the
trajectory or trajectories indicated by ``ind`` or ``trajid``.
"""
if trajid is not None:
try:
to_pop = [self.trajids.index(trajid)]
except ValueError:
to_pop = [self.trajids.index(t) for t in trajid
if t in self.trajids]
if len(to_pop) == 0:
raise ValueError('TrajIDs not in list of self.trajids')
to_pop.sort()
popped = []
for p in to_pop[::-1]:
popped.append(self.trajectories.pop(p))
self.trajids.pop(p)
self.trajcount = len(self.trajectories)
if len(popped) == 1:
popped = popped[0]
else:
popped = TrajectoryGroup(popped)
else:
popped = self.trajectories.pop(ind)
self.trajids.pop(ind)
self.trajcount = len(self.trajectories)
return popped
def append(self, traj):
"""
Add a ``Trajectory`` to the ``self``.
Parameters
----------
traj : ``Trajectory`` instance
The ``Trajectory`` to add to the end of ``self``.
"""
if hasattr(traj, 'trajid'):
self.trajectories.append(traj)
self.trajids.append(traj.trajid)
self.trajcount = len(self.trajectories)
|
class GridElement(object):
"""
An element in a grid.
"""
def __init__(self, id, left, top, right, bottom):
"""
:param id: The identifier of this element.
:type id: int
:param left: The leftmost cell this element lies in.
:type left: int
:param top: The topmost cell this element lies in.
:type top: int
:param right: The rightmost cell this element lies in.
:type right: int
:param bottom: The bottommost cell this element lies in.
:type bottom: int
"""
self._id = id
self.left = left
self.top = top
self.right = right
self.bottom = bottom
@property
def id(self):
"""
:rtype: int
"""
return self._id
@property
def cell_width(self):
"""
:rtype: int
"""
return self.right - self.left + 1
@property
def cell_height(self):
"""
:rtype: int
"""
return self.bottom - self.top + 1
def __repr__(self):
return "GridElement {} ({},{},{},{})".format(self._id, self.left, self.top, self.right, self.bottom)
def is_cell_in(self, cell):
"""
Checks if a given cell lies inside in this grid element.
:param cell: The cell to check for.
:type cell: tuple[int, int]
:return: Is the given cell in this element?
:rtype: bool
"""
return self.left <= cell[0] <= self.right and self.top <= cell[1] <= self.bottom
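# Illustrative check (not part of the original module):
if __name__ == "__main__":
    element = GridElement(1, left=2, top=0, right=4, bottom=1)
    print(element.cell_width, element.cell_height)  # 3 2
    print(element.is_cell_in((3, 1)))               # True
    print(element.is_cell_in((5, 0)))               # False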
|
#!/usr/bin/env python3
import argparse
import os
import psutil
import queue
import requests
import threading
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-t', '--target', type=str, dest='target', default='http://localhost:8000',
help='your target scheme and host')
parser.add_argument('-d', '--directory', type=str, dest='dir', default='/tmp',
help='local directory to use')
parser.add_argument('-w', '--workers', type=int, dest='threads', default=psutil.cpu_count(),
help='number of worker threads to spawn, default is number of cores in system')
parser.add_argument('-f', '--filter', type=str, nargs='+', dest='filters',
default=['.png', '.gif', '.jpg', '.jpeg', '.css'], help='file suffixes to filter out')
parser.description = '''
This script will brute force a remote machine over http to discover directories by walking your local
directory to build a list of paths to check, then spawn a pool of threads to check them on the target.
example: you suspect the target is running Joomla - you download and extract Joomla, then run this script
from inside the local Joomla dir, and every file that is included by default will be tried on the target.
You can take that a step further and use mkdir and touch to create local files and dirs you suspect may exist.
maybe use find and touch to create munged names before you run this tool to check if the paths exist on the target.
'''
args = parser.parse_args()
print(f'changing dir to {args.dir}')
os.chdir(args.dir)
rel_urls = queue.Queue()
for rootdir, dirs, nondirs in os.walk('.'):
for files in nondirs:
rpath = f'{rootdir}/{files}'
if rpath.startswith('.'):
rpath = rpath[1:]
# skip all the extensions in our filter
if os.path.splitext(files)[1] not in args.filters:
# /includes/leading/slash
rel_urls.put_nowait(rpath)
def test_remote():
sess = requests.session()
while not rel_urls.empty():
rel_url = rel_urls.get()
url = f'{args.target}{rel_url}'
try:
resp = sess.get(url)
print(f'{resp.status_code} => {rel_url}')
except Exception as ex:
print(f'caught exception: {ex.__class__.__name__} - {ex}')
# mostly just catches connection errors to suppress them
pass
for tid in range(args.threads):
print(f'spawning thread: {tid}')
t = threading.Thread(target=test_remote, name=f'worker-{tid}')
t.start()
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
import datetime as dt
from .models import Image, Location
def index(request):
images = Image.all_pics()
locations = Location.all_locations()
return render(request, 'index.html', {"locations": locations, "images": images})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
category = request.GET.get("image")
searched_images = Image.search_category(category)
message = f"{category}"
print(searched_images)
return render(request, 'search.html',{"message":message,"images": searched_images})
else:
message = "Kindly Search a different Category"
return render(request, 'search.html',{"message":message})
def location_captured(request, location):
pics = Image.images_by_location(location)
print(pics)
return render(request, 'location.html', {'location_pics': pics}) |
%matplotlib inline
from random import *
import numpy as np
import seaborn as sns
import pandas as pd
import os
from numpy.linalg import svd
from fbpca import pca
import time
from sklearn.preprocessing import scale, MinMaxScaler
from sklearn.decomposition import RandomizedPCA
from scipy.optimize import curve_fit
from pylab import *
from math import atan2
from scipy.stats import linregress
import itertools
from statsmodels.nonparametric.smoothers_lowess import lowess
from sklearn.metrics import roc_curve, auc
# imports needed by later cells (gaussian_kde and KernelDensity phase plots)
from scipy import stats
from sklearn.neighbors import KernelDensity
def get_res(arr,l):
m = {}
for v in set(l):
indices = [i for i, x in enumerate(l) if x == v]
m[v] = np.mean(arr[:,indices],axis=1)
ma = np.zeros(np.shape(arr))
for i in range(len(l)):
ma[:,i]=m[l[i]]
return np.subtract(arr,ma)
def get_tks(resarr):
pca = RandomizedPCA()
pca.fit(resarr)
return pca.explained_variance_ratio_
def perm_test(resarr,l,n,tks):
rstar = np.copy(resarr)
out = np.zeros(len(tks))
for j in range(n):
for i in range(rstar.shape[0]):
np.random.shuffle(rstar[i,:])
resstar = get_res(rstar,l)
tkstar = get_tks(resstar)
#tkstar = get_tks(rstar)
for m in range(len(tks)):
if tkstar[m] > tks[m]:
out[m] += 1
return out/n
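# get_res removes the per-class mean from each row, get_tks computes the
# fraction of variance explained by each principal component of the residuals,
# and perm_test estimates how often a row-permuted residual matrix yields a
# larger variance fraction, giving a permutation p-value for each component.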
exp_class = [j for i in range(0,12) for j in [i]*3]*2
classes = exp_class
len(classes)
classes
circ = [randint(0,1) for b in range(1,10001)]
circ
base = np.arange(0,(4*np.pi),(4*np.pi/24))
sim = []
phases = []
for i in circ:
if i == 1:
temp=[]
p = randint(0,1)
phases.append(p)
temp.append(np.sin(base+np.random.normal(0,0.4,1)+np.pi*p)+np.random.normal(0,1.75,24))
temp.append(np.sin(base+np.random.normal(0,0.4,1)+np.pi*p)+np.random.normal(0,1.75,24))
temp.append(np.sin(base+np.random.normal(0,0.4,1)+np.pi*p)+np.random.normal(0,1.75,24))
temp2 = []
for i in range(len(temp[0])):
temp2.append(temp[0][i])
temp2.append(temp[1][i])
temp2.append(temp[2][i])
sim.append(temp2)
else:
phases.append('nan')
sim.append(np.random.normal(0,1,72))
np.shape(sim)
simdf = pd.DataFrame(sim,columns=csp_norm.columns.values)
simdf.index.names = ['#']
simnoise = []
trend1list = []
#trend2list = []
trend3list = []
for i in sim:
temp = []
t1 = randint(0,1)
#t2 = randint(0,1)
t3 = randint(0,1)
trend1list.append(t1)
#trend2list.append(t2)
trend3list.append(t3)
for j in range(len(i)):
trend = [i*t1 for i in ([0,0,3]*24)]
#trend2 = [k*t2*.5 for k in ([0]*63 + [3]*9)]
trend3 = [i*t3 for i in ([0,3,0]*12) + [0,0,0]*12]
temp.append(i[j]+trend[j]+trend3[j])
#temp.append(i[j]+trend[j])
simnoise.append(temp)
simndf = pd.DataFrame(simnoise,columns=csp_norm.columns.values)
simndf.index.names = ['#']
simdf.to_csv('simulated_data_baseline.txt',sep='\t')
simndf.to_csv('simulated_data_with_noise.txt',sep='\t')
k = pd.concat([pd.Series(circ),pd.Series(phases),pd.Series(trend1list),pd.Series(trend3list)],axis=1)
#k = pd.concat([pd.Series(circ),pd.Series(phases),pd.Series(trend1list)],axis=1)
k.columns = ['circ','phase','trend1','trend3']
#k.columns = ['circ','phase','trend1']
k.to_csv('simulated_data_key.txt',sep='\t')
#simdf= pd.read_csv('simulated_data_baseline.txt',sep='\t')
#simdf = simdf.set_index('#')
sns.clustermap(simdf.head(n=100),col_cluster=False)
plt.savefig('simulated_data.pdf')
sns.clustermap(simndf.head(n=100),col_cluster=False)
plt.savefig('simulated_noised_data.pdf')
len(tpoints)
cors = prim_cor(simndf.values,tpoints,12)
cors = pd.Series(cors,index=simndf.index)
merged = pd.concat([cors, k['circ'].astype('bool')], axis=1,join='inner')
merged.columns = ['cors','circ']
sns.violinplot(x="circ", y="cors", data=merged)
plt.title('Correlation in Simulated Data with Noise')
plt.savefig('primary_corellations_simulated_noised.pdf')
cors = prim_cor(simdf.values,tpoints,12)
cors = pd.Series(cors,index=simdf.index)
merged = pd.concat([cors, k['circ'].astype('bool')], axis=1,join='inner')
merged.columns = ['cors','circ']
sns.violinplot(x="circ", y="cors", data=merged)
plt.title('Correlation in Simulated Data without Noise')
plt.savefig('primary_corellations_simulated.pdf')
simj = pd.read_csv('simulated_data_baseline__jtkout_GammaP.txt',sep='\t')
simj = simj.set_index('ID')
sns.distplot(simj['GammaP'])
merged = []
merged = pd.concat([simj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
plt.title('Initial Data')
plt.savefig('simulated_classification.pdf')
merged['circ'].values
-merged['p'].values
fpr, tpr, _ = roc_curve(merged['circ'].values, -merged['p'].values)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve initial data (area = %0.2f)' % roc_auc)
plt.plot(fprdn, tprdn, label='ROC curve denoised Lowess (area = %0.2f)' % roc_aucdn)
plt.plot(fprdncr, tprdncr, label='ROC curve denoised circadian replicate (area = %0.2f)' % roc_aucdncr)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('ROC_curves.pdf')
simnj = pd.read_csv('simulated_data_with_noise__jtkout_GammaP.txt',sep='\t')
simnj = simnj.set_index('ID')
sns.distplot(simnj['GammaP'])
merged = pd.concat([simnj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
plt.title('Data with Noise')
plt.savefig('simulated_classification_noised.pdf')
fprn, tprn, _ = roc_curve(merged['circ'].astype('bool').values, -merged['p'].values)
roc_aucn = auc(fprn, tprn)
cors = prim_cor(simndf.values,tpoints,12)
uncor = [(i<(np.percentile(cors,25))) for i in cors]
simndf_reduced = simndf[uncor]
def get_res(arr,l):
res = []
for row in arr:
ys = lowess(row, l,delta=4)[:,1]
res.append(row - ys)
return res
res = get_res(simndf_reduced.values,tpoints)
tks_ = get_tks(res)
np.sum(tks_)
start = time.time()
sigs = perm_test(res,tpoints,100,tks_)
end = time.time()
print(end - start)
sigs
ps = eig_reg(simndf_reduced.values,res,sigs,.05)
ts = subset_svd(simndf_reduced.values,ps,0.5)
fin_res = np.dot(linalg.lstsq(np.asarray(ts).T,simndf.values.T)[0].T,np.asarray(ts))
svd_norm = simndf.values - fin_res
svd_norm = pd.DataFrame(svd_norm,index=simndf.index,columns=simndf.columns)
svd_norm = pd.DataFrame(scale(svd_norm.values,axis=1),columns=svd_norm.columns,index=svd_norm.index)
svd_norm.index.names = ['#']
svd_norm.to_csv('simulated_data_denoised.txt',sep='\t')
svd_norm = pd.read_csv('simulated_data_denoised.txt',sep='\t')
svd_norm = svd_norm.set_index('#')
sns.clustermap(svd_norm.head(n=100),col_cluster=False)
plt.savefig('simulated_denoised_data.pdf')
simdnj = pd.read_csv('simulated_data_denoised__jtkout_GammaP.txt',sep='\t')
simdnj = simdnj.set_index('ID')
sns.distplot(simdnj['GammaP'])
merged = pd.concat([simdnj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
plt.title('Lowess Denoised')
plt.savefig('simulated_classification_denoised.pdf')
fprdn, tprdn, _ = roc_curve(merged['circ'].astype('bool').values, -merged['p'].values)
roc_aucdn = auc(fprdn, tprdn)
svd_norm = normalize(simndf.values,res,sigs,0.05)
svd_norm = pd.DataFrame(svd_norm,index=simndf.index,columns=simndf.columns)
svd_norm.index.names = ['#']
svd_norm.to_csv('simulated_denoised_circ_rep.txt',sep='\t')
sns.clustermap(svd_norm.head(n=100),col_cluster=False)
simdnj = pd.read_csv('simulated_denoised_circ_rep__jtkout_GammaP.txt',sep='\t')
simdnj = simdnj.set_index('ID')
sns.distplot(simdnj['GammaP'])
merged = pd.concat([simdnj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
exp_class = [j for i in range(0,24) for j in [i]*3]
classes = exp_class
len(classes)
classes
def get_res(arr,l):
m = {}
for v in set(l):
indices = [i for i, x in enumerate(l) if x == v]
m[v] = np.mean(arr[:,indices],axis=1)
ma = np.zeros(np.shape(arr))
for i in range(len(l)):
ma[:,i]=m[l[i]]
return np.subtract(arr,ma)
res = get_res(simndf.values,classes)
tks_ = get_tks(res)
np.sum(tks_)
start = time.time()
sigs = perm_test(res,classes,100,tks_)
end = time.time()
print(end - start)
sigs
svd_norm = normalize(simndf.values,res,sigs,0.05)
svd_norm = pd.DataFrame(svd_norm,index=simndf.index,columns=simndf.columns)
svd_norm.index.names = ['#']
svd_norm.to_csv('simulated_data_denoised_rep.txt',sep='\t')
simdnjcr = pd.read_csv('simulated_data_denoised_circ_rep__jtkout_GammaP.txt',sep='\t')
simdnjcr = simdnjcr.set_index('ID')
sns.distplot(simdnjcr['GammaP'])
merged = pd.concat([simdnjcr['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
plt.title('Circadian Replicate Denoised')
plt.savefig('simulated_classification_denoised_circrep.pdf')
fprdncr, tprdncr, _ = roc_curve(merged['circ'].astype('bool').values, -merged['p'].values)
roc_aucdncr = auc(fprdncr, tprdncr)
simdnjr = pd.read_csv('simulated_data_denoised_rep__jtkout_GammaP.txt',sep='\t')
simdnjr = simdnjr.set_index('ID')
sns.distplot(simdnjr['GammaP'])
merged = pd.concat([simdnjr['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
fprdnr, tprdnr, _ = roc_curve(~merged['circ'].astype('bool').values, merged['p'].values)
roc_aucdnr = auc(fprdnr, tprdnr)
sns.clustermap(svd_norm.head(n=100),col_cluster=False)
simdnj2 = pd.read_csv('simulated_denoised_rep__jtkout_GammaP.txt',sep='\t')
simdnj2 = simdnj2.set_index('ID')
sns.distplot(simdnj2['GammaP'])
merged = pd.concat([simdnj2['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
merged = pd.concat([simdnjcr['Phase'], k['circ'],k['phase']], axis=1,join='inner')
merged.columns = ['Phase_pred','circ','Phase_real']
sns.jointplot(x='Phase_pred',y='Phase_real',data=merged[merged['circ']==1],kind='kde')
merged = pd.concat([simdnj['Phase'], k['circ'],k['phase']], axis=1,join='inner')
merged.columns = ['Phase_pred','circ','Phase_real']
phasediff = simj['Phase'] - simdnj['Phase']
theta = np.linspace(0,2*np.pi,145)
theta_matrix = np.meshgrid(theta)[0].tolist()
transformed = [(i % 23)*np.pi*2/23 for i in phasediff.values.tolist()]
transformed = transformed + [(i - (2*np.pi)) for i in transformed] + [(i + (2*np.pi)) for i in transformed]
kernel = stats.gaussian_kde(transformed + [(i + (2*np.pi)) for i in transformed])
Z = kernel(theta_matrix)
kde = KernelDensity(kernel='gaussian',bandwidth=0.25).fit(np.asarray(transformed).reshape(-1,1))
Z = kde.score_samples(np.asarray(theta_matrix).reshape(-1,1))
Z = scale(Z)
Z = Z - np.min(Z)
min_max_scaler = MinMaxScaler()
Z = min_max_scaler.fit_transform(Z.reshape(-1,1))
plt.figure()
ax = plt.axes(polar=True)
xtickslbls = ['0', '2.875','5.75','8.625','11.5','14.375','17.25','20.125']
ax.set_xticklabels(xtickslbls)
ax.set_theta_direction(-1)
ax.set_theta_offset(np.pi/2.0)
ax.set_rlabel_position(70)
CS = plt.plot(np.linspace(0,2*np.pi,145), Z, c= 'b')
plt.savefig('simulated_phasediffs.pdf')
sns.jointplot(x='Phase_pred',y='Phase_real',data=merged[merged['circ']==1],kind='kde')
merged = pd.concat([simj['Phase'], k['circ'],k['phase']], axis=1,join='inner')
merged.columns = ['Phase_pred','circ','Phase_real']
sns.jointplot(x='Phase_pred',y='Phase_real',data=merged[merged['circ']==1],kind='kde')
plt.savefig('initial_phases.pdf')
def get_res(arr,l):
res = []
for row in arr:
def harm(x, p1,p2,p3):
return p1*np.cos(2*np.pi*p2*x + 2*np.pi*p3)
amplitude = row.max() - row.min()
popt, pcov = curve_fit(harm, l, row, p0=(amplitude,.043478261,0), bounds=([0,.043478260,-np.inf], [np.inf, .043478262, np.inf]))
res.append(row - harm(l,popt[0],popt[1],popt[2]))
return res
def get_tpoints(l):
tpoints = [i.replace('CT','') for i in l]
tpoints = [int(i.split('_')[0]) for i in tpoints]
return np.asarray(tpoints)
tpoints = get_tpoints(simndf.columns.values)
res = get_res(simndf.values,tpoints)
tks_ = get_tks(res)
np.sum(tks_)
start = time.time()
sigs = perm_test(res,tpoints,100,tks_)
end = time.time()
print(end - start)
sigs
def autocorr(l,shift):
return dot(l, np.roll(l, shift)) / dot(l, l)
def prim_cor(arr,l,per,reps=3):
cors = []
for row in arr:
ave = [np.mean(i) for i in [row[j:j + reps] for j in range(0, len(row), reps)]]
cors.append((autocorr(ave,per) - autocorr(ave,(per//2))))
return cors
temp = [np.mean(i) for i in [simdf.values[1][j:j + 3] for j in range(0, len(simdf.values[1]), 3)]]
len(temp)
autocorr(temp,12)
autocorr(temp,6)
cors = prim_cor(simndf.values,tpoints,12)
plt.scatter(k['circ'].values.tolist(),cors)
sns.violinplot(x=k['circ'].values.tolist(),y=cors)
k['circ'].values.tolist()
np.mean(cors)
uncor = [(i<(np.mean(cors)*.25)) for i in cors]
len(simndf[uncor])
simndf_reduced = simndf[uncor]
tpoints = get_tpoints(simndf_reduced.columns.values)
#res = get_res(simndf_reduced.values,classes)
res = get_res(simndf_reduced.values,tpoints)
sns.clustermap(res[:1000],col_cluster=False)
tks_ = get_tks(res)
np.sum(tks_)
start = time.time()
sigs = perm_test(res,tpoints,100,tks_)
end = time.time()
print(end - start)
sigs
def eig_reg(arr,resarr,perm,a):
U, s, V = np.linalg.svd(resarr)
sig = V.T[:,:len([i for i in itertools.takewhile(lambda x: x < a, perm)])]
pvals = []
for trend in sig.T:
temp = []
for row in arr:
slope, intercept, r_value, p_value, std_err = linregress(row,trend)
temp.append(p_value)
pvals.append(temp)
return pvals
def normalize(arr,resarr,perm,a):
pca = RandomizedPCA(n_components=len([i for i in perm if i <= a]))
pca.fit(resarr)
return arr - pca.inverse_transform(pca.transform(resarr))
def est_pi_naught(probs,lam):
    # Storey's estimator of the null proportion pi_0: #{p > lambda} / (m * (1 - lambda))
    return len([i for i in probs if i > lam])/(len(probs)*(1-lam))
def est_pi_sig(probs,l):
pi_0 = est_pi_naught(probs,l)
sp = np.sort(probs)
return sp[int(floor(pi_0*len(probs)))]
def subset_svd(arr,plist,lam):
trends = []
for entry in plist:
sub = []
thresh = est_pi_sig(entry,lam)
for i in range(len(entry)):
if entry[i] < thresh:
sub.append(arr[i])
U, s, V = np.linalg.svd(sub)
trends.append(V.T[:,0])
return trends
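# The four helpers above appear to implement a surrogate-variable-style normalization:
# eig_reg regresses every row of arr on each significant right singular vector of the
# residual matrix and collects the regression p-values; est_pi_naught/est_pi_sig derive
# a Storey-style p-value cutoff per trend; and subset_svd re-estimates each trend as the
# first right singular vector of only the rows falling below that cutoff.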
ps = eig_reg(simndf_reduced.values,res,sigs,.05)
sns.distplot(ps[0])
est_pi_naught(ps[0],.5)
est_pi_sig(ps[0],.5)
ts = subset_svd(simndf_reduced.values,ps,0.5)
plt.plot(range(len(ts[0])),ts[0])
plt.plot(range(len(ts[1])),ts[1])
plt.plot(range(len(ts[2])),ts[2])
U, s, V = np.linalg.svd(res)
sig = V.T[:,:len([i for i in itertools.takewhile(lambda x: x < .05, sigs)])]
np.shape(ts)
np.shape(sig)
plt.plot(range(len(sig.T[0])),sig.T[0])
plt.plot(range(len(sig.T[1])),sig.T[1])
plt.plot(range(len(sig.T[2])),sig.T[2])
np.shape(simndf_reduced.values)
#fin_res = np.dot(linalg.lstsq(np.asarray(sig),simndf.values.T)[0].T,np.asarray(sig).T)
fin_res = np.dot(linalg.lstsq(np.asarray(ts).T,simndf.values.T)[0].T,np.asarray(ts))
sns.clustermap(fin_res[:100],col_cluster=False)
svd_norm = simndf.values - fin_res
svd_norm = pd.DataFrame(svd_norm,index=simndf.index,columns=simndf.columns)
svd_norm.index.names = ['#']
svd_norm.to_csv('simulated_denoised_new_lowess.txt',sep='\t')
sns.clustermap(svd_norm.head(n=1000),col_cluster=False)
simdnj = pd.read_csv('simulated_denoised_new__jtkout_GammaP.txt',sep='\t')
simdnj = simdnj.set_index('ID')
sns.distplot(simdnj['GammaP'])
merged = pd.concat([simdnj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
simdnj = pd.read_csv('simulated_denoised_new2__jtkout_GammaP.txt',sep='\t')
simdnj = simdnj.set_index('ID')
sns.distplot(simdnj['GammaP'])
merged = pd.concat([simdnj['GammaP'], k['circ']], axis=1,join='inner')
merged.columns = ['p','circ']
merged[merged['circ']==1]['p'].mean()
len(merged[(merged['circ']==1) & (merged['p']<.05)])
len(merged[(merged['circ']==0) & (merged['p']<.05)])
sns.violinplot(x="circ", y="p", data=merged)
def get_res(arr,l):
res = []
for row in arr:
ys = lowess(row, l,delta=4)[:,1]
res.append(row - ys)
return res
def get_tpoints(l):
tpoints = [i.replace('CT','') for i in l]
tpoints = [int(i.split('_')[0]) for i in tpoints]
return np.asarray(tpoints)
|
import numpy as np
import pandas as pd
import sklearn.datasets
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import optuna
import optuna.integration.lightgbm as lgb
import math
from optuna.trial import Trial
np.random.seed(123)
# import data
data_transformed = pd.read_csv('data_transformed.csv')
# features: all columns except the last; target: the 'Class' column
X = data_transformed.iloc[:, :-1]
y = data_transformed['Class']
# using 70% of the data for training and 30% for testing (with
# stratification for the imbalanced class)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, stratify=y, random_state=123)
# Standardize features by removing the mean and scaling to unit variance
sc = StandardScaler()
scaled_X_train = sc.fit_transform(X_train)
scaled_X_test = sc.transform(X_test)
class LGBM():
    def __init__(self, scaled_X_train, scaled_X_test, y_train, y_test):
        # keep references to the prepared data so objective() does not rely on globals
        self.scaled_X_train = scaled_X_train
        self.scaled_X_test = scaled_X_test
        self.y_train = y_train
        self.y_test = y_test
    def objective(self, trial: Trial):
        dtrain = lgb.Dataset(self.scaled_X_train, label=self.y_train)
        dtest = lgb.Dataset(self.scaled_X_test, label=self.y_test)
        params = {
            "objective": "binary",
            "metric": "auc",
            "verbosity": 0,
            "boosting_type": "gbdt",
        }
        gbm = lgb.train(
            params, dtrain, verbose_eval=True, valid_sets=[dtest]
        )
        preds = gbm.predict(self.scaled_X_test)
        y_pred = np.array(list(map(lambda x: int(x), preds > 0.5)))
        f1_sc = f1_score(self.y_test, y_pred)
        # minimize 1 - F1 so that Optuna effectively maximizes the F1 score
        loss = np.subtract(1, f1_sc)
        return loss
    def optuna_method(self):
        study = optuna.create_study(direction="minimize")
        study.optimize(self.objective, n_trials=2000)
        self.params = study.best_params
        return study.best_trial
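# A minimal usage sketch (illustrative; optuna_method runs 2000 trials as written,
# so expect a long optimization on real data):
if __name__ == '__main__':
    model = LGBM(scaled_X_train, scaled_X_test, y_train, y_test)
    best_trial = model.optuna_method()
    print('Best 1 - F1 loss:', best_trial.value)
    print('Best params:', model.params)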
|
#!/usr/bin/python2
#
# Copyright 2018 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests of data_source_to_dspl module."""
__author__ = 'Benjamin Yolken <[email protected]>'
import unittest
import data_source
import data_source_to_dspl
class _MockDataSource(data_source.DataSource):
"""A fake DataSource, for testing purposes."""
def __init__(self, data_source_identifier, verbose=True):
pass
def GetColumnBundle(self):
column1 = data_source.DataSourceColumn(
'col1', data_type='string', slice_role='dimension',
concept_extension='entity:entity', rollup=True)
column2 = data_source.DataSourceColumn(
'col2', data_type='string', concept_extension='geo:location',
slice_role='dimension', parent_ref='col6')
column3 = data_source.DataSourceColumn(
'col3', data_type='date', concept_ref='time:year', data_format='yyyy',
slice_role='dimension')
column4 = data_source.DataSourceColumn(
'col4', data_type='float', slice_role='metric')
column5 = data_source.DataSourceColumn(
'col5', data_type='integer', slice_role='metric')
column6 = data_source.DataSourceColumn(
'col6', data_type='string', slice_role='dimension', rollup=True)
return data_source.DataSourceColumnBundle(
columns=[column1, column2, column3, column4, column5, column6])
def GetTableData(self, query_parameters):
if query_parameters.column_ids == ('col1',):
return data_source.TableData(rows=[['blue'], ['green'], ['red']])
elif query_parameters.column_ids == ('col2',):
return data_source.TableData(rows=[['california'], ['maine'], ['oregon']])
elif query_parameters.column_ids == ('col6',):
return data_source.TableData(rows=[['east'], ['west']])
elif query_parameters.column_ids == ('col2', 'col6'):
return data_source.TableData(rows=[['california', 'west'],
['maine', 'east'], ['oregon', 'west']])
else:
data_columns = []
for column_id in query_parameters.column_ids:
if column_id == 'col1':
data_columns.append(['blue', 'blue', 'green', 'red'])
elif column_id == 'col2':
data_columns.append(['california', 'california', 'maine', 'oregon'])
elif column_id == 'col3':
data_columns.append(['1989', '1990', '1991', '1992'])
elif column_id == 'col4':
data_columns.append(['1.2', '1.3', '1.4', '1.5'])
elif column_id == 'col5':
data_columns.append(['4', '5', '6', '7'])
elif column_id == 'col6':
data_columns.append(['west', 'west', 'east', 'west'])
# Transpose rows and columns so that table is properly set up
return data_source.TableData([list(r) for r in zip(*data_columns)])
def Close(self):
pass
class CalculateSlicesTests(unittest.TestCase):
"""Tests of _CalculateSlices function."""
def setUp(self):
pass
def testCalculateSlices(self):
"""Test of _CalculateSlices with powersets."""
column1 = data_source.DataSourceColumn(
'col1', rollup=True, concept_extension='entity:entity')
column2 = data_source.DataSourceColumn('col2', rollup=False)
column3 = data_source.DataSourceColumn(
'col3', rollup=True, parent_ref='col5')
column4 = data_source.DataSourceColumn(
'col4', rollup=True, parent_ref='col3')
column5 = data_source.DataSourceColumn(
'col5', rollup=True)
column_bundle = data_source.DataSourceColumnBundle(
columns=[column1, column2, column3, column4, column5])
slice_column_sets = data_source_to_dspl._CalculateSlices(column_bundle)
# Convert columns to id strings
slice_column_ids = []
for slice_column_set in slice_column_sets:
slice_column_ids.append([c.column_id for c in slice_column_set])
# Sort the actual and expected results so that the test is not order
# dependent
self.assertEqual(
sorted([sorted(s) for s in slice_column_ids]),
sorted([sorted(s) for s in [['col1', 'col2', 'col3'],
['col1', 'col2', 'col4'],
['col1', 'col2', 'col5'],
['col1', 'col2'], ['col2', 'col3'],
['col2', 'col4'], ['col2', 'col5'],
['col2']]]))
class PopulateDatasetTest(unittest.TestCase):
"""Tests of PopulateDataset functionality."""
def setUp(self):
self.dataset = data_source_to_dspl.PopulateDataset(
_MockDataSource(None), verbose=False)
def testDatasetImports(self):
"""Test that the dataset imports are properly created."""
# Sort so that the test results aren't sensitive to ordering
sorted_imports = sorted(self.dataset.imports, key=lambda i: i.namespace_id)
self.assertEqual(
[i.namespace_id for i in sorted_imports],
['entity', 'geo', 'geo_us', 'quantity', 'time', 'unit'])
self.assertEqual(
[i.namespace_url for i in sorted_imports],
['http://www.google.com/publicdata/dataset/google/entity',
'http://www.google.com/publicdata/dataset/google/geo',
'http://www.google.com/publicdata/dataset/google/geo/us',
'http://www.google.com/publicdata/dataset/google/quantity',
'http://www.google.com/publicdata/dataset/google/time',
'http://www.google.com/publicdata/dataset/google/unit'])
def testDatasetConcepts(self):
"""Test that the dataset concepts are properly created."""
# Sort so that the test results aren't sensitive to ordering
sorted_concepts = sorted(self.dataset.concepts, key=lambda c: c.concept_id)
self.assertEqual(
[c.concept_id for c in sorted_concepts],
['col1', 'col2', 'col4', 'col5', 'col6', 'time:year'])
self.assertEqual(
[c.data_type for c in sorted_concepts],
['string', 'string', 'float', 'integer', 'string', 'date'])
self.assertEqual(
[c.table_ref for c in sorted_concepts],
['col1_table', 'col2_table', '', '', 'col6_table', ''])
self.assertEqual(
[c.concept_extension_reference for c in sorted_concepts],
['entity:entity', 'geo:location', '', '', '', ''])
self.assertEqual(
[c.concept_reference for c in sorted_concepts],
['', '', '', '', '', 'time:year'])
def testDatasetSlices(self):
"""Test that the dataset slices are properly created."""
# Slice ids are based on order, so no need to sort here
self.assertEqual(
[s.slice_id for s in self.dataset.slices],
['slice_0', 'slice_1'])
self.assertEqual(
[s.table_ref for s in self.dataset.slices],
['slice_0_table', 'slice_1_table'])
self.assertEqual(
[s.dimension_map for s in self.dataset.slices],
[{'time:year': 'col3'}, {'time:year': 'col3'}])
# Test dimensions in an order-independent way
self.assertEqual(
sorted([
sorted(self.dataset.slices[0].dimension_refs),
sorted(self.dataset.slices[1].dimension_refs)]),
[['col1', 'col2', 'time:year'], ['col2', 'time:year']])
# Test metrics in an order-independent way
self.assertEqual(
[sorted(self.dataset.slices[0].metric_refs),
sorted(self.dataset.slices[1].metric_refs)],
[['col4', 'col5'], ['col4', 'col5']])
# Test that dimension maps are set up appropriately
self.assertEqual(self.dataset.slices[0].dimension_map,
{'time:year': 'col3'})
self.assertEqual(self.dataset.slices[1].dimension_map,
{'time:year': 'col3'})
def testDatasetTables(self):
"""Test that the dataset tables are properly created."""
# Sort tables so that test results aren't dependent on order
sorted_tables = sorted(self.dataset.tables, key=lambda t: t.table_id)
self.assertEqual(
[t.table_id for t in sorted_tables],
['col1_table', 'col2_table', 'col6_table',
'slice_0_table', 'slice_1_table'])
self.assertEqual(
[t.file_name for t in sorted_tables],
['col1_table.csv', 'col2_table.csv', 'col6_table.csv',
'slice_0_table.csv', 'slice_1_table.csv'])
# Map tables to what concepts they have in them
col1_table = sorted_tables[0]
col2_table = sorted_tables[1]
col6_table = sorted_tables[2]
if len(sorted_tables[3].columns) == 5:
col1_to_col5_table = sorted_tables[3]
col2_to_col5_table = sorted_tables[4]
else:
col1_to_col5_table = sorted_tables[4]
col2_to_col5_table = sorted_tables[3]
# Do in-depth tests of each table
self._TableColumnTestHelper(
col1_table,
expected_ids=['col1', 'name'],
expected_types=['string', 'string'],
expected_formats=['', ''],
expected_data={'col1': ['col1', 'blue', 'green', 'red'],
'name': ['name', 'blue', 'green', 'red']})
self._TableColumnTestHelper(
col2_table,
expected_ids=['col2', 'col6', 'latitude', 'longitude', 'name'],
expected_types=['string', 'string', 'float', 'float', 'string'],
expected_formats=['', '', '', '', ''],
expected_data={'col2': ['col2', 'california', 'maine', 'oregon'],
'col6': ['col6', 'west', 'east', 'west'],
'name': ['name', 'california', 'maine', 'oregon'],
'latitude': ['latitude', '', '', ''],
'longitude': ['longitude', '', '', '']})
self._TableColumnTestHelper(
col6_table,
expected_ids=['col6'],
expected_types=['string'],
expected_formats=[''],
expected_data={'col6': ['col6', 'east', 'west']})
self._TableColumnTestHelper(
col1_to_col5_table,
expected_ids=['col1', 'col2', 'col3', 'col4', 'col5'],
expected_types=['string', 'string', 'date', 'float', 'integer'],
expected_formats=['', '', 'yyyy', '', ''],
expected_data={'col1': ['col1', 'blue', 'blue', 'green', 'red'],
'col2': ['col2', 'california', 'california', 'maine',
'oregon'],
'col3': ['col3', '1989', '1990', '1991', '1992'],
'col4': ['col4', '1.2', '1.3', '1.4', '1.5'],
'col5': ['col5', '4', '5', '6', '7']})
self._TableColumnTestHelper(
col2_to_col5_table,
expected_ids=['col2', 'col3', 'col4', 'col5'],
expected_types=['string', 'date', 'float', 'integer'],
expected_formats=['', 'yyyy', '', ''],
expected_data={'col2': ['col2', 'california', 'california', 'maine',
'oregon'],
'col3': ['col3', '1989', '1990', '1991', '1992'],
'col4': ['col4', '1.2', '1.3', '1.4', '1.5'],
'col5': ['col5', '4', '5', '6', '7']})
def _TableColumnTestHelper(self, table, expected_ids=(), expected_types=(),
expected_formats=(), expected_data=dict()):
"""Help test contents of a single DSPL table object."""
# Sort the columns so the test results aren't order dependent
sorted_table_columns = sorted(
table.columns, key=lambda c: c.column_id)
self.assertEqual(
[c.column_id for c in sorted_table_columns],
expected_ids)
self.assertEqual(
[c.data_type for c in sorted_table_columns],
expected_types)
self.assertEqual(
[c.data_format for c in sorted_table_columns],
expected_formats)
# Transpose data table so we can look at the data by columns
transposed_table_data = [list(r) for r in zip(*table.table_data)]
self.assertEqual(len(transposed_table_data), len(table.columns))
for c, column in enumerate(table.columns):
self.assertEqual(
transposed_table_data[c],
expected_data[column.column_id])
if __name__ == '__main__':
unittest.main()
|
# Yibo Yang, 2020
import tensorflow.compat.v1 as tf
def read_png(filename):
"""Loads a image file as float32 HxWx3 array; tested to work on png and jpg images."""
string = tf.read_file(filename)
image = tf.image.decode_image(string, channels=3)
image = tf.cast(image, tf.float32)
image /= 255
return image
def quantize_image(image):
image = tf.round(image * 255)
image = tf.saturate_cast(image, tf.uint8)
return image
def write_png(filename, image):
"""Saves an image to a PNG file."""
image = quantize_image(image)
string = tf.image.encode_png(image)
return tf.write_file(filename, string)
def convert_float_to_uint8(image):
image = tf.round(image * 255)
image = tf.saturate_cast(image, tf.uint8)
return image
def convert_uint8_to_float(image):
image = tf.cast(image, tf.float32)
image /= 255
return image
import numpy as np
# for reading images in .npy format
def read_npy_file_helper(file_name_in_bytes):
# data = np.load(file_name_in_bytes.decode('utf-8'))
data = np.load(file_name_in_bytes) # turns out this works too without decoding to str first
# assert data.dtype is np.float32 # needs to match the type argument in the caller tf.data.Dataset.map
return data
def get_runname(args_dict, record_keys=('num_filters', 'num_hfilters', 'lmbda', 'last_step'), prefix=''):
"""
Given a dictionary of cmdline arguments, return a string that identifies the training run.
:param args_dict:
:return:
"""
config_strs = [] # ['key1=val1', 'key2=val2', ...]
# for key, val in args_dict.items():
# if isinstance(val, (list, tuple)):
# val_str = '_'.join(map(str, val))
# config_strs.append('%s=%s' % (key, val_str))
for key in record_keys:
if key == 'num_hfilters' and int(args_dict[key]) <= 0:
continue
config_strs.append('%s=%s' % (key, args_dict[key]))
return '-'.join([prefix] + config_strs)
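# Illustrative output (hypothetical args): get_runname(
#     {'num_filters': 128, 'num_hfilters': 0, 'lmbda': 0.01, 'last_step': 10000},
#     prefix='run')
# skips num_hfilters (<= 0) and returns 'run-num_filters=128-lmbda=0.01-last_step=10000'.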
log2pi = np.log(2. * np.pi).astype('float32')
def log_normal_pdf(sample, mean, logvar, backend=tf):
# compute normal logpdf, element-wise
return -.5 * ((sample - mean) ** 2. * backend.exp(-logvar) + logvar + log2pi)
def gaussian_standardized_cumulative(inputs):
# borrowed from tensorflow_compression/python/layers/entropy_models.GaussianConditional._standardized_cumulative
# Using the complementary error function maximizes numerical precision.
return 0.5 * tf.math.erfc(-(2 ** -0.5) * inputs)
def box_convolved_gaussian_pdf(inputs, mu, sigma):
# Compute the pdf of inputs under the density of N(mu, sigma**2) convolved with U(-0.5, 0.5).
# Equivalent to N(mu, sigma**2).CDF(inputs + 0.5) - N(mu, sigma**2).CDF(inputs - 0.5), but should be more numerically
# stable.
values = inputs
values -= mu
# This assumes that the standardized cumulative has the property
# 1 - c(x) = c(-x), which means we can compute differences equivalently in
# the left or right tail of the cumulative. The point is to only compute
# differences in the left tail. This increases numerical stability: c(x) is
# 1 for large x, 0 for small x. Subtracting two numbers close to 0 can be
# done with much higher precision than subtracting two numbers close to 1.
values = abs(values)
upper = gaussian_standardized_cumulative((.5 - values) / sigma)
lower = gaussian_standardized_cumulative((-.5 - values) / sigma)
likelihood = upper - lower
return likelihood
@tf.custom_gradient
def round_with_STE(x, STE=None):
"""
Special rounding that uses straight-through estimator (STE) for backpropagation.
See a discussion in https://openreview.net/pdf?id=Skh4jRcKQ.
:param x:
:param STE: type of proxy function whose gradient is used in place of round in the backward pass.
:return:
"""
output = tf.math.round(x)
    def grad(dy):  # the grad fn implements the backward pass (vector-Jacobian product)
if STE is None or STE == 'identity':
return dy
elif STE == 'relu':
return tf.nn.relu(dy) # max{input, 0}
elif STE == 'crelu' or STE == 'clipped_relu':
return tf.clip_by_value(tf.nn.relu(dy), 0., 1.) # min{max{input, 0}, 1}
else:
raise NotImplementedError
return output, grad
# The above version of round_with_STE with a kwarg won't work in graph mode, hence the various STE types are implemented separately below.
@tf.custom_gradient
def round_with_identity_STE(x):
output = tf.math.round(x)
grad = lambda dy: dy
return output, grad
@tf.custom_gradient
def round_with_relu_STE(x):
output = tf.math.round(x)
grad = lambda dy: tf.nn.relu(dy)
return output, grad
@tf.custom_gradient
def round_with_crelu_STE(x):
output = tf.math.round(x)
grad = lambda dy: tf.clip_by_value(tf.nn.relu(dy), 0., 1.)
return output, grad
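# Illustrative use (sketch): inside a quantization layer, y = round_with_identity_STE(x)
# forwards tf.math.round(x) but passes the incoming gradient dy straight through to x
# in the backward pass, so x keeps receiving gradient updates despite the rounding.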
def annealed_temperature(t, r, ub, lb=1e-8, backend=np, scheme='exp', **kwargs):
"""
Return the temperature at time step t, based on a chosen annealing schedule.
:param t: step/iteration number
:param r: decay strength
:param ub: maximum/init temperature
:param lb: small const like 1e-8 to prevent numerical issue when temperature gets too close to 0
:param backend: np or tf
:param scheme:
:param kwargs:
:return:
"""
default_t0 = 700
if scheme == 'exp':
tau = backend.exp(-r * t)
elif scheme == 'exp0':
# Modified version of above that fixes temperature at ub for initial t0 iterations
t0 = kwargs.get('t0', default_t0)
tau = ub * backend.exp(-r * (t - t0))
elif scheme == 'linear':
# Cool temperature linearly from ub after the initial t0 iterations
t0 = kwargs.get('t0', default_t0)
tau = -r * (t - t0) + ub
else:
raise NotImplementedError
if backend is None:
return min(max(tau, lb), ub)
else:
return backend.minimum(backend.maximum(tau, lb), ub)
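# Illustrative value (default np backend, 'exp' schedule): annealed_temperature(
# t=1000, r=1e-3, ub=0.5) computes np.exp(-1.0) ~= 0.368 and clips it into
# [lb, ub] = [1e-8, 0.5], so roughly 0.368 is returned.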
|
from django.test import TestCase
# Create your tests here.
from django.urls import reverse
from rest_framework.views import status
import json
# tests for views
from django.test import TestCase, Client, RequestFactory
from .views import object_detection
from django.http import JsonResponse, HttpResponse
class SimpleTest(TestCase):
def test_obj_recognition(self):
picture_url = 'https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/640px-IBM_VGA_90X8941_on_PS55.jpg'
request = RequestFactory().get('/obj')
rr = object_detection(request, picture_url)
self.assertEqual(rr.status_code,200)
|
"""
Useful additional string functions.
"""
import sys
def remove_non_ascii(s):
"""
    Remove non-ASCII characters from a string. Needed when support for non-ASCII
    characters is not available.
Args:
s (str): Input string
Returns:
String with all non-ascii characters removed.
"""
return "".join(i for i in s if ord(i) < 128)
def unicode2str(s):
"""
Forces a unicode to a string in Python 2, but transparently handles
Python 3.
Args:
s (str/unicode): Input string / unicode.
Returns:
str in Python 2. Unchanged otherwise.
"""
return s.encode("utf-8") if sys.version_info.major < 3 else s
def is_string(s):
"""True if s behaves like a string (duck typing test)."""
try:
s + " "
return True
except TypeError:
return False
def list_strings(arg):
"""
Always return a list of strings, given a string or list of strings as
input.
:Examples:
>>> list_strings('A single string')
['A single string']
>>> list_strings(['A single string in a list'])
['A single string in a list']
>>> list_strings(['A','list','of','strings'])
['A', 'list', 'of', 'strings']
"""
if is_string(arg):
return [arg]
return arg
def marquee(text="", width=78, mark="*"):
"""
Return the input string centered in a 'marquee'.
Args:
text (str): Input string
width (int): Width of final output string.
mark (str): Character used to fill string.
:Examples:
>>> marquee('A test', width=40)
'**************** A test ****************'
>>> marquee('A test', width=40, mark='-')
'---------------- A test ----------------'
marquee('A test',40, ' ')
' A test '
"""
if not text:
return (mark * width)[:width]
nmark = (width - len(text) - 2) // len(mark) // 2
if nmark < 0:
nmark = 0
marks = mark * nmark
return "%s %s %s" % (marks, text, marks)
def boxed(msg, ch="=", pad=5):
"""
Returns a string in a box
Args:
msg: Input string.
ch: Character used to form the box.
pad: Number of characters ch added before and after msg.
>>> print(boxed("hello", ch="*", pad=2))
***********
** hello **
***********
"""
if pad > 0:
msg = pad * ch + " " + msg.strip() + " " + pad * ch
return "\n".join(
[
len(msg) * ch,
msg,
len(msg) * ch,
]
)
def make_banner(s, width=78, mark="*"):
"""
:param s: String
:param width: Width of banner. Defaults to 78.
:param mark: The mark used to create the banner.
:return: Banner string.
"""
banner = marquee(s, width=width, mark=mark)
return "\n" + len(banner) * mark + "\n" + banner + "\n" + len(banner) * mark
def indent(lines, amount, ch=" "):
"""
Indent the lines in a string by padding each one with proper number of pad
characters
"""
padding = amount * ch
return padding + ("\n" + padding).join(lines.split("\n"))
|
import re
from typing import Optional
from .types import Filter
from .utils import force_tuple
def should_skip_method(method: str, pattern: Optional[Filter]) -> bool:
if pattern is None:
return False
patterns = force_tuple(pattern)
return method.upper() not in map(str.upper, patterns)
def should_skip_endpoint(endpoint: str, pattern: Optional[Filter]) -> bool:
if pattern is None:
return False
patterns = force_tuple(pattern)
return not any(re.search(item, endpoint) for item in patterns)
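# Illustrative behaviour (assuming force_tuple wraps a single value into a tuple):
#   should_skip_method("get", "POST")             -> True   (GET not in the allowed set)
#   should_skip_method("post", ("POST", "PUT"))   -> False
#   should_skip_endpoint("/users/42", r"^/users") -> False  (pattern matches)
#   should_skip_endpoint("/health", r"^/users")   -> True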
|
import os
from experiments.experiments import REDDModelSelectionExperiment
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from nilmlab import exp_model_list
from nilmlab.lab import TimeSeriesLength
dirname = os.path.dirname(__file__)
single_building_exp_checkpoint = os.path.join(dirname, '../results/cv5mins_redd3.csv')
exp = REDDModelSelectionExperiment(building=3)
exp.set_ts_len(TimeSeriesLength.WINDOW_5_MINS)
exp.set_checkpoint_file(single_building_exp_checkpoint)
exp.set_transformers(exp_model_list.cv_signal2vec)
exp.set_classifiers(exp_model_list.cv_signal2vec_clf)
exp.run() |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend filtering"""
from typing import List, Union, Type
import allure
import pytest
from adcm_client.base import ResponseTooLong, BaseAPIListObject, BaseAPIObject
from adcm_client.objects import (
Action,
ADCMClient,
Bundle,
BundleList,
Cluster,
ClusterList,
ClusterPrototype,
ClusterPrototypeList,
Host,
HostList,
HostPrototype,
HostPrototypeList,
Job,
JobList,
Prototype,
PrototypeList,
Provider,
ProviderList,
ProviderPrototype,
ProviderPrototypeList,
Task,
TaskList,
Service,
)
from adcm_pytest_plugin.utils import get_data_dir, get_subdirs_iter
from delayed_assert import assert_expectations, expect
from pytest_lazyfixture import lazy_fixture
# pylint: disable=redefined-outer-name,protected-access
@pytest.fixture()
def cluster_bundles(sdk_client_fs: ADCMClient):
"""Upload cluster bundles"""
for path in get_subdirs_iter(__file__, "cluster_bundles"):
sdk_client_fs.upload_from_fs(path)
return sdk_client_fs
@pytest.fixture()
def one_cluster_prototype(cluster_bundles: ADCMClient):
"""Get cluster prototype"""
return cluster_bundles.bundle(name="4").cluster_prototype()
@pytest.fixture()
def one_cluster_prototype_name_attr(one_cluster_prototype: ClusterPrototype):
"""Get cluster prototype name attr"""
return {'name': one_cluster_prototype.name}
@pytest.fixture()
def one_cluster_prototype_bundle_id_attr(one_cluster_prototype: ClusterPrototype):
"""Get cluster prototype bundle_id attr"""
return {'bundle_id': one_cluster_prototype.bundle_id}
@pytest.fixture()
def clusters(cluster_bundles: ADCMClient):
"""Create clusters"""
for i in range(51):
cluster_bundles.bundle(name='14').cluster_create(name=str(i))
return cluster_bundles
@pytest.fixture()
def one_cluster(cluster_bundles: ADCMClient):
"""Create one cluster"""
return cluster_bundles.bundle(name='42').cluster_create(name="I am a Cluster")
@pytest.fixture()
def one_cluster_name_attr(one_cluster: Cluster):
"""Get cluster name attr"""
return {'name': one_cluster.name}
@pytest.fixture()
def one_cluster_prototype_id_attr(one_cluster: Cluster):
"""Get cluster prototype_id attr"""
return {'prototype_id': one_cluster.prototype_id}
@pytest.fixture()
def provider_bundles(sdk_client_fs: ADCMClient):
"""Upload provider bundles"""
for path in get_subdirs_iter(__file__, "provider_bundles"):
sdk_client_fs.upload_from_fs(path)
return sdk_client_fs
@pytest.fixture()
def providers(provider_bundles: ADCMClient):
"""Create providers"""
bundle = provider_bundles.bundle(name='provider18')
for i in range(51):
bundle.provider_create(name=str(i))
return provider_bundles
@pytest.fixture()
def one_provider(provider_bundles: ADCMClient):
"""Create one provider"""
return provider_bundles.bundle(name='provider15').provider_create(name="I am a Provider")
@pytest.fixture()
def one_provider_name_attr(one_provider: Provider):
"""Get provider name attr"""
return {'name': one_provider.name}
@pytest.fixture()
def one_provider_prototype_id_attr(one_provider: Provider):
"""Get provider prototype_id attr"""
return {'prototype_id': one_provider.prototype_id}
@pytest.fixture()
def provider_bundle_id(one_provider: Provider):
"""Get provider bundle_id attr"""
return {'bundle_id': one_provider.bundle_id}
@pytest.fixture()
def hosts(provider_bundles: ADCMClient, one_provider):
"""Create hosts return provider bundles"""
for i in range(51):
one_provider.host_create(fqdn=str(i))
return provider_bundles
@pytest.fixture()
def one_host(provider_bundles: ADCMClient):
"""Create one host"""
provider = provider_bundles.bundle(name='provider42').provider_create(name="For one Host")
return provider.host_create(fqdn='host.host.host')
@pytest.fixture()
def one_host_fqdn_attr(one_host: Host):
"""Get host fqdn attr"""
return {'fqdn': one_host.fqdn}
@pytest.fixture()
def one_host_prototype_id_attr(one_host: Host):
"""Get host prototype_id attr"""
return {'prototype_id': one_host.prototype_id}
@pytest.fixture()
def one_host_provider_id_attr(one_host: Host):
"""Get host provider_id attr"""
return {'provider_id': one_host.provider_id}
@pytest.mark.parametrize(
'tested_class',
[
pytest.param(Bundle, id="Bundle"),
pytest.param(Prototype, id="Prototype"),
pytest.param(ClusterPrototype, id="ClusterPrototype"),
pytest.param(ProviderPrototype, id="ProviderPrototype"),
pytest.param(HostPrototype, id="HostPrototype"),
pytest.param(Cluster, id="Cluster"),
pytest.param(Provider, id="Provider"),
pytest.param(Host, id="Host"),
pytest.param(Task, id="Task"),
pytest.param(Job, id="Job"),
],
)
def test_coreapi_schema(sdk_client_fs: ADCMClient, tested_class: Type[BaseAPIObject]):
"""Test coreapi schema"""
def _get_params(link):
result = {}
for field in link.fields:
result[field.name] = True
return result
schema_obj = sdk_client_fs._api.schema
with allure.step(f'Get {tested_class.__name__} schema objects'):
for path in tested_class.PATH:
assert path in schema_obj.data
schema_obj = schema_obj[path]
params = _get_params(schema_obj.links['list'])
with allure.step(f'Check if filters are acceptable for coreapi {tested_class.__name__}'):
for _filter in tested_class.FILTERS:
expect(
_filter in params,
f"Filter {_filter} should be acceptable for coreapi in class {tested_class.__name__}",
)
assert_expectations()
@pytest.mark.parametrize(
('sdk_client', 'tested_class'),
[
pytest.param(lazy_fixture('cluster_bundles'), ClusterPrototypeList, id="Cluster Prototype"),
pytest.param(lazy_fixture('cluster_bundles'), PrototypeList, id="Prototype"),
pytest.param(lazy_fixture('provider_bundles'), ProviderPrototypeList, id="Provider Prototype"),
pytest.param(lazy_fixture('provider_bundles'), HostPrototypeList, id="Host Prototype"),
pytest.param(lazy_fixture('provider_bundles'), BundleList, id="Bundle"),
pytest.param(lazy_fixture('clusters'), ClusterList, id="Cluster"),
pytest.param(lazy_fixture('hosts'), HostList, id="Host"),
pytest.param(lazy_fixture('hosts_with_jobs'), TaskList, id="Task"),
pytest.param(lazy_fixture('hosts_with_jobs'), JobList, id="Job"),
],
)
def test_paging_fail(sdk_client, tested_class: Type[BaseAPIListObject]):
"""Scenario:
* Prepare a lot of objects in ADCM
* Call listing api over objects.*List classes
* Expecting to have ResponseTooLong error
"""
with allure.step(f'Prepare a lot of objects: {tested_class.__name__} ' f'in ADCM and check ResponseTooLong error'):
with pytest.raises(ResponseTooLong):
tested_class(sdk_client._api)
@pytest.mark.parametrize(
('sdk_client', 'tested_class', 'tested_list_class', 'search_args', 'expected_args'),
[
pytest.param(
lazy_fixture('cluster_bundles'),
ClusterPrototype,
ClusterPrototypeList,
lazy_fixture('one_cluster_prototype_name_attr'),
lazy_fixture('one_cluster_prototype_name_attr'),
id="Cluster Prototype Name Filter",
),
pytest.param(
lazy_fixture('cluster_bundles'),
ClusterPrototype,
ClusterPrototypeList,
lazy_fixture('one_cluster_prototype_bundle_id_attr'),
lazy_fixture('one_cluster_prototype_bundle_id_attr'),
id="Cluster Prototype Bundle ID Filter",
),
pytest.param(
lazy_fixture('cluster_bundles'),
Prototype,
PrototypeList,
lazy_fixture('one_cluster_prototype_name_attr'),
lazy_fixture('one_cluster_prototype_name_attr'),
id="Prototype Name Filter",
),
pytest.param(
lazy_fixture('cluster_bundles'),
Prototype,
PrototypeList,
lazy_fixture('one_cluster_prototype_bundle_id_attr'),
lazy_fixture('one_cluster_prototype_bundle_id_attr'),
id="Prototype Bundle ID Filter",
),
pytest.param(
lazy_fixture('provider_bundles'),
ProviderPrototype,
ProviderPrototypeList,
{'name': 'provider24'},
{'name': 'provider24'},
id="Provider Prototype Name Filter",
),
pytest.param(
lazy_fixture('provider_bundles'),
ProviderPrototype,
ProviderPrototypeList,
lazy_fixture('provider_bundle_id'),
lazy_fixture('provider_bundle_id'),
id="Provider Prototype Bundle ID Filter",
),
pytest.param(
lazy_fixture('provider_bundles'),
HostPrototype,
HostPrototypeList,
{'name': 'host13'},
{'name': 'host13'},
id="Host Prototype Name Filter",
),
pytest.param(
lazy_fixture('provider_bundles'),
HostPrototype,
HostPrototypeList,
lazy_fixture('provider_bundle_id'),
lazy_fixture('provider_bundle_id'),
id="Host Prototype Bundle ID Filter",
),
pytest.param(
lazy_fixture('cluster_bundles'),
Bundle,
BundleList,
{'name': '4'},
{'version': 'ver4'},
id="Bundle Name Filter",
),
pytest.param(
lazy_fixture('cluster_bundles'),
Bundle,
BundleList,
{'version': 'ver8'},
{'name': '8'},
id="Bundle Version Filter",
),
pytest.param(
lazy_fixture('clusters'),
Cluster,
ClusterList,
lazy_fixture('one_cluster_name_attr'),
lazy_fixture('one_cluster_prototype_id_attr'),
id="Cluster Name Filter",
),
pytest.param(
lazy_fixture('clusters'),
Cluster,
ClusterList,
lazy_fixture('one_cluster_prototype_id_attr'),
lazy_fixture('one_cluster_name_attr'),
id="Cluster Prototype Id Filter",
),
pytest.param(
lazy_fixture('providers'),
Provider,
ProviderList,
lazy_fixture('one_provider_name_attr'),
lazy_fixture('one_provider_prototype_id_attr'),
id="Provider Name Filter",
),
pytest.param(
lazy_fixture('providers'),
Provider,
ProviderList,
lazy_fixture('one_provider_prototype_id_attr'),
lazy_fixture('one_provider_name_attr'),
id="Provider Prototype Id Filter",
),
pytest.param(
lazy_fixture('hosts'),
Host,
HostList,
lazy_fixture('one_host_fqdn_attr'),
lazy_fixture('one_host_prototype_id_attr'),
id="Host Fqdn Filter",
),
pytest.param(
lazy_fixture('hosts'),
Host,
HostList,
lazy_fixture('one_host_prototype_id_attr'),
lazy_fixture('one_host_fqdn_attr'),
id="Host Prototype Id Filter",
),
pytest.param(
lazy_fixture('hosts_with_jobs'),
Task,
TaskList,
lazy_fixture('task_action_id_attr'),
lazy_fixture('task_action_id_attr'),
id="Task Action Id Filter",
),
pytest.param(
lazy_fixture('hosts_with_jobs'),
Task,
TaskList,
lazy_fixture('task_status_attr'),
lazy_fixture('task_status_attr'),
id="Task Status Filter",
),
pytest.param(
lazy_fixture('hosts_with_jobs'),
Job,
JobList,
lazy_fixture('task_status_attr'),
lazy_fixture('task_status_attr'),
id="Job Action Id Filter",
),
pytest.param(
lazy_fixture('hosts_with_jobs'),
Job,
JobList,
lazy_fixture('task_status_attr'),
lazy_fixture('task_status_attr'),
id="Job Status Filter",
),
pytest.param(
lazy_fixture('hosts_with_jobs'),
Job,
JobList,
lazy_fixture('job_task_id_attr'),
lazy_fixture('job_task_id_attr'),
id="Job Task Id Filter",
),
],
)
def test_filter(sdk_client: ADCMClient, tested_class, tested_list_class, search_args, expected_args):
"""Scenario:
* Create a lot of objects in ADCM (more than allowed to get without paging)
* Call listing over *List class with tested filter as search args.
* Inspect first (and only) element of list
* Check that we found what we need
* Create single object over class call (like Cluster or Bundle) with tested filter
as search args
* Check that we found what we need
"""
with allure.step('Create a lot of objects in ADCM'):
objects = tested_list_class(sdk_client._api, **search_args)
with allure.step('Inspect first (and only) element of list'):
for k, v in expected_args.items():
assert getattr(objects[0], k) == v
with allure.step(
'Create single object over class call (like Cluster or Bundle) ' 'with tested filter as search args'
):
single_object = tested_class(sdk_client._api, **search_args)
with allure.step('Check created object'):
for k, v in expected_args.items():
assert getattr(single_object, k) == v
@pytest.fixture()
def cluster_with_actions(sdk_client_fs: ADCMClient):
"""Create cluster with actions"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster_with_actions'))
return bundle.cluster_create(name="cluster_with_actions")
@pytest.fixture()
def service_with_actions(cluster_with_actions: Cluster):
"""Create service with actions"""
return cluster_with_actions.service_add(name='service_with_actions')
@pytest.fixture()
def provider_with_actions(sdk_client_fs: ADCMClient):
"""Create provider with actions"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'provider_with_actions'))
return bundle.provider_create(name="provider_with_actions")
@pytest.fixture()
def host_with_actions(provider_with_actions: Provider):
"""Create host with actions"""
return provider_with_actions.host_create(fqdn='host.with.actions')
@pytest.fixture()
def host_ok_action(host_with_actions: Host):
"""Het host OK action"""
return host_with_actions.action(name="ok42")
@pytest.fixture()
def hosts_with_actions(host_with_actions: Host, provider_with_actions: Provider):
"""Create hosts with actions"""
hosts = [host_with_actions]
for i in range(9):
hosts.append(provider_with_actions.host_create(fqdn=f'host.with.actions.{i}'))
return hosts
@pytest.fixture()
def hosts_with_jobs(hosts_with_actions: List, host_ok_action: Action):
"""
Run multiple actions on hosts. Return first host.
"""
for _ in range(6):
actions = []
for host in hosts_with_actions:
actions.append(host.action(name="fail50").run())
for action in actions:
action.wait()
host_ok_action.run().try_wait()
return hosts_with_actions[0]
@pytest.fixture()
def task_action_id_attr(host_ok_action: Action):
"""Get task action_id attr"""
return {'action_id': host_ok_action.action_id}
@pytest.fixture()
def task_status_attr():
"""Get task status attr"""
return {'status': 'success'}
@pytest.fixture()
def job_task_id_attr(host_ok_action: Action):
"""Get task task_id attr"""
return {'task_id': host_ok_action.task().task_id}
# There is no paging on Actions right now.
# @pytest.mark.parametrize(
# "TestedParentClass",
# [
# pytest.param(
# lazy_fixture('cluster_with_actions'),
# id="Cluster"
# ),
# pytest.param(
# lazy_fixture('service_with_actions'),
# id="Service"
# ),
# pytest.param(
# lazy_fixture('provider_with_actions'),
# id="Provider"
# ),
# pytest.param(
# lazy_fixture('host_with_actions'),
# id="Host"
# ),
# ])
# def test_paging_fail_on_actions(TestedParentClass):
# """Scenario:
# * Create object with a lot of actions
# * Call action_list()
# * Expecting to have ResponseTooLong error
# """
# with pytest.raises(ResponseTooLong):
# from pprint import pprint
# pprint(TestedParentClass.action_list())
@pytest.mark.parametrize(
('tested_parent_class', 'search_args', 'expected_args'),
[
pytest.param(
lazy_fixture('cluster_with_actions'),
{'name': 'ok14'},
{'name': 'ok14'},
id="on Cluster",
),
pytest.param(
lazy_fixture('service_with_actions'),
{'name': 'fail15'},
{'name': 'fail15'},
id="on Service",
),
pytest.param(
lazy_fixture('provider_with_actions'),
{'name': 'ok14'},
{'name': 'ok14'},
id="on Provider",
),
pytest.param(lazy_fixture('host_with_actions'), {'name': 'fail15'}, {'name': 'fail15'}, id="on Host"),
],
)
def test_actions_name_filter(
tested_parent_class: Union[Provider, Service, Cluster], search_args: dict, expected_args: dict
):
"""Scenario:
* Create object with a lot of actions
* Call action_list() with tested filter as search args.
* Inspect first (and only) element of list
* Check that we found what we need
* Call action() with tested filter as search args
* Check that we found what we need
"""
with allure.step(f'Create {tested_parent_class} with a lot of actions'):
actions = tested_parent_class.action_list(**search_args)
with allure.step('Inspect first (and only) element of list'):
for k, v in expected_args.items():
assert getattr(actions[0], k) == v
with allure.step('Call action() with tested filter as search args'):
action = tested_parent_class.action(**search_args)
with allure.step('Check action name'):
for k, v in expected_args.items():
assert getattr(action, k) == v
|
import os
import glob
from sqlalchemy import Column, types
from sqlalchemy.exc import UnboundExecutionError
from sqlalchemy.orm.exc import UnmappedError
from alchy import manager, model, Session
from .base import TestBase, TestQueryBase
from . import fixtures
from .fixtures import Foo
class TestManager(TestBase):
def test_create_drop_all(self):
db = manager.Manager(Model=fixtures.Model, config=self.config)
db.create_all()
self.assertTrue(len(self.models) > 0)
self.assertModelTablesExist(db.engine)
db.drop_all()
self.assertModelTablesNotExists(db.engine)
def test_default_model_config(self):
db = manager.Manager(config=self.config)
self.assertTrue(issubclass(db.Model, model.ModelBase))
def test_create_all_exception(self):
# pass in dummy value for Model
db = manager.Manager(Model=False, config=self.config)
self.assertRaises(UnmappedError, db.create_all)
def test_drop_all_exception(self):
# pass in dummy value for Model
db = manager.Manager(Model=False, config=self.config)
self.assertRaises(UnmappedError, db.drop_all)
class TestManagerSessionExtensions(TestQueryBase):
def get_count(self, table='foo'):
return self.db.execute(
'select count(*) from {0}'.format(table)).scalar()
def test_add(self):
count = self.get_count()
self.db.add(Foo())
self.db.add(Foo(), Foo())
self.db.add([Foo(), Foo()])
self.assertEqual(self.db.execute(
'select count(*) from foo').scalar(), count)
self.db.commit()
self.assertEqual(self.get_count(), count + 5)
def test_add_commit(self):
count = self.get_count()
self.db.add_commit(Foo())
self.assertEqual(self.get_count(), count + 1)
self.db.add_commit(Foo(), Foo())
self.assertEqual(self.get_count(), count + 3)
self.db.add_commit([Foo(), Foo()])
self.assertEqual(self.get_count(), count + 5)
def test_delete(self):
count = self.get_count()
foos = Foo.query.all()
self.db.delete(foos[0])
self.db.delete(foos[1], foos[2])
self.db.delete([foos[3], foos[4]])
self.assertEqual(self.get_count(), count)
self.db.commit()
self.assertEqual(self.get_count(), count - 5)
def test_delete_commit(self):
count = self.get_count()
foos = Foo.query.all()
self.db.delete_commit(foos[0])
self.assertEqual(self.get_count(), count - 1)
self.db.delete_commit(foos[1], foos[2])
self.assertEqual(self.get_count(), count - 3)
self.db.delete_commit([foos[3], foos[4]])
self.assertEqual(self.get_count(), count - 5)
def test_custom_session(self):
class MySession(Session):
pass
db = manager.Manager(session_class=MySession)
self.assertIsInstance(db.session.session_factory(), MySession)
class TestMultipleEngineBinds(TestBase):
class config(object):
binds = [
'sqlite:///bind0.test.db',
'sqlite:///bind1.test.db',
'sqlite:///bind2.test.db'
]
SQLALCHEMY_DATABASE_URI = binds[0]
SQLALCHEMY_BINDS = {
'bind1': binds[1],
'bind2': {
'SQLALCHEMY_DATABASE_URI': binds[2]
}
}
Model = model.make_declarative_base()
class Bind0(Model):
_id = Column(types.Integer(), primary_key=True)
class Bind1(Model):
__bind_key__ = 'bind1'
_id = Column(types.Integer(), primary_key=True)
class Bind2(Model):
__bind_key__ = 'bind2'
_id = Column(types.Integer(), primary_key=True)
def setUp(self):
self.db = manager.Manager(config=self.config, Model=self.Model)
self.db.create_all()
self.engine0 = self.db.engine
self.engine1 = self.db.get_engine('bind1')
self.engine2 = self.db.get_engine('bind2')
def tearDown(self):
for db in glob.glob('*.test.db'):
os.remove(db)
def test_bind_engines(self):
"""Test that each bind engine is accessible and configured properly."""
self.assertEqual(
str(self.db.engine.url), self.config.binds[0])
self.assertEqual(
str(self.db.get_engine('bind1').url), self.config.binds[1])
self.assertEqual(
str(self.db.get_engine('bind2').url), self.config.binds[2])
def test_bind_tables(self):
"""Test that tables are created in the proper database."""
self.assertEqual(
self.engine0.execute('select * from bind0').fetchall(), [])
self.assertEqual(
self.engine1.execute('select * from bind1').fetchall(), [])
self.assertEqual(
self.engine2.execute('select * from bind2').fetchall(), [])
try:
self.engine0.execute('select * from bind1')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine0.execute('select * from bind2')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine1.execute('select * from bind0')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine1.execute('select * from bind2')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine2.execute('select * from bind0')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine2.execute('select * from bind1')
except Exception as e:
self.assertIn('no such table', str(e))
def test_bind_inserts(self):
"""Test that records are inserted into the proper database when using
models."""
self.db.add_commit(self.Bind0())
self.db.add_commit(self.Bind1())
self.db.add_commit(self.Bind2())
self.assertTrue(self.Bind0.query.count() > 0)
self.assertEqual(
self.Bind0.query.count(),
self.engine0.execute('select count(*) from bind0').fetchone()[0])
self.assertTrue(self.Bind1.query.count() > 0)
self.assertEqual(
self.Bind1.query.count(),
self.engine1.execute('select count(*) from bind1').fetchone()[0])
self.assertTrue(self.Bind2.query.count() > 0)
self.assertEqual(
self.Bind2.query.count(),
self.engine2.execute('select count(*) from bind2').fetchone()[0])
def test_create_drop_all_by_bind(self):
"""Test that create/drop all can be used to target a specific bind."""
self.db.drop_all(bind='bind1')
self.assertEqual(
self.engine0.execute('select * from bind0').fetchall(), [])
self.assertEqual(
self.engine2.execute('select * from bind2').fetchall(), [])
try:
self.engine1.execute('select * from bind1')
except Exception as e:
self.assertIn('no such table', str(e))
self.db.create_all(bind='bind1')
self.assertEqual(
self.engine1.execute('select * from bind1').fetchall(), [])
self.db.drop_all(bind=['bind1', 'bind2'])
try:
self.engine1.execute('select * from bind1')
except Exception as e:
self.assertIn('no such table', str(e))
try:
self.engine2.execute('select * from bind2')
except Exception as e:
self.assertIn('no such table', str(e))
self.db.create_all(bind=['bind1', 'bind2'])
self.assertEqual(
self.engine1.execute('select * from bind1').fetchall(), [])
self.assertEqual(
self.engine2.execute('select * from bind2').fetchall(), [])
def test_reflect(self):
"""Test that existing database tables can be reflected."""
rdb = manager.Manager(
config={'SQLALCHEMY_DATABASE_URI': self.config.binds[0]})
self.assertEqual(len(rdb.metadata.tables), 0)
rdb.reflect()
self.assertEqual(len(rdb.metadata.tables), 1)
self.assertIn('bind0', rdb.metadata.tables)
|
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.preprocessing import LabelBinarizer, LabelEncoder, OneHotEncoder
from sklearn.impute import KNNImputer
def num_columns(df):
"""
Create a list with the names of the numeric columns of a dataframe.
This function is used on rem_outliers and knn_missings.
Params:
- df = dataframe.
"""
df_num_cols = df.select_dtypes(include=np.number).columns
list_num_cols = list(df_num_cols)
return list_num_cols
def rem_col_nan(df, per_na=.3, rem_print=False):
"""
    If a column has more than 30% NaN values, it will be removed.
    The NaN percentage threshold can be changed via the params.
Params:
- df = dataframe.
- per_na = percentage limit of Nan to remove the column, by default 0.30.
- rem_print = print the list with removed columns.
"""
df_rem_nan = df.copy()
num_rows = len(df_rem_nan.index) * per_na
li_col_rem = []
for i in df_rem_nan:
if df_rem_nan[i].isnull().sum() >= num_rows:
df_rem_nan.drop(columns=i, inplace=True)
li_col_rem.append(i)
if rem_print:
print('The columns removed are:', li_col_rem)
return df_rem_nan
def rem_outliers(df, z_num=3, shape_print=False):
"""
    Rows containing a value with a z-score greater than 3 will be removed.
    The z-score indicates whether a value is an outlier.
    The z-score limit can be changed via the params.
Params:
- df = dataframe
- z_num = limit of z-score to consider an outlier, by default 3.
- shape_print: print the number of rows removed.
"""
df_rem_outliers = df.copy()
list_num_cols = num_columns(df_rem_outliers)
if df_rem_outliers[list_num_cols].isnull().values.any():
return print('There are NaN on df. Please, treat them before transform outliers.')
else:
for i in list_num_cols:
z = np.abs(stats.zscore(df_rem_outliers[i]))
df_rem_outliers = df_rem_outliers[(z < z_num)]
if shape_print:
print('Number of rows removed: ', (df.shape[0] - df_rem_outliers.shape[0]))
return df_rem_outliers
def knn_missings(df, n_ngb=3):
"""
    First selects the numeric columns of the dataframe and then imputes the NaN
    values through a KNN with 3 neighbors (configurable).
    Returns a copy of the dataframe with the imputed values; the original
    dataframe is left unchanged.
Params:
df = dataframe.
n_ngb = number of neighbors of KNN, by default 3.
"""
df_knn_msg = df.copy()
list_num_cols = num_columns(df_knn_msg)
imputer = KNNImputer(n_neighbors=n_ngb)
imputer.fit(df[list_num_cols])
df_knn_msg[list_num_cols] = imputer.transform(df_knn_msg[list_num_cols])
return df_knn_msg
def nlp_label_enc(df_encoder, cols, p_tf):
if len(cols) == 1:
df_encoder[cols] = LabelEncoder(
).fit_transform(df_encoder[cols])
print(cols, p_tf)
elif len(cols) > 1:
for i in cols:
df_encoder[i] = LabelEncoder().fit_transform(df_encoder[i])
print(i, p_tf)
return df_encoder
def nlp_binary_enc(df_encoder, cols, p_tf):
if len(cols) == 1:
if len(np.unique(df_encoder[cols])) > 2:
return 'Column has more than two values, cannot be transformed.'
else:
df_encoder[cols] = LabelBinarizer(
).fit_transform(df_encoder[cols])
print(cols, p_tf)
elif len(cols) > 1:
for i in cols:
if len(df_encoder[i].unique()) > 2:
print(i, ' has more than two values, cannot be transformed.')
else:
df_encoder[i] = LabelBinarizer(
).fit_transform(df_encoder[i])
print(i, p_tf)
return df_encoder
def nlp_ohe_enc(df_encoder, cols, p_tf):
ohenc = OneHotEncoder()
df_ohe = ohenc.fit_transform(df_encoder[cols]).toarray()
cols_ohe = ohenc.get_feature_names()
df_ohe = pd.DataFrame(df_ohe, columns=cols_ohe)
df_encoder.drop(columns=cols, inplace=True)
df_encoder = pd.merge(df_encoder, df_ohe,
left_index=True, right_index=True)
print(cols, p_tf)
return df_encoder
def nlp_dummies_enc(df_encoder, cols, p_tf):
df_dummies = pd.get_dummies(df_encoder[cols])
df_encoder.drop(columns=cols, inplace=True)
df_encoder = pd.merge(df_encoder, df_dummies,
left_index=True, right_index=True)
print(cols, p_tf)
return df_encoder
def nlp_encoder(df, cols, encoder):
"""
    This function compiles the most used encoders to have them all easily at hand.
    It uses scikit-learn and pandas tools and currently supports 4 encoders,
    which are selected by name.
    1. encoder = labelencoder:
        Pass the name(s) of the column(s) whose categories should be replaced
        by integer codes (works with multiclass columns).
    2. encoder = binary:
        Pass the name(s) of the column(s) to binarize. Each column must contain
        exactly 2 distinct values, which are converted to 0 and 1.
    3. encoder = onehotencoder:
        Pass the names of the columns to expand into one column per category.
        The function removes the original columns and appends the new
        "category columns" at the end.
    4. encoder = dummies:
        Similar to one-hot encoding: pass the names of the columns to expand into
        one column per category. The function removes the original columns and
        appends the new "category columns" at the end.
    Params:
    - df = dataframe.
    - cols = list of columns to transform (a single column name is also accepted).
    - encoder = string selecting the desired encoder:
        - labelencoder
        - binary
        - onehotencoder
        - dummies
"""
p_tf = ' has been transformed.'
df_encoder = df.copy()
encoder_list = ['labelencoder', 'binary', 'onehotencoder', 'dummies']
result_enc = any(elem in encoder for elem in encoder_list)
if not result_enc:
return print('Param encoder is wrong.')
if type(cols).__name__ == 'str':
cols = [cols]
list_num_cols = num_columns(df_encoder)
list_passed = list(df_encoder[cols].columns)
result_cols = any(elem in list_passed for elem in list_num_cols)
if result_cols:
return print('Columns passed are numeric please, '
'pass only categorical columns.')
if encoder == "labelencoder":
df_encoder = nlp_label_enc(df_encoder, cols, p_tf)
return df_encoder
elif encoder == 'binary':
df_encoder = nlp_binary_enc(df_encoder, cols, p_tf)
return df_encoder
elif encoder == "onehotencoder":
df_encoder = nlp_ohe_enc(df_encoder, cols, p_tf)
return df_encoder
elif encoder == "dummies":
df_encoder = nlp_dummies_enc(df_encoder, cols, p_tf)
return df_encoder
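# A minimal usage sketch (illustrative column names, not from the original module):
# chain the cleaning helpers before encoding the remaining categorical columns.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'age': [25, 30, None, 41],
        'income': [1200.0, 1500.0, 1300.0, 1800.0],
        'city': ['a', 'b', 'a', 'b'],
        'group': ['x', 'x', 'y', 'y'],
    })
    demo = rem_col_nan(demo, per_na=.5)   # drop columns that are mostly NaN
    demo = knn_missings(demo, n_ngb=2)    # impute the remaining numeric NaN via KNN
    demo = nlp_encoder(demo, cols=['city', 'group'], encoder='labelencoder')
    print(demo.head())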
|
import json
import boto3
from urllib import parse
s3 = boto3.client('s3')
def get_target_key(source_key):
### defining file key (prefix + filename)
city, year, month, day, query = ['']*5
for s in source_key.split('/'):
city = s if 'city' in s else city
year = s if 'year' in s else year
month = s if 'month' in s else month
day = s if 'day' in s else day
query = s.split('=')[-1] if 'query' in s else query
city = city.replace("ã", "a")
prefix = '/'.join(map(lambda x: x.split('=')[-1].zfill(2), ['exports/reports/weekly',year, month, day, city])) + '/'
if source_key[-3:] == 'csv':
name = '-'.join(map(lambda x: x.split('=')[-1].zfill(2), [year, month, day, 'fgv-bid', city, query+'.csv']))
elif source_key[-3:] == 'pdf':
name = source_key.split('/')[-1]
return prefix + name
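# Illustrative mapping (hypothetical object key, not from the original bucket layout):
#   'city=são paulo/year=2020/month=5/day=7/query=trips/report.csv'
# would be copied to
#   'exports/reports/weekly/2020/05/07/sao paulo/2020-05-07-fgv-bid-sao paulo-trips.csv'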
def lambda_handler(event, context):
source_bucket = event['Records'][0]['s3']['bucket']['name']
source_key = parse.unquote_plus(event['Records'][0]['s3']['object']['key'])
copy_source = {'Bucket': source_bucket, 'Key': source_key}
waiter = s3.get_waiter('object_exists')
waiter.wait(Bucket=source_bucket, Key=source_key)
target_bucket = 'bd-fgv-public'
target_key = get_target_key(source_key)
    ### copying files
s3.copy_object(Bucket=target_bucket, Key=target_key,
CopySource=copy_source, ACL='public-read')
|
import itertools
import random
import time
def num_matrix(rows, cols, steps=25):
nums = list(range(1, rows * cols)) + [0]
    goal = [ nums[i:i+cols] for i in range(0, len(nums), cols) ]  # chunk into rows of length `cols`
    puzzle = goal
    for _ in range(steps):
puzzle = random.choice(gen_kids(puzzle))
return puzzle, goal
def gen_kids(puzzle):
for row, level in enumerate(puzzle):
for column, item in enumerate(level):
if(item == 0):
kids = get_kids(row, column, puzzle)
return kids
def get_kids(row, column, puzzle):
kids = []
if(row > 0 ):
kids.append(swap(row - 1, column, row, column, puzzle))
if(row < 2):
kids.append(swap(row + 1, column, row, column, puzzle))
if(column > 0):
kids.append(swap(row, column - 1, row, column, puzzle))
if(column < 2):
kids.append(swap(row, column + 1, row, column, puzzle))
return kids
def swap(row, col, zrow, zcol, array):
import copy
s = copy.deepcopy(array)
s[zrow][zcol], s[row][col] = s[row][col], s[zrow][zcol]
return s
def dfs(puzzle, goal):
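    # Note: popping from the front of the list (pop(0)) makes this effectively a
    # breadth-first search over puzzle states, so the path returned is a shortest one.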
stack = []
visited = [puzzle]
stack.append([puzzle])
while stack:
path = stack.pop(0)
node = path[-1]
if node == goal:
return path
for adjacent in gen_kids(node):
if adjacent not in visited:
visited.append(adjacent)
new_path = list(path)
new_path.append(adjacent)
stack.append(new_path)
def id_dfs(puzzle, goal):
def idfs(path, depth):
if depth == 0:
return
if path[-1] == goal:
return path
for move in gen_kids(path[-1]):
if move not in path:
next_path = idfs(path + [move], depth - 1)
if next_path:
return next_path
for depth in itertools.count():
path = idfs([puzzle], depth)
if path:
return path
puzzle, goal = num_matrix(3,3, 30)
total_time = 0
print(goal)
print(puzzle)
t0 = time.time()
solution = dfs(puzzle, goal)
t1 = time.time()
total_time += t1 - t0
print('Puzzle solved in', total_time, 'seconds.')
for level in solution:
for item in level:
print(item)
print("\n")
print(goal)
print(puzzle)
total_time = 0
t0 = time.time()
solution = id_dfs(puzzle, goal)
t1 = time.time()
total_time += t1 - t0
print('Puzzle solved in', total_time, 'seconds.')
for level in solution:
for item in level:
print(item)
print("\n") |
#!/usr/bin/env python
# Python Network Programming Cookbook, Second Edition -- Chapter - 7
# This program is optimized for Python 3.5.2.
# To make it work with Python 2.7.12:
# Follow through the code inline for some changes.
# It may run on any other version with/without modifications.
import argparse
import xmlrpc.client
# Comment out the above line and uncomment the below line for Python 2.x.
#import xmlrpclib
from xmlrpc.server import SimpleXMLRPCServer
# Comment out the above line for Python 2.x.
def run_client(host, port, username, password):
server = xmlrpc.client.ServerProxy('http://%s:%s@%s:%s' %(username, password, host, port, ))
# Comment out the above line and uncomment the below line for Python 2.x.
#server = xmlrpclib.ServerProxy('http://%s:%s@%s:%s' %(username, password, host, port, ))
msg = "hello server..."
print ("Sending message to server: %s " %msg)
print ("Got reply: %s" %server.echo(msg))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Multithreaded multicall XMLRPC Server/Proxy')
parser.add_argument('--host', action="store", dest="host", default='localhost')
parser.add_argument('--port', action="store", dest="port", default=8000, type=int)
parser.add_argument('--username', action="store", dest="username", default='user')
parser.add_argument('--password', action="store", dest="password", default='pass')
# parse arguments
given_args = parser.parse_args()
host, port = given_args.host, given_args.port
username, password = given_args.username, given_args.password
run_client(host, port, username, password)
|
# Generated by Django 2.2.4 on 2019-09-02 17:13
from django.db import migrations
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [("djstripe", "0011_auto_20190828_0852")]
operations = [
migrations.AlterField(
model_name="paymentmethod",
name="card_present",
field=djstripe.fields.JSONField(
blank=True,
help_text="If this is an card_present PaymentMethod, this hash contains details about the Card Present payment method.",
null=True,
),
)
]
|
from flask import Flask
import logging
import os
from proxy_rss import spotify, twitter
logging.basicConfig(level=logging.WARNING)
application = Flask('proxy_rss')
application.register_blueprint(spotify.blueprint, url_prefix='/spotify')
application.register_blueprint(twitter.blueprint, url_prefix='/twitter')
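# A minimal sketch for local development (assumption: in production this module
# is served by a WSGI server such as gunicorn rather than run directly).
if __name__ == '__main__':
    application.run(host='127.0.0.1', port=5000)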
|
import inspect
try:
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm.exc import UnmappedClassError
except ImportError:
pass
from tgext.crud import CrudRestController
try:
from tgext.crud.utils import SortableTableBase as TableBase
except ImportError:
from sprox.tablebase import TableBase
try:
from tgext.crud.utils import RequestLocalTableFiller as TableFiller
except ImportError:
from sprox.fillerbase import TableFiller
from sprox.formbase import AddRecordForm, EditableForm
from sprox.fillerbase import RecordFiller, AddFormFiller
from sprox.providerselector import ProviderTypeSelector, ProviderTypeSelectorError
class CrudRestControllerConfig(object):
allow_only = None
defaultCrudRestController = CrudRestController
def _post_init(self):
#RecordFillerClass = type('RecordFillerClass', (RecordFiller,),{})
#AddFormFillerClass = type('AddFormFillerClass', (AddFormFiller,),{})
#this insanity is caused by some weird python scoping.
# see previous changesets for first attempts
TableBaseClass = type('TableBaseClass', (TableBase,), {})
        TableFillerClass = type('TableFillerClass', (TableFiller,), {})
EditableFormClass = type('EditableFormClass', (EditableForm,), {})
AddRecordFormClass = type('AddRecordFormClass', (AddRecordForm,),{})
if not hasattr(self, 'table_type'):
class Table(TableBaseClass):
__entity__=self.model
self.table_type = Table
if not hasattr(self, 'table_filler_type'):
class MyTableFiller(TableFillerClass):
__entity__ = self.model
self.table_filler_type = MyTableFiller
if not hasattr(self, 'edit_form_type'):
class EditForm(EditableFormClass):
__entity__ = self.model
self.edit_form_type = EditForm
if not hasattr(self, 'edit_filler_type'):
class EditFiller(RecordFiller):
__entity__ = self.model
self.edit_filler_type = EditFiller
if not hasattr(self, 'new_form_type'):
class NewForm(AddRecordFormClass):
__entity__ = self.model
self.new_form_type = NewForm
if not hasattr(self, 'new_filler_type'):
class NewFiller(AddFormFiller):
__entity__ = self.model
self.new_filler_type = NewFiller
def __init__(self, model, translations=None, default_to_dojo=True):
super(CrudRestControllerConfig, self).__init__()
self.model = model
self.default_to_dojo = default_to_dojo
self._do_init_with_translations(translations)
self._post_init()
def _do_init_with_translations(self, translations):
pass
provider_type_selector = ProviderTypeSelector()
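# AdminConfig discovers model classes (from a module or an explicit list),
# keeps only those a sprox provider can handle, and maps each one to a
# CrudRestControllerConfig used to build its admin controller.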
class AdminConfig(object):
DefaultControllerConfig = CrudRestControllerConfig
default_index_template = None
allow_only = None
include_left_menu = True
default_to_dojo = True
def __init__(self, models, translations=None):
if translations is None:
translations = {}
if inspect.ismodule(models):
models = [getattr(models, model) for model in dir(models) if inspect.isclass(getattr(models, model))]
#purge all non-model objects
try_models = models
models = {}
for model in try_models:
try:
provider_type_selector.get_selector(model)
models[model.__name__.lower()] = model
except ProviderTypeSelectorError:
continue
self.models = models
self.translations = translations
self.index_template = self.default_index_template
def lookup_controller_config(self, model_name):
model_name_lower = model_name.lower()
if hasattr(self, model_name_lower):
return getattr(self, model_name_lower)(self.models[model_name], self.translations, self.default_to_dojo)
return self.DefaultControllerConfig(self.models[model_name], self.translations, self.default_to_dojo)
|
class Solution(object):
    def getNum(self, t):
        # A two-character chunk decodes to a letter only for "10".."26".
        if 10 <= int(t) <= 26:
            return 1
        return 0
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
        if not s:
            return 0
        s = str(s)
        if s[0] == '0':
            return 0
        n = len(s)
        dp = [1 for _ in range(n + 1)]
        for i in range(2, len(dp)):
            # dp[i] counts decodings of s[:i]; a single digit contributes only
            # if it is not '0', a two-digit chunk only if it is in 10..26.
            one = dp[i - 1] if s[i - 1] != '0' else 0
            dp[i] = one + dp[i - 2] * self.getNum(s[i - 2:i])
        return dp[-1]
a = Solution()
print(a.numDecodings('12'))  # '12' decodes as "AB" or "L" -> 2 |
import cerberus
from random import randint
import pytest
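# The tests below assume a `client_json` fixture (provided elsewhere, e.g. in a
# conftest.py) whose get_resourses()/post_data() helpers wrap HTTP calls to a
# JSONPlaceholder-style REST API.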
class TestJsonApi():
def test_get_posts(self, client_json):
num = randint(1,100)
res = client_json.get_resourses(path=f'/posts/{num}')
schema = {
"id": {"type": "number"},
"userId": {"type": "number"},
"title": {"type": "string"},
"body": {"type": "string"}
}
v = cerberus.Validator()
assert v.validate(res.json(), schema)
@pytest.mark.parametrize('input_id, output_id',
[(23, '23'),
(12, '12'),
(6, '6')])
@pytest.mark.parametrize('input_title, output_title',
[('title', 'title'),
('Cool', 'Cool'),
('Mool', 'Mool'),
('Diz', 'Diz')])
def test_api_post_request(self, client_json, input_id, output_id, input_title, output_title):
res = client_json.post_data(
path="/posts",
data={'title': input_title, 'body': 'Some body about body', 'userId': input_id})
res_json = res.json()
assert res_json['title'] == output_title
assert res_json['body'] == 'Some body about body'
assert res_json['userId'] == output_id
@pytest.mark.parametrize('userId', [1,2,3,4,5,6,7])
def test_get_user_by_id(self, client_json, userId):
res = client_json.get_resourses(path='/posts',
params={'userId':userId})
        assert res.json() != [], 'Service returned an empty response'
@pytest.mark.parametrize('userId', [13, 'r', 34, 'y'])
    def test_get_user_by_invalid_id(self, client_json, userId):
res = client_json.get_resourses(path='/posts',
params={'userId':userId})
        assert res.json() == [], 'Filter error'
@pytest.mark.parametrize('postId', [1,2,3,4,5,6,7,8,9])
def test_get_comments_by_post_id(self, client_json, postId):
res = client_json.get_resourses(path='/comments',
params={'postId':postId})
        assert res.json() != [], 'Filter error'
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import coin
from .litecoin import Litecoin
from .. import util
# https://github.com/FeatherCoin/Feathercoin
__all__ = ['Feathercoin']
class Feathercoin(Litecoin):
name = "feathercoin"
# see: https://github.com/FeatherCoin/Feathercoin/blob/master-0.8/src/main.cpp#L1075
@staticmethod
def block_creation_fee(block):
subsidy = 200
if block.height >= 204639: # nForkThree = 204639
subsidy = 80
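        # The right shift halves the reward every 210000 blocks; the height
        # offset follows the reference implementation linked above.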
return (subsidy * 100000000) >> ((block.height + 306960) // 210000)
symbols = ['FTC']
symbol = symbols[0]
dns_seeds = [
("dnsseed.feathercoin.com", 9336),
("dnsseed.alltheco.in", 9336),
("dnsseed.btcltcftc.com", 9336),
("dnsseed.fc.altcointech.net", 9336),
]
port = 9336
rpc_port = 9337
genesis_version = 1
genesis_block_hash = 'e2bf047e7e5a191aa4ef34d314979dc9986e0f19251edaba5940fd1fe365a712'
genesis_merkle_root = 'd9ced4ed1130f7b7faad9be25323ffafa33232a17c3edf6cfd97bee6bafbdd97'
genesis_timestamp = 1317972665
genesis_bits = 504365040
genesis_nonce = 2084524493
magic = '\xfb\xc0\xb6\xdb'
    address_version = chr(14)
block_height_guess = [
('explorer.feathercoin.com', util.fetch_url_int('http://explorer.feathercoin.com/chain/Feathercoin/q/getblockcount')),
]
|
#-------------------------------------------------------------------------------
# Name: OSTrICa - Open Source Threat Intelligence Collector - DomainBigData plugin
# Purpose: Collection and visualization of Threat Intelligence data
#
# Author: Roberto Sponchioni - <[email protected]> @Ptr32Void
#
# Created: 20/12/2015
# Licence: This file is part of OSTrICa.
#
# OSTrICa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OSTrICa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OSTrICa. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import sys
import httplib
import string
import socket
import gzip
import re
import StringIO
from bs4 import BeautifulSoup
from ostrica.utilities.cfg import Config as cfg
extraction_type = [cfg.intelligence_type['domain'], cfg.intelligence_type['email']]
enabled = True
version = 0.1
developer = 'Roberto Sponchioni <[email protected]>'
description = 'Plugin used to collect information about domains or emails on DomainBigData'
visual_data = True
class DomainBigData:
host = "domainbigdata.com"
def __init__(self):
self.intelligence = {}
self.index_value = ''
self.intelligence_list = []
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup DomainBigData...'
self.intelligence = {}
def email_information(self, email):
query = '/email/%s' % (email)
hhandle = httplib.HTTPConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
if response.getheader('Content-Encoding') == 'gzip':
content = StringIO.StringIO(response.read())
server_response = gzip.GzipFile(fileobj=content).read()
self.collect_email_intelligence(server_response)
return True
else:
return False
else:
return False
def collect_email_intelligence(self, server_response):
soup = BeautifulSoup(server_response, 'html.parser')
associated_sites = soup.findAll('table', {'class':'t1'})
if len(associated_sites) == 1:
self.extract_associated_sites(associated_sites[0].tbody)
def extract_associated_sites(self, soup):
associated_sites = []
idx = 0
related_sites = soup.findAll('td')
for site in related_sites:
if idx == 0:
associated_site = site.get_text()
idx += 1
continue
elif idx == 1:
creation_date = site.get_text()
idx += 1
continue
elif idx == 2:
registrar = site.get_text()
idx = 0
associated_sites.append({'associated_site':associated_site, 'creation_date':creation_date, 'registrar':registrar})
continue
self.intelligence['associated_sites'] = associated_sites
def domain_information(self, domain):
query = '/%s' % (domain)
hhandle = httplib.HTTPConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
if response.getheader('Content-Encoding') == 'gzip':
content = StringIO.StringIO(response.read())
server_response = gzip.GzipFile(fileobj=content).read()
self.collect_domain_intelligence(server_response)
return True
else:
return False
else:
return False
def collect_domain_intelligence(self, server_response):
soup = BeautifulSoup(server_response, 'html.parser')
records = soup.findAll('div', {'id':'divDNSRecords'})
if len(records) == 1:
dns_records = records[0].findAll('table', {'class':'t1'})
self.extract_associated_records(dns_records)
records = soup.findAll('div', {'id':'divListOtherTLD'})
if len(records) == 1:
tdls = []
other_tdls = records[0].findAll('a')
for tdl in other_tdls:
tdls.append(tdl.string)
self.intelligence['other_tdls'] = tdls
records = soup.findAll('div', {'id':'MainMaster_divRegistrantIDCard'})
if len(records) == 1:
self.collect_registrant_information(records[0])
def collect_registrant_information(self, soup):
registrant_organization = ''
registrant_email = ''
registrant_name = ''
registrant_city = ''
registrant_country = ''
registrant_phone = ''
organization_soup = soup.findAll('tr', {'id':'MainMaster_trRegistrantOrganization'})
email_soup = soup.findAll('tr', {'id':'trRegistrantEmail'})
name_soup = soup.findAll('tr', {'id':'trRegistrantName'})
city_soup = soup.findAll('tr', {'id':'trRegistrantCity'})
country_soup = soup.findAll('tr', {'id':'trRegistrantCountry'})
phone_soup = soup.findAll('tr', {'id':'trRegistrantTel'})
if len(organization_soup) == 1:
registrant_organization = self.extract_information_from_registrant(organization_soup[0])
if len(email_soup) == 1:
registrant_email = self.extract_information_from_registrant(email_soup[0])
if len(name_soup) == 1:
registrant_name = self.extract_information_from_registrant(name_soup[0])
if len(city_soup) == 1:
registrant_city = self.extract_information_from_registrant(city_soup[0])
if len(country_soup) == 1:
registrant_country = self.extract_information_from_registrant(country_soup[0])
if len(phone_soup) == 1:
registrant_phone = self.extract_information_from_registrant(phone_soup[0])
self.intelligence['organization'] = registrant_organization
self.intelligence['email'] = registrant_email
self.intelligence['registrant_name'] = registrant_name
self.intelligence['registrant_city'] = registrant_city
self.intelligence['registrant_country'] = registrant_country
self.intelligence['registrant_phone'] = registrant_phone
def extract_information_from_registrant(self, soup):
soup = soup.findAll('td')
if len(soup) == 3:
soup_img = soup[1].findAll('img')
if len(soup_img) == 1:
return soup[1].contents[1]
else:
return soup[1].string
elif len(soup) == 2:
return soup[1].string
return ''
def extract_associated_records(self, soups):
associated_records = []
for soup in soups:
all_trs = soup.findAll('tr')
self.extract_trs(all_trs)
self.intelligence[self.index_value] = self.intelligence_list
self.intelligence_list = []
def extract_trs(self, soup):
for tr in soup:
self.extract_tds(tr)
def extract_tds(self, soup):
idx = True # idx flags the type of record that will be added in the dictionary if True
record_list = []
for td in soup:
if idx and td.get_text() not in self.intelligence.keys():
self.index_value = td.get_text()
self.intelligence[self.index_value] = ''
idx = False
record_list.append(td.get_text())
self.intelligence_list.append(record_list)
def related_domains_information(self, domain):
query = '/name/%s' % (domain)
hhandle = httplib.HTTPConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
if response.getheader('Content-Encoding') == 'gzip':
content = StringIO.StringIO(response.read())
server_response = gzip.GzipFile(fileobj=content).read()
self.collect_domain_related_intelligence(server_response)
return True
else:
return False
else:
return False
def collect_domain_related_intelligence(self, server_response):
soup = BeautifulSoup(server_response, 'html.parser')
associated_sites = soup.findAll('table', {'class':'t1'})
if len(associated_sites) == 1:
self.extract_associated_sites(associated_sites[0].tbody)
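    # NOTE: this redefinition replaces the extract_associated_sites method defined
    # earlier in the class, so every caller stores results under the key set here.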
def extract_associated_sites(self, soup):
associated_sites = []
idx = 0
related_sites = soup.findAll('td')
for site in related_sites:
if idx == 0:
associated_site = site.get_text()
idx += 1
continue
elif idx == 1:
creation_date = site.get_text()
idx += 1
continue
elif idx == 2:
registrar = site.get_text()
idx = 0
associated_sites.append({'associated_site':associated_site, 'creation_date':creation_date, 'registrar':registrar})
continue
self.intelligence['possible_associated_sites'] = associated_sites
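# Module-level entry point: dispatches on the requested intelligence type and
# returns the collected data wrapped by extracted_information().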
def run(intelligence, extraction_type):
if cfg.DEBUG:
print 'Running DomainBigData() on %s' % intelligence
intel_collector = DomainBigData()
if extraction_type == cfg.intelligence_type['email']:
if intel_collector.email_information(intelligence.replace('www.', '')) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
elif extraction_type == cfg.intelligence_type['domain']:
if (intel_collector.related_domains_information(intelligence.replace('www.', '')) == True or
intel_collector.domain_information(intelligence.replace('www.', '')) == True):
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
else:
return {}
def extracted_information(extraction_type, intelligence_dictionary):
return {'extraction_type': extraction_type, 'intelligence_information':intelligence_dictionary}
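# Graph-building helpers: data_visualization() merges this plugin's results into
# the shared node/edge dictionaries used for the visual report.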
def data_visualization(nodes, edges, json_data):
if json_data['plugin_name'] == 'DomainBigData':
visual_report = DomainBigDataVisual(nodes, edges, json_data)
return visual_report.nodes, visual_report.edges
else:
return nodes, edges
class DomainBigDataVisual:
def __init__(self, ext_nodes, ext_edges, intel):
self.nodes = ext_nodes
self.edges = ext_edges
self.json_data = intel
self.visual_report_dictionary = {}
self.origin = ''
self.color = '#000099'
if self.parse_intelligence() != False:
self.parse_visual_data()
def parse_intelligence(self):
associated_domain = []
other_tdls = []
email = ''
if self.json_data['intelligence'] is None:
return False
if 'possible_associated_sites' in self.json_data['intelligence']['intelligence_information']:
associated_domains = self.json_data['intelligence']['intelligence_information']['possible_associated_sites']
for domain in associated_domains:
associated_domain.append(domain['associated_site'])
else:
associated_domains = ''
if 'other_tdls' in self.json_data['intelligence']['intelligence_information'].keys():
for domain in self.json_data['intelligence']['intelligence_information']['other_tdls']:
other_tdls.append(domain)
if 'email' in self.json_data['intelligence']['intelligence_information'].keys():
email = self.json_data['intelligence']['intelligence_information']['email']
if self.json_data['requested_intel'] not in self.visual_report_dictionary.keys():
self.visual_report_dictionary[self.json_data['requested_intel']] = {'DomainBigData':
[{'associated_domain': associated_domain}, {'other_tdls': other_tdls},
{'email': email}]}
else:
self.visual_report_dictionary[self.json_data['requested_intel']].update({'DomainBigData':
[{'associated_domain': associated_domain}, {'other_tdls': other_tdls},
{'email': email}]})
self.origin = self.json_data['requested_intel']
if self.origin not in self.edges.keys():
self.edges.setdefault(self.origin, [])
def parse_visual_data(self):
for intel in self.visual_report_dictionary[self.origin]['DomainBigData']:
for key, value in intel.iteritems():
if key == 'associated_domain':
self._manage_bigdata_associated_domains(value)
elif key == 'other_tdls':
self._manage_bigdata_other_tdls(value)
elif key == 'email':
self._manage_bigdata_email(value)
def _manage_bigdata_associated_domains(self, domains):
size = 30
for domain in domains:
if domain in self.nodes.keys():
self.nodes[domain] = (self.nodes[domain][0] + 5, self.nodes[domain][1], self.nodes[domain][2])
else:
self.nodes[domain] = (size, self.color, 'associated domain')
if domain not in self.edges[self.origin]:
self.edges[self.origin].append(domain)
def _manage_bigdata_other_tdls(self, domains):
size = 30
for domain in domains:
if domain in self.nodes.keys():
self.nodes[domain] = (self.nodes[domain][0] + 5, self.nodes[domain][1], self.nodes[domain][2])
else:
self.nodes[domain] = (size, self.color, 'other TDL')
if domain not in self.edges[self.origin]:
self.edges[self.origin].append(domain)
def _manage_bigdata_email(self, email):
size = 30
if email in self.nodes.keys():
self.nodes[email] = (self.nodes[email][0] + 5, self.nodes[email][1], self.nodes[email][2])
else:
self.nodes[email] = (size, self.color, 'email')
if email not in self.edges[self.origin]:
self.edges[self.origin].append(email) |